def test_regular_array2rgbx_cordermask_from_cmasked_slices(self):
    d = rt.regular_array2rgbx(self.data_masked[0:1, ...])
    nt.assert_is_instance(d, np.ma.MaskedArray)
    nt.assert_true(d.flags['C_CONTIGUOUS'])
    d = rt.regular_array2rgbx(self.data_masked[:, 0:1, :])
    nt.assert_is_instance(d, np.ma.MaskedArray)
    nt.assert_true(d.flags['C_CONTIGUOUS'])

def test_regular_array2rgbx_cordermask_from_cmasked_slices(self):
    d = rt.regular_array2rgbx(self.data_masked[0:1, ...])
    assert isinstance(d, np.ma.MaskedArray)
    assert d.flags['C_CONTIGUOUS']
    d = rt.regular_array2rgbx(self.data_masked[:, 0:1, :])
    assert isinstance(d, np.ma.MaskedArray)
    assert d.flags['C_CONTIGUOUS']

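# --- Hedged fixture sketch (not part of the snippets above) -------------------
# The tests above rely on fixtures named self.data_c, self.data_f and
# self.data_masked that are defined elsewhere in their test class. A minimal
# setup that would satisfy them could look like the following; the class name,
# the import path for rgb_tools and the array shape are assumptions.
import numpy as np
import numpy.ma as ma

from hyperspy.misc import rgb_tools as rt  # assumed import path


class TestRegularArray2RGBX:

    def setup_method(self, method):
        # C-ordered (height, width, 3) uint8 array, the layout that
        # regular_array2rgbx expects for RGB data.
        self.data_c = np.arange(2 * 2 * 3, dtype='uint8').reshape((2, 2, 3))
        # Fortran-ordered copy of the same data.
        self.data_f = np.asfortranarray(self.data_c)
        # Masked variant with a single pixel masked out.
        self.data_masked = ma.masked_array(
            self.data_c, mask=np.zeros_like(self.data_c, dtype=bool))
        self.data_masked.mask[0, 0, :] = True
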
def file_reader(filename, **kwds):
    '''Read data from any format supported by PIL.

    Parameters
    ----------
    filename: str

    '''
    dc = imread(filename)
    if len(dc.shape) > 2:
        # It may be a grayscale image that was saved in the RGB or RGBA
        # format: in that case all colour channels are identical.
        if (dc[:, :, 0] == dc[:, :, 1]).all() and \
                (dc[:, :, 1] == dc[:, :, 2]).all():
            dc = dc[:, :, 0]
        else:
            dc = regular_array2rgbx(dc)
    return [{'data': dc,
             'metadata':
             {'General': {'original_filename': os.path.split(filename)[1]},
              "Signal": {'signal_type': "",
                         'record_by': 'image', },
              }
             }]

def file_reader(filename, **kwds):
    '''Read data from any format supported by PIL.

    Parameters
    ----------
    filename: str

    '''
    dc = imread(filename)
    if len(dc.shape) > 2:
        # It may be a grayscale image that was saved in the RGB or RGBA
        # format: in that case all colour channels are identical.
        if (dc[:, :, 0] == dc[:, :, 1]).all() and \
                (dc[:, :, 1] == dc[:, :, 2]).all():
            dc = dc[:, :, 0]
        else:
            dc = regular_array2rgbx(dc)
    return [{
        'data': dc,
        'metadata': {
            'General': {
                'original_filename': os.path.split(filename)[1]
            },
            "Signal": {
                'signal_type': "",
                'record_by': 'image',
            },
        }
    }]

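# --- Hedged usage sketch ------------------------------------------------------
# How this PIL-based reader might be called; the import path and the file name
# are assumptions used only for illustration.
from hyperspy.io_plugins.image import file_reader  # assumed module path

dictionaries = file_reader("example.png")
data = dictionaries[0]['data']  # ndarray, possibly converted to an rgbx dtype
original_name = dictionaries[0]['metadata']['General']['original_filename']
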
def _on_update(self, histogram, rois):
    if histogram not in self.map:
        return
    source, s_out = self.map[histogram]
    N = len(rois)
    data = source()
    gray = self._make_gray(data)
    s_out.data = regular_array2rgbx(gray)
    for i in range(N):
        color = (255 * plt_cm.hsv([float(i) / max(N, 10)])).astype('uint8')
        color = regular_array2rgbx(color)
        r = rois[i]
        mask = (data < r.right) & (data >= r.left)
        s_out.data[mask] = color
    s_out.update_plot()

def _load_data(serie, is_rgb, sl=None, memmap=None, **kwds):
    dc = serie.asarray(out=memmap)
    _logger.debug("data shape: {0}".format(dc.shape))
    if is_rgb:
        dc = rgb_tools.regular_array2rgbx(dc)
    if sl is not None:
        dc = dc[tuple(sl)]
    return dc

def _load_data(TF, filename, is_rgb, sl=None, memmap=None, **kwds):
    with TF(filename, **kwds) as tiff:
        dc = tiff.asarray(out=memmap)
        _logger.debug("data shape: {0}".format(dc.shape))
        if is_rgb:
            dc = rgb_tools.regular_array2rgbx(dc)
        if sl is not None:
            dc = dc[tuple(sl)]
        return dc

def _load_data(TF, filename, is_rgb, sl=None, memmap=False, **kwds):
    with TF(filename, **kwds) as tiff:
        dc = tiff.asarray(memmap=memmap)
        _logger.debug("data shape: {0}".format(dc.shape))
        if is_rgb:
            dc = rgb_tools.regular_array2rgbx(dc)
        if sl is not None:
            dc = dc[sl]
        return dc

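# --- Hedged call sketch -------------------------------------------------------
# The _load_data variants above differ mainly in whether an already-open series
# or a tifffile handle plus file name is passed in, and in how memory mapping
# is requested. A sketch of calling the (TF, filename, ...) form; "stack.tif"
# and the use of tifffile.TiffFile for TF are assumptions.
import tifffile

# Keep only the first ten planes of a 3D stack, full frames otherwise.
sl = (slice(0, 10), slice(None), slice(None))
dc = _load_data(tifffile.TiffFile, "stack.tif", is_rgb=False, sl=sl)
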
def _read_data(filename):
    dc = imread(filename)
    if len(dc.shape) > 2:
        # It may be a grayscale image that was saved in the RGB or RGBA
        # format: in that case all colour channels are identical.
        if (dc[:, :, 0] == dc[:, :, 1]).all() and \
                (dc[:, :, 1] == dc[:, :, 2]).all():
            dc = dc[:, :, 0]
        else:
            dc = rgb_tools.regular_array2rgbx(dc)
    return dc

def file_reader(filename, record_by='image', **kwds):
    '''Read data from tif files using Christoph Gohlke's tifffile library

    Parameters
    ----------
    filename: str
    record_by: {'image'}
        Has no effect because this format only supports recording by
        image.

    '''
    with TiffFile(filename, **kwds) as tiff:
        dc = tiff.asarray()
        axes = tiff.series[0]['axes']
        if tiff.is_rgb:
            dc = rgb_tools.regular_array2rgbx(dc)
            axes = axes[:-1]
        names = [axes_label_codes[axis] for axis in axes]
        axes = [{'size': size,
                 'name': unicode(name),
                 #'scale': scales[i],
                 #'offset' : origins[i],
                 #'units' : unicode(units[i]),
                 }
                for size, name in zip(dc.shape, names)]
        op = {}
        for key, tag in tiff[0].tags.iteritems():
            op[key] = tag.value
        return [{'data': dc,
                 'original_metadata': op,
                 'metadata': {
                     'General': {
                         'original_filename': os.path.split(filename)[1]},
                     "Signal": {'signal_type': "",
                                'record_by': "image", },
                 },
                 }]

def file_reader(filename, record_by='image', **kwds): """Read data from tif files using Christoph Gohlke's tifffile library Parameters ---------- filename: str record_by: {'image'} Has no effect because this format only supports recording by image. """ with TiffFile(filename, **kwds) as tiff: dc = tiff.asarray() axes = tiff.series[0]['axes'] if tiff.is_rgb: dc = rgb_tools.regular_array2rgbx(dc) axes = axes[:-1] op = {} names = [axes_label_codes[axis] for axis in axes] axes = [{'size': size, 'name': unicode(name), #'scale': scales[i], #'offset' : origins[i], #'units' : unicode(units[i]), } for size, name in zip(dc.shape, names)] op = {} for key, tag in tiff[0].tags.iteritems(): op[key] = tag.value return [ { 'data': dc, 'original_metadata': op, 'metadata': { 'General': { 'original_filename': os.path.split(filename)[1]}, "Signal": { 'signal_type': "", 'record_by': "image", }, }, }]
def _read_tiff(self):
    def xml_element_to_dict(element):
        dict = {}
        if len(element) == 0:
            if len(element.items()) > 0:
                dict[element.tag] = {'value': element.text}
                for attrib, value in element.items():
                    dict[element.tag].update({attrib: value})
            else:
                dict[element.tag] = element.text
        else:
            dict[element.tag] = {}
            for child in element:
                dict[element.tag].update(xml_element_to_dict(child))
        return dict

    def make_metadata_dict(xml):
        dict = xml_element_to_dict(ET.fromstring(xml))
        return dict['FeiImage'] if dict else {}

    n = self._read_uint32()
    if n == 0:
        return (None, None)
    bytes = io.BytesIO(self._read(n))
    with tifffile.TiffFile(bytes) as tiff:
        data = tiff.asarray()
        if len(data.shape) > 2:
            data = rgb_tools.regular_array2rgbx(data)
        tags = tiff.pages[0].tags
        if 'FEI_TITAN' in tags:
            metadata = make_metadata_dict(tags['FEI_TITAN'].value)
            metadata['acquisition']['scan']['fieldSize'] = max(
                self._get_value_with_unit(metadata['pixelHeight']) *
                data.shape[0],
                self._get_value_with_unit(metadata['pixelWidth']) *
                data.shape[1])
        else:
            metadata = {}
    return (metadata, data)

def test_regular_array2rgbx_cordermask_from_cmasked(self):
    d = rt.regular_array2rgbx(self.data_masked)
    assert isinstance(d, np.ma.MaskedArray)
    assert d.flags['C_CONTIGUOUS']

def test_regular_array2rgbx_corder_from_c_slices(self):
    d = rt.regular_array2rgbx(self.data_c[0:1, ...])
    assert d.flags['C_CONTIGUOUS']
    d = rt.regular_array2rgbx(self.data_c[:, 0:1, :])
    assert d.flags['C_CONTIGUOUS']

def test_regular_array2rgbx_corder_from_f(self):
    d = rt.regular_array2rgbx(self.data_f)
    assert d.flags['C_CONTIGUOUS']

def test_regular_array2rgbx_corder_from_c(self):
    d = rt.regular_array2rgbx(self.data_c)
    assert d.flags['C_CONTIGUOUS']

def test_regular_array2rgbx_corder_from_c_slices(self):
    d = rt.regular_array2rgbx(self.data_c[0:1, ...])
    nt.assert_true(d.flags['C_CONTIGUOUS'])
    d = rt.regular_array2rgbx(self.data_c[:, 0:1, :])
    nt.assert_true(d.flags['C_CONTIGUOUS'])

def test_regular_array2rgbx_corder_from_f(self):
    d = rt.regular_array2rgbx(self.data_f)
    nt.assert_true(d.flags['C_CONTIGUOUS'])

def test_regular_array2rgbx_cordermask_from_cmasked(self):
    d = rt.regular_array2rgbx(self.data_masked)
    nt.assert_is_instance(d, np.ma.MaskedArray)
    nt.assert_true(d.flags["C_CONTIGUOUS"])

def file_reader(filename, record_by='image', force_read_resolution=False,
                **kwds):
    """
    Read data from tif files using Christoph Gohlke's tifffile library.
    The units and the scale of images saved with ImageJ or Digital
    Micrograph are read. There is limited support for reading the scale of
    files created with Zeiss and FEI SEMs.

    Parameters
    ----------
    filename: str
    record_by: {'image'}
        Has no effect because this format only supports recording by
        image.
    force_read_resolution: Bool
        Default: False.
        Force reading the x_resolution, y_resolution and the
        resolution_unit of the tiff tags.
        See http://www.awaresystems.be/imaging/tiff/tifftags/resolutionunit.html
    **kwds, optional
    """
    _logger.debug('************* Loading *************')
    # For testing the use of local and skimage tifffile library
    import_local_tifffile = False
    if 'import_local_tifffile' in kwds.keys():
        import_local_tifffile = kwds.pop('import_local_tifffile')
    imsave, TiffFile = _import_tifffile_library(import_local_tifffile)
    with TiffFile(filename, **kwds) as tiff:
        dc = tiff.asarray()
        # change in the Tifffiles API
        if hasattr(tiff.series[0], 'axes'):
            # in newer versions 'axes' is an attribute
            axes = tiff.series[0].axes
        else:
            # old version
            axes = tiff.series[0]['axes']
        _logger.debug("Is RGB: %s" % tiff.is_rgb)
        if tiff.is_rgb:
            dc = rgb_tools.regular_array2rgbx(dc)
            axes = axes[:-1]
        op = {}
        for key, tag in tiff[0].tags.items():
            op[key] = tag.value
        names = [axes_label_codes[axis] for axis in axes]
        _logger.debug('Tiff tags list: %s' % op.keys())
        _logger.debug("Photometric: %s" % op['photometric'])
        _logger.debug('is_imagej: {}'.format(tiff[0].is_imagej))
        _logger.debug("data shape: {0}".format(dc.shape))
        # workaround for 'palette' photometric, keep only 'X' and 'Y' axes
        if op['photometric'] == 3:
            sl = [0] * dc.ndim
            names = []
            for i, axis in enumerate(axes):
                if axis == 'X' or axis == 'Y':
                    sl[i] = slice(None)
                    names.append(axes_label_codes[axis])
                else:
                    axes.replace(axis, '')
            dc = dc[sl]
        _logger.debug("names: {0}".format(names))
        scales = [1.0] * len(names)
        offsets = [0.0] * len(names)
        units = [t.Undefined] * len(names)
        try:
            scales_d, units_d, offsets_d = \
                _parse_scale_unit(tiff, op, dc, force_read_resolution)
            for i, name in enumerate(names):
                if name == 'height':
                    scales[i], units[i] = scales_d['x'], units_d['x']
                    offsets[i] = offsets_d['x']
                elif name == 'width':
                    scales[i], units[i] = scales_d['y'], units_d['y']
                    offsets[i] = offsets_d['y']
                elif name in ['depth', 'image series', 'time']:
                    scales[i], units[i] = scales_d['z'], units_d['z']
                    offsets[i] = offsets_d['z']
        except:
            _logger.info("Scale and units could not be imported")
        axes = [{'size': size,
                 'name': str(name),
                 'scale': scale,
                 'offset': offset,
                 'units': unit,
                 }
                for size, name, scale, offset, unit in zip(dc.shape, names,
                                                           scales, offsets,
                                                           units)]
        return [{'data': dc,
                 'original_metadata': op,
                 'axes': axes,
                 'metadata': {'General': {'original_filename':
                                          os.path.split(filename)[1]},
                              'Signal': {'signal_type': "",
                                         'record_by': "image",
                                         },
                              },
                 }]

def file_reader(filename, record_by='image', force_read_resolution=False,
                **kwds):
    """
    Read data from tif files using Christoph Gohlke's tifffile library.
    The units and the scale of images saved with ImageJ or Digital
    Micrograph are read. There is limited support for reading the scale of
    files created with Zeiss and FEI SEMs.

    Parameters
    ----------
    filename: str
    record_by: {'image'}
        Has no effect because this format only supports recording by
        image.
    force_read_resolution: Bool
        Default: False.
        Force reading the x_resolution, y_resolution and the
        resolution_unit of the tiff tags.
        See http://www.awaresystems.be/imaging/tiff/tifftags/resolutionunit.html
    **kwds, optional
    """
    _logger.debug('************* Loading *************')
    # For testing the use of local and skimage tifffile library
    import_local_tifffile = False
    if 'import_local_tifffile' in kwds.keys():
        import_local_tifffile = kwds.pop('import_local_tifffile')
    imsave, TiffFile = _import_tifffile_library(import_local_tifffile)
    with TiffFile(filename, **kwds) as tiff:
        dc = tiff.asarray()
        # change in the Tifffiles API
        if hasattr(tiff.series[0], 'axes'):
            # in newer versions 'axes' is an attribute
            axes = tiff.series[0].axes
        else:
            # old version
            axes = tiff.series[0]['axes']
        _logger.debug("Is RGB: %s" % tiff.is_rgb)
        if tiff.is_rgb:
            dc = rgb_tools.regular_array2rgbx(dc)
            axes = axes[:-1]
        op = {}
        for key, tag in tiff[0].tags.items():
            op[key] = tag.value
        names = [axes_label_codes[axis] for axis in axes]
        _logger.debug('Tiff tags list: %s' % op.keys())
        _logger.debug("Photometric: %s" % op['photometric'])
        _logger.debug('is_imagej: {}'.format(tiff[0].is_imagej))
        _logger.debug("data shape: {0}".format(dc.shape))
        # workaround for 'palette' photometric, keep only 'X' and 'Y' axes
        if op['photometric'] == 3:
            sl = [0] * dc.ndim
            names = []
            for i, axis in enumerate(axes):
                if axis == 'X' or axis == 'Y':
                    sl[i] = slice(None)
                    names.append(axes_label_codes[axis])
                else:
                    axes.replace(axis, '')
            dc = dc[sl]
        _logger.debug("names: {0}".format(names))
        scales = [1.0] * len(names)
        offsets = [0.0] * len(names)
        units = [t.Undefined] * len(names)
        try:
            scales_d, units_d, offsets_d = \
                _parse_scale_unit(tiff, op, dc, force_read_resolution)
            for i, name in enumerate(names):
                if name == 'height':
                    scales[i], units[i] = scales_d['x'], units_d['x']
                    offsets[i] = offsets_d['x']
                elif name == 'width':
                    scales[i], units[i] = scales_d['y'], units_d['y']
                    offsets[i] = offsets_d['y']
                elif name in ['depth', 'image series', 'time']:
                    scales[i], units[i] = scales_d['z'], units_d['z']
                    offsets[i] = offsets_d['z']
        except:
            _logger.info("Scale and units could not be imported")
        axes = [{
            'size': size,
            'name': str(name),
            'scale': scale,
            'offset': offset,
            'units': unit,
        } for size, name, scale, offset, unit in zip(
            dc.shape, names, scales, offsets, units)]
        return [{
            'data': dc,
            'original_metadata': op,
            'axes': axes,
            'metadata': {
                'General': {
                    'original_filename': os.path.split(filename)[1]
                },
                'Signal': {
                    'signal_type': "",
                    'record_by': "image",
                },
            },
        }]

def test_regular_array2rgbx_corder_from_c(self):
    d = rt.regular_array2rgbx(self.data_c)
    nt.assert_true(d.flags['C_CONTIGUOUS'])

def file_reader(filename, record_by='image', **kwds):
    """
    Read data from tif files using Christoph Gohlke's tifffile library.
    The units and the scale of images saved with ImageJ or Digital
    Micrograph are read. There is limited support for reading the scale of
    files created with Zeiss and FEI SEMs.

    Parameters
    ----------
    filename: str
    record_by: {'image'}
        Has no effect because this format only supports recording by
        image.
    force_read_resolution: Bool
        Default: False.
        Force reading the x_resolution, y_resolution and the
        resolution_unit of the tiff tags.
        See http://www.awaresystems.be/imaging/tiff/tifftags/resolutionunit.html
    """
    force_read_resolution = False
    if 'force_read_resolution' in kwds.keys():
        force_read_resolution = kwds.pop('force_read_resolution')
    # For testing the use of local and skimage tifffile library
    import_local_tifffile = False
    if 'import_local_tifffile' in kwds.keys():
        import_local_tifffile = kwds.pop('import_local_tifffile')
    imsave, TiffFile = _import_tifffile_library(import_local_tifffile)
    with TiffFile(filename, **kwds) as tiff:
        dc = tiff.asarray()
        # change in the Tifffiles API
        if hasattr(tiff.series[0], 'axes'):
            # in newer versions 'axes' is an attribute
            axes = tiff.series[0].axes
        else:
            # old version
            axes = tiff.series[0]['axes']
        _logger.info("Is RGB: %s" % tiff.is_rgb)
        if tiff.is_rgb:
            dc = rgb_tools.regular_array2rgbx(dc)
            axes = axes[:-1]
        op = {}
        for key, tag in tiff[0].tags.items():
            op[key] = tag.value
        names = [axes_label_codes[axis] for axis in axes]
        units = t.Undefined
        scales = []
        _logger.info('Tiff tags list: %s' % op.keys())
        _logger.info("Photometric: %s" % op['photometric'])
        # for files created with ImageJ
        if 'image_description' in op.keys():
            image_description = _decode_string(op["image_description"])
            _logger.info(
                "Image_description tag: {0}".format(image_description))
            if 'ImageJ' in image_description:
                _logger.info("Reading ImageJ tif metadata")
                # ImageJ writes the unit in the image description
                units = image_description.split('unit=')[1].split('\n')[0]
                scales = _get_scales_from_x_y_resolution(op)
        # for files created with DM
        if '65003' in op.keys():
            _logger.info("Reading DM tif metadata")
            units = []
            units.extend([_decode_string(op['65003']),   # x unit
                          _decode_string(op['65004'])])  # y unit
            scales = []
            scales.extend([op['65009'],   # x scale
                           op['65010']])  # y scale
        # for FEI SEM tiff files:
        if '34682' in op.keys():
            _logger.info("Reading FEI tif metadata")
            op = _read_original_metadata_FEI(op)
            scales = _get_scale_FEI(op)
            units = 'm'
        # for Zeiss SEM tiff files:
        if '34118' in op.keys():
            _logger.info("Reading Zeiss tif metadata")
            op = _read_original_metadata_Zeiss(op)
            # It seems that Zeiss software doesn't store/compute correctly
            # the scale in the metadata... it needs to be corrected by the
            # image resolution.
            corr = 1024 / max(size for size in dc.shape)
            scales = _get_scale_Zeiss(op, corr)
            units = 'm'
        if force_read_resolution and 'resolution_unit' in op.keys() \
                and 'x_resolution' in op.keys():
            res_unit_tag = op['resolution_unit']
            if res_unit_tag != 1 and len(scales) == 0:
                _logger.info("Resolution unit: %s" % res_unit_tag)
                scales = _get_scales_from_x_y_resolution(op)
                if res_unit_tag == 2:
                    # unit is in inch, conversion to um
                    scales = [scale * 25400 for scale in scales]
                    units = 'µm'
                if res_unit_tag == 3:
                    # unit is in cm, conversion to um
                    scales = [scale * 10000 for scale in scales]
                    units = 'µm'
        _logger.info("data shape: {0}".format(dc.shape))
        # workaround for 'palette' photometric, keep only 'X' and 'Y' axes
        if op['photometric'] == 3:
            sl = [0] * dc.ndim
            names = []
            for i, axis in enumerate(axes):
                if axis == 'X' or axis == 'Y':
                    sl[i] = slice(None)
                    names.append(axes_label_codes[axis])
                else:
                    axes.replace(axis, '')
            dc = dc[sl]
        _logger.info("names: {0}".format(names))
        # add the scale for the missing axes when necessary
        for i in dc.shape[len(scales):]:
            if op['photometric'] == 0 or op['photometric'] == 1:
                scales.append(1.0)
            elif op['photometric'] == 2:
                scales.insert(0, 1.0)
        if len(scales) == 0:
            scales = [1.0] * dc.ndim
        if isinstance(units, str) or units == t.Undefined:
            units = [units for i in dc.shape]
            if len(dc.shape) == 3:
                units[0] = t.Undefined
        axes = [{'size': size,
                 'name': str(name),
                 'scale': scale,
                 #'offset' : origins[i],
                 'units': unit,
                 }
                for size, name, scale, unit in zip(dc.shape, names, scales,
                                                   units)]
        return [{'data': dc,
                 'original_metadata': op,
                 'axes': axes,
                 'metadata': {'General': {'original_filename':
                                          os.path.split(filename)[1]},
                              'Signal': {'signal_type': "",
                                         'record_by': "image",
                                         },
                              },
                 }]

def test_regular_array2rgbx_corder_from_c(self):
    d = rt.regular_array2rgbx(self.data_c)
    nt.assert_true(d.flags["C_CONTIGUOUS"])