Example #1
def load_tiff_stack(fname, use_lib_tiff=False):
    """
    Read a TIFF stack.
    The default reader is tifffile as, right now, only this works when the application is compiled on Windows. [17/08/15]
    Bugs: known to fail with tiffs produced by Icy [23/07/15]
    """
    if not check_file_exists(fname, "load_tiff_stack"):
        return

    if use_lib_tiff:
        try:
            from libtiff import TIFFfile
        except ImportError:  # Suppresses error in IDE when libtiff not installed
            raise
        tiff = TIFFfile(fname)
        samples, sample_names = tiff.get_samples()  # we should have just one
        print("Loading: " + tiff.get_info() + " with libtiff\n")
        im = np.asarray(samples[0])
    else:
        print("Loading: " + fname + " with tifffile\n")
        from tifffile import imread

        im = imread(fname)

    im = im.swapaxes(1, 2)
    print("read image of size: cols: %d, rows: %d, layers: %d" %
          (im.shape[1], im.shape[2], im.shape[0]))
    return im
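A minimal usage sketch for load_tiff_stack above; the file name is hypothetical and the default pure-Python tifffile path is assumed (np and check_file_exists come from the surrounding module):

stack = load_tiff_stack("example_stack.tif")
if stack is not None:
    # axes are (layers, cols, rows), matching the size report printed above
    print(stack.shape, stack.dtype)
    middle_plane = stack[stack.shape[0] // 2]  # grab the middle optical section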
Example #2
def test_rw_rgb():
    itype = uint8
    dt = dtype(dict(names=list('rgb'), formats=[itype] * 3))

    image = zeros((2, 3), dtype=dt)
    image['r'][:, 0] = 250
    image['g'][:, 1] = 251
    image['b'][:, 2] = 252

    fn = mktemp('.tif')
    tif = TIFFimage(image)
    tif.write_file(fn, compression='lzw')  #, samples='rgb')
    del tif

    tif = TIFFfile(fn)
    data, names = tif.get_samples()
    #os.remove(fn)
    atexit.register(os.remove, fn)
    print(image)
    print(data)

    assert itype == data[0].dtype, repr((itype, data[0].dtype))
    assert (image['r'] == data[0]).all()
    assert (image['g'] == data[1]).all()
    assert (image['b'] == data[2]).all()
Example #3
def test_write_read():

    for compression in [None, 'lzw']:
        for itype in [
                uint8, uint16, uint32, uint64, int8, int16, int32, int64,
                float32, float64, complex64, complex128
        ]:
            image = array([[1, 2, 3], [4, 5, 6]], itype)
            fn = mktemp('.tif')

            if 0:
                tif = TIFF.open(fn, 'w')
                tif.write_image(image, compression=compression)
                tif.close()
            else:
                tif = TIFFimage(image)
                tif.write_file(fn, compression=compression)
                del tif

            tif = TIFFfile(fn)
            data, names = tif.get_samples()
            assert names == ['sample0'], repr(names)
            assert len(data) == 1, repr(len(data))
            assert image.dtype == data[0].dtype, repr(
                (image.dtype, data[0].dtype))
            assert (image == data[0]).all()

            #os.remove(fn)
            atexit.register(os.remove, fn)
Example #4
def loadTiffStack(fname,useLibTiff=False):
  """
  Read a TIFF stack.
  The default reader is tifffile as, right now, only this works when the application is compiled on Windows. [17/08/15]
  Bugs: known to fail with tiffs produced by Icy [23/07/15]

  """
  if not os.path.exists(fname):
    print("imageStackLoader.loadTiffStack cannot find %s" % fname)
    return

  purePython = True
  if useLibTiff:
    from libtiff import TIFFfile
    import numpy as np
    tiff = TIFFfile(fname)
    samples, sample_names = tiff.get_samples() #we should have just one
    print("Loading:\n" + tiff.get_info() + " with libtiff\n")
    im = np.asarray(samples[0])
  else:
    print("Loading:\n" + fname + " with tifffile\n")
    from tifffile import imread 
    im = imread(fname)

  im = im.swapaxes(1, 2)
  print("read image of size: cols: %d, rows: %d, layers: %d" % (im.shape[1], im.shape[2], im.shape[0]))
  return im
Example #5
def run():
    images = TIFFfile(args.image)
    labels = TIFFfile(args.label)
    samples, _ = images.get_samples()
    images = np.array(samples).transpose([1, 2, 3, 0])
    images = np.array([cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) for im in images])
    samples, _ = labels.get_samples()
    labels = np.array(samples).transpose([1, 2, 3, 0])
    labels = np.array(
        [cv2.cvtColor(label, cv2.COLOR_BGR2GRAY) for label in labels])

    m = np.array([int(x / 2) for x in args.shape])
    seg = labels.copy()
    corner, partitions = compute_partitions(seg[...],
                                            [float(x) for x in args.thr], m,
                                            args.min_size)
    print(corner)
    totals = defaultdict(int)  # partition -> voxel count
    indices = defaultdict(list)  # partition -> [(vol_id, 1d index)]
    vol_shapes = partitions.shape
    uniques, counts = np.unique(partitions, return_counts=True)
    for val, cnt in zip(uniques, counts):
        if val == 255:
            continue

        totals[val] += cnt
        indices[val].extend(
            [flat_index for flat_index in np.flatnonzero(partitions == val)])

    max_count = max(totals.values())
    indices = np.concatenate(
        [np.resize(np.random.permutation(v), max_count) for v in indices.values()],
        axis=0)
    np.random.shuffle(indices)
    coor = []
    for coord_idx in indices:
        z, y, x = np.unravel_index(coord_idx, vol_shapes)
        coor.append([z + m[2], y + m[1], x + m[0]])

    with h5py.File(args.save, 'w') as f:
        f.create_dataset('image', data=images, compression='gzip')
        f.create_dataset('label', data=labels, compression='gzip')
        f.create_dataset('coor', data=coor, compression='gzip')
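For context, a minimal sketch of reading the datasets written by run() back with h5py; the file name stands in for whatever args.save pointed at:

import h5py

with h5py.File('training_data.h5', 'r') as f:
    images = f['image'][...]   # grayscale image volume
    labels = f['label'][...]   # grayscale label volume
    coor = f['coor'][...]      # sampled candidate coordinates (z, y, x with offsets)
print(images.shape, labels.shape, coor.shape)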
Example #6
def readTiff(fileName):
    """
    Read a tiff file into a numpy array
    Usage: img = readTiff(fileName)
    """
    tiff = TIFFfile(fileName)
    samples, sample_names = tiff.get_samples()

    outList = []
    for sample in samples:
        outList.append(np.copy(sample))

    out = np.concatenate(outList, axis=-1)

    tiff.close()

    return out
Example #7
def test_write_read():
    for compression in ['none', 'lzw']:
        for itype in [
                uint8, uint16, uint32, uint64, int8, int16, int32, int64,
                float32, float64, complex64, complex128
        ]:
            image = array([[1, 2, 3], [4, 5, 6]], itype)
            fn = mktemp('.tif')
            tif = TIFFimage(image)
            tif.write_file(fn, compression=compression)
            del tif

            tif = TIFFfile(fn)
            data, names = tif.get_samples()
            #os.remove(fn)
            atexit.register(os.remove, fn)
            assert names == ['sample0'], repr(names)
            assert len(data) == 1, repr(len(data))
            assert image.dtype == data[0].dtype, repr((image.dtype, data[0].dtype))
            assert (image == data[0]).all()
Example #8
def read(fileName):
    """
    Script to import tif file from imageJ,
    usage: zstack =  tiff.read(inFileName)
    PTW 2015/01/29
    """
    tiff = TIFFfile(fileName)
    samples, sample_names = tiff.get_samples()

    outList = []
    for sample in samples:
        outList.append(np.copy(sample)[..., np.newaxis])

    out = np.concatenate(outList, axis=-1)
    out = np.rollaxis(out, 0, 3)
    out = np.flipud(out)

    tiff.close()

    return out
Example #9
 def _get_description(self):
     self.is_ok = False
     if not os.path.isfile (self.file_name):
         if os.path.exists (self.file_name):
             if os.path.isdir (self.file_name):
                 files = []
                 for ext in ['tif', 'lsm']:
                     files += glob.glob(self.file_name+'/*.'+ext)
                 n = len (self.file_name)
                 files = sorted([f[n+1:] for f in files])
                 return 'Directory contains:\n%s' % ('\n'.join (files))
             return 'not a file'
         return 'file does not exist'
     if os.path.basename(self.file_name)=='configuration.txt':
         # NOTE: parsing of configuration.txt data is not implemented; return the raw text.
         return open(self.file_name, errors='ignore').read()
     try:
         tiff = TIFFfile(self.file_name, verbose=True)
     except ValueError as msg:
         return 'not a TIFF file\n%s' % (msg)
Example #10
def test_simple_slicing():
    for planar_config in [1, 2]:
        for compression in [None, 'lzw']:
            for itype in [
                    uint8, uint16, uint32, uint64, int8, int16, int32, int64,
                    float32, float64, complex64, complex128
            ]:
                image = random.randint(0, 100, size=(10, 6, 7)).astype(itype)
                fn = mktemp('.tif')

                if 0:
                    if planar_config == 2:
                        continue
                    tif = TIFF.open(fn, 'w')
                    tif.write_image(image, compression=compression)
                    tif.close()
                else:
                    tif = TIFFimage(image)
                    tif.write_file(fn,
                                   compression=compression,
                                   planar_config=planar_config)
                    del tif

                tif = TIFFfile(fn)
                arr = tif.get_tiff_array()
                data = arr[:]
                assert len(data) == len(image), repr(len(data))
                assert image.dtype == data.dtype, repr(
                    (image.dtype, data[0].dtype))
                assert (image == data).all()
                assert arr.shape == image.shape

                _indices = [0, slice(None), slice(0, 2), slice(0, 5, 2)]
                for _i0 in _indices[:1]:
                    for i1 in _indices:
                        for i2 in _indices:
                            sl = (_i0, i1, i2)
                            assert (arr[sl] == image[sl]).all(), repr(sl)
                tif.close()
                atexit.register(os.remove, fn)
Example #11
def read_tif(fn, div2=False, oldmethod=False, offset2=False):
    """Reads a 3D TIFF file and returns a 3D numpy matrix
    from a path
    
    Inputs :
    - fn (string): the path of the 3D TIFF file
    - div2 (bool): if `True`, only one plane in every two along $z$ will be loaded
    - offset2 (bool): with `div2`, keep the odd-indexed planes instead of the even ones
    - oldmethod (bool): if `True`, read with libtiff.TIFF (kept for compatibility)

    Returns :
    - a 3D numpy matrix
    """
    if offset2:
        kp = 1
    else:
        kp = 0
    if not oldmethod:
        ti = TIFFfile(fn)
        ti = ti.get_samples()[0][0].swapaxes(0, 2).swapaxes(0, 1)
        if div2:
            I = []
            for i in range(ti.shape[2]):
                j = ti[:, :, i]
                if i % 2 == kp:
                    I.append(j)
            ti = np.zeros((I[0].shape[0], I[0].shape[1], len(I)))
            for (i, j) in enumerate(I):
                ti[:, :, i] = j
        return ti
    else:  # Kept for compatibility. Fails to load some 16 bits images.
        im = TIFF.open(fn)
        I = []
        for (j, i) in enumerate(im.iter_images()):
            if div2 and (j + 1) % 2 == 0:
                pass
            else:
                I.append(i)
        ret = np.zeros((I[0].shape[0], I[0].shape[1], len(I)))
        for (i, ii) in enumerate(I):
            ret[:, :, i] = ii
        return ret
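A usage sketch for read_tif above; the path is hypothetical:

vol = read_tif('stack.tif')                  # planes end up along the last axis
vol_half = read_tif('stack.tif', div2=True)  # keep only one plane in every two
print(vol.shape, vol_half.shape)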
Example #12
def test_issue19():
    size = 1024 * 32  # 1GB

    # size = 1024*63  # almost 4GB, test takes about 60 seconds but succeeds
    image = ones((size, size), dtype=uint8)
    print('image size:', image.nbytes / 1024**2, 'MB')
    fn = mktemp('issue19.tif')
    tif = TIFFimage(image)
    try:
        tif.write_file(fn)
    except OSError as msg:
        if 'Not enough storage is available to process this command'\
           in str(msg):
            # Happens in AppVeyor CI
            del tif
            atexit.register(os.remove, fn)
            return
        else:
            raise
    del tif
    tif = TIFFfile(fn)
    tif.get_tiff_array()[:]  # expected failure
    tif.close()
    atexit.register(os.remove, fn)
Example #13
    def _file_name_changed(self):
        print(self.file_name)
        if not os.path.isfile(self.file_name):
            raise ValueError ("File does not exist: %r" % (self.file_name))
        self.reset()

        tiff_array_info = {}
        tables_info = {}
        if os.path.basename(self.file_name)=='configuration.txt':
            tiff_files = {}
            dir_path = os.path.dirname(self.file_name)
            csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
            default_kind = 'image_timeseries'
            for d in ['Imperx', 'Andor', 'Confocal']:
                channel_label = '%s' % (d)
                d_path = os.path.join(dir_path, '%s_index.txt' % (d))

                if not os.path.isfile (d_path): # mari-ism support
                    d_path = os.path.join(dir_path, d, '%s_index.txt' % (d))
                if os.path.isfile (d_path):
                    d_index = {}
                    time_map = defaultdict(lambda:[])
                    file_map = defaultdict(lambda:[])
                    for index, line in enumerate(open (d_path).readlines ()):
                        t, fn = line.strip().split()
                        t = float(t)
                        fn = os.path.join(os.path.dirname(d_path), fn)
                        d_index[t, index] = fn
                        time_map[fn].append(t)
                        file_map[t].append(fn)
                    if len (file_map)<=1:
                        default_kind = 'image_stack'
                    elif len (file_map[t])>1:
                        default_kind = 'image_stack_timeseries'
                    files = [d_index[k] for k in sorted (d_index)]
                    tiff = TiffFiles(files, time_map = time_map)

                    tiff_files[channel_label] = tiff
                    tiff_array_info[channel_label] = dict(channel=channel_label, subfile_type=0, sample_index=0,
                                                          assume_one_image_per_file=True)
            tables = {}
            for csv_path in csv_files:
                print('Reading', csv_path, '..')
                name = os.path.basename(csv_path)[:-4]
                titles = None
                table_data = defaultdict(lambda:[])
                for line in open(csv_path).readlines():
                    line = line.strip()
                    if not line:
                        continue
                    if titles is None:
                        titles = [title[1:-1] for title in line.split('\t')]
                    else:
                        data = line.split ('\t')
                        for title, value in zip (titles, data):
                            table_data[title].append (float(value))
                tables[name] = table_data
                print('done')
            for channel_label in tiff_files:
                tables_info[channel_label] = tables
            tiff = TiffChannelsAndFiles(tiff_files)

        else:
            tiff = TIFFfile(self.file_name)
            default_kind = 'image_stack'
            for subfile_type in tiff.get_subfile_types():
                ifd = tiff.get_first_ifd(subfile_type=subfile_type)
                depth = tiff.get_depth(subfile_type=subfile_type)
                width = ifd.get_value('ImageWidth')
                height = ifd.get_value('ImageLength')
                if subfile_type!=0:
                    print('%s._file_name_changed: ignoring subfile_type %r' % (self.__class__.__name__, subfile_type))
                    continue

                for i, (name, dtype) in enumerate(zip (ifd.get_sample_names(), ifd.get_sample_dtypes())):
                    tname = str(dtype)
                    channel_label = '%s: %s [%sx%sx%s %s]' % (subfile_type, name, depth, height, width, tname)
                    tiff_array_info[channel_label] = dict (subfile_type=subfile_type, sample_index=i)

        self.kind = default_kind
        self.tiff = tiff


        try:
            info = tiff.get_info()
        except Exception as msg:
            info = 'failed to get TIFF info: %s' % (msg)
Example #14
#
# 1.py
# @author bulbasaur
# @description
# @created 2020-01-29T15:06:32.299Z+08:00
# @last-modified 2020-01-31T14:02:20.634Z+08:00
#

from libtiff import TIFFfile

src = TIFFfile("C:\\Users\\sheld\\Desktop\\ms.bsq")

print(src)
Example #15
    def iter_Image(self, func):
        sys.stdout.write('iter_Image: reading image data from TIFF files\n')
        for detector in self.data:
            sys.stdout.write('  detector: %s\n' % (detector))
            d_index = self.data[detector]

            # write the content of tiff files to a single raw files
            f,fn,dtype = None, None, None
            time_set = set()
            mn, mx = None, None
            mnz = float(self.config['PROTOCOL_Z_STACKER_Minimum'])
            mxz = float(self.config['PROTOCOL_Z_STACKER_Maximum'])
            nz = int(self.config['PROTOCOL_Z_STACKER_NumberOfFrames'])
            if nz > 1:
                dz = (mxz-mnz)/(nz-1)
            else:
                dz = 0
            plane_l = []
            ti = -1

            exptime = '0'
            if detector=='Confocal':
                exptime = float(self.config['CONFOCAL_PixelAcqusitionTime']) * 1e-6
            elif detector=='Andor':
                exptime = self.config['CAMERA_ANDOR_ExposureTime']
            elif detector=='Imperx':
                for line in  self.config['CAMERA_IMPERX_HardwareInformation'].split('\n'):
                    if line.startswith ('Exposure time:'):
                        v,u = line[14:].lstrip().split()
                        v = v.strip (); u = u.strip ()
                        if u=='usec': exptime = float(v)*1e-6
                        elif u=='msec': exptime = float(v)*1e-3
                        elif u=='sec': exptime = float(v)
                        else:
                            raise NotImplementedError(repr((v, u, line)))
            else:
                raise NotImplementedError(repr(detector))

            for t, index in sorted(d_index):
                if t not in time_set:
                    time_set.add(t)
                    ti += 1
                    zi = 0
                else:
                    zi += 1
                z = mnz + dz * zi
                d = dict(DeltaT=str(t), TheT=str(ti), TheZ = str(zi), PositionZ=str(z), TheC='0', ExposureTime=str(exptime))
                plane_l.append(d)

                tif = TIFFfile(d_index[t, index])
                samples, sample_names = tif.get_samples()
                assert len(sample_names) == 1, repr(sample_names)
                data = samples[0]
                if mn is None:
                    mn, mx = data.min(), data.max()
                else:
                    mn = min(data.min(), mn)
                    mx = max(data.max(), mx)
                if f is None:
                    shape = list(data.shape)
                    dtype = data.dtype
                    fn = tempfile.mktemp(suffix='.raw', prefix='%s_%s_' % (detector, dtype))
                    f = open (fn, 'wb')
                else:
                    assert dtype is data.dtype, repr((dtype, data.dtype))
                    shape[0] += 1
                data.tofile(f)

                sys.stdout.write('\r  copying TIFF image data to RAW file: %5s%% done' % (int(100.0*(index+1)/len(d_index))))
                sys.stdout.flush()

            if f is None:
                continue
            f.close ()
            shape = tuple (shape)

            xsz = shape[2]
            ysz = shape[1]
            tsz = len(time_set)
            zsz = shape[0] // tsz
            order = 'XYZTC'
            sys.stdout.write("\n  RAW file contains %sx%sx%sx%sx%s [%s] array, dtype=%s, MIN/MAX=%s/%s\n" \
                                 % (xsz, ysz,zsz,tsz,1,order, dtype, mn,mx))
            assert zsz * tsz == shape[0], repr((zsz, tsz, shape))

            tif_filename = '%s%s.ome.tif' % (self.file_prefix, detector)
            sys.stdout.write("  creating memmap image for OME-TIF file %r..." % (tif_filename))
            sys.stdout.flush()
            mmap = numpy.memmap(fn, dtype=dtype, mode='r', shape=shape)
            tif_image = TIFFimage(mmap)
            atexit.register(os.remove, fn)
            tif_uuid = self._mk_uuid()
            self.tif_images[detector, tif_filename, tif_uuid] = tif_image
            sys.stdout.write (' done\n')
            sys.stdout.flush()


            pixels_d = {}
            channel_d = dict(SamplesPerPixel='1')
            lpath_l = []
            #channel_d todo: ExcitationWavelength, EmissionWavelength, Fluor, NDFilter, PockelCellSetting, Color 
            if detector in ['Confocal']:
                objective = ome.ObjectiveSettings(ID='Objective:%s' % (self.config['olympus_optics_objective']))
                instrument_id = 'Instrument:Airy'
                pixels_d['PhysicalSizeX'] = str(self.config['CONFOCAL_PixelSizeX'])
                pixels_d['PhysicalSizeY'] = str(self.config['CONFOCAL_PixelSizeY'])
                pixels_d['TimeIncrement'] = str(self.config['CONFOCAL_TimeBetweenFrames'])
                channel_d['Name'] = 'Confocal'
                channel_d['IlluminationType'] = 'Epifluorescence'
                channel_d['PinholeSize'] = '180'
                # todo: FluorescenceCorrelationSpectroscopy
                channel_d['AcquisitionMode'] = 'LaserScanningConfocalMicroscopy'

                for i in range (1,5):
                    d1 = 'AOTFLine%s' % i
                    ft = confocal_filters.get(d1)
                    if ft is None:
                        continue
                    fn = ft['ex'][0]
                    fn = get_aotf_filter_name (fn, self.config)
                    if 'OFF' in fn:
                        continue
                    lpath_l.append(ome.ExcitationFilterRef(ID='Filter:%s:%s' % (d1,fn)))
                fn = confocal_filters['OpticalTableSplitter']['di'][0]
                lpath_l.append(ome.DichroicRef(ID='Dichroic:OpticalTableSplitter:%s' % (fn)))
                d1 = 'ThorlabsWheelPosition%s' % (self.config['thorlabs_filter_wheel_position'][3])
                ft = confocal_filters.get(d1)
                if ft is not None:
                    fn = ft['em'][0]
                    lpath_l.append(ome.EmissionFilterRef (ID='Filter:%s:%s' % (d1,fn)))
            elif detector in ['Andor', 'Imperx']:
                objective = ome.ObjectiveSettings(ID='Objective:%s' % (self.config['optics_objective']))
                instrument_id = 'Instrument:Suga'
                channel_d['Name'] = '%s camera' % (detector)
                channel_d['AcquisitionMode'] = 'WideField'
                pixels_d['PhysicalSizeX'] = pixels_d['PhysicalSizeY'] = str(self.config['CAMERA_%s_PixelSize' % (detector.upper ())])
                tbf = float(self.config['CAMERA_%s_TimeBetweenFrames' % (detector.upper ())])
                d1 = 'NikonTurretTopCube%s' % (self.config['top_turret_cube'][3])
                d2 = 'NikonTurretBottomCube%s' % (self.config['bottom_turret_cube'][3])
                top_cube = nikon_filters[d1]
                bottom_cube = nikon_filters[d2]
                if detector=='Andor':
                    channel_d['IlluminationType'] = 'Epifluorescence'
                    if self.config['CAMERA_ANDOR_FrameTransferMode']=='1':
                        m = re.search(r'Kinetic cycle time:\s*(?P<time>\d+[.]\d*)\s*sec', self.config['CAMERA_ANDOR_HardwareInformation'], re.M)
                        pixels_d['TimeIncrement'] = str(m.group('time'))
                    else:
                        pixels_d['TimeIncrement'] = str(tbf)
                    if 'ex' in top_cube:
                        fn = top_cube['ex'][0]
                        lpath_l.append(ome.ExcitationFilterRef(ID='Filter:%s:%s' % (d1,fn)))
                    if 'di' in top_cube:
                        fn = top_cube['di'][0]
                        lpath_l.append(ome.DichroicRef(ID='Dichroic:%s:%s' % (d1,fn)))
                    if 'em' in top_cube:
                        fn = top_cube['em'][0]
                        lpath_l.append(ome.EmissionFilterRef(ID='Filter:%s:%s' % (d1,fn)))
                    if 'ex' in bottom_cube:
                        fn = bottom_cube['ex'][0]
                        lpath_l.append(ome.EmissionFilterRef(ID='Filter:%s:%s' % (d2,fn)))
                else:
                    #m = re.search(r'Exposure time:\s*(?P<time>\d+[.]\d*)\s*msec', self.config['CAMERA_IMPERX_HardwareInformation'], re.M)
                    #exp_time = float (m.group ('time'))
                    if self.config['main_protocol_mode'].startswith('MyocyteMechanicsFluorescence'):
                        tbf = float(self.config['PROTOCOL_MYOCYTE_MECHANICS_TimeBetweenSavedFrames'])
                    pixels_d['TimeIncrement'] = str(tbf)
                    channel_d['IlluminationType'] = 'Transmitted'
                    if self.config['optics_transmission_light_filter']!='Empty':
                        fn = nikon_filters['NikonIllumination']['ex'][0]
                        lpath_l.append(ome.ExcitationFilterRef(ID='Filter:NikonIllumination:%s' % (fn)))
                    if 'di' in top_cube:
                        fn = top_cube['di'][0]
                        lpath_l.append(ome.EmissionFilterRef(ID='Filter:%s:%s' % (d1, fn)))
                    if 'em' in top_cube:
                        fn = top_cube['em'][0]
                        lpath_l.append(ome.EmissionFilterRef(ID='Filter:%s:%s' % (d1,fn)))
                    if 'em' in bottom_cube:
                        fn = bottom_cube['em'][0]
                        lpath_l.append(ome.EmissionFilterRef(ID='Filter:%s:%s' % (d2,fn)))
                    if 'di' in bottom_cube:
                        fn = bottom_cube['di'][0]
                        lpath_l.append(ome.EmissionFilterRef(ID='Filter:%s:%s' % (d2,fn)))
            else:
                raise NotImplementedError(repr(detector))

            if zsz>1:
                pixels_d['PhysicalSizeZ'] = str(dz)
            channel = ome.Channel(ID='Channel:%s' % (detector),
                                  **channel_d)

            lpath = ome.LightPath(*lpath_l)
            channel.append (lpath)

            #todo attributes: 
            #todo elements: BIN:BinData, MetadataOnly, SA:AnnotationRef
            tiffdata = ome.TiffData(ome.UUID (tif_uuid, FileName=tif_filename))
            pixels = ome.Pixels(channel,
                                tiffdata, 
                                DimensionOrder=order, ID='Pixels:%s' % (detector),
                                SizeX = str(xsz), SizeY = str(ysz), SizeZ = str(zsz), SizeT=str(tsz), SizeC = str(1),
                                Type = self.dtype2PixelIType (dtype),
                                **pixels_d
                                )
            for d in plane_l:
                pixels.append(ome.Plane(**d))
            #todo attributes: Name
            #todo elements: Description, ExperimentRef, DatasetRef ,
            #               ImagingEnvironment, StageLabel, ROIRef, MicrobeamManipulationRef, AnnotationRef

            image = ome.Image (ome.AcquiredDate (self.get_AcquiredDate()),
                               ome.ExperimenterRef(ID='Experimenter:%s' % (self.current_user)),
                               ome.GroupRef(ID='Group:SysBio'),
                               ome.InstrumentRef(ID=instrument_id),
                               objective,
                               pixels, 
                               ID='Image:%s' % (detector))

            if 0:
                image.append(sa.AnnotationRef (ID='Annotation:configuration.txt'))
            yield image
        return
Example #16
from libtiff import TIFFfile
import os
import numpy as np
from PIL import Image

input_file = '../data/FIB_segmentaion/grayscale_maps_500.tif'
out_path = '../data/raw'
if not os.path.exists(out_path):
    os.makedirs(out_path)

tif = TIFFfile(input_file)
data, _ = tif.get_samples()
data = data[0]
print('the shape of data: ', data.shape)

for k in range(data.shape[0]):
    Image.fromarray(data[k]).save(
        os.path.join(out_path, 'raw_' + str(k).zfill(4) + '.png'))

print('Done')
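As a complementary sketch, the exported slices can be stacked back into a single volume, assuming the raw_0000.png, raw_0001.png, ... naming produced above (os, np, Image and out_path are already defined in the script):

import glob

slices = sorted(glob.glob(os.path.join(out_path, 'raw_*.png')))
stack = np.stack([np.array(Image.open(p)) for p in slices], axis=0)
print('reassembled stack shape:', stack.shape)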
Example #17
def read_tif(filename,channel=0):
    """Read a tif image

    :Parameters:
    - `filename` (str) - name of the file to read
    """

    # TIF reader
    tif = libtiff.TIFF.open(filename)
    
    if tif.GetField('ImageDescription'):
        tif = TIFFfile(filename)
        arr = tif.get_tiff_array()
        _data = arr[:].T
        info_str = tif.get_info()
    else:
        i = 1
        while not tif.LastDirectory():
            i+=1
            tif.ReadDirectory()
        tif.SetDirectory(0)
        _data = np.zeros((i,)+tif.read_image().shape,dtype=tif.read_image().dtype)
        for ii,i in enumerate(tif.iter_images()):
            _data[ii] = i
        _data = _data.transpose(2, 1, 0)
        info_str = tif.info()

    nx, ny, nz = _data.shape

    # -- prepare metadata dictionary --
    
    info_dict = dict( filter( lambda x: len(x)==2,
                              (inf.split(':') for inf in info_str.split("\n"))
                              ) )
    for k, v in info_dict.items():
        info_dict[k] = v.strip()

    # -- getting the voxelsizes from the tiff image: sometimes
    # there is a BoundingBox attribute, sometimes there are
    # XResolution, YResolution, ZResolution or spacing.
    # the object returned by get_tiff_array has a "get_voxel_sizes()"
    # method but it fails, so here we go. --
    if "BoundingBox" in info_dict:
        bbox = info_dict["BoundingBox"]
        xm, xM, ym, yM, zm, zM = map(float,bbox.split())
        _vx = (xM-xm)/nx
        _vy = (yM-ym)/ny
        _vz = (zM-zm)/nz
    else:
        # -- When we have [XYZ]Resolution fields, it describes the
        # number of voxels per real unit. In SpatialImage we want the
        # voxelsizes, which is the number of real units per voxels.
        # So we must invert the result. --
        if "XResolution" in info_dict:
            # --resolution is stored in a [(values, precision)] list-of-one-tuple, or
            # sometimes as a single number --
            xres_str = eval(info_dict["XResolution"])
            if isinstance(xres_str, list) and isinstance(xres_str[0], tuple):
                xres_str = xres_str[0]
                _vx = float(xres_str[0])/xres_str[1]
            elif isinstance(xres_str, (int, float)):
                _vx = float(xres_str)
            else:
                _vx = 1.
            _vx = 1./_vx if _vx != 0 else 1.
        else:
            _vx = 1.0 # dumb fallback, maybe we will find something smarter later on
        if "YResolution" in info_dict:
            # --resolution is stored in a [(values, precision)] list-of-one-tuple, or
            # sometimes as a single number --
            yres_str = eval(info_dict["YResolution"])
            if isinstance(yres_str, list) and isinstance(yres_str[0], tuple):
                yres_str = yres_str[0]
                _vy = float(yres_str[0])/yres_str[1]
            elif isinstance(yres_str, (int, float)):
                _vy = float(yres_str)
            else:
                _vy = 1.
            _vy = 1./_vy if _vy != 0 else 1.
        else:
            _vy = 1.0 # dumb fallback, maybe we will find something smarter later on

        if "ZResolution" in info_dict:
            # --resolution is stored in a [(values, precision)] list-of-one-tuple, or
            # sometimes as a single number --
            zres_str = eval(info_dict["ZResolution"])
            if isinstance(zres_str, list) and isinstance(zres_str[0], tuple):
                zres_str = zres_str[0]
                _vz = float(zres_str[0])/zres_str[1]
            elif isinstance(zres_str, (int, float)):
                _vz = float(zres_str)
            else:
                _vz = 1.
            _vz = 1./_vz if _vz != 0 else 1.
        else:
            if "spacing" in info_dict:
                _vz = eval(info_dict["spacing"])
            else:
                _vz = 1.0 # dumb fallback, maybe we will find something smarter later on

    tif.close()
    # -- dtypes are not really stored in a compatible way (">u2" instead of uint16)
    # but we can convert those --
    dt = np.dtype(_data.dtype.name)
    # -- Return a SpatialImage please! --
    im = SpatialImage(_data, dtype=dt)
    im.resolution = _vx,_vy,_vz

    return im
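A worked example of the [XYZ]Resolution inversion described in the comments above; the tag value is illustrative:

xres = [(300, 1)]                   # 300 voxels per real unit, stored as (value, precision)
value, precision = xres[0]
vx = float(value) / precision       # 300.0 voxels per unit
vx = 1. / vx if vx != 0 else 1.     # voxel size: ~0.0033 real units per voxel
print(vx)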
Example #18
    def process(self, options=None, validate=default_validate):
        template_xml = list(self.make_xml())
        s = None
        for (detector, fn, uuid), tif_image in self.tif_images.items():
            xml = ome.OME(ATTR(
                'xsi', 'schemaLocation',
                "%s %s/ome.xsd" % ((namespace_map['ome'], ) * 2)),
                          UUID=uuid)
            for item in template_xml:
                if item.tag.endswith(
                        'Image') and item.get('ID') != 'Image:%s' % (detector):
                    continue
                if item.tag.endswith('Instrument'):
                    if detector == 'Confocal':
                        instrument = 'Airy'
                    elif detector in ['Imperx', 'Andor']:
                        instrument = 'Suga'
                    else:
                        instrument = None
                    if instrument and item.get(
                            'ID') != 'Instrument:%s' % (instrument):
                        continue
                xml.append(item)
            if s is None and validate:
                s = etree.tostring(xml,
                                   pretty_print=True,
                                   xml_declaration=True)
                #print s
                validate_xml(xml)
            else:
                s = etree.tostring(xml,
                                   pretty_print=True,
                                   xml_declaration=True)
            tif_image.description = s

            if detector == 'Confocal':
                c = tif_image.write_file(fn, compression='lzw')
                if c < 1.0:
                    print('Resetting compression to none')
                    tif_image.write_file(fn, compression='none')
            else:
                tif_image.write_file(fn, compression='none')

            if validate and 0:
                print('Validating written data..', end='')
                from libtiff import TIFFfile
                t = TIFFfile(fn)
                samples, sample_names = t.get_samples()
                assert len(sample_names) == 1, repr(sample_names)
                samples = samples[0]
                samples_orig = tif_image.data
                if (samples != samples_orig).any():
                    print('DATA CORRUPTION DETECTED!!')
                    print('original data:', samples_orig.dtype, samples_orig.shape, samples_orig.nbytes)
                    print('written data:', samples.dtype, samples.shape, samples.nbytes)
                    diff = samples - samples_orig
                    ia, ja, ka = diff.nonzero()
                    print(len(ia))
                    print(ia[:10])
                    print(ja[:10])
                    print(ka[:10])
                    print(samples[ia[:10], ja[:10], ka[:10]])
                    print(samples_orig[ia[:10], ja[:10], ka[:10]])
                else:
                    print('SUCCESS!')
            #validate = False
        return s