Example #1
def imreadStack(filenameList):
    """Simple wrapper to read a list of image series tiffs into a stack.

    Note that this function assumes all images are the same size.

    We tend to work with single channel tiff files, and as such use tifffile's imread function.
    We've wrapped tifffiles read function to account for the differences in default
    image dimension ordering.  By convention, we use x,y,frame.  The major advantages of
    tifffile are 1) speed and 2) the ability to read multiframe tiffs.

    :param filenameList: list of strings representing the files to load
    :returns:  4d numpy array
    """

    firstImageSeries = tifffile.imread(filenameList[0])
    if len(firstImageSeries.shape) == 3:
        firstImageSeries = np.transpose(firstImageSeries, [1, 2, 0])

    imageStack = np.zeros((firstImageSeries.shape[0], firstImageSeries.shape[1], firstImageSeries.shape[2], len(filenameList)))

    for i, fileName in enumerate(filenameList):
        array = tifffile.imread(fileName)
        if len(array.shape) == 3:
            array = np.transpose(array, [1, 2, 0])
        imageStack[:, :, :, i] = array
    return imageStack
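A minimal usage sketch (hypothetical file names; assumes the tiffs are single-channel and identically sized, as the docstring requires):

stack = imreadStack(['series_001.tif', 'series_002.tif'])
print(stack.shape)  # x, y, frame, file -- e.g. (512, 512, 100, 2)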
Example #2
def remove_dark(A, folder):
    """
    Subtract the dark files from the data files.

    Parameters
    ----------
    A : list
        list of tiff files
    folder : str
        folder containing the files (currently unused)

    Returns
    -------
    clean_data : array
        dark-subtracted (clean) data,
        shape (number of clean images, detector shape 0, detector shape 1)
    """
    
    clean_data_arr = []  # save the cleaned data
    for name in A:
        if "dark" in name:   # check the dark files
            dark_data = imread(name)  
            print ("+++++ bad", name)
        else:
            arr = imread(name)
            print ("good", name)
            #  clean the data
            clean_data = arr - dark_data 
            #print (os.path.join(name))
            imsave(name, clean_data)
            clean_data_arr.append(clean_data)
    return np.asarray(clean_data_arr)
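A minimal usage sketch (hypothetical file names). Note the implicit ordering assumption: a "dark" file must appear in the list before the data files it corrects, otherwise dark_data is undefined:

files = ['dark_001.tiff', 'scan_001.tiff', 'scan_002.tiff']
clean = remove_dark(files, folder='.')  # `folder` is accepted but unused above
print(clean.shape)  # (2, detector shape 0, detector shape 1)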
Example #3
 def __init__(self, file_path):
     self.file_path = file_path
     if isinstance(file_path, str):
         self.data = tifffile.imread(self.file_path)
     elif isinstance(file_path, (np.ndarray, list)):
         data = [tifffile.imread(f) for f in self.file_path if 'tif' in f]
         self.data = np.concatenate([d if d.ndim==3 else [d] for d in data], axis=0)
Example #4
def get_images(imageId, img_key = None):
    '''
    Load images corresponding to imageId

    Parameters
    ----------
    imageId : str
        imageId as used in grid_size.csv
    img_key : {None, '3', 'A', 'M', 'P'}, optional
        Specify this to load single image
        None loads all images and returns in a dict
        '3' loads image from three_band/
        'A' loads '_A' image from sixteen_band/
        'M' loads '_M' image from sixteen_band/
        'P' loads '_P' image from sixteen_band/

    Returns
    -------
    images : dict
        A dict of image data from TIFF files as numpy array
    '''
    img_names = get_image_names(imageId)
    images = dict()
    if img_key is None:
        for k in img_names.keys():
            images[k] = tiff.imread(img_names[k])
    else:
        images[img_key] = tiff.imread(img_names[img_key])
    return images
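A minimal usage sketch (hypothetical imageId; assumes get_image_names resolves the three_band/ and sixteen_band/ paths as the docstring describes):

all_bands = get_images('6120_2_2')               # dict keyed by '3', 'A', 'M', 'P'
pan = get_images('6120_2_2', img_key='P')['P']   # just the '_P' image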
Example #5
 def get_new_images(self, temp_file_list):
     new_pics = []
     if temp_file_list is not None:
         for i in temp_file_list:
             self.pic_list.append(imread(self._directory_name + i))
             new_pics.append(imread(self._directory_name + i))
     return temp_file_list, new_pics
Example #6
def main(vol_fname='', label_fname='', dataset_name='', percent_test=0,
	normalize_mean=False):

	print "Reading data..."
	vol = tifffile.imread(vol_fname)
	label = tifffile.imread(label_fname)

	if normalize_mean:
		vol = zero_mean(vol)

	if len(label.shape) > 3 and label.shape[3] == 3:
		print "Converting label to binary..."
		label = rgb2bin(label)

	#Splitting into training and test sets
	train, label_train, test, label_test = split_data(vol, label, percent_test)

	#Transpose
	train = train.transpose(0,2,1)
	test = test.transpose(0,2,1)

	label_train = label_train.transpose(0,2,1)
	label_test = label_test.transpose(0,2,1)

	s_train = train.shape
	s_test = test.shape

	print "Saving data in znn format..."
	#Making the necessary directories
	os.makedirs('dataset/{}/data/'.format(dataset_name))
	os.makedirs('dataset/{}/spec/'.format(dataset_name))
		
	#Save as znn format
	train_outname = "dataset/{0}/data/batch{1}.image".format(dataset_name, 1)
	label_train_outname = "dataset/{0}/data/batch{1}.label".format(dataset_name, 1)

	if percent_test > 0:

		test_outname = "dataset/{0}/data/batch{1}.image".format(dataset_name, 2)
		label_test_outname = "dataset/{0}/data/batch{1}.label".format(dataset_name, 2)

	emirt.io.znn_img_save(train.astype('double'), train_outname)
	emirt.io.znn_img_save(label_train.astype('double'), label_train_outname)

	if percent_test > 0:

		emirt.io.znn_img_save(test.astype('double'), test_outname)
		emirt.io.znn_img_save(label_test.astype('double'), label_test_outname)

	#Prepare a spec file
	print "Writing spec file..."
	write_spec_file(dataset_name, 1, s_train)

	if percent_test > 0:

		write_spec_file(dataset_name, 2, s_test)
Example #7
def readData(filename, x = all, y = all, z = all, **args):
    """Read data from a single tif image or stack
    
    Arguments:
        filename (str): file name as regular expression
        x,y,z (tuple): data range specifications
    
    Returns:
        array: image data
    """
    
    dsize = dataSize(filename);
    #print "dsize %s" % str(dsize);    
    
    if len(dsize) == 2:
        data = tiff.imread(filename, key = 0);
        #print "data.shape %s" % str(data.shape);        
        
        return io.dataToRange(data.transpose([1,0]), x = x, y = y);
        #return io.dataToRange(data, x = x, y = y);
        
    else:
        if z is all:
            data = tiff.imread(filename);
            if data.ndim == 2:
                # data = data
                data = data.transpose([1,0]);
            elif data.ndim == 3:
                #data = data.transpose([1,2,0]);
                data = data.transpose([2,1,0]);
            elif data.ndim == 4: # multi channel image
                #data = data.transpose([1,2,0,3]);
                data = data.transpose([2,1,0,3]);
            else:
                raise RuntimeError('readData: dimension %d not supported!' % data.ndim)
            
            return io.dataToRange(data, x = x, y = y, z = all);
        
        else: #optimize for z ranges
            ds = io.dataSizeFromDataRange(dsize, x = x, y = y, z = z);
            t = tiff.TiffFile(filename);
            p = t.pages[0];
            data = numpy.zeros(ds, dtype = p.dtype);
            rz = io.toDataRange(dsize[2], r = z);
            
            #print "test"
            #print rz;
            #print dsize            
            
            for i in range(rz[0], rz[1]):
                xydata = t.pages[i].asarray();
                #data[:,:,i-rz[0]] = io.dataToRange(xydata, x = x, y = y);
                data[:,:,i-rz[0]] = io.dataToRange(xydata.transpose([1,0]), x = x, y = y);
            
            return data
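A minimal usage sketch (hypothetical file name; the exact range semantics depend on the io.dataToRange helper this module assumes):

sub = readData('stack.tif', x=(0, 256), y=(0, 256), z=(10, 20))
# data come back in x, y, z order because of the transposes above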
Example #8
  def __init__(self,infile,rawfile,verbosity=1):
    self.verbosity = verbosity;

    # read images
    self.IN   =tiff.imread(infile,verbosity=self.verbosity);
    self.INRAW=tiff.imread(rawfile,verbosity=self.verbosity);
  
    # image parameters (TODO: read scale from Tiff?)
    self.info = { 'desc':     infile.split('/')[-1],
                  'filename': infile,
                  'atoms'   : 'C' };
Example #9
def main():
    flist = glob.glob(data_path_ptb+'*.tif')
    stack = tf.imread(flist)
    print(stack.shape)
    fig_comp = plt.figure(figsize = (12,5))
    db_stack = []
    db_blobs = []
    for frame in stack:
        db_frame = segmentation.frame_deblur(frame, sig = 4, wd = 8, Nit = 20, mode = 'gauss' )
        db_stack.append(db_frame)
        cblobs = segmentation.frame_blobs(db_frame)
        db_blobs.append(cblobs)


    ax1 = fig_comp.add_subplot(121)
    ax1.imshow(db_stack[0], cmap = 'Greys_r')
    ax1.scatter(db_blobs[1][:,1], db_blobs[1][:,0], s = 8)
    ax1.scatter(db_blobs[0][:,1], db_blobs[0][:,0], s = 8)


    ax2 = fig_comp.add_subplot(122)
    ax2.imshow(db_stack[1], cmap = 'Greys_r')
    ax2.scatter(db_blobs[0][:,1], db_blobs[0][:,0], s = 8)
    ax2.scatter(db_blobs[1][:,1], db_blobs[1][:,0], s = 8)

    plt.show()
Example #10
def loadTiffStack(fname,useLibTiff=False):
  """
  Read a TIFF stack.
  We're using tifffile by default as, right now, only this works when the application is compiled on Windows. [17/08/15]
  Bugs: known to fail with tiffs produced by Icy [23/07/15]

  """
  if not os.path.exists(fname):
    print "imageStackLoader.loadTiffStack can not find %s" % fname
    return

  purePython = True
  if useLibTiff:
    from libtiff import TIFFfile
    import numpy as np
    tiff = TIFFfile(fname)
    samples, sample_names = tiff.get_samples() #we should have just one
    print "Loading:\n" + tiff.get_info() + " with libtiff\n"
    im = np.asarray(samples[0])
  else:
    print "Loading:\n" + fname + " with tifffile\n"
    from tifffile import imread 
    im = imread(fname)

  im=im.swapaxes(1,2) 
  print "read image of size: cols: %d, rows: %d, layers: %d" % (im.shape[1],im.shape[2],im.shape[0])
  return im
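A minimal usage sketch (hypothetical path); after the swapaxes(1, 2) call the array is ordered layers, cols, rows:

im = loadTiffStack('/data/stack.tif')                    # tifffile reader
im = loadTiffStack('/data/stack.tif', useLibTiff=True)   # libtiff reader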
Example #11
  def get_volume(self, zoomlevel):
    '''
    @override
    '''
    files = super(Image, self).get_volume(zoomlevel)

    out = None
    out_is_there = False

    for f in files:
      input_image = tif.imread(f)

      if out_is_there:
        #out = np.dstack([out, input_image])
        out = np.concatenate([out, input_image.flatten()])
      else:
        #out = input_image
        out = input_image.flatten()
        out_is_there = True

    c_image_data = zlib.compress(out)

    output = StringIO.StringIO()
    output.write(c_image_data)

    content = output.getvalue()
    content_type = 'application/octstream'

    return content, content_type
Example #12
 def imread(_path):
     """ allow loading grayscale pngs as well as tiffs
     """
     if os.path.splitext(_path)[1] in ['.tiff', '.tif']:
         return tiff.imread(_path)
     else:
         return cv2.imread(_path,0)
Example #13
def read_tiff(fname, slc=None):
    """
    Read data from tiff file.

    Parameters
    ----------
    fname : str
        String defining the path of file or file name.
    slc : sequence of tuples, optional
        Range of values for slicing data in each axis.
        ((start_1, end_1, step_1), ... , (start_N, end_N, step_N))
        defines slicing parameters for each axis of the data matrix.

    Returns
    -------
    ndarray
        Output 2D image.
    """
    fname = _check_read(fname)
    try:
        arr = tifffile.imread(fname, out='memmap')
    except IOError:
        logger.error('No such file or directory: %s', fname)
        return False
    arr = _slice_array(arr, slc)
    _log_imported_data(fname, arr)
    return arr
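A minimal usage sketch (hypothetical file name; assumes the module's _slice_array accepts one (start, end, step) tuple per axis, per the docstring):

arr = read_tiff('projection.tif', slc=((0, 100, 2), (0, 512, 1)))  # rows 0..99, step 2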
Example #14
def recolor_volume(dir, outdir, database_file):

    conn = sqlite3.connect(database_file)
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM relabelMap')
    result = cursor.fetchall()

    mergeTable = {}
    for r in result:
        mergeTable[r[0]] = r[1:]

    print 'loaded colortable.'
    # print mergeTable

    files = os.listdir(dir)

    for f in files:

        if (f.startswith('.')):
            continue

        i = tif.imread(os.path.join(dir,f))

        for oldid in mergeTable.keys():
            i[i==oldid] = mergeTable[oldid]

        tif.imsave(os.path.join(outdir,f), i)
Example #15
def baboon(size=512, dtype='float32'):
    """
    Load test baboon image array.

    Parameters
    ----------
    size : int or tuple of int, optional
        Size of the output image.
    dtype : str, optional
        The desired data-type for the array.

    Returns
    -------
    ndarray
        Output 3D test image.
    """
    size = _totuple(size, 2)
    fname = os.path.join(DATA_PATH, 'baboon.tif')
    im = tifffile.imread(fname)
    im = skimage.transform.resize(im, size, order=3,
                                  preserve_range=True, mode='constant',
                                  **resize_kwargs)
    im = np.expand_dims(im, 0)
    im = im.astype(dtype)
    return im
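A minimal usage sketch: because of the np.expand_dims call, a 256x256 request yields a 3D array with a leading singleton axis:

img = baboon(size=256)
print(img.shape, img.dtype)  # (1, 256, 256) float32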
Example #16
def find(args):
    from sys import stdout
    from tifffile import imread

    image = imread(str(args.image)).astype('float32')

    scale = asarray(args.scale) if args.scale else ones(image.ndim, dtype='int')
    blobs = findBlobs(image, range(*args.size), args.threshold)[:, 1:] # Remove scale
    blobs = blobs[peakEnclosed(blobs, shape=image.shape, size=args.edge)]
    blobs = blobs[:, ::-1] # Reverse to xyz order
    blobs = blobs * scale

    if args.format == "pickle":
        from pickle import dump, HIGHEST_PROTOCOL
        from functools import partial
        dump = partial(dump, protocol=HIGHEST_PROTOCOL)

        dump(blobs, stdout.buffer)
    else:
        import csv

        if args.format == 'txt':
            delimiter = ' '
        elif args.format == 'csv':
            delimiter = ','
        writer = csv.writer(stdout, delimiter=delimiter)
        for blob in blobs:
            writer.writerow(blob)
Example #17
def read_image(path):
    """Read a tif image and return the data."""
    try:
        return tifffile.imread(path, key=0)
    except IOError as exception:
        _LOGGER.error('Bad path to image! %s', exception)
        return np.array([])
Example #18
 def roundtrip(self, dtype, x):
     f = NamedTemporaryFile(suffix='.tif')
     fname = f.name
     f.close()
     imsave(fname, x)
     y = imread(fname)
     assert_array_equal(x, y)
Example #19
def optimize_z(x,y,z,image,n=None):
	"""Optimize z for poly fit"""
	if type(image) == str:
		img = tf.imread(image)
	elif type(image) == np.ndarray:
		img = image

	data_z = img[:,y,x]

	if n is None:
		n = getn(data_z)

	x_opt_vals, y_opt_vals, z_opt_vals = [], [], []

	x_opt,y_opt,z_opt = x,y,z
	for i in range(5):
		try:
			print x_opt,y_opt,z_opt
			x_opt,y_opt,z_opt = int(round(x_opt)),int(round(y_opt)),int(round(z_opt))
			x_opt, y_opt = optimize_xy(x_opt,y_opt,z_opt,img,nx=None,ny=None)
			data_z = img[:,round(y_opt),round(x_opt)]
		except Exception as e:
			if clrmsg and debug is True: print clrmsg.ERROR
			print IndexError("Optimization failed, possibly due to low signal or low SNR. "+str(e))
			return [x],[y],['failed']
		n = getn(data_z)
		z_opt, data_z_yp_poly = parabolic.parabolic_polyfit(data_z, np.argmax(data_z), n)
		x_opt_vals.append(x_opt)
		y_opt_vals.append(y_opt)
		z_opt_vals.append(z_opt)

	return x_opt_vals, y_opt_vals, z_opt_vals
Example #20
    def showLoadDialog(self):
        """
        This slot brings up the load dialog and retrieves the file name.
        If the file name is valid, it loads the base stack using the load method.
        """
        
        fname = self.lasagna.showFileLoadDialog(fileFilter="LSM (*.lsm)")
        if fname is None:
            return

        colorOrder = lasHelp.readPreference('colorOrder')
        if os.path.isfile(fname): 
            im=tifffile.imread(str(fname)) 
            print "Found LSM stack with dimensions:"
            print im.shape
            for ii in range(im.shape[2]):
                stack=im[0,:,ii,:,:]

                objName="layer_%d" % (ii+1)
                self.lasagna.addIngredient(objectName=objName, 
                           kind='imagestack', 
                           data=stack, 
                           fname=fname
                           )
                self.lasagna.returnIngredientByName(objName).addToPlots() #Add item to all three 2D plots                

                print "Adding '%s' layer" % colorOrder[ii]
                self.lasagna.returnIngredientByName(objName).lut=colorOrder[ii]


            self.lasagna.initialiseAxes()

        else:
            self.lasagna.statusBar.showMessage("Unable to find " + str(fname))
Example #21
def load(filename, run):
    '''
    Load image from given path and resizes it.
    '''

    start = time.time()

    try:
        img = Image.open(filename)
        img = np.array(img)
        run['bigtiff'] = False
        size = np.shape(img)
        if size[0]*size[1] >= 89478485:
            run['bigtiff'] = True
    except IOError:
        if 'tif' in run['input_ext']:
            print "File may be a BIGtiff, attempting alternate read..."
            img = tifffile.imread(filename)
            run['bigtiff'] = True
        else:
            raise

    # If image has four channels, convert to three channels
    if img.shape[2] == 4:
        img = cv2.cvtColor(img,cv2.COLOR_BGRA2BGR)

    print np.shape(img)
    end = time.time()
    print 'INFO: images.load() processed %s ( %f seconds)' % (filename, end-start)

    if run['pixel_size_x'] != run['pixel_size_y']:
        print 'INFO: x and y pixel sizes differ, resizing image to have square pixels'
        img = resize(img, run)

    return img
Example #22
def read_clip_write(directory, file_name, all_out_files, out_directory, desired_width, desired_height):
    try:
        prefix, extension = file_name.split('.')
        run_string, site_key = prefix.split("_")
        if site_key + ".npy" in all_out_files: return
        imarray = tf.imread(directory + "\\" + file_name)
        if(any(np.isnan(np.ravel(imarray)))):
            print("NA VALUES")
            return
        width = imarray.shape[0]
        height = imarray.shape[1]
        # default to the full extent so the offsets are defined even when no cropping is needed
        left_offset, right_offset = 0, width
        bottom_offset, top_offset = 0, height
        if width != desired_width:
            excess = width - desired_width
            if excess < 0:
                print("TOO SMALL")
                return
            offset = excess // 2
            if 2 * offset == excess:
                left_offset, right_offset = offset, width - offset
            else:
                left_offset, right_offset = offset + 1, width - offset
        if height != desired_height:
            excess = height - desired_height
            if excess < 0:
                print("TOO SMALL")
                return
            offset = excess // 2
            if 2 * offset == excess:
                bottom_offset, top_offset = offset, height - offset
            else:
                bottom_offset, top_offset = offset + 1, height - offset
        out_array = imarray[left_offset:right_offset, bottom_offset:top_offset, :]
        np.save(out_directory + "\\" + site_key + ".npy", out_array)
    except:
        print("Error")
Example #23
def _process(lock, int_from, int_to, offset, abs_offset, files, projorder, outfile, dsetname, outshape, outtype, 
			crop_top, crop_bottom, crop_left, crop_right, tot_files, provenance_dt, logfilename):
	"""To do...

	"""
	# Process the required subset of images:
	for i in range(int_from, int_to + 1):    
		
		# Read input image:
		t0 = time.time()
		try:
			im = imread(files[i])

			# Crop:
			im = im[crop_top:im.shape[0] - crop_bottom,crop_left:im.shape[1] - crop_right]	

			# Get the timestamp:
			t = os.path.getmtime(files[i])
			t1 = time.time() 					
								
			# Save processed image to HDF5 file (atomic procedure - lock used):
			_write_data(lock, im, i, offset, abs_offset, files[i], t, projorder, tot_files, provenance_dt, 
						outfile, dsetname, outshape, outtype, logfilename, t1 - t0, len(files))
		except:

			io_warning = True

			# Print out execution time:
			log = open(logfilename,"a")
			log.write(os.linesep + "\tError when reading %s. File skipped." % (os.path.basename(files[i])))
			log.close()	

			pass
Example #24
def HSVizualizer(inputDirectory, filenames, outputDirectory):
    """

    Args:
        inputDirectory: (str)
        filenames: (list) filenames to analyze and draw
        outputDirectory: (str) Where to place output

    Returns:

    """
    for filename in filenames:

        raw = tiff.imread(inputDirectory+filename)
        nframes, frame_width, frame_height = raw.shape
        #outstack is in RGB color so we need an extra 3 dimensions
        outStack = np.zeros((nframes-1, frame_width, frame_height, 3), dtype='uint8')

        for i in xrange(nframes-1):
            t1 = time.time()
            print "Start frame "+str(i)+"..."+filename
            frame = raw[i]
            next_frame = raw[i+1]
            flow = cv2.calcOpticalFlowFarneback(frame, next_frame, None, 0.5, 3, 8, 4, 7, 1.5, 0)
            outStack[i] = draw_hsv(flow)

            print "Finish frame "+str(i)+" in "+str(time.time()-t1)+" s."

        #print outStack.shape
        tiff.imsave(outputDirectory+filename+'_HSV.tif', outStack)

    print "All done in "+str(time.time()-t0)+" s"
Example #25
def mapConvert(slide):
    code.interact(local=locals())
    slide = tifffile.imread(slide)
    xx, yy = slide.shape
    newSlide = np.zeros((xx, yy, 3))
    u = []
    for r in xrange(slide.shape[0]):
        for c in xrange(slide.shape[1]):
            if slide[r,c] not in u:
                u.append(slide[r,c])

    number_of_colors = len(u)


    #subprocess.Popen(['./glasbey.py', str(number_of_colors), 'colors.txt'])

    with open('colors.txt') as f:
        colors = f.readlines()

    colors = [[int(x) for x in c[:-1].split(',')] for c in colors]

    map_16_to_8 = dict([(x,y) for x,y in zip(u, colors)])

    for c in map_16_to_8.keys():
        for point in zip(np.where(slide==c)[0],np.where(slide==c)[1]):
            newSlide[point] = map_16_to_8[c]

    code.interact(local=locals())

    return newSlide
Example #26
def relabel_volume(dir, outdir):


    files = sorted(os.listdir(dir))

    out = None
    out_is_there = False

    for f in files:

        i = tif.imread(os.path.join(dir,f))
        if (out_is_there):
            out = numpy.dstack([out, i])

        else:
            out = i
            out_is_there = True


    print '3d volume', out.shape

    import skimage
    from skimage.segmentation import relabel_sequential

    relabeled,fm,im = skimage.segmentation.relabel_sequential(out)

    print 'Max', relabeled.max()

    for z in range(relabeled.shape[2]):
        tif.imsave(os.path.join(outdir,str(z)+'.tif'),relabeled[:,:,z].astype(numpy.uint32))
        print 'stored', z
Example #27
def makeDisplayFile(path):
    img = tifffile.imread(path)

    xx, yy = img.shape
    newImg = np.zeros((xx, yy, 3))
    u = []
    for r in xrange(img.shape[0]):
        for c in xrange(img.shape[1]):
            if img[r,c] not in u:
                u.append(img[r,c])

    number_of_colors = len(u)

    with open('colors.txt') as f:
        colors = f.readlines()

    colors = [[int(x) for x in c[:-1].split(',')] for c in colors]

    map_16_to_8 = dict([(x,y) for x,y in zip(u, colors)])
    map_16_to_8[0] = [0, 0, 0]

    for c in map_16_to_8.keys():
        for point in zip(np.where(img==c)[0],np.where(img==c)[1]):
            newImg[point] = map_16_to_8[c]

    if not os.path.exists('display'):
        os.mkdir('display')

    newPath = 'display/' + path[path.index('/') + 1:]

    cv2.imwrite(newPath, newImg)
    print 'Writing ' + newPath

    return newPath
Example #28
 def imread(_path):
     """ allow loading jpg, png as well as tif
     """
     if os.path.splitext(_path)[1] in ['.tiff', '.tif']:
         return tiff.imread(_path)
     else:
         return cv2.imread(_path,0)
Example #29
def singletiff2multidicom(in_files, dicomdir, plans, out_prefix):
    import DicomIO
    import numpy as np
    import os
    import shutil
    import warnings
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        import tifffile as tiff
    outdir = experiment_dir + '/' + out_prefix + '/' + dicomdir
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    else:
        shutil.rmtree(outdir)
        os.makedirs(outdir)

    # Resolve new frame list
    out_vols = plans
    for file_i in range(len(in_files)):
        print "Reading " + in_files[file_i]
        ds = tiff.imread(in_files[file_i])
        no_slices = ds.shape[0]
        for z_i in range(no_slices):
            out_vols[file_i][z_i].PixelData = ds[z_i].astype(np.uint16).tostring()

    dcmio = DicomIO.DicomIO()
    filenames = dcmio.WriteDICOM_frames(outdir, out_vols, 'IM')

    return outdir, filenames
Example #30
    def x_and_y_vals(self, lock, queue, file_list):

        #x = range(0,len(self.file_list))
        y = []
        label = ""
        func = None

        if self.selection == "sigma":
            func= self.get_stdev
            self.label = "standard deviation"
        elif self.selection == "mean":
            func = self.get_avg_2d
            self.label = "mean"
        elif self.selection == "min":
            func = self.get_min
            self.label = "min"
        elif self.selection == "max":
            func = self.get_max
            self.label = "max"
        elif  self.selection == "total intensity":
            func = self.get_total_intensity
            self.label = "total intensity"

        list_num = file_list.pop(0)
        y.append(list_num)
        for img in file_list:

            lock.acquire()
            print(img)
            lock.release()
            temp_arr = imread(img)

            y.append(func(temp_arr))

        queue.put(y)
Example #31
def save_cycle(input_path_list: List[str], out_path: str, ome_meta: str):
    with tif.TiffWriter(out_path, bigtiff=True) as TW:
        for path in input_path_list:
            TW.save(tif.imread(path),
                    photometric='minisblack',
                    description=ome_meta)
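A minimal usage sketch (hypothetical paths and OME-XML string): each input tiff is written as one page of the bigtiff output:

save_cycle(['cyc1_ch1.tif', 'cyc1_ch2.tif'], 'cycle1.ome.tif', ome_meta='<OME ... />')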
Example #32
def save_memmap(filenames: List[str],
                base_name: str = 'Yr',
                resize_fact: Tuple = (1, 1, 1),
                remove_init: int = 0,
                idx_xy: Tuple = None,
                order: str = 'F',
                xy_shifts: Optional[List] = None,
                is_3D: bool = False,
                add_to_movie: float = 0,
                border_to_0=0,
                dview=None,
                n_chunks: int = 100,
                slices=None) -> str:
    """ Efficiently write data from a list of tif files into a memory mappable file

    Args:
        filenames: list
            list of tif files or list of numpy arrays

        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"

        resize_fact: tuple
            x,y, and z downsampling factors (0.5 means downsampled by a factor 2)

        remove_init: int
            number of frames to remove at the beginning of each tif file
            (used for resonant scanning images if the laser is turned on trial by trial)

        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance
            idx_xy = (slice(150,350,None), slice(150,350,None))

        order: string
            whether to save the file in 'C' or 'F' order

        xy_shifts: list
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping

        is_3D: boolean
            whether it is 3D data

        add_to_movie: floating-point
            value to add to each image point, typically to keep negative values out.

        border_to_0: (undocumented)

        dview:       (undocumented)

        n_chunks:    (undocumented)

        slices: slice object or list of slice objects
            slice can be used to select portion of the movies in time and x,y
            directions. For instance 
            slices = [slice(0,200),slice(0,100),slice(0,100)] will take 
            the first 200 frames and the 100 pixels along x and y dimensions. 
    Returns:
        fname_new: the name of the mapped file, the format is such that
            the name will contain the frame dimensions and the number of frames

    """
    if type(filenames) is not list:
        raise Exception('input should be a list of filenames')

    if slices is not None:
        slices = [slice(0, None) if sl is None else sl for sl in slices]

    if len(filenames) > 1:
        recompute_each_memmap = False
        for file__ in filenames:
            if ('order_' + order not in file__) or ('.mmap' not in file__):
                recompute_each_memmap = True


        if recompute_each_memmap or (remove_init>0) or (idx_xy is not None)\
                or (xy_shifts is not None) or (add_to_movie != 0) or (border_to_0>0)\
                or slices is not None:

            logging.debug('Distributing memory map over many files')
            # Here we make a bunch of memmap files in the right order. Same parameters
            # TODO: Use separate variables to hold the list here vs the string we return
            fname_new = cm.save_memmap_each(filenames,
                                            base_name=base_name,
                                            order=order,
                                            border_to_0=border_to_0,
                                            dview=dview,
                                            resize_fact=resize_fact,
                                            remove_init=remove_init,
                                            idx_xy=idx_xy,
                                            xy_shifts=xy_shifts,
                                            slices=slices,
                                            add_to_movie=add_to_movie)
        else:
            fname_new = filenames

        # The goal is to make a single large memmap file, which we do here
        if order == 'F':
            raise Exception(
                'You cannot merge files in F order, they must be in C order for CaImAn'
            )

        fname_new = cm.save_memmap_join(fname_new,
                                        base_name=base_name,
                                        dview=dview,
                                        n_chunks=n_chunks)

    else:
        # TODO: can be done online
        Ttot = 0
        for idx, f in enumerate(filenames):
            if isinstance(f, str):  # Might not always be filenames.
                logging.debug(f)

            if is_3D:
                Yr = f if not (isinstance(f,
                                          basestring)) else tifffile.imread(f)
                if slices is not None:
                    Yr = Yr[slices]
                else:
                    if idx_xy is None:  #todo remove if not used, superceded by the slices parameter
                        Yr = Yr[remove_init:]
                    elif len(
                            idx_xy
                    ) == 2:  #todo remove if not used, superceded by the slices parameter
                        Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
                    else:  #todo remove if not used, superceded by the slices parameter
                        Yr = Yr[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

            else:
                if isinstance(f, basestring) or isinstance(f, list):
                    Yr = cm.load(f, fr=1, in_memory=True)
                else:
                    Yr = cm.movie(f)
                if xy_shifts is not None:
                    Yr = Yr.apply_shifts(xy_shifts,
                                         interpolation='cubic',
                                         remove_blanks=False)

                if slices is not None:
                    Yr = Yr[slices]
                else:
                    if idx_xy is None:
                        if remove_init > 0:
                            Yr = Yr[remove_init:]
                    elif len(idx_xy) == 2:
                        Yr = Yr[remove_init:, idx_xy[0], idx_xy[1]]
                    else:
                        raise Exception(
                            'You need to set is_3D=True for 3D data)')
                        Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1],
                                          idx_xy[2]]

            if border_to_0 > 0:
                if slices is not None:
                    if type(slices) is list:
                        raise Exception(
                            'You cannot slice in x and y and then use add_to_movie: if you only want to slice in time do not pass in a list but just a slice object'
                        )

                min_mov = Yr.calc_min()
                Yr[:, :border_to_0, :] = min_mov
                Yr[:, :, :border_to_0] = min_mov
                Yr[:, :, -border_to_0:] = min_mov
                Yr[:, -border_to_0:, :] = min_mov

            fx, fy, fz = resize_fact
            if fx != 1 or fy != 1 or fz != 1:
                if 'movie' not in str(type(Yr)):
                    Yr = cm.movie(Yr, fr=1)
                Yr = Yr.resize(fx=fx, fy=fy, fz=fz)

            T, dims = Yr.shape[0], Yr.shape[1:]
            Yr = np.transpose(Yr, list(range(1, len(dims) + 1)) + [0])
            Yr = np.reshape(Yr, (np.prod(dims), T), order='F')
            Yr = np.ascontiguousarray(Yr, dtype=np.float32) + np.float32(
                0.0001) + np.float32(add_to_movie)

            if idx == 0:
                fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(
                    dims[1]) + '_d3_' + str(
                        1 if len(dims) == 2 else dims[2]) + '_order_' + str(
                            order)  # TODO: Rewrite more legibly
                if isinstance(f, str):
                    fname_tot = os.path.join(os.path.split(f)[0], fname_tot)
                if len(filenames) > 1:
                    big_mov = np.memmap(fname_tot,
                                        mode='w+',
                                        dtype=np.float32,
                                        shape=prepare_shape(
                                            (np.prod(dims), T)),
                                        order=order)
                    big_mov[:, Ttot:Ttot + T] = Yr
                    del big_mov
                else:
                    logging.debug('SAVING WITH numpy.tofile()')
                    Yr.tofile(fname_tot)
            else:
                big_mov = np.memmap(fname_tot,
                                    dtype=np.float32,
                                    mode='r+',
                                    shape=prepare_shape(
                                        (np.prod(dims), Ttot + T)),
                                    order=order)

                big_mov[:, Ttot:Ttot + T] = Yr
                del big_mov

            sys.stdout.flush()
            Ttot = Ttot + T

        fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
        try:
            # need to explicitly remove destination on windows
            os.unlink(fname_new)
        except OSError:
            pass
        os.rename(fname_tot, fname_new)

    return fname_new
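A minimal usage sketch (hypothetical movie file; C order is required when merging, per the exception above):

fname_new = save_memmap(['movie.tif'], base_name='Yr', order='C')
# the returned name encodes the shape, e.g. 'Yr_d1_512_d2_512_d3_1_order_C_frames_1000_.mmap'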
Example #33
def run_nuclear_chromatin_feat_ext(raw_image_path:str, labelled_image_path:str, output_dir:str,
                                   calliper_angular_resolution:int = 10, 
                                   measure_simple_geometry:bool = True, 
                                   measure_calliper_distances:bool = True, 
                                   measure_radii_features:bool = True,
                                   step_size_curvature:int = 2, 
                                   prominance_curvature:float = 0.1, 
                                   width_prominent_curvature:int = 5, 
                                   dist_bt_peaks_curvature:int = 10,
                                   measure_int_dist_features:bool = True, 
                                   measure_hc_ec_ratios_features:bool = True, 
                                   hc_threshold:float = 1, 
                                   gclm_lengths:list = [1, 5, 20],
                                   measure_gclm_features: bool = True, 
                                   measure_moments_features: bool = True):
    """
    Function that reads in the raw and segmented/labelled images for a field of view and computes nuclear features.
    Note: this has been used only for DAPI-stained images.
    Args:
        raw_image_path: path pointing to the raw image
        labelled_image_path: path pointing to the segmented image
        output_dir: path where the results need to be stored
    """
    labelled_image = imread(labelled_image_path)
    raw_image = imread(raw_image_path)
    labelled_image = labelled_image.astype(int)
    raw_image = raw_image.astype(int)

    # Insert code for preprocessing image
    # Eg normalize
    # raw_image = cv.normalize(
    #      raw_image, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F
    #  )
    # raw_image[raw_image < 0] = 0.0
    # raw_image[raw_image > 255] = 255.0

    # Get features for the individual nuclei in the image
    props = measure.regionprops(labelled_image, raw_image)
    
    all_features = pd.DataFrame()
    # Measure scikit's built in features
    
    for i in tqdm(range(len(props))):
        all_features = all_features.append(
            pd.concat(
                [pd.DataFrame([i + 1], columns=["label"]),
                 BG.measure_global_morphometrics(props[i].image, 
                                                 angular_resolution = calliper_angular_resolution, 
                                                 measure_simple = measure_simple_geometry,
                                                 measure_calliper = measure_calliper_distances, 
                                                 measure_radii = measure_radii_features).reset_index(drop=True),
                 BLC.measure_curvature_features(props[i].image, step = step_size_curvature, 
                                                prominance = prominance_curvature, 
                                                width = width_prominent_curvature, 
                                                dist_bt_peaks = dist_bt_peaks_curvature).reset_index(drop=True),
                 IDF.measure_intensity_features(props[i].image, props[i].intensity_image, 
                                                measure_int_dist = measure_int_dist_features, 
                                                measure_hc_ec_ratios = measure_hc_ec_ratios_features, 
                                                hc_alpha = hc_threshold).reset_index(drop=True),
                 IT.measure_texture_features(props[i].image, props[i].intensity_image, lengths=gclm_lengths,
                                             measure_gclm = measure_gclm_features,
                                             measure_moments = measure_moments_features)],
                axis=1,
            ),
            ignore_index=True,
        )
   
    #save the output
    all_features.to_csv(output_dir+"/"+labelled_image_path.rsplit('/', 1)[-1][:-4]+".csv")

    return all_features
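A minimal usage sketch (hypothetical paths): compute the default feature set for one field of view; a CSV named after the labelled image is written into output_dir:

features = run_nuclear_chromatin_feat_ext('raw/fov1.tif', 'seg/fov1.tif', 'results')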
Example #34
 def run(self):
     time.sleep(np.random.randint(0, 40))
     ImageWms = WebMapService(self.ImageURL, version='1.1.1', timeout=200)
     ContourWms = WebMapService(self.ContourURL,
                                version='1.1.1',
                                timeout=200)
     x_min = self.x_start
     y_min = self.y_start
     for ii in range(0, self.x_num):
         for jj in range(0, self.y_num):
             ll_x_ = x_min + ii * self.x_stride
             ll_y_ = y_min + jj * self.y_stride
             bbox = (ll_x_, ll_y_, ll_x_ + self.x_stride,
                     ll_y_ + self.y_stride)
             try:
                 img_3 = tiff.imread("%s%f_%f_%f_%f.tif" %
                                     (ImageOutDirectory, bbox[0], bbox[1],
                                      bbox[2], bbox[3]))
                 if (img_3.max() - img_3.min()) < 30:
                     try:
                         img = ImageWms.getmap(layers=['Actueel_ortho25'],
                                               srs='EPSG:4326',
                                               bbox=bbox,
                                               size=(1024, 1024),
                                               format='image/GeoTIFF',
                                               transparent=True)
                     except:
                         self.queue.put(0)
                         continue
                     try:
                         ContourImg = ContourWms.getmap(layers=['aan'],
                                                        srs='EPSG:4326',
                                                        bbox=bbox,
                                                        size=(4096, 4096),
                                                        format='image/png',
                                                        transparent=True)
                     except:
                         self.queue.put(0)
                         continue
                 self.queue.put(1)
                 continue
             except BaseException:
                 try:
                     img = ImageWms.getmap(layers=['Actueel_ortho25'],
                                           srs='EPSG:4326',
                                           bbox=bbox,
                                           size=(1024, 1024),
                                           format='image/GeoTIFF',
                                           transparent=True)
                 except:
                     self.queue.put(0)
                     continue
                 try:
                     ContourImg = ContourWms.getmap(layers=['aan'],
                                                    srs='EPSG:4326',
                                                    bbox=bbox,
                                                    size=(4096, 4096),
                                                    format='image/png',
                                                    transparent=True)
                 except:
                     self.queue.put(0)
                     continue
             filename = "{}_{}_{}_{}.tif".format(bbox[0], bbox[1], bbox[2],
                                                 bbox[3])
             filename2 = "{}_{}_{}_{}.png".format(bbox[0], bbox[1], bbox[2],
                                                  bbox[3])
             with open(self.ImageOutDirectory + filename, 'wb') as out:
                 out.write(img.read())
             with open(self.ContourOutDirectory + filename2, 'wb') as out1:
                 out1.write(ContourImg.read())
             self.queue.put(1)
Example #35
    def from_tiff(
            cls, path: str,
            method: str,
            meta_path: Optional[str] = None,
            axes_order: Optional[str] = None,
            meta_format: Optional[str] = None,
    ):
        """
        Return instance of work environment with ImgData.seq set from the tiff file.

        :param path:        path to the tiff file
        :param method:      one of 'imread', 'asarray', or 'asarray-multi'. Refers to usage of either tifffile.imread
                            or tifffile.asarray. 'asarray-multi' will load multi-page tiff files.
        :param meta_path:   path to a file containing meta data
        :param meta_format: meta data format, must correspond to the name of a function in viewer.core.organize_meta
        :param axes_order:  Axes order as a 3 or 4 letter string for 2D or 3D data respectively.
                            Axes order is assumed to be "txy" or "tzxy" if not specified.
        """

        if method == 'imread':
            seq = tifffile.imread(path)

        elif method == 'asarray':
            tif = tifffile.TiffFile(path, is_nih=True)
            seq = tif.asarray(maxworkers=int(get_sys_config()['_MESMERIZE_N_THREADS']))

        elif method == 'asarray-multi':
            tif = tifffile.TiffFile(path, is_nih=True)

            seq = tif.asarray(key=range(0, len(tif.series)),
                              maxworkers=int(get_sys_config()['_MESMERIZE_N_THREADS']))
        else:
            raise ValueError("Must specify 'imread', 'asarray', or 'asarray-multi' in method argument")

        if (meta_path is not None) and (meta_format is not None):
            try:
                meta = getattr(organize_metadata, meta_format)(meta_path)
            except Exception as e:
                raise TypeError(f"The meta data loader for <{meta_format}> was unable to "
                                f"load the following file:"
                                f"\n\n"
                                f"{meta_path}"
                                f"\n\n"
                                f"Make sure that you have chosen the correct `meta_format` "
                                f"for this file."
                                f"\n\n"
                                f"{traceback.format_exc()}")

            required = ['fps', 'date']

            if not all(k in meta.keys() for k in required):
                raise KeyError(f'Meta data dict must contain all mandatory fields: {required}')

        else:
            meta = None

        # Custom user mapping if specified
        if axes_order is not None:
            if not set(axes_order).issubset({'x', 'y', 't', 'z'}):
                raise ValueError('`axes_order` must only contain "x", "y", "t" and/or "z" characters')

            if len(axes_order) != seq.ndim:
                raise ValueError('number of dims specified by `axes_order` must match dims of image sequence')

            if seq.ndim == 4:
                new_axes = tuple(map({'x': 0, 'y': 1, 't': 2, 'z': 3}.get, axes_order))
                seq = np.moveaxis(seq, (0, 1, 2, 3), new_axes)
            else:
                new_axes = tuple(map({'x': 0, 'y': 1, 't': 2}.get, axes_order))
                seq = np.moveaxis(seq, (0, 1, 2), new_axes)

        # Default mapping
        else:
            # default axes remapping from tzxy
            if seq.ndim == 4:
                # tzxy to xytz
                seq = np.moveaxis(seq, (0, 1, 2, 3), (2, 3, 0, 1))
            else:
                # for 2D
                seq = seq.T

        imdata = ImgData(seq, meta)
        return cls(imdata)
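A minimal usage sketch (the enclosing class name is assumed here, since it is not shown): load a 2D time series stored as t, x, y and let the default branch transpose it to x, y, t:

work_env = WorkEnvironment.from_tiff('/data/movie.tif', method='imread')  # hypothetical class name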
Example #36
 def load(self, ee_product, query: TileDateRangeQuery, subdir="tmp"):
     path = self.save(ee_product, query, subdir=subdir)
     return imread(path) if path else None
Example #37
 def load(self, image_id, query: TileDateRangeQuery, subdir="tmp"):
     path = os.path.join(self.data_subdir(subdir), image_id + ".tif")
     img = imread(path)
     return img
Example #38
    def processFolder(self):

        folder = self.camCalibFolderLineEdit.text()

        darkavg = None
        if os.path.isfile(self.darkAVGLineEdit.text()):
            darkavg = tf.imread(self.darkAVGLineEdit.text())
        elif os.path.isfile(os.path.join(folder, "dark_AVG.tif")):
            darkavg = tf.imread(os.path.join(folder, "dark_AVG.tif"))
        darkstd = None
        if os.path.isfile(self.darkSTDLineEdit.text()):
            darkstd = tf.imread(self.darkSTDLineEdit.text())
        elif os.path.isfile(os.path.join(folder, "dark_STD.tif")):
            darkstd = tf.imread(os.path.join(folder, "dark_STD.tif"))

        if not all([isinstance(a, np.ndarray) for a in (darkavg, darkstd)]):
            if not pathHasPattern(folder, "*dark*.tif*"):
                QtW.QMessageBox.warning(
                    self,
                    "No dark images!",
                    "Camera calibration requires dark images, but none were provided"
                    " and none were detected in the specified folder."
                    " Read documentation on camera calibration for more info.",
                    QtW.QMessageBox.Ok,
                    QtW.QMessageBox.NoButton,
                )
                return

        if sum([isinstance(a, np.ndarray) for a in (darkavg, darkstd)]) == 1:
            if not pathHasPattern(folder, "*dark*.tif*"):
                QtW.QMessageBox.warning(
                    self,
                    "No dark images!",
                    "Camera calibration requires both a dark image average projection, "
                    " and a standard deviation projection, but only one of the"
                    " two was provided, and no *dark*.tif images "
                    " were detected in the specified folder."
                    " Read documentation on camera calibration for more info.",
                    QtW.QMessageBox.Ok,
                    QtW.QMessageBox.NoButton,
                )
                return
            else:
                reply = QtW.QMessageBox.question(
                    self,
                    "No dark images!",
                    "Camera calibration requires both a dark image average projection, "
                    " and a standard deviation projection, but only one of the"
                    " two was provided. *dark*.tif images "
                    " were detected in the specified folder, and will still be "
                    " used to calculate the projection images.  Continue?",
                    QtW.QMessageBox.Yes | QtW.QMessageBox.No,
                    QtW.QMessageBox.No,
                )
                if reply != QtW.QMessageBox.Yes:
                    return

        self.worker, self.thread = newWorkerThread(
            CamCalibWorker,
            folder,
            darkavg,
            darkstd,
            workerConnect={
                "progress": self.incrementProgress,
                "setProgMax": self.resetWithMax,
                "setStatus": self.statusLabel.setText,
                "finished": self.abortButton.hide,
            },
            start=True,
        )
Example #39
def save_memmap(filenames, base_name='Yr', resize_fact=(1, 1, 1), remove_init=0, idx_xy=None, order='F',xy_shifts=None,is_3D=False,add_to_movie=0,border_to_0=0):

    """ Saves efficiently a list of tif files into a memory mappable file
    Parameters
    ----------
        filenames: list
            list of tif files
        base_name: str
            the base used to build the file name. IT MUST NOT CONTAIN "_"    
        resize_fact: tuple
            x,y, and z downampling factors (0.5 means downsampled by a factor 2) 
        remove_init: int
            number of frames to remove at the begining of each tif file (used for resonant scanning images if laser in rutned on trial by trial)
        idx_xy: tuple size 2 [or 3 for 3D data]
            for selecting slices of the original FOV, for instance idx_xy=(slice(150,350,None),slice(150,350,None))
        order: string
            whether to save the file in 'C' or 'F' order     
        xy_shifts: list 
            x and y shifts computed by a motion correction algorithm to be applied before memory mapping    

        is_3D: boolean
            whether it is 3D data
    Return
    -------
        fname_new: the name of the mapped file, the format is such that the name will contain the frame dimensions and the number of f

    """

    #TODO: can be done online    
    Ttot = 0
    for idx, f in enumerate(filenames):
        print(f)

        if is_3D:
            import tifffile                       
            print("Using tifffile library instead of skimage because of  3D")

            if idx_xy is None:
                Yr = tifffile.imread(f)[remove_init:]
            elif len(idx_xy) == 2:
                Yr = tifffile.imread(f)[remove_init:, idx_xy[0], idx_xy[1]]
            else:
                Yr = tifffile.imread(f)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]     

#        elif :
#            
#            if xy_shifts is not None:
#                raise Exception('Calblitz not installed, you cannot motion correct')
#                
#            if idx_xy is None:
#                Yr = imread(f)[remove_init:]
#            elif len(idx_xy) == 2:
#                Yr = imread(f)[remove_init:, idx_xy[0], idx_xy[1]]
#            else:
#                raise Exception('You need to set is_3D=True for 3D data)')                  

        else:

            Yr=cm.load(f,fr=1)            
            if xy_shifts is not None:
                Yr=Yr.apply_shifts(xy_shifts,interpolation='cubic',remove_blanks=False)

            if idx_xy is None:
                if remove_init > 0:
                    Yr = np.array(Yr)[remove_init:]
            elif len(idx_xy) == 2:
                Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1]]
            else:
                raise Exception('You need to set is_3D=True for 3D data)')
                Yr = np.array(Yr)[remove_init:, idx_xy[0], idx_xy[1], idx_xy[2]]

        if border_to_0>0:
            min_mov= Yr.calc_min()
            Yr[:,:border_to_0,:]=min_mov
            Yr[:,:,:border_to_0]=min_mov
            Yr[:,:,-border_to_0:]=min_mov
            Yr[:,-border_to_0:,:]=min_mov

        fx, fy, fz = resize_fact
        if fx != 1 or fy != 1 or fz != 1:

            Yr = cm.movie(Yr, fr=1)
            Yr = Yr.resize(fx=fx, fy=fy, fz=fz)


        T, dims = Yr.shape[0], Yr.shape[1:]
        Yr = np.transpose(Yr, list(range(1, len(dims) + 1)) + [0])
        Yr = np.reshape(Yr, (np.prod(dims), T), order='F')

        if idx == 0:
            fname_tot = base_name + '_d1_' + str(dims[0]) + '_d2_' + str(dims[1]) + '_d3_' + str(
                1 if len(dims) == 2 else dims[2]) + '_order_' + str(order)
            fname_tot = os.path.join(os.path.split(f)[0],fname_tot)         
            big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
                                shape=(np.prod(dims), T), order=order)
        else:
            big_mov = np.memmap(fname_tot, dtype=np.float32, mode='r+',
                                shape=(np.prod(dims), Ttot + T), order=order)
        #    np.save(fname[:-3]+'npy',np.asarray(Yr))

        big_mov[:, Ttot:Ttot + T] = np.asarray(Yr, dtype=np.float32) + 1e-10 + add_to_movie
        big_mov.flush()
        del big_mov
        Ttot = Ttot + T

    fname_new = fname_tot + '_frames_' + str(Ttot) + '_.mmap'
    os.rename(fname_tot, fname_new)

    return fname_new
Example #40
def test_export_regions_to_file(tmpdir):
    image = tifffile.imread(regions_dir / "region.tiff")
    filename = tmpdir / "region.obj"
    region_IO.export_regions_to_file(image, filename, VOXEL_SIZE)

    cmp(regions_dir / "region.obj", tmpdir / "region.obj")
Example #41
Raw_path = os.path.join(basedir, '*TIF')  #tif or TIF be careful

axes = 'ZYX'  #projection axes : 'YX'

filesRaw = glob.glob(Raw_path)


for fname in filesRaw:
    if os.path.exists(fname):
        if not os.path.exists(basedirResults3Dextended + os.path.basename(fname)) \
                or not os.path.exists(basedirResults2Dextended + '_' + os.path.basename(fname)):
            print(fname)
            y = imread(fname)
            y = np.delete(y, np.s_[:9], axis=0)
            restored = RestorationModel.predict(
                y, axes, n_tiles=(1, 2, 4)
            )  #n_tiles is the decomposition of the image in (z,y,x). (1,2,2) will work with light images. The fewer tiles we have, the faster the calculation is
            projection = ProjectionModel.predict(
                restored, axes, n_tiles=(1, 1, 1)
            )  #n_tiles is the decomposition of the image in (z,y,x). The overlapping in the decomposition is managed by the program itself
            axes_restored = axes.replace(ProjectionModel.proj_params.axis, '')
            restored = restored.astype(
                'uint8'
            )  # if prediction and projection running at the same time
            #restored = restored.astype('uint16') # if projection training set creation or waiting for a future projection
            #projection = projection.astype('uint8')
            save_tiff_imagej_compatible(
                (basedirResults3Dextended + os.path.basename(fname)), restored,
Example #42
        assert len(other_bead_pos.shape) == 2
        ax1.scatter(other_bead_pos[:, 2],
                    other_bead_pos[:, 1],
                    c=abs(other_bead_pos[:, 0] - 25),
                    marker="2")
        ax2.scatter(other_bead_pos[:, 0],
                    other_bead_pos[:, 1],
                    c=abs(other_bead_pos[:, 0] - 25),
                    marker="2")
        ax3.scatter(other_bead_pos[:, 2],
                    other_bead_pos[:, 0],
                    c=abs(other_bead_pos[:, 0] - 25),
                    marker="2")

    plt.tight_layout()
    fig.subplots_adjust(wspace=0, hspace=0)
    return fig


if __name__ == "__main__":
    tgt = (imread(
        "K:/beuttenm/repos/lnet/logs/beads/19-08-23_18-32_c307a5a_aux1_/result/test/target/0000.tif"
    )[None, ...] / numpy.iinfo(numpy.uint16).max)
    pred = (imread(
        "K:/beuttenm/repos/lnet/logs/beads/19-08-23_18-32_c307a5a_aux1_/result/test/prediction/0000.tif"
    )[None, ...] / numpy.iinfo(numpy.uint16).max)
    plot_img_projections(tgt)
    plt.show()
    plot_img_projections(pred)
    plt.show()
Esempio n. 43
0
def obj(sigma):
    run(f"{bm3d} {tmp} {sigma} {outname}", shell=True)
    res = imread(outname)
    return ((gt - res)**2).mean()
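`obj` is a scalar objective (mean squared error of the BM3D output against the ground truth `gt`), so `sigma` can be tuned with any one-dimensional optimizer; a sketch, where the search interval is an assumption:

from scipy.optimize import minimize_scalar

# bounds are an assumption; pick an interval bracketing plausible noise levels
res = minimize_scalar(obj, bounds=(1.0, 100.0), method='bounded')
best_sigma = res.x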
Esempio n. 44
0
def imread(name, **kwargs):
    return tifffile.imread(str(name), **kwargs)
Esempio n. 45
0
I2_rectified = cv.remap(I2, right_map1, right_map2, cv.INTER_LINEAR)

minDisparity = 0
numDisparities = 192
SADWindowSize = 3
P1 = 8 * 3 * SADWindowSize**2
# P1 = 10
P2 = 32 * 3 * SADWindowSize**2
# P2 = 200
disp12MaxDiff = 10
preFilterCap = 0
uniquenessRatio = 1
speckleWindowSize = 100
speckleRange = 10

gt_original = tfl.imread(
    '/Users/zhouying/Desktop/三维重建/DATASET/train/d3/k3/left_depth_map.tiff')
gt3 = gt_original[:, :, 2]
gt_rectified = cv.remap(gt3, left_map1, left_map2, cv.INTER_LINEAR)
gt_norm = cv.normalize(gt_rectified[0:, numDisparities:],
                       gt_rectified[0:, numDisparities:],
                       alpha=0,
                       beta=255,
                       norm_type=cv.NORM_MINMAX,
                       dtype=cv.CV_8U)
cv.imwrite("/Users/zhouying/Desktop/gt_norm.png", gt_norm)

imgL = cv.cvtColor(I1_rectified, cv.COLOR_BGR2GRAY)
imgR = cv.cvtColor(I2_rectified, cv.COLOR_BGR2GRAY)

cv.imwrite("/Users/zhouying/Desktop/I1_rectified.png", I1_rectified)
cv.imwrite("/Users/zhouying/Desktop/I2_rectified.png", I2_rectified)
Esempio n. 46
0
def run01():
    p = "/projects/project-broaddus/denoise_experiments/cele/e01/nlm2_3d/"
    for name in sorted(glob(p + '*.tif')):
        print(name)
        img = imread(name)
        save(img[22], name.replace("nlm2_3d/", "nlm2_3d_s22/"))
Esempio n. 47
0
cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
    "", ["white", "lime"])  #lime color makes cells pop

pth = "/jukebox/wang/zahra/h129_qc/thal_transformed_points"

brains_to_inspect = ["20180410_jg51_bl6_lob6b_04", "20180417_jg59_bl6_cri_03"]

brain_pths = [
    os.path.join(os.path.join(pth, xx), "transformed_volume/merged.tif")
    for xx in brains_to_inspect
]

i = 1
name = os.path.basename(os.path.dirname(os.path.dirname(brain_pths[i])))
brain = tifffile.imread(brain_pths[i])
img = brain[200:204, :, :, 1]
cell = brain[200:204, :, :, 0]

#apply x y dilation
r = 2
selem = ball(r)[int(r / 2)]
cell = cell.astype("uint8")
cell = np.asarray(
    [cv2.dilate(cell[i], selem, iterations=1) for i in range(cell.shape[0])])

merged = np.stack([
    np.max(img, axis=0),
    np.max(cell, axis=0),
    np.zeros_like(np.max(cell, axis=0))
], -1)
Esempio n. 48
0
if not os.path.exists(weights_path):
    os.makedirs(weights_path)
weights_path += '/unet_weights.hdf5'

trainIds = [str(i).zfill(2)
            for i in range(1, 25)]  # all available ids: from "01" to "24"

if __name__ == '__main__':
    X_DICT_TRAIN = dict()
    Y_DICT_TRAIN = dict()
    X_DICT_VALIDATION = dict()
    Y_DICT_VALIDATION = dict()

    print('Reading images')
    for img_id in trainIds:
        img_m = normalize(
            tiff.imread('./data/mband/{}.tif'.format(img_id)).transpose(
                [1, 2, 0]))
        mask = tiff.imread('./data/gt_mband/{}.tif'.format(img_id)).transpose(
            [1, 2, 0]) / 255
        train_xsz = int(
            3 / 4 *
            img_m.shape[0])  # use 75% of image as train and 25% for validation
        X_DICT_TRAIN[img_id] = img_m[:train_xsz, :, :]
        Y_DICT_TRAIN[img_id] = mask[:train_xsz, :, :]
        X_DICT_VALIDATION[img_id] = img_m[train_xsz:, :, :]
        Y_DICT_VALIDATION[img_id] = mask[train_xsz:, :, :]
        print(img_id + ' read')
    print('Images were read')

    def train_net():
        print("start train net")
        x_train, y_train = get_patches(X_DICT_TRAIN,
Esempio n. 49
0
    def dehaze(self, image, slide=0, level=5, threshold=10, substitute=1):
        '''
        Method to dehaze an image from an image stack (tiff file) using wavelet decomposition
        :param image: tiff file
        :param slide: int, index of the chosen slide in the image stack
        :param level: int, number of decomposition levels
        :param threshold: float, threshold on the original image(!) which unmasks areas on the dehazed image
        :param substitute: float, substitute value used in thresholding
        :return: 2d array, the dehazed image; also available via WaveletDehaze.return_image_dehazed()
        '''

        self.level = level
        # read image
        img_raw_ = tf.imread(image, key=slide)
        img_raw = WaveletDehaze.__make_arr_quadratic(img_raw_)
        self.original_image = img_raw_

        # pre-process input image
        if (0, 0) == (np.shape(img_raw)[0] % 2, np.shape(img_raw)[1] % 2):
            img = img_raw
        elif (0, 1) == (np.shape(img_raw)[0] % 2, np.shape(img_raw)[1] % 2):
            img = img_raw[:, :-1]
        elif (1, 0) == (np.shape(img_raw)[0] % 2, np.shape(img_raw)[1] % 2):
            img = img_raw[:-1, :]
        elif (1, 1) == (np.shape(img_raw)[0] % 2, np.shape(img_raw)[1] % 2):
            img = img_raw[:-1, :-1]
        else:
            raise ValueError('Error404: This should not happen!')

        # mean_w(f(t)) = mean_w(img)
        coeffs_raw = pywt.wavedec2(img,
                                   self.wavelet,
                                   self.mode,
                                   level=self.level)
        low_freq = coeffs_raw[0]
        coeffs_low = [low_freq]
        for x in range(self.level):
            coeffs_low.append((None, None, None))
        mean_w_img = pywt.waverec2(coeffs_low, self.wavelet, self.mode)

        # calculate difference d(t)
        difference = np.subtract(
            img, WaveletDehaze.__make_array_fitting(mean_w_img, img))

        # mean_w(d(t))^2
        coeffs_d_raw = pywt.wavedec2(difference,
                                     self.wavelet,
                                     self.mode,
                                     level=self.level)
        low_freq_d = coeffs_d_raw[0]
        coeffs_d_low = [low_freq_d]
        for x in range(self.level):
            coeffs_d_low.append((None, None, None))
        mean_w_d = pywt.waverec2(coeffs_d_low, self.wavelet, self.mode)

        # calculate a(t)
        a_t = np.sqrt(np.multiply(2, np.square(mean_w_d)))

        # calculate img_rec = e(t)
        img_rec = np.add(difference,
                         WaveletDehaze.__make_array_fitting(a_t, difference))

        # unmask reconstructed image
        self.dehazed_image = WaveletDehaze.__unmask(self.original_image,
                                                    img_rec,
                                                    threshold=threshold,
                                                    substitute=substitute)
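The core of `dehaze` is a wavelet low-pass: decompose, keep only the coarsest approximation, reconstruct. The same trick in isolation, with zeroed detail coefficients instead of the `(None, None, None)` placeholders used above (wavelet and mode here are assumptions):

import numpy as np
import pywt

def wavelet_lowpass(img, wavelet='db4', mode='periodization', level=5):
    # keep the level-n approximation, zero out every detail band
    coeffs = pywt.wavedec2(img, wavelet, mode=mode, level=level)
    coeffs = [coeffs[0]] + [tuple(np.zeros_like(c) for c in d) for d in coeffs[1:]]
    return pywt.waverec2(coeffs, wavelet, mode=mode)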
Esempio n. 50
0
Work with TensorFlow 1.x.
In Google Colab, add this as your first line:
%tensorflow_version 1.x
"""

import numpy as np
from matplotlib import pyplot as plt
from patchify import patchify
import tifffile as tiff

#All 165 images
#large_image_stack = tiff.imread('full_dataset/images/mitochondria_train_01.tif')
#large_mask_stack = tiff.imread('full_dataset/masks/mitochondria_train_masks_01.tif')

#12 images only
large_image_stack = tiff.imread('small_dataset_for_training/images/12_training_mito_images.tif')
large_mask_stack = tiff.imread('small_dataset_for_training/masks/12_training_mito_masks.tif')

print(large_image_stack.shape)

all_img_patches = []
for img in range(large_image_stack.shape[0]):
    # print(img)  # uncomment to track which slice index is being processed
     
    large_image = large_image_stack[img]
    
    patches_img = patchify(large_image, (256, 256), step=256)  # step=256 with 256x256 patches means no overlap
    

    for i in range(patches_img.shape[0]):
        for j in range(patches_img.shape[1]):
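The loop is truncated in the source; for reference, a self-contained sketch of the same non-overlapping patch extraction (the input shape is an assumption):

import numpy as np
from patchify import patchify

image = np.zeros((768, 1024), dtype=np.uint8)    # stand-in for one stack slice
patches = patchify(image, (256, 256), step=256)  # -> shape (3, 4, 256, 256)
flat = patches.reshape(-1, 256, 256)             # one 256x256 patch per row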
Esempio n. 51
0
                    json_data[sliced_tif_filename] = {}
                    if WRITE_TIF == 1:
                        tifffile.imsave(
                            planet_dir + 'Planet_Data_Sliced/tif/' +
                            sliced_tif_filename, temp_for_tiff)
                        print('TIF file saved successfully. Filename: ',
                              sliced_tif_filename)

                    #------------------------ saving jpeg from saved tif data -----------------------

                    # Re-reading the saved tiff is redundant (the same data is available
                    # directly from the gdal bands data1..data4, as in the block above),
                    # but it verifies that the tiffs were written correctly.

                    raw_im_data = tifffile.imread(planet_dir +
                                                  'Planet_Data_Sliced/tif/' +
                                                  sliced_tif_filename)
                    # print (raw_im_data.shape, max(raw_im_data.flatten())) (4, xBlockSize, yBlockSize), 65536 ==> 65536 = 16 bit

                    raw_rgb_im = raw_im_data[0:3, :, :]
                    raw_rgb_im = raw_rgb_im.transpose(
                        [1, 2, 0]
                    )  # to transform 3, xBlockSize, yBlockSize to xBlockSize, yBlockSize, 3
                    enhanced_im2 = np.uint8(enhance_img(
                        raw_rgb_im /
                        256))  # scale 16-bit (0..65535) values down to 8-bit (0..255)

                    if SHOW_IMG == 1:
                        tifffile.imshow(enhanced_im2)

                    image = Image.fromarray(enhanced_im2)
Esempio n. 52
0
def stitch_plane(
        path_list: List[Path],
        page: int,
        x_nblocks: int,
        y_nblocks: int,
        block_shape: list,
        dtype,
        overlap: int,
        padding: dict,
        remap_dict: dict = None) -> Tuple[Image, Union[np.ndarray, None]]:

    x_axis = -1
    y_axis = -2

    block_x_size = block_shape[x_axis] - overlap * 2
    block_y_size = block_shape[y_axis] - overlap * 2

    big_image_x_size = (x_nblocks *
                        block_x_size) - padding["left"] - padding["right"]
    big_image_y_size = (y_nblocks *
                        block_y_size) - padding["top"] - padding["bottom"]

    big_image_shape = (big_image_y_size, big_image_x_size)
    big_image = np.zeros(big_image_shape, dtype=dtype)

    previous_tile_max = 0
    tile_additions = np.zeros((y_nblocks, x_nblocks), dtype=dtype)
    print('n blocks x,y:', (x_nblocks, y_nblocks))
    print('plane shape x,y:', big_image_shape[::-1])
    n = 0
    for i in range(0, y_nblocks):
        ver_f = i * block_y_size
        ver_t = ver_f + block_y_size

        for j in range(0, x_nblocks):
            hor_f = j * block_x_size
            hor_t = hor_f + block_x_size

            big_image_slice, block_slice = get_slices(big_image, hor_f, hor_t,
                                                      ver_f, ver_t, padding,
                                                      overlap)
            block = tif.imread(path_to_str(path_list[n]),
                               key=page).astype(dtype)

            if remap_dict is not None:
                block[np.nonzero(block)] += previous_tile_max

            big_image[tuple(big_image_slice)] = block[tuple(block_slice)]

            if remap_dict is not None:
                tile_additions[i, j] = previous_tile_max

                # update previous tile max
                non_zero_selection = block[np.nonzero(block)]
                if len(non_zero_selection) > 0:
                    previous_tile_max = non_zero_selection.max()

            n += 1
    if remap_dict is None:
        tile_additions = None
    return big_image, tile_additions
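The `previous_tile_max` bookkeeping keeps label IDs globally unique across tiles: each tile's nonzero labels are offset by the running maximum before being written into the mosaic. The same idea on toy arrays:

import numpy as np

tiles = [np.array([[0, 1], [1, 2]]), np.array([[2, 0], [1, 1]])]

previous_max = 0
relabelled = []
for tile in tiles:
    tile = tile.copy()
    tile[np.nonzero(tile)] += previous_max  # shift labels past all earlier tiles
    nonzero = tile[np.nonzero(tile)]
    if nonzero.size > 0:
        previous_max = nonzero.max()
    relabelled.append(tile)
# first tile keeps labels 1..2, second tile is remapped to 3..4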
Esempio n. 53
0
import os
import h5py
import numpy as np
import tifffile as tf
import stia.motion_correction as mc
import stia.utility.image_analysis as ia

zstack_fn = 'FOV2_projection_site_zstack_red.tif'

curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)

zstack = tf.imread(zstack_fn)

step_offsets = [[0., 0.]]  # offsets between adjacent steps

print('calculating step offsets ...')
for step_i in range(1, zstack.shape[0]):
    curr_offset = mc.phase_correlation(zstack[step_i], zstack[step_i - 1])
    step_offsets.append(curr_offset)
step_offsets = np.array([np.array(so) for so in step_offsets],
                        dtype=np.float32)
print('\nstep offsets:')
print(step_offsets)

print('\ncalculating final offsets ...')
final_offsets_y = np.cumsum(step_offsets[:, 0])
final_offsets_x = np.cumsum(step_offsets[:, 1])
final_offsets = np.array([final_offsets_x, final_offsets_y],
                         dtype=np.float32).transpose()
print('\nfinal offsets:')
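Pairwise offsets followed by a cumulative sum give each plane's absolute offset relative to the first. A sketch of the same pattern with scikit-image standing in for `stia.motion_correction` (an assumption about what `phase_correlation` returns):

import numpy as np
from skimage.registration import phase_cross_correlation

def cumulative_offsets(stack):
    # (y, x) offset of every plane relative to plane 0 of a 3D stack
    steps = [np.zeros(2)]
    for i in range(1, stack.shape[0]):
        shift, _, _ = phase_cross_correlation(stack[i - 1], stack[i])
        steps.append(shift)
    return np.cumsum(np.array(steps, dtype=np.float32), axis=0)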
Esempio n. 54
0
def main():
	args = parser.parse_args()
	mask_dim = args.crop_dim
	
	device = torch.device("cuda") if torch.cuda.is_available() else \
						 torch.device("cpu") # Setting device

	model_path = os.path.join(args.data_dir, 'saved_models', args.model)
	# Load the pretrained model
	saved_data = torch.load(model_path)
	if args.ternaus:
		model = TernausNetV2(num_classes = num_classes).to(device)
	else:
		model = unet.UNet(args.num_channels, num_classes).to(device)
	model.load_state_dict(saved_data["model_state_dict"])
	done_epochs = saved_data["epochs"]
	best_metric = saved_data["best_metric"]

	predicted_labels = []
	predicted_labels_images = {}
	true_labels = []

	if not os.path.exists(args.out_dir):
		os.mkdir(args.out_dir)

	for file in os.listdir(args.test_data):
		if not file.endswith('.tif'):
			continue
		print("Processing file: {}".format(file))
		
		base_image = tiff.imread(os.path.join(args.test_data, file))
		base_image = base_image.astype(float)
		num_channels = base_image.shape[2]
		for i in range(num_channels):
			base_image[:,:,i] = (base_image[:,:,i]-mean[i])/std[i]

		margin = mask_dim // 10
		if args.crop_end:
			base_image = np.moveaxis(np.array([np.pad(base_image[:,:,channel],\
			 ((margin,margin),(margin,margin)), 'reflect') \
					for channel in range(num_channels)]), 0, 2)

		
		pred_image = return_pred_image(args, base_image, model, device)
		predicted_labels_images[file] = pred_image

		pred_image = np.argmax(pred_image, 2)

		color_image = np.zeros([base_image.shape[0], base_image.shape[1], 3])
		for ix,iy in np.ndindex(pred_image.shape):
			color_image[ix, iy, :] = map_dict[pred_image[ix, iy]]
		im = Image.fromarray(np.uint8(color_image))
		
		im.save(os.path.join(args.out_dir, file))
		
		if args.label_data is not None:
			file_name = file.split('.')[0]
			label_path = os.path.join(args.label_data, '{}.npy'.\
								format(file_name))
			predicted_labels.extend(list(pred_image.reshape([-1])))
			flat_true_label = np.expand_dims(np.load(label_path),
								axis=2).reshape(-1)
			true_labels.extend(list(flat_true_label))
	
	
	if args.label_data is not None:
		predicted_labels = np.array(predicted_labels)
		true_labels = np.array(true_labels)
		print("Accuracy on these images : {}".format(
			sklearn.metrics.accuracy_score(predicted_labels, true_labels)))
		print("Cohen kappa score on these images : {}".format(
			sklearn.metrics.cohen_kappa_score(predicted_labels, true_labels)))			
		print("Confusion matrix on these images : {}".format(
			sklearn.metrics.confusion_matrix(true_labels, predicted_labels)))
		print("Precision recall class F1 : {}".format(
			sklearn.metrics.precision_recall_fscore_support(true_labels, 
												predicted_labels)))

	if args.pkl_dir is not None:
		with open(os.path.join(args.pkl_dir, args.model+'.pkl'),'wb') as f:
			pkl.dump(predicted_labels_images, f)
Esempio n. 55
0
label = []
data_test = []
label_test = []

# path to the images
PATH_IMG = '../tif/tif_train_percentile'
# path to the masks; point this at the car masks or the building masks
PATH_MASK = '../masks/masks_build_train'
# remember to change the output file path below as well

tif_list = os.listdir(PATH_IMG)

if __name__ == "__main__":
    count = 0
    for count, tif_name in enumerate(tif_list):
        img = tiff.imread(os.path.join(PATH_IMG, tif_name))
        mask = tiff.imread(os.path.join(PATH_MASK, tif_name))
        # .transpose([1, 2, 0])

        img_norm = img - img.mean()
        img_norm /= img_norm.std()

        im_size_x, im_size_y = img_norm.shape

        # from each image, cut 50 crops of size 256x256 at random locations
        for i in range(50):
            # pick a random point on the image such that a 256x256 crop
            # fits below and to the right of it
            x, y = np.random.randint(0, im_size_x - 257), np.random.randint(
                0, im_size_y - 257)
            im_crop = img_norm[x:x + 256, y:y + 256]
Esempio n. 56
0
def arr_gen():
    for fp in in_filepath_list:
        yield tifffile.imread(fp)
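`arr_gen` reads the files lazily, one per iteration; a possible consumer, assuming every file shares the same shape:

import numpy as np

stack = np.stack(list(arr_gen()))  # (n_files, ...) -- materializes everything in memory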
Esempio n. 57
0
def train2(batch_size=100, lr=0.0002, max_epoches=50):

    x = tf.placeholder(tf.float32, shape=[None, FLAGS.hidden_size * 2])
    y = tf.placeholder(tf.float32, shape=[None, 1])

    w1 = tf.Variable(tf.truncated_normal(
        shape=[FLAGS.mid_hidden_size * 2, 200], stddev=0.5),
                     name="weight1")
    b1 = tf.Variable(tf.zeros(shape=[200]), name="bias1")
    layer1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = tf.Variable(tf.truncated_normal(shape=[200, 1], stddev=0.5),
                     name="weight2")
    b2 = tf.Variable(tf.zeros(shape=[1]), name="bias2")
    layer2 = tf.matmul(layer1, w2) + b2

    pred = tf.sigmoid(layer2)

    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))
    loss = tf.reduce_sum(
        tf.nn.weighted_cross_entropy_with_logits(targets=y,
                                                 logits=layer2,
                                                 pos_weight=59))
    tf.summary.scalar("loss", loss)
    global_step = tf.Variable(0, name="global_step", trainable=False)
    optimizer = tf.train.GradientDescentOptimizer(lr).minimize(
        loss, global_step=global_step)

    correct = (np.abs(y - pred) < 0.05)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    tf.summary.scalar("accuracy", accuracy)
    summary_op = tf.summary.merge_all()
    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()

    sv = tf.train.Supervisor(logdir=FLAGS.save_path,
                             init_op=init_op,
                             summary_op=summary_op,
                             saver=saver,
                             save_model_secs=600,
                             global_step=global_step)
    config_proto = tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=False)

    FILE_2015 = '/home/zyt/data/TianChi/20171105_quarterfinals/quarterfinals_2015.tif'
    FILE_2017 = '/home/zyt/data/TianChi/20171105_quarterfinals/quarterfinals_2017.tif'
    FILE_label = '/home/zyt/data/TianChi/label/label1110.tif'

    im_2015 = np.array(tiff.imread(FILE_2015).transpose([1, 2, 0]),
                       dtype=np.float32)
    im_2017 = np.array(tiff.imread(FILE_2017).transpose([1, 2, 0]),
                       dtype=np.float32)
    label = np.array(tiff.imread(FILE_label), dtype=np.float32)

    with sv.managed_session(config=config_proto) as sess:
        for epoch in range(max_epoches):
            total_loss = 0
            total_accuracy = 0
            for step, (batch_x, batch_y) in enumerate(
                    produce_images_by_dae.whole_pic_iterator2(
                        im_2015, im_2017, label)):
                _, l, accu = sess.run([optimizer, loss, accuracy],
                                      feed_dict={
                                          x: batch_x,
                                          y: batch_y
                                      })

                total_loss += l
                avg_loss = total_loss / (step + 1)
                total_accuracy += accu
                avg_accu = total_accuracy / (step + 1)
                if step % 50 == 0:
                    print(
                        "Epoch %d: at step %d, average loss is %f, accuracy is %f."
                        % (epoch, step, avg_loss, avg_accu))
        if FLAGS.save_path:
            print("Saving model to %s." % FLAGS.save_path)
            sv.saver.save(sess, FLAGS.save_path, global_step=sv.global_step)
            print("Save successfully!")

        # Export tensorflow serving
        sess.graph._unsafe_unfinalize()
        export_path = os.path.join(
            tf.compat.as_bytes(FLAGS.model_path),
            tf.compat.as_bytes(str(FLAGS.model_version)))
        builder = saved_model_builder.SavedModelBuilder(export_path)
        prediction_inputs = {
            'input': tf.saved_model.utils.build_tensor_info(x)
        }
        prediction_outputs = {
            'output': tf.saved_model.utils.build_tensor_info(pred)
        }
        prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=prediction_inputs,
            outputs=prediction_outputs,
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
        builder.add_meta_graph_and_variables(
            sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_signature': prediction_signature,
            })
        sess.graph.finalize()
        builder.save()
        print("Done export!")
Esempio n. 58
0
def transform_to_TFOR(fpath_seg,
                      fpaths_lm,
                      n_points=3000,
                      verbose=False,
                      show=False):
    """Transform a prim's cellular point clouds to the corresponding tissue
    frame of reference (TFOR), i.e. the primordium frame of reference (PFOR).

    For downstream processing, cellular point clouds must be brought to a
    common frame of reference. In the case of the lateral line primordium, one
    such frame of reference is that of the tissue as a whole.

    This function determines how a given primordium would have to be trans-
    formed to align it with the image axes. By doing this, different prims are
    automatically registered (without a "registration proper"). The function
    then transforms the given prim's cellular point cloud accordingly, thus
    bringing them into a common "tissue frame of reference" with any other cell
    point cloud that has been transformed in the same way.

    The transformation is detected by performing a PCA, which will detect the
    major axes of the primordium. PCs are flipped to align positively with the
    corresponding image axis and sorted to be in the same order as the image
    dimensions. Cell point clouds are then transformed by the same PCA.

    The origin of the frame of reference is left untouched in y and z (after
    the PCA it will be in the center of the tissue) and is set to the very tip
    of the segmentation mask in x (registration to the tip of the prim).

    In addition to saving the transformed landmarks, the function writes the
    transformed centroids, the PCA object itself and the origin and transformed
    origin to the stack metadata.

    WARNING: The approach used here has been developed for the Zebrafish
    posterior lateral line primordium. It is likely not readily applicable to
    other tissues!

    Parameters
    ----------
    fpath_seg : string
        The path (either local from cwd or global) to a tif file containing
        the single-cell segmentation stack. The total masked volume (all cells)
        is used for determining the frame of reference.
    fpaths_lm : list or string
        A path or list of paths (either local from cwd or global) to npy files
        containing cellular landmarks as generated by
        `katachi.tools.assign_landmarks`.
    n_points : int, optional, Default 3000
        The number of landmarks extracted from the overall segmentation mask.
        These landmarks are used to determine the transform.
    verbose : bool, optional, default False
        If True, more information is printed.
    show : bool, optional, default False
        If True, 3D plots of the PCA transform and the origin are produced.
    """

    #--------------------------------------------------------------------------

    ### Load data

    if verbose: print "Loading data..."

    # Try loading the segmentation stack
    try:
        img_seg = imread(fpath_seg)
    except Exception:
        print("Attempting to load segmentation stack failed with this error:")
        raise

    # Check dimensionality
    if not img_seg.ndim == 3:
        raise IOError("Expected a 3D segmentation stack, got " +
                      str(img_seg.ndim) + "D instead.")

    # Try loading the landmark data
    if isinstance(fpaths_lm, str):
        fpaths_lm = [fpaths_lm]
    lm_data = {}
    for fpath_lm in fpaths_lm:
        try:
            lm_data[fpath_lm] = np.load(fpath_lm)
        except Exception:
            print("Attempting to load landmark data failed with this error:")
            raise

    # Try loading centroid data and resolution information
    try:
        dirpath, fname = os.path.split(fpath_seg)
        fpath_meta = os.path.join(dirpath, fname[:10] + "_stack_metadata.pkl")
        with open(fpath_meta, 'rb') as metafile:
            meta_dict = pickle.load(metafile)
        centroids = meta_dict["centroids"]
        res = meta_dict["resolution"]
    except Exception:
        print("Getting centroid positions / resolution failed with this error:")
        raise

    #--------------------------------------------------------------------------

    ### Create point cloud of tissue mask using ISLA

    if verbose: print "Creating tissue point cloud..."

    # Run ISLA
    cloud = isla(img_seg > 0, n_points, seed=42)

    # Change from pixels to um
    cloud = cloud * np.array(res)

    #--------------------------------------------------------------------------

    ### Find and transform to a common frame of reference
    #   Note: Here, this is done by finding the major axes of the point cloud
    #         by PCA and then transforming them so they are aligned with the
    #         axes of the microscopy image. In this way, all prims will end up
    #         in the same FOR without the need for a proper registration.

    if verbose: print "Finding common frame of reference..."

    # Fit PCA model to data
    pca = PCA()
    pca.fit(cloud)

    # Ensure that the sign of PCs is consistent with the image orientation
    # Note: Given that the images are always acquired in the same orientations,
    #       a matching orientation can be ensured by finding the highest
    #       contributing image axis for each PC, and invert the PC if that
    #       contribution is negative. In other words, ensuring for each PC that
    #       it is positively correlated with the corresponding image axis.

    # Find highest contributions of image axes to each PC
    # Note: This asks "which image axis contributes the most to each PC?"
    max_weights = np.argmax(np.abs(pca.components_), axis=1)

    # Get the signs of the highest contributions
    signs = np.sign(pca.components_[np.arange(pca.components_.shape[0]),
                                    max_weights])

    # Using the signs, flip those PCs where the sign is negative
    pca.components_ = pca.components_ * signs[:, np.newaxis]

    # Match the order of PCs to the order of image dimensions (zyx)
    # Note: Following the transform, the PCs will be sorted according to
    #       explained variance. Instead, they should be sorted in order of the
    #       highest contributing image dimension.

    # Find indices for zyx-sorting of transformed data
    # Note: This asks "which PC is most contributed to by each image axis?"
    zyx_sort = np.argmax(np.abs(pca.components_), axis=0)

    # If the image is not a proper prim where (with extents x>y>z),
    # this sort may fail terminally; if so, abort!
    if not np.all(np.sort(zyx_sort) == np.array([0, 1, 2])):
        raise Exception("Primordium mask extents are not x>y>z in " +
                        fpath_seg)

    # Transform cloud to PFOR with matching signs and sort to zyx order
    cloud_tf = pca.transform(cloud)[:, zyx_sort]

    # Get PCs and explained variance for visualization
    # Note: np.copy may not be necessary but I want to be careful about the
    #       mutability of pca's attributes...
    PCs = np.copy(pca.components_.T)
    PCvars = np.copy(pca.explained_variance_ratio_)

    # Print results
    if verbose:
        print('  PCs:')
        print('   ', str(PCs).replace('\n', '\n    '))
        print('  Explained variance:')
        print('   ', str(PCvars))

    # Plot the outcome
    if show:

        # Prepare PC axis dots
        extents = [np.min(cloud_tf, axis=0), np.max(cloud_tf, axis=0)]
        pcax_tf = np.zeros((300, 3))
        for d in range(3):
            pcax_tf[d * 100:(d + 1) * 100, d] = np.linspace(extents[0][d],
                                                            extents[1][d],
                                                            num=100)
        pcax = pca.inverse_transform(pcax_tf[:, np.argsort(zyx_sort)])

        # Plot stuff (before transform)
        fig, ax = point_cloud_3D(cloud[:, 2],
                                 cloud[:, 1],
                                 cloud[:, 0],
                                 fin=False,
                                 title="Before Transform",
                                 xlbl='x',
                                 ylbl='y',
                                 zlbl='z')
        point_cloud_3D(pcax[:, 2],
                       pcax[:, 1],
                       pcax[:, 0],
                       c='r',
                       init=False,
                       pre_fig=fig,
                       pre_ax=ax)

        # After transform
        fig, ax = point_cloud_3D(cloud_tf[:, 2],
                                 cloud_tf[:, 1],
                                 cloud_tf[:, 0],
                                 fin=False,
                                 title="After Transform",
                                 xlbl='x',
                                 ylbl='y',
                                 zlbl='z')
        point_cloud_3D(pcax_tf[:, 2],
                       pcax_tf[:, 1],
                       pcax_tf[:, 0],
                       c='r',
                       init=False,
                       pre_fig=fig,
                       pre_ax=ax)

    #--------------------------------------------------------------------------

    ### Find the proper origin
    # Note: In transformed space, y and z are already nicely centered so they
    #       can be used for the origin directly (i.e. one can just use 0). For
    #       x, the tip position is used as it is the most straightforwardly
    #       extractable common reference point. Note that the tip position is
    #       measured from the segmentation, not from the point cloud.

    # Initialize the origin point
    origin_tf = np.zeros(3)

    # Find tip (in real space), transform it, add x component to origin
    ind = np.where(img_seg > 0)  # Indices of the mask
    tip_x = np.max(ind[2])  # Index of greatest x (tip)
    tip_point = np.array((ind[0][tip_x], ind[1][tip_x], tip_x))  # Tip in 3D
    tip_point = tip_point * np.array(res)  # In microns
    transf_tip_point = pca.transform(tip_point.reshape((1, 3)))[0,
                                                                zyx_sort]  # tf
    origin_tf[2] = transf_tip_point[2]  # Assign x component as origin

    # Inverse transform to get origin in imaging space
    # (including the x and y components, which are 0 in PFOR)
    origin = pca.inverse_transform(origin_tf[np.argsort(zyx_sort)])

    # Show the origin
    if show:
        fig, ax = point_cloud_3D(origin[2],
                                 origin[1],
                                 origin[0],
                                 c='r',
                                 s=200,
                                 fin=False,
                                 config=False)
        point_cloud_3D(cloud[:, 2],
                       cloud[:, 1],
                       cloud[:, 0],
                       init=False,
                       pre_fig=fig,
                       pre_ax=ax,
                       title="Origin Point",
                       xlbl='x',
                       ylbl='y',
                       zlbl='z')

    #--------------------------------------------------------------------------

    ### Transform data so origin lies at (0,0,0)

    # Transformation
    cloud_tf = cloud_tf - origin_tf

    #--------------------------------------------------------------------------

    ### Transform the cell point clouds to the PFOR

    if verbose: print "Transforming cells to common frame of reference..."

    # First, also transform the centroids to the PFOR
    centroids_tf = pca.transform(centroids)[:, zyx_sort] - origin_tf

    # For each landmark dataset...
    lm_data_tf = {}
    for key in lm_data.keys():

        # For each cell...
        cell_tf = np.zeros_like(lm_data[key])
        for i in range(cell_tf.shape[0]):

            # Translate the point cloud to the position in the image FOR
            cell_cloud = lm_data[key][i, :, :] + centroids[i]

            # Transform the cell cloud to PFOR
            cell_cloud = pca.transform(cell_cloud)[:, zyx_sort] - origin_tf

            # Set the cloud's origin to the PFOR centroid
            cell_tf[i, :, :] = cell_cloud - centroids_tf[i]

        # Add to the new data dict
        lm_data_tf[key] = cell_tf

    #--------------------------------------------------------------------------

    ### Write results

    if verbose: print "Saving results..."

    # Write the transformed landmark point clouds
    for key in lm_data_tf.keys():
        np.save(key[:-4] + "_TFOR", lm_data_tf[key])

    # Write origins, the PCA object and the transformed centroids to metadata
    meta_dict["origin"] = origin
    meta_dict["originTFOR"] = origin_tf
    meta_dict["TFOR_PCA"] = pca
    meta_dict["centroids_TFOR"] = centroids_tf
    with open(fpath_meta, 'wb') as metafile:
        pickle.dump(meta_dict, metafile, pickle.HIGHEST_PROTOCOL)

    # Report and return
    if verbose: print "Processing complete!"
    return
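The sign convention used above (flip each PC so its dominant loading is positive) is a generic way to make PCA orientations reproducible across specimens; a minimal standalone sketch:

import numpy as np
from sklearn.decomposition import PCA

def orient_pca(points):
    # fit a PCA, then flip each component whose largest-|weight| axis is negative
    pca = PCA().fit(points)
    max_idx = np.argmax(np.abs(pca.components_), axis=1)
    signs = np.sign(pca.components_[np.arange(len(max_idx)), max_idx])
    pca.components_ = pca.components_ * signs[:, np.newaxis]
    return pca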
Esempio n. 59
0
# cmd line inputs
# Remember sys.argv[0] is the name of the python script
bgID = str(sys.argv[1])

folder = '/xfel/ffs/dat/ue_191123_FXS/reduced'

#==============================================================================
# Main program
#==============================================================================

# Summing all tiffs
tiffList = glob.glob1(folder + '/r' + str(bgID).zfill(3), '*.tif')

for j, nm in enumerate(tiffList):
    fpath = os.path.join(folder, 'r' + str(bgID).zfill(3), nm)
    img = tifffile.imread(fpath)
    if j:
        total += img
    else:
        total = img

# Summing all shots
csvNm = 'run' + str(bgID).zfill(3) + '_on_s' + str(1).zfill(3) + '.csv'
fpath = os.path.join(folder, 'r' + str(bgID).zfill(3), csvNm)
dfon = pd.read_csv(fpath, index_col=0)
onshots = np.nansum(dfon['shots'])

csvNm = 'run' + str(bgID).zfill(3) + '_off_s' + str(1).zfill(3) + '.csv'
fpath = os.path.join(folder, 'r' + str(bgID).zfill(3), csvNm)
dfoff = pd.read_csv(fpath, index_col=0)
offshots = np.nansum(dfoff['shots'])
Esempio n. 60
0
def load_tif_perframe(fname, fid):
    return imread(fname, key=fid)
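Usage is plain page-wise random access into a multipage tiff, reading only the requested frame from disk (the file name below is a placeholder):

frame = load_tif_perframe('movie.tif', 10)  # eleventh page only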