Example #1
def process_image(input_img_path, output_img_path, projection, method):

    # Grab a free process
    with ProcessManager.process():

        # Initialize the BioReader and BioWriter
        with BioReader(input_img_path, max_workers=ProcessManager._active_threads) as br, \
            BioWriter(output_img_path, metadata=br.metadata, max_workers=ProcessManager._active_threads) as bw:

            # output image is 2d
            bw.Z = 1

            # Iterate over tiles along the x and y dimensions
            for x in range(0, br.X, tile_size):
                x_max = min([br.X, x + tile_size])

                for y in range(0, br.Y, tile_size):
                    y_max = min([br.Y, y + tile_size])

                    ProcessManager.submit_thread(projection,
                                                 br,
                                                 bw, (x, x_max), (y, y_max),
                                                 method=method)

            ProcessManager.join_threads()
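
These examples come from plugins built around the same ProcessManager, which appears to be preadator's; that attribution, and the process-side method names in the sketch below, are assumptions to verify against the installed package. A minimal driver that would feed process_image one image per process (tile_size is the module-level constant the function reads):

from pathlib import Path

from preadator import ProcessManager  # assumed home of ProcessManager

tile_size = 1024  # assumed module-level tile size used by process_image

def main(inp_dir: Path, out_dir: Path, projection, method) -> None:
    # Hypothetical driver: start the process pool, submit one process
    # per image, then wait. init_processes/submit_process are assumed
    # counterparts of the submit_thread/join_threads calls used above.
    ProcessManager.init_processes('main')

    for img in sorted(inp_dir.iterdir()):
        ProcessManager.submit_process(process_image,
                                      img,
                                      out_dir.joinpath(img.name),
                                      projection,
                                      method)

    ProcessManager.join_processes()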
Example #2
def write_slide(self):

    with ProcessManager.process(f'{self.base_path} - {self.output_depth}'):

        ProcessManager.submit_thread(self._write_slide)

        ProcessManager.join_threads()
Example #3
def image_to_zarr(inp_image: Path, out_dir: Path) -> None:

    with ProcessManager.process():

        with BioReader(inp_image) as br:

            # The extension to replace depends only on the input name,
            # so compute it once before looping
            extension = "".join([
                suffix for suffix in inp_image.suffixes[-2:]
                if len(suffix) < 5
            ])

            # Loop through timepoints
            for t in range(br.T):

                # Loop through channels
                for c in range(br.C):

                    out_path = out_dir.joinpath(
                        inp_image.name.replace(extension, FILE_EXT))
                    if br.C > 1:
                        out_path = out_dir.joinpath(
                            out_path.name.replace(FILE_EXT,
                                                  f"_c{c}" + FILE_EXT))
                    if br.T > 1:
                        out_path = out_dir.joinpath(
                            out_path.name.replace(FILE_EXT,
                                                  f"_t{t}" + FILE_EXT))

                    with BioWriter(
                            out_path,
                            max_workers=ProcessManager._active_threads,
                            metadata=br.metadata,
                    ) as bw:

                        bw.C = 1
                        bw.T = 1
                        bw.channel_names = [br.channel_names[c]]

                        # Loop through z-slices
                        for z in range(br.Z):

                            # Loop down the rows (y) of the image
                            for y in range(0, br.Y, TILE_SIZE):
                                y_max = min([br.Y, y + TILE_SIZE])

                                # Refresh worker counts in case the
                                # ProcessManager freed more threads
                                bw.max_workers = ProcessManager._active_threads
                                br.max_workers = ProcessManager._active_threads

                                # Loop across the columns (x) of the image
                                for x in range(0, br.X, TILE_SIZE):
                                    x_max = min([br.X, x + TILE_SIZE])

                                    bw[y:y_max, x:x_max, z:z + 1, 0,
                                       0] = br[y:y_max, x:x_max, z:z + 1, c, t]
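
The output naming above is compact but easy to misread. A standalone sketch of the same logic (FILE_EXT = '.zarr' is an assumption; the constant is defined elsewhere in the plugin):

from pathlib import Path

FILE_EXT = ".zarr"  # assumed value of the plugin's constant

def output_name(inp_image: Path, c: int, t: int, n_c: int, n_t: int) -> str:
    """Reproduce the naming logic above for a single (c, t) pair."""
    # Keep at most the last two short suffixes, e.g. '.ome.tif', while
    # ignoring long dotted tokens that are not real extensions
    extension = "".join(s for s in inp_image.suffixes[-2:] if len(s) < 5)
    name = inp_image.name.replace(extension, FILE_EXT)
    if n_c > 1:
        name = name.replace(FILE_EXT, f"_c{c}" + FILE_EXT)
    if n_t > 1:
        name = name.replace(FILE_EXT, f"_t{t}" + FILE_EXT)
    return name

# output_name(Path('image.ome.tif'), c=1, t=0, n_c=3, n_t=2)
# -> 'image_c1_t0.zarr'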
Example #4
def assemble_image(vector_path: pathlib.Path, out_path: pathlib.Path,
                   depth: int) -> None:
    """Assemble a 2d or 3d image

    This method assembles one image from one stitching vector. It can
    assemble both 2d and z-stacked 3d images. It is intended to run as
    a process to parallelize stitching of multiple images.

    The basic approach to stitching is:
    1. Parse the stitching vector and extract the image dimensions.
    2. Generate a thread for each subsection (supertile) of an image.

    Args:
        vector_path: Path to the stitching vector
        out_path: Path to the output directory
        depth: depth of the input images
    """

    # Grab a free process
    with ProcessManager.process():

        # Parse the stitching vector
        parsed_vector = _parse_stitch(vector_path, timesliceNaming)

        # Initialize the output image
        with BioReader(parsed_vector['filePos'][0]['file']) as br:
            bw = BioWriter(out_path.joinpath(parsed_vector['name']),
                           metadata=br.metadata,
                           max_workers=ProcessManager._active_threads)
            bw.x = parsed_vector['width']
            bw.y = parsed_vector['height']
            bw.z = depth

        # Assemble the images
        ProcessManager.log('Begin assembly')

        for z in range(depth):
            ProcessManager.log(f'Assembling Z position : {z}')
            for x in range(0, parsed_vector['width'], chunk_size):
                # Max x-pixel index in the assembled image
                X_range = min(x + chunk_size, parsed_vector['width'])
                for y in range(0, parsed_vector['height'], chunk_size):
                    # Max y-pixel index in the assembled image
                    Y_range = min(y + chunk_size, parsed_vector['height'])

                    ProcessManager.submit_thread(make_tile, x, X_range, y,
                                                 Y_range, z, parsed_vector, bw)

            ProcessManager.join_threads()

        bw.close()
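
For reference, these are the only fields of _parse_stitch's return value that this example reads; the real parser presumably returns more (for instance, the per-tile positions make_tile consumes). The values are illustrative:

parsed_vector = {
    'name': 'assembled.ome.tif',   # output file name
    'width': 4096,                 # assembled image width in pixels
    'height': 4096,                # assembled image height in pixels
    'filePos': [
        {'file': '/path/to/tile_r0_c0.ome.tif'},  # plus position fields
        # ...
    ],
}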
Example #5
def unshade_batch(files: typing.List[dict],
                  out_dir: Path,
                  brightfield: Path,
                  darkfield: Path,
                  photobleach: typing.Optional[Path] = None):

    if photobleach is not None:
        with open(photobleach, 'r') as f:
            reader = csv.reader(f)
            photo_offset = {
                line[0]: float(line[1])
                for line in reader if line[0] != 'file'
            }
        offset = np.mean(list(photo_offset.values()))
    else:
        offset = None

    with ProcessManager.process():

        with BioReader(brightfield, max_workers=2) as bf:
            brightfield_image = bf[:, :, :, 0, 0].squeeze()

        with BioReader(darkfield, max_workers=2) as df:
            darkfield_image = df[:, :, :, 0, 0].squeeze()

        for file in files:

            if photobleach is not None:
                pb = photo_offset[file['file']]
            else:
                pb = None

            ProcessManager.submit_thread(unshade_image, file['file'], out_dir,
                                         brightfield_image, darkfield_image,
                                         pb, offset)

        ProcessManager.join_threads()
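
The photobleach CSV consumed here matches what basic (Example #6 below) writes: a 'file,offset' header followed by one filename/offset pair per row. A standalone reader mirroring the dict comprehension above (the path is hypothetical):

import csv

# Expected file contents, for example:
#   file,offset
#   img_r000_c000.ome.tif,101.7
#   img_r000_c001.ome.tif,98.2
with open('photobleach_offsets.csv', 'r') as f:
    photo_offset = {row[0]: float(row[1])
                    for row in csv.reader(f) if row[0] != 'file'}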
Example #6
def basic(files: typing.List[dict],
          out_dir: Path,
          metadata_dir: typing.Optional[Path] = None,
          darkfield: bool = False,
          photobleach: bool = False):

    # Try to infer a filename
    try:
        pattern = infer_pattern([f['file'].name for f in files])
        fp = FilePattern(files[0]['file'].parent,pattern)
        base_output = fp.output_name()
        
    # Fall back to the first filename
    except Exception:
        base_output = files[0]['file'].name
        
    extension = ''.join(files[0]['file'].suffixes)

    with ProcessManager.process(base_output):

        # Load files and sort
        ProcessManager.log('Loading and sorting images...')
        img_stk,X,Y = _get_resized_image_stack(files)
        img_stk_sort = np.sort(img_stk)
        
        # Initialize options
        new_options = _initialize_options(img_stk_sort,darkfield,OPTIONS)

        # Initialize flatfield/darkfield matrices
        ProcessManager.log('Beginning flatfield estimation')
        flatfield_old = np.ones((new_options['size'],new_options['size']),dtype=np.float64)
        darkfield_old = np.random.normal(size=(new_options['size'],new_options['size'])).astype(np.float64)
        
        # Optimize until the change in values is below tolerance or a maximum number of iterations is reached
        for w in range(new_options['max_reweight_iterations']):
            # Optimize using the inexact augmented Lagrangian multiplier method with an L1 loss
            A, E1, A_offset = _inexact_alm_l1(copy.deepcopy(img_stk_sort),new_options)

            # Calculate the flatfield/darkfield images and update training weights
            flatfield, darkfield, new_options = _get_flatfield_and_reweight(A,E1,A_offset,new_options)

            # Calculate the change in flatfield and darkfield images between iterations
            mad_flat = np.sum(np.abs(flatfield-flatfield_old))/np.sum(np.abs(flatfield_old))
            temp_diff = np.sum(np.abs(darkfield - darkfield_old))
            if temp_diff < 10**-7:
                mad_dark = 0
            else:
                mad_dark = temp_diff/np.max(np.sum(np.abs(darkfield_old)),initial=10**-6)
            flatfield_old = flatfield
            darkfield_old = darkfield

            # Stop optimizing if the change in flatfield/darkfield is below threshold
            ProcessManager.log('Iteration {} loss: {}'.format(w+1,mad_flat))
            if np.max(mad_flat,initial=mad_dark) < new_options['reweight_tol']:
                break

        # Calculate photobleaching effects if specified
        if photobleach:
            pb = _get_photobleach(copy.deepcopy(img_stk),flatfield,darkfield)

        # Resize images back to original image size
        ProcessManager.log('Saving outputs...')
        flatfield = cv2.resize(flatfield,(Y,X),interpolation=cv2.INTER_CUBIC).astype(np.float32)
        if new_options['darkfield']:
            darkfield = cv2.resize(darkfield,(Y,X),interpolation=cv2.INTER_CUBIC).astype(np.float32)
        
        # Export the flatfield image as a tiled tiff
        flatfield_out = base_output.replace(extension,'_flatfield' + extension)
        
        with BioReader(files[0]['file'],max_workers=2) as br:
            metadata = br.metadata
        
        with BioWriter(out_dir.joinpath(flatfield_out),metadata=metadata,max_workers=2) as bw:
            bw.dtype = np.float32
            bw.x = X
            bw.y = Y
            bw[:] = np.reshape(flatfield,(Y,X,1,1,1))
        
        # Export the darkfield image as a tiled tiff
        if new_options['darkfield']:
            darkfield_out = base_output.replace(extension,'_darkfield' + extension)
            with BioWriter(out_dir.joinpath(darkfield_out),metadata=metadata,max_workers=2) as bw:
                bw.dtype = np.float32
                bw.x = X
                bw.y = Y
                bw[:] = np.reshape(darkfield,(Y,X,1,1,1))
            
        # Export the photobleaching components as csv
        if photobleach:
            offsets_out = base_output.replace(extension,'_offsets.csv')
            with open(metadata_dir.joinpath(offsets_out),'w') as fw:
                fw.write('file,offset\n')
                for f,o in zip(files,pb[0,:].tolist()):
                    # f is a filepattern dict; write its file entry, not the dict
                    fw.write("{},{}\n".format(f['file'],o))
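
The stopping rule inside the reweighting loop condenses to a single test: the relative L1 change of the flatfield, plus the darkfield change normalized by a guarded denominator. A sketch of the same test:

import numpy as np

def converged(flatfield, flatfield_old, darkfield, darkfield_old, tol):
    """Mirror the stopping test used in the reweighting loop above."""
    # Relative L1 change of the flatfield estimate
    mad_flat = (np.sum(np.abs(flatfield - flatfield_old))
                / np.sum(np.abs(flatfield_old)))
    # Absolute darkfield change, normalized by a guarded denominator
    temp_diff = np.sum(np.abs(darkfield - darkfield_old))
    if temp_diff < 1e-7:
        mad_dark = 0.0
    else:
        mad_dark = temp_diff / max(np.sum(np.abs(darkfield_old)), 1e-6)
    # Stop once both changes fall below the reweighting tolerance
    return max(mad_flat, mad_dark) < tol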
Example #7
def extract_fovs(file_path: Path,
                 out_path: Path):
    """ Extract individual FOVs from a czi file

    When a CZI file is loaded by BioFormats, the images are generally
    mosaicked together by stage position if the file was captured with that
    intent. At the time this function was written, there was no clear way
    of extracting individual FOVs, so this algorithm was created.
    
    Every field of view in each z-slice, channel, and timepoint contained in a
    CZI file is saved as an individual image.

    Args:
        file_path (Path): Path to CZI file
        out_path (Path): Path to output directory
    """
    
    with ProcessManager.process(file_path.name):
        
        logger.info('Starting extraction from ' + str(file_path) + '...')

        if Path(file_path).suffix != '.czi':
            raise TypeError("Path must be to a czi file.")
            
        base_name = Path(file_path.name).stem
        
        # Load files without mosaicing
        czi = czifile.CziFile(file_path,detectmosaic=False)
        subblocks = [s for s in czi.filtered_subblock_directory if s.mosaic_index is not None]
        
        ind = {'X': [],
               'Y': [],
               'Z': [],
               'C': [],
               'T': [],
               'Row': [],
               'Col': []}
        
        # Get the indices of each FOV
        for s in subblocks:
            # Keep only subblocks from the first scene, if scenes exist
            scene = [dim.start for dim in s.dimension_entries if dim.dimension=='S']
            if scene and scene[0] != 0:
                continue
            
            for dim in s.dimension_entries:
                if dim.dimension=='X':
                    ind['X'].append(dim.start)
                elif dim.dimension=='Y':
                    ind['Y'].append(dim.start)
                elif dim.dimension=='Z':
                    ind['Z'].append(dim.start)
                elif dim.dimension=='C':
                    ind['C'].append(dim.start)
                elif dim.dimension=='T':
                    ind['T'].append(dim.start)
                    
        row_conv = {y: row for row, y in enumerate(np.unique(ind['Y']))}
        col_conv = {x: col for col, x in enumerate(np.unique(ind['X']))}
        
        ind['Row'] = [row_conv[y] for y in ind['Y']]
        ind['Col'] = [col_conv[x] for x in ind['X']]
        
        with BioReader(file_path) as br:
            
            metadata = br.metadata
            chan_names = br.cnames
        
        for i,s in enumerate(subblocks):
            
            Z = None if len(ind['Z'])==0 else ind['Z'][i]
            C = None if len(ind['C'])==0 else ind['C'][i]
            T = None if len(ind['T'])==0 else ind['T'][i]
        
            out_file_path = out_path.joinpath(_get_image_name(base_name,
                                                              row=ind['Row'][i],
                                                              col=ind['Col'][i],
                                                              Z=Z,
                                                              C=C,
                                                              T=T))
            
            dims = [_get_image_dim(s,'Y'),
                    _get_image_dim(s,'X'),
                    _get_image_dim(s,'Z'),
                    _get_image_dim(s,'C'),
                    _get_image_dim(s,'T')]
            
            data = s.data_segment().data().reshape(dims)
            
            # Guard the channel-name lookup: C is None when the CZI has
            # no channel dimension
            write_thread(out_file_path,
                         data,
                         metadata,
                         chan_names[0 if C is None else C])
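
The row/col assignment above is a rank transform: each unique Y (or X) stage start maps to its rank in sorted order. A toy run of the same logic:

import numpy as np

# Stage Y-starts for five FOVs spread over three grid rows
ys = [0, 2048, 0, 2048, 4096]

# np.unique returns sorted unique values, so enumerate assigns ranks,
# exactly as row_conv/col_conv do above
row_conv = {y: row for row, y in enumerate(np.unique(ys))}

rows = [row_conv[y] for y in ys]
# row_conv == {0: 0, 2048: 1, 4096: 2} and rows == [0, 1, 0, 1, 2]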
Example #8
def _merge_layers(input_files,output_path):
    
    with ProcessManager.process(output_path.name):

        # Get the number of layers to stack
        z_size = 0
        for f in input_files:
            with BioReader(f['file']) as br:
                z_size += br.z
                
        # Get some basic info about the files to stack
        with BioReader(input_files[0]['file']) as br:

            # Get the physical z-spacing if available
            ps_z = br.ps_z
            
            # If the z-distances are undefined, average the x and y together
            if None in ps_z:
                # Get the size and units for x and y
                x_val,x_units = br.ps_x
                y_val,y_units = br.ps_y
                
                # Convert x and y values to the same units and average
                z_val = (x_val*UNITS[x_units] + y_val*UNITS[y_units])/2
                
                # Set z units to the smaller of the units between x and y
                z_units = x_units if UNITS[x_units] < UNITS[y_units] else y_units
                
                # Convert z to the proper unit scale
                z_val /= UNITS[z_units]
                ps_z = (z_val,z_units)
                ProcessManager.log('Could not find physical z-size. Using the average of x & y {}.'.format(ps_z))

            # Hold a reference to the metadata once the file gets closed
            metadata = br.metadata

        # Create the output file within a context manager
        with BioWriter(output_path,metadata=metadata,max_workers=ProcessManager._active_threads) as bw:

            # Adjust the dimensions before writing
            bw.z = z_size
            bw.ps_z = ps_z

            # ZIndex tracking for the output file
            zi = 0

            # Start stacking
            for file in input_files:

                # Open an image
                with BioReader(file['file'],max_workers=ProcessManager._active_threads) as br:

                    # Open z-layers one at a time
                    for z in range(br.z):

                        # Use tiled reading in x&y to conserve memory
                        # At most, [chunk_size, chunk_size] pixels are loaded
                        for xs in range(0,br.x,chunk_size):
                            xe = min([br.x,xs + chunk_size])

                            for ys in range(0,br.y,chunk_size):
                                ye = min([br.y,ys + chunk_size])

                                bw[ys:ye,xs:xe,zi:zi+1,...] = br[ys:ye,xs:xe,z:z+1,...]

                        zi += 1

                # update the BioWriter in case the ProcessManager found more threads
                bw.max_workers = ProcessManager._active_threads
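
The UNITS mapping is assumed to give each unit string its scale relative to a common base so that x and y sizes can be averaged in one scale and converted back; a minimal sketch with assumed keys and base unit:

# Assumed shape of the module-level UNITS table (base unit: meters)
UNITS = {'m': 1.0, 'cm': 1e-2, 'mm': 1e-3, 'µm': 1e-6, 'nm': 1e-9}

x_val, x_units = 0.65, 'µm'
y_val, y_units = 0.65, 'µm'

# Average x and y in the base scale, as _merge_layers does
z_val = (x_val*UNITS[x_units] + y_val*UNITS[y_units])/2

# Pick the finer of the two units, then convert back into it
z_units = x_units if UNITS[x_units] < UNITS[y_units] else y_units
z_val /= UNITS[z_units]   # -> 0.65, i.e. ps_z == (0.65, 'µm')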