Example #1
def write_thread(out_file_path: Path,
                 data: np.ndarray,
                 metadata: OmeXml,
                 chan_name: str):
    """ Thread for saving images

    This function is intended to be run inside a threadpool to save an image.

    Args:
        out_file_path (Path): Path to an output file
        data (np.ndarray): FOV to save
        metadata (OmeXml): Metadata for the image
        chan_name (str): Name of the channel
    """
        
    ProcessManager.log(f'Writing: {out_file_path.name}')
    
    with BioWriter(out_file_path,metadata=metadata) as bw:
        
        bw.X = data.shape[1]
        bw.Y = data.shape[0]
        bw.Z = 1
        bw.C = 1
        bw.cnames = [chan_name]
        
        bw[:] = data
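
A minimal driver sketch for the function above; `br` (an open BioReader), `out_dir`, and the surrounding ProcessManager setup are assumptions and not part of the original code:

# Hypothetical driver: save each channel of an open image as its own file.
with ProcessManager.process():
    for c in range(br.C):
        chan_name = br.cnames[c]
        out_file_path = out_dir.joinpath(f'{chan_name}.ome.tif')
        data = np.squeeze(br[:, :, 0, c, 0])   # one channel as a 2D array
        ProcessManager.submit_thread(write_thread,
                                     out_file_path, data, br.metadata, chan_name)

    ProcessManager.join_threads()   # wait for all writer threads to finish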
Example #2
def assemble_image(vector_path: pathlib.Path, out_path: pathlib.Path,
                   depth: int) -> None:
    """Assemble a 2d or 3d image

    This method assembles one image from one stitching vector. It can
    assemble both 2d and z-stacked 3d images. It is intended to run as
    a process to parallelize stitching of multiple images.

    The basic approach to stitching is:
    1. Parse the stitching vector and extract the image dimensions
    2. Generate a thread for each subsection (supertile) of an image.

    Args:
        vector_path: Path to the stitching vector
        out_path: Path to the output directory
        depth: depth of the input images
    """

    # Grab a free process
    with ProcessManager.process():

        # Parse the stitching vector
        parsed_vector = _parse_stitch(vector_path, timesliceNaming)

        # Initialize the output image
        with BioReader(parsed_vector['filePos'][0]['file']) as br:
            bw = BioWriter(out_path.joinpath(parsed_vector['name']),
                           metadata=br.metadata,
                           max_workers=ProcessManager._active_threads)
            bw.x = parsed_vector['width']
            bw.y = parsed_vector['height']
            bw.z = depth

        # Assemble the images
        ProcessManager.log('Begin assembly')

        for z in range(depth):
            ProcessManager.log(f'Assembling Z position : {z}')
            for x in range(0, parsed_vector['width'], chunk_size):
                # Max x-pixel index in the assembled image
                X_range = min(x + chunk_size, parsed_vector['width'])
                for y in range(0, parsed_vector['height'], chunk_size):
                    # Max y-pixel index in the assembled image
                    Y_range = min(y + chunk_size, parsed_vector['height'])

                    ProcessManager.submit_thread(make_tile, x, X_range, y,
                                                 Y_range, z, parsed_vector, bw)

            ProcessManager.join_threads()

        bw.close()
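
A sketch of how `assemble_image` might be driven over a directory of stitching vectors; the directory names and glob pattern below are assumptions. Since the docstring notes the function is intended to run as a process, a real driver would submit these calls to a process pool rather than loop sequentially:

from pathlib import Path

stitch_dir = Path('stitching-vectors')   # hypothetical directory of vectors
out_dir = Path('output')                 # hypothetical output directory

for vector_path in sorted(stitch_dir.glob('*global-positions-*.txt')):
    assemble_image(vector_path, out_dir, depth=1)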
Example #3
def label_cython(input_path: Path, output_path: Path, connectivity: int):
    """ Label the input image and writes labels back out.

    Args:
        input_path: Path to input image.
        output_path: Path for output image.
        connectivity: Connectivity kind.
    """
    with ProcessManager.thread() as active_threads:
        with BioReader(
                input_path,
                max_workers=active_threads.count,
        ) as reader:

            with BioWriter(
                    output_path,
                    max_workers=active_threads.count,
                    metadata=reader.metadata,
            ) as writer:
                # Load an image and convert to binary
                image = numpy.squeeze(reader[..., 0, 0])

                if not numpy.any(image):
                    writer.dtype = numpy.uint8
                    writer[:] = numpy.zeros_like(image, dtype=numpy.uint8)
                    return

                image = (image > 0)
                if connectivity > image.ndim:
                    ProcessManager.log(
                        f'{input_path.name}: Connectivity is not less than or equal to the number of image dimensions, '
                        f'skipping this image. connectivity={connectivity}, ndim={image.ndim}'
                    )
                    return

                # Run the labeling algorithm
                labels = ftl.label_nd(image, connectivity)

                # Save the image
                writer.dtype = labels.dtype
                writer[:] = labels
    return True
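
A sketch of a sequential caller for `label_cython`; the directories are assumptions. Note that connectivity must not exceed the image dimensionality (e.g. 1 or 2 for a 2D image), otherwise the function logs a message and skips the file:

from pathlib import Path

input_dir = Path('input')     # hypothetical input collection
output_dir = Path('output')   # hypothetical output collection

for input_path in sorted(input_dir.iterdir()):
    label_cython(input_path, output_dir.joinpath(input_path.name), connectivity=1)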
Example #4
def label_thread(input_path, output_path, connectivity):
    """ Label the input image and write the labels back out.

    This variant acquires a thread from the ProcessManager before reading,
    labeling, and writing the image.

    Args:
        input_path: Path to the input image.
        output_path: Path for the output image.
        connectivity: Connectivity kind.
    """

    with ProcessManager.thread() as active_threads:
        with bfio.BioReader(input_path,
                            max_workers=active_threads.count) as br:
            with bfio.BioWriter(output_path,
                                max_workers=active_threads.count,
                                metadata=br.metadata) as bw:

                # Load an image and convert to binary
                image = (br[..., 0, 0] > 0).squeeze()

                if connectivity > image.ndim:
                    ProcessManager.log(
                        "{}: Connectivity is not less than or equal to the number of image dimensions, skipping this image. connectivity={}, ndim={}"
                        .format(input_path.name, connectivity, image.ndim))
                    return

                # Run the labeling algorithm
                labels = ftl.label_nd(image, connectivity)

                # Save the image
                bw.dtype = labels.dtype
                bw[:] = labels
Example #5
def _parse_stitch(stitchPath: pathlib.Path,
                  timepointName: bool = False) -> dict:
    """ Load and parse image stitching vectors

    This function parses the data from a stitching vector, then extracts the
    relevant image sizes for each image in the stitching vector to obtain a
    stitched image size. This function also infers an output file name.

    Args:
        stitchPath: A path to stitching vectors
        timepointName: Use the vector timeslice as the image name
    Returns:
        Dictionary with keys (width, height, name, filePos)
    """

    # Initialize the output
    out_dict = { 'width': int(0),
                 'height': int(0),
                 'name': '',
                 'filePos': []}

    # Try to parse the stitching vector using the inferred file pattern
    if fp.pattern != '.*':
        vp = filepattern.VectorPattern(stitchPath,fp.pattern)
        unique_vals = {k.upper():v for k,v in vp.uniques.items() if len(v)==1}
        files = fp.get_matching(**unique_vals)

    else:

        # Try to infer a pattern from the stitching vector
        try:
            vector_files = filepattern.VectorPattern(stitchPath,'.*')
            pattern = filepattern.infer_pattern([v[0]['file'] for v in vector_files()])
            vp = filepattern.VectorPattern(stitchPath,pattern)

        # Fall back to universal filepattern
        except ValueError:
            vp = filepattern.VectorPattern(stitchPath,'.*')

        files = fp.files

    file_names = [f['file'].name for f in files]

    for file in vp():

        if file[0]['file'] not in file_names:
            continue

        stitch_groups = {k:get_number(v) for k,v in file[0].items()}
        stitch_groups['file'] = files[0]['file'].with_name(stitch_groups['file'])

        # Get the image size
        stitch_groups['width'], stitch_groups['height'] = BioReader.image_size(stitch_groups['file'])

        # Set the stitching vector values in the file dictionary
        out_dict['filePos'].append(stitch_groups)

    # Calculate the output image dimensions
    out_dict['width'] = max([f['width'] + f['posX'] for f in out_dict['filePos']])
    out_dict['height'] = max([f['height'] + f['posY'] for f in out_dict['filePos']])

    # Generate the output file name
    if timepointName:
        global_regex = ".*global-positions-([0-9]+).txt"
        name = re.match(global_regex,pathlib.Path(stitchPath).name).groups()[0]
        name += '.ome.tif'
        out_dict['name'] = name
        ProcessManager.job_name(out_dict['name'])
        ProcessManager.log('Setting output name to timepoint slice number.')
    else:
        # Try to infer a good filename
        try:
            out_dict['name'] = vp.output_name()
            ProcessManager.job_name(out_dict['name'])
            ProcessManager.log('Inferred output file name from vector.')

        # A file name couldn't be inferred, default to the first image name
        except Exception:
            ProcessManager.log('Could not infer an output file name from the vector; using the first file name in the stitching vector instead.')
            for file in vp():
                out_dict['name'] = file[0]['file']
                break
            ProcessManager.job_name(out_dict['name'])

    return out_dict
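
A sketch of the dictionary `_parse_stitch` returns, using made-up values for a 2x2 grid of 1024x1024 tiles; `vector_path` is an assumption (see Example #2 for how the result is consumed):

parsed = _parse_stitch(vector_path, timepointName=False)

# parsed['filePos'] holds one entry per tile, for example:
#   {'file': Path('x01_y00.ome.tif'), 'posX': 1024, 'posY': 0,
#    'width': 1024, 'height': 1024, ...}
# and the canvas is sized so that every tile fits:
#   parsed['width']  == max(posX + width)  == 2048
#   parsed['height'] == max(posY + height) == 2048
print(parsed['name'], parsed['width'], parsed['height'])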
Example #6
def basic(files: typing.List[Path],
          out_dir: Path,
          metadata_dir: typing.Optional[Path] = None,
          darkfield: bool = False,
          photobleach: bool = False):
    """ Estimate flatfield and darkfield components for a set of images.

    This function runs a BaSiC-style flatfield estimation: it iteratively
    optimizes flatfield/darkfield matrices with an inexact augmented
    Lagrangian multiplier method and, optionally, estimates photobleaching
    offsets.

    Args:
        files: List of file dictionaries (each with a 'file' key) to process
        out_dir: Directory for the output flatfield/darkfield images
        metadata_dir: Directory for the photobleach offsets csv
        darkfield: If True, estimate a darkfield image
        photobleach: If True, estimate and export photobleaching offsets
    """

    # Try to infer a filename
    try:
        pattern = infer_pattern([f['file'].name for f in files])
        fp = FilePattern(files[0]['file'].parent,pattern)
        base_output = fp.output_name()
        
    # Fall back to the first filename
    except Exception:
        base_output = files[0]['file'].name
        
    extension = ''.join(files[0]['file'].suffixes)

    with ProcessManager.process(base_output):

        # Load files and sort
        ProcessManager.log('Loading and sorting images...')
        img_stk,X,Y = _get_resized_image_stack(files)
        img_stk_sort = np.sort(img_stk)
        
        # Initialize options
        new_options = _initialize_options(img_stk_sort,darkfield,OPTIONS)

        # Initialize flatfield/darkfield matrices
        ProcessManager.log('Beginning flatfield estimation')
        flatfield_old = np.ones((new_options['size'],new_options['size']),dtype=np.float64)
        darkfield_old = np.random.normal(size=(new_options['size'],new_options['size'])).astype(np.float64)
        
        # Optimize until the change in values is below tolerance or a maximum number of iterations is reached
        for w in range(new_options['max_reweight_iterations']):
            # Optimize with the inexact augmented Lagrangian multiplier method using an L1 loss
            A, E1, A_offset = _inexact_alm_l1(copy.deepcopy(img_stk_sort),new_options)

            # Calculate the flatfield/darkfield images and update training weights
            flatfield, darkfield, new_options = _get_flatfield_and_reweight(A,E1,A_offset,new_options)

            # Calculate the change in flatfield and darkfield images between iterations
            mad_flat = np.sum(np.abs(flatfield-flatfield_old))/np.sum(np.abs(flatfield_old))
            temp_diff = np.sum(np.abs(darkfield - darkfield_old))
            if temp_diff < 10**-7:
                mad_dark = 0
            else:
                mad_dark = temp_diff/np.max(np.sum(np.abs(darkfield_old)),initial=10**-6)
            flatfield_old = flatfield
            darkfield_old = darkfield

            # Stop optimizing if the change in flatfield/darkfield is below threshold
            ProcessManager.log('Iteration {} loss: {}'.format(w+1,mad_flat))
            if np.max(mad_flat,initial=mad_dark) < new_options['reweight_tol']:
                break

        # Calculate photobleaching effects if specified
        if photobleach:
            pb = _get_photobleach(copy.deepcopy(img_stk),flatfield,darkfield)

        # Resize images back to original image size
        ProcessManager.log('Saving outputs...')
        flatfield = cv2.resize(flatfield,(Y,X),interpolation=cv2.INTER_CUBIC).astype(np.float32)
        if new_options['darkfield']:
            darkfield = cv2.resize(darkfield,(Y,X),interpolation=cv2.INTER_CUBIC).astype(np.float32)
        
        # Export the flatfield image as a tiled tiff
        flatfield_out = base_output.replace(extension,'_flatfield' + extension)
        
        with BioReader(files[0]['file'],max_workers=2) as br:
            metadata = br.metadata
        
        with BioWriter(out_dir.joinpath(flatfield_out),metadata=metadata,max_workers=2) as bw:
            bw.dtype = np.float32
            bw.x = X
            bw.y = Y
            bw[:] = np.reshape(flatfield,(Y,X,1,1,1))
        
        # Export the darkfield image as a tiled tiff
        if new_options['darkfield']:
            darkfield_out = base_output.replace(extension,'_darkfield' + extension)
            with BioWriter(out_dir.joinpath(darkfield_out),metadata=metadata,max_workers=2) as bw:
                bw.dtype = np.float32
                bw.x = X
                bw.y = Y
                bw[:] = np.reshape(darkfield,(Y,X,1,1,1))
            
        # Export the photobleaching components as csv
        if photobleach:
            offsets_out = base_output.replace(extension,'_offsets.csv')
            with open(metadata_dir.joinpath(offsets_out),'w') as fw:
                fw.write('file,offset\n')
                for f,o in zip(files,pb[0,:].tolist()):
                    fw.write("{},{}\n".format(f['file'].name,o))
Example #7
def _merge_layers(input_files,output_path):
    """ Stack the z-layers of the input images into a single output image.

    Args:
        input_files: List of file dictionaries (each with a 'file' key) to stack
        output_path: Path to the stacked output image
    """

    with ProcessManager.process(output_path.name):

        # Get the number of layers to stack
        z_size = 0
        for f in input_files:
            with BioReader(f['file']) as br:
                z_size += br.z
                
        # Get some basic info about the files to stack
        with BioReader(input_files[0]['file']) as br:

            # Get the physical z-distance if available
            ps_z = br.ps_z
            
            # If the z-distances are undefined, average the x and y together
            if None in ps_z:
                # Get the size and units for x and y
                x_val,x_units = br.ps_x
                y_val,y_units = br.ps_y
                
                # Convert x and y values to the same units and average
                z_val = (x_val*UNITS[x_units] + y_val*UNITS[y_units])/2
                
                # Set z units to the smaller of the units between x and y
                z_units = x_units if UNITS[x_units] < UNITS[y_units] else y_units
                
                # Convert z to the proper unit scale
                z_val /= UNITS[z_units]
                ps_z = (z_val,z_units)
                ProcessManager.log('Could not find physical z-size. Using the average of x & y {}.'.format(ps_z))

            # Hold a reference to the metadata once the file gets closed
            metadata = br.metadata

        # Create the output file within a context manager
        with BioWriter(output_path,metadata=metadata,max_workers=ProcessManager._active_threads) as bw:

            # Adjust the dimensions before writing
            bw.z = z_size
            bw.ps_z = ps_z

            # ZIndex tracking for the output file
            zi = 0

            # Start stacking
            for file in input_files:

                # Open an image
                with BioReader(file['file'],max_workers=ProcessManager._active_threads) as br:

                    # Open z-layers one at a time
                    for z in range(br.z):

                        # Use tiled reading in x&y to conserve memory
                        # At most, [chunk_size, chunk_size] pixels are loaded
                        for xs in range(0,br.x,chunk_size):
                            xe = min([br.x,xs + chunk_size])

                            for ys in range(0,br.y,chunk_size):
                                ye = min([br.y,ys + chunk_size])

                                bw[ys:ye,xs:xe,zi:zi+1,...] = br[ys:ye,xs:xe,z:z+1,...]

                        zi += 1

                # update the BioWriter in case the ProcessManager found more threads
                bw.max_workers = ProcessManager._active_threads
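
A worked sketch of the z-spacing fallback above; the UNITS table here is illustrative only (the real module defines its own mapping of unit names to a common scale):

# Illustrative UNITS table (metres per unit); the real module defines its own.
UNITS = {'nm': 1e-9, 'um': 1e-6, 'mm': 1e-3}

x_val, x_units = 0.5, 'um'    # physical x pixel size
y_val, y_units = 600.0, 'nm'  # physical y pixel size

# Average in the reference unit: (5.0e-7 + 6.0e-7) / 2 = 5.5e-7 m
z_val = (x_val * UNITS[x_units] + y_val * UNITS[y_units]) / 2

# Pick the finer of the two units, then express the average in it
z_units = x_units if UNITS[x_units] < UNITS[y_units] else y_units  # 'nm'
z_val /= UNITS[z_units]                                            # 550.0
ps_z = (z_val, z_units)                                            # (550.0, 'nm')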