Example #1
def _get_resized_image_stack(flist):
    """ Load all images in a list and resize to OPTIONS['size']

    When files are parsed, the variables are used in an index to provide
    a method to reference a specific file name by its dimensions. This
    function returns the variable index based on the input filename pattern.

    Inputs:ed th
        flist - Paths of list of images to load and resize
    Outputs:
        img_stack - A 3D stack of 2D images
        X - width of image
        Y - height of image
    """

    # Initialize the output
    br = BioReader(str(flist[0]))
    X = br.num_x()
    Y = br.num_y()
    C = len(flist)
    img_stack = np.zeros((OPTIONS['size'], OPTIONS['size'], C),
                         dtype=np.float32)

    # Load every image as a z-slice
    for ind, fname in enumerate(flist):
        br = BioReader(str(fname))
        I = np.squeeze(br.read_image())
        img_stack[:, :, ind] = cv2.resize(
            I, (OPTIONS['size'], OPTIONS['size']),
            interpolation=cv2.INTER_LINEAR).astype(np.float32)
    return img_stack, X, Y
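
A minimal usage sketch for the function above; OPTIONS is assumed to be the module-level configuration dict the function reads, and the input directory path is hypothetical:

from pathlib import Path

OPTIONS = {'size': 256}  # assumed module-level config read by the function

flist = sorted(Path('/data/images').glob('*.ome.tif'))  # hypothetical input directory
img_stack, X, Y = _get_resized_image_stack(flist)
print(img_stack.shape)  # (256, 256, len(flist))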
Example #2
def process_image(input_img_path, output_img_path, projection, method):

    # Grab a free process
    with ProcessManager.process():

        # Initialize the BioReader and BioWriter
        with BioReader(input_img_path, max_workers=ProcessManager._active_threads) as br, \
            BioWriter(output_img_path, metadata=br.metadata, max_workers=ProcessManager._active_threads) as bw:

            # output image is 2d
            bw.Z = 1

            # iterate along the x,y direction
            for x in range(0, br.X, tile_size):
                x_max = min([br.X, x + tile_size])

                for y in range(0, br.Y, tile_size):
                    y_max = min([br.Y, y + tile_size])

                    ProcessManager.submit_thread(projection,
                                                 br,
                                                 bw, (x, x_max), (y, y_max),
                                                 method=method)

            ProcessManager.join_threads()
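
The projection callable passed to submit_thread is not shown here. A minimal sketch of a compatible worker, assuming the bfio slicing API used elsewhere in these examples; the function name and the default method are hypothetical:

import numpy as np

def max_projection(br, bw, x_range, y_range, method=np.max):
    # Hypothetical worker matching the submit_thread call above: read the
    # full Z stack for one tile (assumed to come back as a (Y, X, Z) array
    # after squeezing) and collapse it along Z into the 2D output image.
    x, x_max = x_range
    y, y_max = y_range

    tile = np.squeeze(br[y:y_max, x:x_max, 0:br.Z, 0, 0])
    bw[y:y_max, x:x_max, 0:1, 0, 0] = method(
        tile, axis=2, keepdims=True).astype(bw.dtype)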
Example #3
    def buffer_image(self, image_path, Xi, Yi, Xt, Yt, color=None):
        """buffer_image Load and image and store in buffer

        This method loads an image and stores it in the appropriate
        position based on the stitching vector coordinates within
        a large tile of the output image. It is intended to be
        used as a thread to increase the reading component to
        assembling the image.
        
        Args:
            image_path ([str]): Path to image to load
            Xi ([list]): Xmin and Xmax of pixels to load from the image
            Yi ([list]): Ymin and Ymax of pixels to load from the image
            Xt ([list]): X position within the buffer to store the image
            Yt ([list]): Y position within the buffer to store the image
        """

        # Load the image
        br = BioReader(image_path, max_workers=2)
        image = br.read_image(X=Xi, Y=Yi)  # only get the first z,c,t layer

        # Put the image in the buffer
        if color is not None:
            image_temp = (
                255 * (image[..., 0, 0].astype(np.float32) - self.bounds[0]) /
                (self.bounds[1] - self.bounds[0]))
            image_temp[image_temp > 255] = 255
            image_temp[image_temp < 0] = 0
            image_temp = image_temp.astype(np.uint8)
            self._image[Yt[0]:Yt[1], Xt[0]:Xt[1], ...] = 0
            self._image[Yt[0]:Yt[1], Xt[0]:Xt[1], self.color] = image_temp
        else:
            self._image[Yt[0]:Yt[1], Xt[0]:Xt[1], ...] = image[:, :, :, 0, 0]
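
The color branch above linearly rescales intensities into 8-bit range using the assembler's bounds; the same transform in isolation, as a standalone sketch (the bounds values are hypothetical):

import numpy as np

bounds = (100.0, 5000.0)  # hypothetical (min, max) display range

def rescale_to_uint8(image: np.ndarray) -> np.ndarray:
    # Linear map from [bounds[0], bounds[1]] to [0, 255], clipped
    scaled = 255 * (image.astype(np.float32) - bounds[0]) / (bounds[1] - bounds[0])
    return np.clip(scaled, 0, 255).astype(np.uint8)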
Example #4
def main(
    input_dir: Path,
    ball_radius: int,
    light_background: bool,
    output_dir: Path,
) -> None:
    """ Main execution function

    Args:
        input_dir: path to directory containing the input images.
        ball_radius: radius of ball to use for the rolling-ball algorithm.
        light_background: whether the image has a light or dark background.
        output_dir: path to directory where to store the output images.
    """

    for in_path in input_dir.iterdir():
        out_path = output_dir.joinpath(in_path.name)

        # Load the input image
        with BioReader(in_path) as reader:
            logger.info(f'Working on {in_path.name} with shape {reader.shape}')

            # Initialize the output image
            with BioWriter(out_path,
                           metadata=reader.metadata,
                           max_workers=cpu_count()) as writer:
                rolling_ball(
                    reader=reader,
                    writer=writer,
                    ball_radius=ball_radius,
                    light_background=light_background,
                )
    return
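
Plugins like this one are typically driven from an argparse CLI; a minimal sketch of such an entry point (the argument names are assumptions, not the plugin's actual interface):

import argparse
from pathlib import Path

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Rolling-ball background subtraction')
    parser.add_argument('--inpDir', type=Path, required=True, help='Input image collection')
    parser.add_argument('--ballRadius', type=int, default=25, help='Rolling-ball radius')
    parser.add_argument('--lightBackground', action='store_true', help='Image has a light background')
    parser.add_argument('--outDir', type=Path, required=True, help='Output image collection')
    args = parser.parse_args()

    main(input_dir=args.inpDir,
         ball_radius=args.ballRadius,
         light_background=args.lightBackground,
         output_dir=args.outDir)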
Example #5
def validate_and_copy(
    file: dict,
    outDir: Path,
) -> None:

    # Enter context manager to verify the file is a tiled tiff
    with BioReader(file['file'], backend='python') as br:

        shutil.copy2(file['file'], outDir.joinpath(file['file'].name))
Example #6
def image_to_zarr(inp_image: Path, out_dir: Path) -> None:

    with ProcessManager.process():

        with BioReader(inp_image) as br:

            # Loop through timepoints
            for t in range(br.T):

                # Loop through channels
                for c in range(br.C):

                    extension = "".join([
                        suffix for suffix in inp_image.suffixes[-2:]
                        if len(suffix) < 5
                    ])

                    out_path = out_dir.joinpath(
                        inp_image.name.replace(extension, FILE_EXT))
                    if br.C > 1:
                        out_path = out_dir.joinpath(
                            out_path.name.replace(FILE_EXT,
                                                  f"_c{c}" + FILE_EXT))
                    if br.T > 1:
                        out_path = out_dir.joinpath(
                            out_path.name.replace(FILE_EXT,
                                                  f"_t{t}" + FILE_EXT))

                    with BioWriter(
                            out_path,
                            max_workers=ProcessManager._active_threads,
                            metadata=br.metadata,
                    ) as bw:

                        bw.C = 1
                        bw.T = 1
                        bw.channel_names = [br.channel_names[c]]

                        # Loop through z-slices
                        for z in range(br.Z):

                            # Loop across the length of the image
                            for y in range(0, br.Y, TILE_SIZE):
                                y_max = min([br.Y, y + TILE_SIZE])

                                bw.max_workers = ProcessManager._active_threads
                                br.max_workers = ProcessManager._active_threads

                                # Loop across the depth of the image
                                for x in range(0, br.X, TILE_SIZE):
                                    x_max = min([br.X, x + TILE_SIZE])

                                    bw[y:y_max, x:x_max, z:z + 1, 0,
                                       0] = br[y:y_max, x:x_max, z:z + 1, c, t]
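
A sketch of how image_to_zarr might be driven over a whole collection. The ProcessManager calls mirror how these plugins use preadator elsewhere, but the initialization argument is an assumption:

from pathlib import Path
from preadator import ProcessManager

def batch_to_zarr(inp_dir: Path, out_dir: Path) -> None:
    # Hypothetical driver: convert every image in a collection,
    # one scheduled process per file.
    ProcessManager.init_processes('img2zarr')  # initialization argument assumed
    for image_path in sorted(inp_dir.iterdir()):
        ProcessManager.submit_process(image_to_zarr, image_path, out_dir)
    ProcessManager.join_processes()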
Example #7
def main(
        input_dir: Path,
        file_pattern: str,
        output_dir: Path,
):
    fp = filepattern.FilePattern(input_dir, file_pattern)
    files = [Path(file[0]['file']).resolve() for file in fp]
    files = list(filter(
        lambda file_path: file_path.name.endswith('.ome.tif') or file_path.name.endswith('.ome.zarr'),
        files
    ))

    executor = (ThreadPoolExecutor if utils.USE_GPU else ProcessPoolExecutor)(utils.NUM_THREADS)
    processes: List[Future[bool]] = list()

    for in_file in files:
        with BioReader(in_file) as reader:
            x_shape, y_shape, z_shape = reader.X, reader.Y, reader.Z
            metadata = reader.metadata

        ndims = 2 if z_shape == 1 else 3

        out_file = output_dir.joinpath(utils.replace_extension(in_file, extension='_flow.ome.zarr'))
        init_zarr_file(out_file, ndims, metadata)

        tile_count = 0
        for z in range(0, z_shape, utils.TILE_SIZE):
            z = None if ndims == 2 else z

            for y in range(0, y_shape, utils.TILE_SIZE):
                for x in range(0, x_shape, utils.TILE_SIZE):
                    coordinates = x, y, z
                    device = (tile_count % utils.NUM_THREADS) if utils.USE_GPU else None
                    tile_count += 1

                    # flow_thread(in_file, out_file, coordinates, device)

                    processes.append(executor.submit(
                        flow_thread,
                        in_file,
                        out_file,
                        coordinates,
                        device,
                    ))

    done, not_done = wait(processes, 0)
    while len(not_done) > 0:
        logger.info(f'Percent complete: {100 * len(done) / len(processes):6.3f}%')
        for r in done:
            r.result()
        done, not_done = wait(processes, 5)
    executor.shutdown()

    return
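
init_zarr_file is referenced above but not shown. A plausible sketch based on how flow_thread in Example #14 indexes the array (one mask channel, ndims flow channels, one label channel); the OMEXML metadata attribute names are assumptions:

from pathlib import Path

import numpy
import zarr

def init_zarr_file(path: Path, ndims: int, metadata) -> None:
    # Hypothetical initializer: a 5D (t, c, z, y, x) array sized to the
    # source image, with ndims + 2 channels (mask, flows, labels).
    pixels = metadata.image().Pixels  # attribute names assumed
    root = zarr.open(str(path), mode='w')
    root.zeros(
        '0',
        shape=(1, ndims + 2, pixels.SizeZ, pixels.SizeY, pixels.SizeX),
        dtype=numpy.float32,
    )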
Example #8
def _merge_layers(input_dir, input_files, output_dir, output_file):
    zs = sorted(input_files.keys())  # sorted list of z-values

    # Initialize the output file
    br = BioReader(
        str(Path(input_dir).joinpath(input_files[zs[0]][0]).absolute()))
    bw = BioWriter(str(Path(output_dir).joinpath(output_file).absolute()),
                   metadata=br.read_metadata())
    bw.num_z(Z=len(zs))
    del br

    # Load each image and save to the volume file
    for i, z in enumerate(zs):
        br = BioReader(
            str(Path(input_dir).joinpath(input_files[z][0]).absolute()))
        bw.write_image(br.read_image(), Z=[i, i + 1])
        del br

    # Close the output image and delete
    bw.close_image()
    del bw
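
A usage sketch for _merge_layers, assuming input_files maps each numeric z-value to a list of file names (as produced by a filepattern scan); all paths and names are hypothetical:

input_files = {
    0: ['img_z000.ome.tif'],
    1: ['img_z001.ome.tif'],
    2: ['img_z002.ome.tif'],
}
_merge_layers('/data/slices', input_files, '/data/volumes', 'img_volume.ome.tif')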
Example #9
        "-Dlog4j.configuration=file:{}".format(str(log_config.absolute()))
    ],
                   class_path=bioformats.JARS)

    # Make the output directory
    image = Path(input_dir).joinpath(image)
    if pyramid_type == "Neuroglancer":
        out_dir = Path(output_dir).joinpath(image.name)
    elif pyramid_type == "DeepZoom":
        out_dir = Path(output_dir).joinpath('{}_files'.format(image_num))
    out_dir.mkdir()
    out_dir = str(out_dir.absolute())

    # Create the BioReader object
    logger.info('Getting the BioReader...')
    bf = BioReader(str(image.absolute()))

    # Create the output path and info file
    if pyramid_type == "Neuroglancer":
        file_info = utils.neuroglancer_info_file(bf, out_dir)
    elif pyramid_type == "DeepZoom":
        file_info = utils.dzi_file(bf, out_dir, image_num)
    else:
        raise ValueError("pyramid_type must be Neuroglancer or DeepZoom")
    logger.info("data_type: {}".format(file_info['data_type']))
    logger.info("num_channels: {}".format(file_info['num_channels']))
    logger.info("number of scales: {}".format(len(file_info['scales'])))
    logger.info("type: {}".format(file_info['type']))

    # Create the classes needed to generate a precomputed slice
    logger.info("Creating encoder and file writer...")
Example #10
    # Set up the number of threads for each task
    read_workers = max([cpu_count() // 3, 1])
    write_workers = max([cpu_count() - 1, 2])
    loop_workers = max([3 * cpu_count() // 4, 2])

    # Extract filenames from registration_string and similar_transformation_string
    registration_set = registration_string.split()
    similar_transformation_set = similar_transformation_string.split()

    filename_len = len(template)

    # Separate the filename of the moving image from the complete path
    moving_image_name = registration_set[1][-1 * filename_len:]

    # Read and downscale the reference image
    br_ref = BioReader(registration_set[0], max_workers=write_workers)
    scale_factor = get_scale_factor(br_ref.num_y(), br_ref.num_x())
    logger.info('Scale factor: {}'.format(scale_factor))

    # Initialize the scale matrix (used to upscale the transformation matrices)
    if method == 'Projective':
        scale_matrix = np.array([[1, 1, scale_factor],
                                 [1, 1, scale_factor],
                                 [1 / scale_factor, 1 / scale_factor, 1]])
    else:
        scale_matrix = np.array([[1 / scale_factor, 1 / scale_factor, 1],
                                 [1 / scale_factor, 1 / scale_factor, 1]])

    logger.info('Reading and downscaling reference image: {}'.format(
        Path(registration_set[0]).name))
    reference_image_downscaled, max_val, min_val = get_scaled_down_images(
        br_ref, scale_factor, get_max=True)
    br_ref.max_workers = read_workers

    # Read and downscale the moving image
    logger.info('Reading and downscaling moving image: {}'.format(
        Path(registration_set[1]).name))
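
get_scale_factor is referenced above but not shown; a plausible sketch that halves the image until the longer side fits a working size (both the constant and the power-of-two policy are assumptions):

MAX_DIM = 2048  # assumed working size for downscaled registration

def get_scale_factor(height: int, width: int) -> int:
    # Hypothetical helper: smallest power of two that brings the larger
    # image dimension to MAX_DIM or below.
    scale_factor = 1
    while max(height, width) // scale_factor > MAX_DIM:
        scale_factor *= 2
    return scale_factor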
Example #11
def flow_thread(input_path: Path, zfile: Path, use_gpu: bool,
                dev: torch.device, x: int, y: int, z: int) -> bool:
    """ Converts labels to flows

    This function converts labels in each tile to vector field.

    Args:
        input_path(path): Path of input image collection
        zfile(path): Path where output zarr file will be saved
        x(int): Start index of the tile in x dimension of image
        y(int): Start index of the tile in y dimension of image
        z(int): Z slice of the  image

    """

    logging.basicConfig(
        format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("flow")
    logger.setLevel(logging.INFO)

    root = zarr.open(str(zfile))[0]

    with BioReader(input_path) as br:
        x_min = max([0, x - TILE_OVERLAP])
        x_max = min([br.X, x + TILE_SIZE + TILE_OVERLAP])
        y_min = max([0, y - TILE_OVERLAP])
        y_max = min([br.Y, y + TILE_SIZE + TILE_OVERLAP])

        # Normalize
        I = br[y_min:y_max, x_min:x_max, z:z + 1, 0, 0].squeeze()
        _, image = np.unique(I, return_inverse=True)
        image = image.reshape(y_max - y_min, x_max - x_min)

        flow = dynamics.masks_to_flows(image, use_gpu, dev)[0]

        logger.debug('Computed flows on slice %d, tile (y, x) %d:%d %d:%d',
                     z, y, y_max, x, x_max)
        flow_final = flow[:, :, :, np.newaxis,
                          np.newaxis].transpose(1, 2, 3, 0, 4)
        x_overlap = x - x_min
        x_min = x
        x_max = min([br.X, x + TILE_SIZE])
        y_overlap = y - y_min
        y_min = y
        y_max = min([br.Y, y + TILE_SIZE])

        root[0:1, 0:1, z:z + 1, y_min:y_max, x_min:x_max] = (
            I[y_overlap:y_max - y_min + y_overlap, x_overlap:x_max - x_min +
              x_overlap, np.newaxis, np.newaxis, np.newaxis] > 0).transpose(
                  4, 3, 2, 0, 1)
        root[0:1, 1:3, z:z + 1, y_min:y_max,
             x_min:x_max] = flow_final[y_overlap:y_max - y_min + y_overlap,
                                       x_overlap:x_max - x_min + x_overlap,
                                       ...].transpose(4, 3, 2, 0, 1)
        root[0:1, 3:4, z:z + 1, y_min:y_max,
             x_min:x_max] = I[y_overlap:y_max - y_min + y_overlap,
                                x_overlap:x_max - x_min + x_overlap,
                                np.newaxis, np.newaxis, np.newaxis].astype(
                                    np.float32).transpose(4, 3, 2, 0, 1)

    return True
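
A sketch of reading the result back, using the same zarr.open indexing as above; the channel layout follows the channel_names set in Example #17 (cell_probability, x, y, labels), and the file path is hypothetical:

import zarr

root = zarr.open('/data/flows/img_flow.zarr')[0]  # hypothetical output path

cell_prob = root[0, 0, 0, :, :]  # channel 0: cell probability mask
flow_1 = root[0, 1, 0, :, :]     # channels 1-2: the vector field components,
flow_2 = root[0, 2, 0, :, :]     # named 'x' and 'y' in Example #17
labels = root[0, 3, 0, :, :]     # channel 3: original labels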
Example #12
        for files in fp.iterate(group_by='c'):

            # Get the filenames in the channel order
            paths = []
            for c in channelOrder:
                for file in files:
                    if file['c'] == c:
                        paths.append(file)
                        break

            # Make sure files were found for the current group
            if len(paths) == 0:
                continue

            # Initialize the output file
            br = BioReader(paths[0]['file'])
            file_name = filepattern.output_name(
                filePattern, paths,
                {c: paths[0][c]
                 for c in fp.variables if c != 'c'})
            logger.info('Writing: {}'.format(file_name))
            bw = BioWriter(str(Path(outDir).joinpath(file_name)),
                           metadata=br.read_metadata())
            del br

            # Modify the metadata to make sure channels are written correctly
            bw.num_c(len(paths))
            bw._metadata.image().Pixels.channel_count = bw.num_c()

            # Process the data in tiles
            threads = []
Example #13
        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("do_flat")
    logger.setLevel(logging.INFO)

    # Set up the FilePattern object
    images = FilePattern(inpDir, filepattern)
    ''' Start the javabridge '''
    logger.info("Starting the javabridge...")
    log_config = Path(__file__).parent.joinpath("log4j.properties")
    jutil.start_vm(args=[
        "-Dlog4j.configuration=file:{}".format(str(log_config.absolute()))
    ],
                   class_path=bioformats.JARS)
    ''' Load the flatfielding data '''
    logger.info("Loading the flatfield data...")
    flat_br = BioReader(brightfield)
    flat_image = np.squeeze(flat_br.read_image())
    del flat_br

    # Normalize the brightfield image if it isn't done already
    flat_image = flat_image.astype(np.float32)
    flat_image = np.divide(flat_image, np.mean(flat_image))

    # Load the darkfield and photobleach offsets if they are specified
    if darkfield is not None:
        dark_br = BioReader(darkfield)
        dark_image = np.squeeze(dark_br.read_image())
        del dark_br
    else:
        dark_image = np.zeros(flat_image.shape, dtype=np.float32)
    if photobleach is not None:
Example #14
def flow_thread(
        file_name: Path,
        zarr_path: Path,
        coordinates: Tuple[int, int, Optional[int]],
        device: Optional[int],
) -> bool:
    x, y, z = coordinates
    ndims = 2 if z is None else 3
    z = 0 if z is None else z

    # Load the data
    with BioReader(file_name) as reader:
        x_shape, y_shape, z_shape = reader.X, reader.Y, reader.Z

        x_min = max(0, x - utils.TILE_OVERLAP)
        x_max = min(x_shape, x + utils.TILE_SIZE + utils.TILE_OVERLAP)

        y_min = max(0, y - utils.TILE_OVERLAP)
        y_max = min(y_shape, y + utils.TILE_SIZE + utils.TILE_OVERLAP)

        z_min = max(0, z - utils.TILE_OVERLAP)
        z_max = min(z_shape, z + utils.TILE_SIZE + utils.TILE_OVERLAP)

        masks = numpy.squeeze(reader[y_min:y_max, x_min:x_max, z_min:z_max, 0, 0])
    
    masks = masks if ndims == 2 else numpy.transpose(masks, (2, 0, 1))
    masks_shape = masks.shape
    
    # Calculate index and offsets
    x_overlap = x - x_min
    x_min, x_max = x, min(x_shape, x + utils.TILE_SIZE)
    cx_min, cx_max = x_overlap, x_max - x_min + x_overlap

    y_overlap = y - y_min
    y_min, y_max = y, min(y_shape, y + utils.TILE_SIZE)
    cy_min, cy_max = y_overlap, y_max - y_min + y_overlap

    z_overlap = z - z_min
    z_min, z_max = z, min(z_shape, z + utils.TILE_SIZE)
    cz_min, cz_max = z_overlap, z_max - z_min + z_overlap
    
    # Save the mask before transforming
    if ndims == 2:
        masks_original = masks[numpy.newaxis, numpy.newaxis, numpy.newaxis, :, :]
    else:
        masks_original = masks[numpy.newaxis, numpy.newaxis, :, :]
    masks_original = masks_original[:, :, cz_min:cz_max, cy_min:cy_max, cx_min:cx_max]

    # noinspection PyTypeChecker
    zarr_root = zarr.open(str(zarr_path))[0]
    zarr_root[0:1, 0:1, z_min:z_max, y_min:y_max, x_min:x_max] = numpy.asarray(masks_original != 0, dtype=numpy.float32)
    zarr_root[0:1, ndims + 1:ndims + 2, z_min:z_max, y_min:y_max, x_min:x_max] = numpy.asarray(masks_original, dtype=numpy.float32)
    
    if not numpy.any(masks):
        logger.debug(f'Tile (x, y, z) = {x, y, z} in file {file_name.name} has no objects. Setting flows to zero...')
        flows = numpy.zeros((ndims, *masks.shape), dtype=numpy.float32)
    else:
        # Normalize
        labels, masks = numpy.unique(masks, return_inverse=True)
        if len(labels) == 1:
            logger.debug(f'Tile (x, y, z) = {x, y, z} in file {file_name.name} has only one object.')
            masks += 1

        masks = numpy.reshape(masks, newshape=masks_shape)
        flows = dynamics.masks_to_flows(masks, device=device)
        
        logger.debug(f'Computed flows on tile (x, y, z) = {x, y, z} in file {file_name.name}')

    # Zarr axes ordering should be (t, c, z, y, x). Add missing t, c, and z axes
    if ndims == 2:
        flows = flows[numpy.newaxis, :, numpy.newaxis, :, :]
    else:
        flows = flows[numpy.newaxis, :, :, :]

    flows = flows[:, :, cz_min:cz_max, cy_min:cy_max, cx_min:cx_max]
    
    zarr_root[0:1, 1:ndims + 1, z_min:z_max, y_min:y_max, x_min:x_max] = flows

    return True
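
The index bookkeeping above reads each tile with extra overlap for context, then crops the overlap away before writing. The same pattern for a single axis, as a standalone sketch:

def crop_indices(start: int, axis_shape: int, tile_size: int, overlap: int):
    # Mirror of the per-axis arithmetic above: where the tile lands in
    # the output (write_min:write_max) and which slice of the padded
    # tile to keep (crop_min:crop_max).
    read_min = max(0, start - overlap)
    write_min, write_max = start, min(axis_shape, start + tile_size)
    crop_min = start - read_min
    crop_max = write_max - write_min + crop_min
    return write_min, write_max, crop_min, crop_max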
Example #15
def extract_fovs(file_path: Path,
                 out_path: Path):
    """ Extract individual FOVs from a czi file

    When CZI files are loaded by BioFormats, it will generally try to mosaic
    images together by stage position if the image was captured with the
    intention of mosaicing images together. At the time this function was
    written, there was no clear way of extracting individual FOVs so this
    algorithm was created.
    
    Every field of view in each z-slice, channel, and timepoint contained in a
    CZI file is saved as an individual image.

    Args:
        file_path (Path): Path to CZI file
        out_path (Path): Path to output directory
    """
    
    with ProcessManager.process(file_path.name):
        
        logger.info('Starting extraction from ' + str(file_path) + '...')

        if Path(file_path).suffix != '.czi':
            raise TypeError("Path must be to a czi file.")
            
        base_name = Path(file_path.name).stem
        
        # Load files without mosaicing
        czi = czifile.CziFile(file_path,detectmosaic=False)
        subblocks = [s for s in czi.filtered_subblock_directory if s.mosaic_index is not None]
        
        ind = {'X': [],
               'Y': [],
               'Z': [],
               'C': [],
               'T': [],
               'Row': [],
               'Col': []}
        
        # Get the indices of each FOV
        for s in subblocks:
            scene = [dim.start for dim in s.dimension_entries if dim.dimension == 'S']
            if scene and scene[0] != 0:
                continue
            
            for dim in s.dimension_entries:
                if dim.dimension=='X':
                    ind['X'].append(dim.start)
                elif dim.dimension=='Y':
                    ind['Y'].append(dim.start)
                elif dim.dimension=='Z':
                    ind['Z'].append(dim.start)
                elif dim.dimension=='C':
                    ind['C'].append(dim.start)
                elif dim.dimension=='T':
                    ind['T'].append(dim.start)
                    
        row_conv = {y: row for row, y in enumerate(np.unique(ind['Y']))}
        col_conv = {x: col for col, x in enumerate(np.unique(ind['X']))}
        
        ind['Row'] = [row_conv[y] for y in ind['Y']]
        ind['Col'] = [col_conv[x] for x in ind['X']]
        
        with BioReader(file_path) as br:
            
            metadata = br.metadata
            chan_names = br.cnames
        
        for i, s in enumerate(subblocks):
            
            Z = None if len(ind['Z'])==0 else ind['Z'][i]
            C = None if len(ind['C'])==0 else ind['C'][i]
            T = None if len(ind['T'])==0 else ind['T'][i]
        
            out_file_path = out_path.joinpath(_get_image_name(base_name,
                                                              row=ind['Row'][i],
                                                              col=ind['Col'][i],
                                                              Z=Z,
                                                              C=C,
                                                              T=T))
            
            dims = [_get_image_dim(s,'Y'),
                    _get_image_dim(s,'X'),
                    _get_image_dim(s,'Z'),
                    _get_image_dim(s,'C'),
                    _get_image_dim(s,'T')]
            
            data = s.data_segment().data().reshape(dims)
            
            write_thread(out_file_path,
                         data,
                         metadata,
                         chan_names[C])
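
_get_image_dim is referenced above but not shown; a plausible sketch based on the czifile subblock API already used here (dimension_entries with dimension attributes; the size attribute is an assumption):

def _get_image_dim(s, dimension: str) -> int:
    # Hypothetical helper: size of one dimension of a subblock,
    # defaulting to 1 when the dimension is absent.
    for entry in s.dimension_entries:
        if entry.dimension == dimension:
            return entry.size
    return 1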
Example #16
    files = list({% for inp,val in cookiecutter._inputs.items() if val.type=='collection' -%}
        {% if loop.first %}{{ inp }}{% endif %}
        {%- endfor -%}.iterdir())
    
    for file in files:
        
    {%- endif %}
    {#- Use bfio if requested #}
    {%- if cookiecutter.use_bfio == "True" %}
    {%- filter indent(level2,True) %}
    
    logger.info(f'Processing image: {file["file"]}')
    
    # Load the input image
    logger.debug(f'Initializing BioReader for {file["file"]}')
    with BioReader(file['file']) as br:
        
        input_extension = ''.join([s for s in file['file'].suffixes[-2:] if len(s) < 5])
        out_name = file['file'].name.replace(input_extension,POLUS_EXT)
        out_path = {{ cookiecutter._outputs.keys()|first }}.joinpath(out_name)
        
        # Initialize the output image
        logger.debug(f'Initializing BioWriter for {out_path}')
        with BioWriter(out_path,metadata=br.metadata) as bw:
            
            # This is where the magic happens, replace this part with your method
            bw[:] = awesome_function(br[:])
    {%- endfilter %}
    {%- endif %}

if __name__=="__main__":
Example #17
def main(inpDir: Path, outDir: Path, filePattern: str = None) -> None:
    """ Turn labels into flow fields.

    Args:
        inpDir: Path to the input directory
        outDir: Path to the output directory
    """

    # Use a gpu if it's available
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        dev = torch.device("cuda")
    else:
        dev = torch.device("cpu")
    logger.info(f'Running on: {dev}')

    # Determine the number of threads to run on
    num_threads = max([cpu_count() // 2, 1])
    logger.info(f'Number of threads: {num_threads}')

    # Get all file names in inpDir image collection based on input pattern
    if filePattern:
        fp = filepattern.FilePattern(inpDir, filePattern)
        inpDir_files = [file[0]['file'].name for file in fp()]
        logger.info('Processing %d labels based on filepattern' %
                    len(inpDir_files))
    else:
        inpDir_files = [f.name for f in Path(inpDir).iterdir() if f.is_file()]

    # Loop through files in inpDir image collection and process
    processes = []

    if use_gpu:
        executor = ThreadPoolExecutor(num_threads)
    else:
        executor = ProcessPoolExecutor(num_threads)

    for f in inpDir_files:
        br = BioReader(Path(inpDir).joinpath(f).absolute())
        out_file = Path(outDir).joinpath(
            f.replace('.ome', '_flow.ome').replace('.tif',
                                                   '.zarr')).absolute()
        bw = BioWriter(out_file, metadata=br.metadata)
        bw.C = 4
        bw.dtype = np.float32
        bw.channel_names = ['cell_probability', 'x', 'y', 'labels']

        # Initialize the output file on disk before worker threads open it
        bw._backend._init_writer()

        for z in range(br.Z):
            for x in range(0, br.X, TILE_SIZE):
                for y in range(0, br.Y, TILE_SIZE):
                    processes.append(
                        executor.submit(flow_thread,
                                        Path(inpDir).joinpath(f).absolute(),
                                        out_file, use_gpu, dev, x, y, z))
        bw.close()
        br.close()

    done, not_done = wait(processes, 0)

    logger.info(f'Percent complete: {100 * len(done) / len(processes):6.3f}%')

    while len(not_done) > 0:
        for r in done:
            r.result()
        done, not_done = wait(processes, 5)
        logger.info(
            f'Percent complete: {100 * len(done) / len(processes):6.3f}%')

    executor.shutdown()
Example #18
def main(
    _opName: str,
    _in1: Path,
    _sigma: str,
    _calibration: str,
    _out: Path,
) -> None:
    """Initialize ImageJ"""

    # Bioformats throws a debug message, disable the loci debugger to mute it
    def disable_loci_logs():
        DebugTools = scyjava.jimport("loci.common.DebugTools")
        DebugTools.setRootLevel("WARN")

    scyjava.when_jvm_starts(disable_loci_logs)

    # This is the version of ImageJ pre-downloaded into the docker container
    logger.info("Starting ImageJ...")
    ij = imagej.init("sc.fiji:fiji:2.1.1+net.imagej:imagej-legacy:0.37.4",
                     headless=True)
    # ij_converter.ij = ij
    logger.info("Loaded ImageJ version: {}".format(ij.getVersion()))
    """ Validate and organize the inputs """
    args = []
    argument_types = []
    arg_len = 0

    # Validate opName
    opName_values = [
        "DefaultTubeness",
    ]
    assert _opName in opName_values, "opName must be one of {}".format(
        opName_values)

    # Validate in1
    in1_types = {
        "DefaultTubeness": "RandomAccessibleInterval",
    }

    # Check that all inputs are specified
    if _in1 is None and _opName in in1_types:
        raise ValueError("{} must be defined to run {}.".format(
            "in1", _opName))
    elif _in1 is not None:
        in1_type = in1_types[_opName]

        # switch to images folder if present
        if _in1.joinpath("images").is_dir():
            _in1 = _in1.joinpath("images").absolute()

        args.append([f for f in _in1.iterdir() if f.is_file()])
        arg_len = len(args[-1])
    else:
        argument_types.append(None)
        args.append([None])

    # Validate sigma
    sigma_types = {
        "DefaultTubeness": "double",
    }

    # Check that all inputs are specified
    if _sigma is None and _opName in sigma_types:
        raise ValueError("{} must be defined to run {}.".format(
            "sigma", _opName))
    else:
        sigma = None

    # Validate calibration
    calibration_types = {
        "DefaultTubeness": "double[]",
    }

    # Check that all inputs are specified
    if _calibration is None and _opName in calibration_types:
        raise ValueError("{} must be defined to run {}.".format(
            "calibration", _opName))
    else:
        calibration = None

    for i in range(len(args)):
        if len(args[i]) == 1:
            args[i] = args[i] * arg_len
    """ Set up the output """
    out_types = {
        "DefaultTubeness": "IterableInterval",
    }
    """ Run the plugin """
    try:
        for ind, (in1_path, ) in enumerate(zip(*args)):
            if in1_path is not None:

                # Load the first plane of image in in1 collection
                logger.info("Processing image: {}".format(in1_path))
                in1_br = BioReader(in1_path)

                # Convert to appropriate numpy array
                in1 = ij_converter.to_java(ij,
                                           np.squeeze(in1_br[:, :, 0:1, 0, 0]),
                                           in1_type)
                metadata = in1_br.metadata
                fname = in1_path.name
                dtype = ij.py.dtype(in1)
            if _sigma is not None:
                sigma = ij_converter.to_java(ij, _sigma, sigma_types[_opName],
                                             dtype)

            if _calibration is not None:
                calibration = ij_converter.to_java(ij, _calibration,
                                                   calibration_types[_opName],
                                                   dtype)

            logger.info("Running op...")
            if _opName == "DefaultTubeness":
                out = ij.op().filter().tubeness(in1, sigma, calibration)

            logger.info("Completed op!")
            if in1_path is not None:
                in1_br.close()

            # Saving output file to out
            logger.info("Saving...")
            out_array = ij_converter.from_java(ij, out, out_types[_opName])
            bw = BioWriter(_out.joinpath(fname), metadata=metadata)
            bw.Z = 1
            bw.dtype = out_array.dtype
            bw[:] = out_array.astype(bw.dtype)
            bw.close()

    except:
        logger.error("There was an error, shutting down jvm before raising...")
        raise

    finally:
        # Exit the program
        logger.info("Shutting down jvm...")
        del ij
        jpype.shutdownJVM()
        logger.info("Complete!")
Example #19
        log_config = Path(__file__).parent.joinpath("log4j.properties")
        jutil.start_vm(args=["-Dlog4j.configuration=file:{}".format(str(log_config.absolute()))],class_path=bioformats.JARS)
        {% endif -%}
        {% for inp,val in cookiecutter._inputs|dictsort -%}
        {% if val.type=="collection" -%}
        # Get all file names in {{ inp }} image collection
        {{ inp }}_files = [f.name for f in Path({{ inp }}).iterdir() if f.is_file() and "".join(f.suffixes)=='.ome.tif']
        {% endif %}
        {% endfor -%}
        {% for inp,val in cookiecutter._inputs|dictsort -%}
        {% for out,n in cookiecutter._outputs|dictsort -%}
        {% if val.type=="collection" and cookiecutter.use_bfio -%}
        # Loop through files in {{ inp }} image collection and process
        for i,f in enumerate({{ inp }}_files):
            # Load an image
            br = BioReader(Path({{ inp }}).joinpath(f))
            image = np.squeeze(br.read_image())

            # initialize the output
            out_image = np.zeros(image.shape,dtype=br._pix['type'])

            """ Do some math and science - you should replace this """
            logger.info('Processing image ({}/{}): {}'.format(i + 1, len({{ inp }}_files), f))
            out_image = awesome_math_and_science_function(image)

            # Write the output
            bw = BioWriter(Path({{ out }}).joinpath(f),metadata=br.read_metadata())
            bw.write_image(np.reshape(out_image,(br.num_y(),br.num_x(),br.num_z(),1,1)))
        {%- endif %}{% endfor %}{% endfor %}
        
    finally:
Example #20
    def make_tile(self, x_min, x_max, y_min, y_max, color=None):
        """make_tile Create a supertile

        This method identifies images that have stitching vector positions
        within the bounds of the supertile defined by the x and y input
        arguments. It then spawns threads to load images and store in the
        supertile buffer. Finally it returns the assembled supertile to
        allow the main thread to generate the write thread.

        Args:
            x_min ([int]): Minimum x bound of the tile
            x_max ([int]): Maximum x bound of the tile
            y_min ([int]): Minimum y bound of the tile
            y_max ([int]): Maximum y bound of the tile
            stitchPath ([str]): Path to the stitching vector

        Returns:
            [type]: [description]
        """

        self._X_offset = x_min
        self._Y_offset = y_min

        # Get the data type
        br = BioReader(
            str(
                Path(self._file_path).joinpath(
                    self._file_dict['filePos'][0]['file'])))
        dtype = br._pix['type']

        # initialize the image
        if color is not None:
            self._image = np.full((y_max - y_min, x_max - x_min, 4),
                                  color,
                                  dtype=dtype)
        else:
            self._image = np.zeros((y_max - y_min, x_max - x_min, 1),
                                   dtype=dtype)

        # get images in bounds of current super tile
        with ThreadPoolExecutor(max([self._max_workers, 2])) as executor:
            for f in self._file_dict['filePos']:
                if (f['posX'] >= x_min and f['posX'] <= x_max) or (
                        f['posX'] + f['width'] >= x_min
                        and f['posX'] + f['width'] <= x_max):
                    if (f['posY'] >= y_min and f['posY'] <= y_max) or (
                            f['posY'] + f['height'] >= y_min
                            and f['posY'] + f['height'] <= y_max):

                        # get bounds of image within the tile
                        Xt = [max(0, f['posX'] - x_min)]
                        Xt.append(
                            min(x_max - x_min, f['posX'] + f['width'] - x_min))
                        Yt = [max(0, f['posY'] - y_min)]
                        Yt.append(
                            min(y_max - y_min,
                                f['posY'] + f['height'] - y_min))

                        # get bounds of image within the image
                        Xi = [max(0, x_min - f['posX'])]
                        Xi.append(min(f['width'], x_max - f['posX']))
                        Yi = [max(0, y_min - f['posY'])]
                        Yi.append(min(f['height'], y_max - f['posY']))

                        # self.buffer_image(str(Path(self._file_path).joinpath(f['file'])),Xi,Yi,Xt,Yt,color)
                        executor.submit(
                            self.buffer_image,
                            str(Path(self._file_path).joinpath(f['file'])), Xi,
                            Yi, Xt, Yt, color)