Example #1
    def write_components(self, destination_dir: Path):
        """ Write out the estimated bleed-through components.

        These bleed-through components can be subtracted from the original
        images to achieve bleed-through correction.

        Args:
            destination_dir: Path to the directory where the output images will
                be written.
        """
        with ProcessPoolExecutor(max_workers=utils.NUM_THREADS) as executor:
            processes = list()
            for source_index, input_path in enumerate(self.__files):
                writer_name = utils.replace_extension(input_path.name)
                processes.append(
                    executor.submit(
                        self._write_components_thread,
                        destination_dir,
                        writer_name,
                        source_index,
                    ))

            for process in processes:
                process.result()
        return
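
Every example in this listing leans on a replace_extension helper, called both with and without an explicit target extension. The snippets come from different projects, so the real signatures differ; the following is a minimal sketch of the common idea (the helper name is from the source, but the compound-suffix handling and the default are assumptions):

import os

# Hypothetical sketch only; each project's utils module may differ.
def replace_extension(name: str, extension: str = '.ome.tif') -> str:
    # Assumption: compound microscopy suffixes are stripped as a unit.
    for suffix in ('.ome.zarr', '.ome.tiff', '.ome.tif'):
        if name.endswith(suffix):
            return name[:-len(suffix)] + extension
    # Otherwise fall back to an ordinary extension split.
    root, _ = os.path.splitext(name)
    return root + extension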
Example #2
    def test_resize_gif(self):

        basename = os.path.basename(PREVIEW_GIF)
        size = (200, None)
        gallery.resize(PREVIEW_GIF, os.getcwd(), "", basename, size, 1)

        self.assertTrue(
            os.path.exists(utils.replace_extension(basename, ".gif")))
        output_size = gallery.get_size(basename, 1)
        self.assertEqual(output_size["width"], size[0])
Example #3
    def test_resize_jpeg(self):

        basename = os.path.basename(IMG_3870_JPEG)
        size = (800, None)
        gallery.resize(IMG_3870_JPEG, os.getcwd(), "", basename, size, 1)

        self.assertTrue(
            os.path.exists(utils.replace_extension(basename, ".jpeg")))
        output_size = gallery.get_size(basename, 1)
        self.assertEqual(output_size["width"], size[0])
Example #4
    def test_resize_tiff(self):

        basename = os.path.basename(IMG_6218_TIFF)
        size = (1600, None)
        gallery.resize(IMG_6218_TIFF, os.getcwd(), "", basename, size, 1)

        expected_basename = utils.replace_extension(basename, ".tiff")
        self.assertTrue(os.path.exists(expected_basename))
        output_size = gallery.get_size(expected_basename, 1)
        self.assertEqual(output_size["width"], size[0])
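
In each of these tests the size tuple fixes the width and leaves the height as None, implying the height is derived from the source aspect ratio. A minimal Pillow sketch of that behavior (hypothetical helper; gallery.resize's actual implementation is not shown here):

from PIL import Image

def resize_to_width(src_path: str, width: int) -> Image.Image:
    # Assumption: when height is None, derive it from the aspect ratio.
    with Image.open(src_path) as img:
        height = round(img.height * width / img.width)
        return img.resize((width, height))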
Example #5
def main(
        input_dir: Path,
        file_pattern: str,
        output_dir: Path,
):
    fp = filepattern.FilePattern(input_dir, file_pattern)
    files = [Path(file[0]['file']).resolve() for file in fp]
    files = [
        file_path for file_path in files
        if file_path.name.endswith(('.ome.tif', '.ome.zarr'))
    ]

    executor = (ThreadPoolExecutor if utils.USE_GPU else ProcessPoolExecutor)(utils.NUM_THREADS)
    processes: List[Future[bool]] = list()

    for in_file in files:
        with BioReader(in_file) as reader:
            x_shape, y_shape, z_shape = reader.X, reader.Y, reader.Z
            metadata = reader.metadata

        ndims = 2 if z_shape == 1 else 3

        out_file = output_dir.joinpath(utils.replace_extension(in_file, extension='_flow.ome.zarr'))
        init_zarr_file(out_file, ndims, metadata)

        tile_count = 0
        for z in range(0, z_shape, utils.TILE_SIZE):
            z = None if ndims == 2 else z

            for y in range(0, y_shape, utils.TILE_SIZE):
                for x in range(0, x_shape, utils.TILE_SIZE):
                    coordinates = x, y, z
                    device = (tile_count % utils.NUM_THREADS) if utils.USE_GPU else None
                    tile_count += 1

                    # Synchronous fallback, handy when debugging:
                    # flow_thread(in_file, out_file, coordinates, device)

                    processes.append(executor.submit(
                        flow_thread,
                        in_file,
                        out_file,
                        coordinates,
                        device,
                    ))

    done, not_done = wait(processes, 0)
    while len(not_done) > 0:
        logger.info(f'Percent complete: {100 * len(done) / len(processes):6.3f}%')
        for r in done:
            r.result()  # surface any exception raised in the worker
        done, not_done = wait(processes, 5)
    executor.shutdown()

    return
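
The progress loop above polls concurrent.futures.wait with a timeout instead of blocking on each future in turn, which keeps the log ticking while work is in flight. A self-contained sketch of the same polling pattern:

import time
from concurrent.futures import ProcessPoolExecutor, wait

def work(seconds: int) -> int:
    time.sleep(seconds)
    return seconds

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(work, i % 3) for i in range(12)]
        done, not_done = wait(futures, timeout=0)
        while not_done:
            print(f'Percent complete: {100 * len(done) / len(futures):6.3f}%')
            for future in done:
                future.result()  # re-raise worker exceptions early
            done, not_done = wait(futures, timeout=5)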
Example #6
def process_cm_data(data, ext_libs=None, user_args=None):
    """Processes data field from jsonnet config map.

    Iterates through jsonnet files in configMap (.libsonnet files first)
    and generates json data.

    Args:
        data (dict): Data from config map labeled as jsonnet code.
        ext_libs (:obj:`list of str`, optional): List of paths to
            external jsonnet libs.
        user_args (:obj:`dict`, optional): Extra keyword arguments passed to
            the jsonnet evaluation call.

    Returns:
        list of (str, dict): Generated json data.

    Raises:
        JsonnetConfigMapError: Raised if jsonnet evaluation fails.
    """
    # Avoid shared mutable defaults: fall back to fresh containers per call.
    ext_libs = [] if ext_libs is None else ext_libs
    user_args = {} if user_args is None else user_args

    libsonnet_folder = "./libsonnets"
    jsons = []

    # Sort by extension so .libsonnet entries come before .jsonnet ones
    # ('.libsonnet' > '.jsonnet', hence reverse=True).
    for dataKey in sorted(data.keys(),
                          key=lambda name: os.path.splitext(name)[1],
                          reverse=True):

        _, extension = os.path.splitext(dataKey)
        if extension == ".libsonnet":
            utils.save_text_to_file(libsonnet_folder, dataKey, data[dataKey])
            continue

        try:
            jsonnet_code = data[dataKey]
            json_ = _jsonnet.evaluate_snippet(dataKey,
                                              jsonnet_code,
                                              jpathdir=ext_libs,
                                              **user_args)
        except RuntimeError as e:
            log.error(f"{dataKey} is not a valid jsonnet, raised error: {e}")
            if os.path.exists(libsonnet_folder):
                utils.remove_folder(libsonnet_folder)
            raise JsonnetConfigMapError
        else:
            json_filename = utils.replace_extension(dataKey, "json")
            jsons.append((json_filename, json.loads(json_)))

    if os.path.exists(libsonnet_folder):
        utils.remove_folder(libsonnet_folder)

    return jsons
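
The reverse extension sort is what guarantees every .libsonnet library is saved to disk before any .jsonnet snippet that might import it gets evaluated. A quick illustration with hypothetical keys:

import os

keys = ['app.jsonnet', 'helpers.libsonnet', 'batch.jsonnet']
ordered = sorted(keys, key=lambda name: os.path.splitext(name)[1], reverse=True)
print(ordered)  # ['helpers.libsonnet', 'app.jsonnet', 'batch.jsonnet']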
Example #7
def vector_to_label(
    *,
    in_path: Path,
    flow_magnitude_threshold: float,
    output_dir: Path,
):
    # TODO: The commented-out line below breaks in the docker container because
    #  the base pytorch container ships python3.7. It hits a classic
    #  serialization bug with thread locks that was fixed in python3.9. Until
    #  pytorch or polus provides a container with python3.9 and pytorch, we
    #  are stuck with threads only.
    # executor = (ThreadPoolExecutor if utils.USE_GPU else ProcessPoolExecutor)(utils.NUM_THREADS)
    executor = ThreadPoolExecutor(utils.NUM_THREADS)

    with BioReader(in_path) as reader:
        reader_shape = (reader.Z, reader.Y, reader.X)
        metadata = reader.metadata

    zarr_path = output_dir.joinpath(
        utils.replace_extension(in_path, extension='_tmp.ome.zarr'))
    init_zarr_file(zarr_path, metadata)

    threads: Dict[Tuple[int, int, int], Future[ThreadFuture]] = dict()
    thread_kwargs: Dict[str, Any] = {
        'in_path': in_path,
        'zarr_path': zarr_path,
        'coordinates': (0, 0, 0),
        'reader_shape': reader_shape,
        'flow_magnitude_threshold': flow_magnitude_threshold,
        'future_z': None,
        'future_y': None,
        'future_x': None,
    }

    tile_count = 0
    for z_index, z in enumerate(range(0, reader_shape[0], utils.TILE_SIZE)):
        for y_index, y in enumerate(range(0, reader_shape[1], utils.TILE_SIZE)):
            for x_index, x in enumerate(range(0, reader_shape[2], utils.TILE_SIZE)):
                tile_count += 1
                thread_kwargs['coordinates'] = x, y, z
                # Each tile receives the futures of its predecessors along
                # each axis, so workers can wait on neighboring tiles.
                thread_kwargs['future_z'] = (
                    None if z_index == 0
                    else threads[(z_index - 1, y_index, x_index)])
                thread_kwargs['future_y'] = (
                    None if y_index == 0
                    else threads[(z_index, y_index - 1, x_index)])
                thread_kwargs['future_x'] = (
                    None if x_index == 0
                    else threads[(z_index, y_index, x_index - 1)])

                threads[(z_index, y_index, x_index)] = executor.submit(
                    vector_thread, **thread_kwargs)

    done, not_done = wait(threads.values(), 0)
    while len(not_done) > 0:
        logger.info(
            f'File {in_path.name}, Progress: {100 * len(done) / len(threads):6.3f}%'
        )
        for future in done:
            future.result()  # surface any exception raised in the worker
        done, not_done = wait(threads.values(), 15)
    executor.shutdown()

    out_path = output_dir.joinpath(utils.replace_extension(zarr_path))
    zarr_to_tif(zarr_path, out_path)
    return
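
The threads dict above hands each tile the futures of its three axis-predecessors, so a worker can wait for its neighbors before reconciling labels across tile borders. A stripped-down 2D sketch of that dependency pattern (hypothetical worker; the real vector_thread does far more):

from concurrent.futures import Future, ThreadPoolExecutor
from typing import Dict, Optional, Tuple

def tile_worker(index: Tuple[int, int],
                left: Optional[Future],
                up: Optional[Future]) -> Tuple[int, int]:
    # Block until the neighboring tiles are finished before doing our own work.
    for neighbor in (left, up):
        if neighbor is not None:
            neighbor.result()
    return index

with ThreadPoolExecutor(max_workers=4) as executor:
    futures: Dict[Tuple[int, int], Future] = {}
    for y in range(3):
        for x in range(3):
            futures[(y, x)] = executor.submit(
                tile_worker,
                (y, x),
                futures.get((y, x - 1)),  # left neighbor, None in the first column
                futures.get((y - 1, x)),  # upper neighbor, None in the first row
            )
    for future in futures.values():
        future.result()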