Example #1
def main(input_dir: pathlib.Path,
         output_dir: pathlib.Path,
         file_pattern: typing.Optional[str] = None,
         group_by: typing.Optional[str] = None,
         get_darkfield: typing.Optional[bool] = None,
         get_photobleach: typing.Optional[bool] = None,
         metadata_dir: typing.Optional[pathlib.Path] = None) -> None:

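    # Fall back to default values for any optional arguments left as None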
    if group_by is None:
        group_by = 'xyp'

    if get_darkfield is None:
        get_darkfield = False

    if get_photobleach is None:
        get_photobleach = False

    if file_pattern is None:
        file_pattern = '.*'

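    # Build a FilePattern to iterate over the input images in groups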
    fp = FilePattern(input_dir, file_pattern)

    ProcessManager.init_processes("basic")

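    # Submit one worker process per group of files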
    for files in fp(group_by=group_by):

        ProcessManager.submit_process(basic.basic, files, output_dir,
                                      metadata_dir, get_darkfield,
                                      get_photobleach)

    ProcessManager.join_processes()
Example #2
def main(imgPath: pathlib.Path,
         stitchPath: pathlib.Path,
         outDir: pathlib.Path,
         timesliceNaming: typing.Optional[bool]
         ) -> None:

    '''Setup stitching variables/objects'''
    # Get a list of stitching vectors
    vectors = list(stitchPath.iterdir())
    vectors.sort()

    # Try to infer a filepattern from the files on disk for faster matching later
    global fp # make the filepattern global to share between processes
    try:
        pattern = filepattern.infer_pattern([f.name for f in imgPath.iterdir()])
        logger.info(f'Inferred file pattern: {pattern}')
        fp = filepattern.FilePattern(imgPath, pattern)

    # Pattern inference didn't work, so just get a list of files
    except Exception:
        logger.info('Unable to infer pattern, defaulting to: .*')
        fp = filepattern.FilePattern(imgPath, '.*')

    '''Run stitching jobs in separate processes'''
    ProcessManager.init_processes('main', 'asmbl')

    for v in vectors:
        # Check to see if the file is a valid stitching vector
        if 'img-global-positions' not in v.name:
            continue

        ProcessManager.submit_process(assemble_image, v, outDir)

    ProcessManager.join_processes()
Example #3
def main(inpDir, outDir, projection, method):

    # images in the input directory
    inpDir_files = os.listdir(inpDir)
    inpDir_files = [
        filename for filename in inpDir_files if filename.endswith('.ome.tif')
    ]

    # Surround with try/finally for proper error catching
    try:
        for image_name in inpDir_files:

            input_img_path = os.path.join(inpDir, image_name)
            output_img_path = os.path.join(outDir, image_name)

            ProcessManager.submit_process(process_image, input_img_path,
                                          output_img_path, projection, method)

        ProcessManager.join_processes()

    except Exception:
        traceback.print_exc()

    finally:
        # Exit the program
        logger.info('Exiting the workflow..')
        sys.exit()
Example #4
def main(imgDir: Path,
         imgPattern: str,
         ffDir: Path,
         brightPattern: str,
         outDir: Path,
         darkPattern: typing.Optional[str] = None,
         photoPattern: typing.Optional[str] = None) -> None:
    ''' Start a process for each set of brightfield/darkfield/photobleach patterns '''
    # Create the FilePattern objects to handle file access
    ff_files = FilePattern(ffDir, brightPattern)
    fp = FilePattern(imgDir, imgPattern)
    if darkPattern is not None and darkPattern != '':
        dark_files = FilePattern(ffDir, darkPattern)
    if photoPattern is not None and photoPattern != '':
        photo_files = FilePattern(
            str(Path(ffDir).parents[0].joinpath('metadata').absolute()),
            photoPattern)

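    # Group by the variables present in the image pattern but not in the flatfield pattern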
    group_by = [v for v in fp.variables if v not in ff_files.variables]
    GROUPED = group_by + ['file']

    ProcessManager.init_processes('main', 'unshade')

    for files in fp(group_by=group_by):

        flat_path = ff_files.get_matching(
            **{k.upper(): v
               for k, v in files[0].items() if k not in GROUPED})[0]['file']
        if flat_path is None:
            logger.warning("Could not find a flatfield image, skipping...")
            continue

        # Default to None so the submit call below still works when the optional
        # darkfield/photobleach patterns are not provided
        dark_path = None
        photo_path = None

        if darkPattern is not None and darkPattern != '':
            dark_path = dark_files.get_matching(**{
                k.upper(): v
                for k, v in files[0].items() if k not in GROUPED
            })[0]['file']

            if dark_path is None:
                logger.warning("Could not find a darkfield image, skipping...")
                continue

        if photoPattern is not None and photoPattern != '':
            photo_path = photo_files.get_matching(**{
                k.upper(): v
                for k, v in files[0].items() if k not in GROUPED
            })[0]['file']

            if photo_path is None:
                logger.warning(
                    "Could not find a photobleach file, skipping...")
                continue

        ProcessManager.submit_process(unshade_batch, files, outDir, flat_path,
                                      dark_path, photo_path)

    ProcessManager.join_processes()
Example #5
def main(
    inpDir: Path,
    outDir: Path,
) -> None:

    ProcessManager.init_processes("main", "zarr")

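    # Submit one image_to_zarr process per file in the input directory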
    for file in inpDir.iterdir():
        ProcessManager.submit_process(image_to_zarr, file, outDir)

    ProcessManager.join_processes()
Example #6
def main(
    inpDir: Path,
    filePattern: str,
    outDir: Path,
) -> None:

    ProcessManager.init_processes("main", "zarr")

    fp = FilePattern(inpDir, filePattern)

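    # Submit one image_to_zarr process for each file matched by the pattern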
    for files in fp():
        for file in files:
            ProcessManager.submit_process(image_to_zarr, file["file"], outDir)

    ProcessManager.join_processes()
Example #7
def main(input_dir: Path,
         output_dir: Path
         ) -> None:

    logger.info('Extracting tiffs and saving as ome.tif...')
    files = [f for f in Path(input_dir).iterdir() if f.suffix == '.czi']
    if not files:
        logger.error('No CZI files found.')
        raise ValueError('No CZI files found.')
        
    ProcessManager.init_processes()
    
    # Submit one FOV-extraction process per CZI file
    for file in files:
        ProcessManager.submit_process(extract_fovs, file, output_dir)
        
    ProcessManager.join_processes()
Example #8
def main(input_dir: pathlib.Path,
         file_pattern: str,
         output_dir: pathlib.Path
         ) -> None:
    
    # create the filepattern object
    fp = filepattern.FilePattern(input_dir, file_pattern)
    
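    # Group files along the z-dimension; each group is merged into a single output file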
    for files in fp(group_by='z'):

        output_name = fp.output_name(files)
        output_file = output_dir.joinpath(output_name)

        ProcessManager.submit_process(_merge_layers, files, output_file)
    
    ProcessManager.join_processes()
Example #9
def main(input_dir: pathlib.Path, pyramid_type: str, image_type: str,
         file_pattern: str, output_dir: pathlib.Path):

    # Set ProcessManager config and initialize
    ProcessManager.num_processes(multiprocessing.cpu_count())
    ProcessManager.num_threads(2 * ProcessManager.num_processes())
    ProcessManager.threads_per_request(1)
    ProcessManager.init_processes('pyr')
    logger.info('max concurrent processes = %s',
                ProcessManager.num_processes())

    # Parse the input file directory
    fp = filepattern.FilePattern(input_dir, file_pattern)
    group_by = ''
    if 'z' in fp.variables and pyramid_type == 'Neuroglancer':
        group_by += 'z'
        logger.info(
            'Stacking images by z-dimension for Neuroglancer precomputed format.'
        )
    elif 'c' in fp.variables and pyramid_type == 'Zarr':
        group_by += 'c'
        logger.info('Stacking channels by c-dimension for Zarr format')
    elif 't' in fp.variables and pyramid_type == 'DeepZoom':
        group_by += 't'
        logger.info('Creating time slices by t-dimension for DeepZoom format.')
    else:
        logger.info(
            f'Creating one pyramid for each image in {pyramid_type} format.')

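    # Track the current output slice and total depth for each pyramid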
    depth = 0
    depth_max = 0
    image_dir = ''

    processes = []

    for files in fp(group_by=group_by):

        # Create the output name for Neuroglancer format
        if pyramid_type in ['Neuroglancer', 'Zarr']:
            try:
                image_dir = fp.output_name(list(files))
            except Exception:
                pass

            if image_dir in ['', '.*']:
                image_dir = files[0]['file'].name

            # Reset the depth
            depth = 0
            depth_max = 0

        pyramid_writer = None

        for file in files:

            with bfio.BioReader(file['file'], max_workers=1) as br:

                if pyramid_type == 'Zarr':
                    d_z = br.c
                else:
                    d_z = br.z

            depth_max += d_z

            for z in range(d_z):

                pyramid_args = {
                    'base_dir': output_dir.joinpath(image_dir),
                    'image_path': file['file'],
                    'image_depth': z,
                    'output_depth': depth,
                    'max_output_depth': depth_max,
                    'image_type': image_type
                }

                pw = PyramidWriter[pyramid_type](**pyramid_args)

                ProcessManager.submit_process(pw.write_slide)

                depth += 1

                if pyramid_type == 'DeepZoom':
                    pw.write_info()

        if pyramid_type in ['Neuroglancer', 'Zarr']:
            if image_type == 'segmentation':
                ProcessManager.join_processes()
            pw.write_info()

    ProcessManager.join_processes()