Code Example #1
File: main.py  Project: venkatadhatri/polus-plugins
def main(imgPath: pathlib.Path,
         stitchPath: pathlib.Path,
         outDir: pathlib.Path,
         timesliceNaming: typing.Optional[bool]
         ) -> None:

    '''Setup stitching variables/objects'''
    # Get a list of stitching vectors
    vectors = list(stitchPath.iterdir())
    vectors.sort()

    # Try to infer a filepattern from the files on disk for faster matching later
    global fp # make the filepattern global to share between processes
    try:
        pattern = filepattern.infer_pattern([f.name for f in imgPath.iterdir()])
        logger.info(f'Inferred file pattern: {pattern}')
        fp = filepattern.FilePattern(imgPath,pattern)

    # Pattern inference didn't work, so fall back to matching every file
    except Exception:
        logger.info('Unable to infer pattern, defaulting to: .*')
        fp = filepattern.FilePattern(imgPath,'.*')

    '''Run stitching jobs in separate processes'''
    ProcessManager.init_processes('main','asmbl')

    for v in vectors:
        # Check to see if the file is a valid stitching vector
        if 'img-global-positions' not in v.name:
            continue

        ProcessManager.submit_process(assemble_image,v,outDir)

    ProcessManager.join_processes()
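
Note: the entry point above relies on the filepattern library both to infer a naming pattern and to index the image directory. A minimal sketch of that flow, using hypothetical file names and a placeholder directory (the exact pattern string returned depends on the library version):

import pathlib
import filepattern

# Hypothetical tile names; infer_pattern derives a pattern string from them
names = ['tile_x001_y001.ome.tif', 'tile_x002_y001.ome.tif']
pattern = filepattern.infer_pattern(names)

# FilePattern then indexes a directory by that pattern for fast matching,
# as done with imgPath above ('images' is a placeholder path)
fp = filepattern.FilePattern(pathlib.Path('images'), pattern)
for files in fp():
    print(files[0]['file'])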
Code Example #2
File: main.py  Project: Priyaaxle/polus-plugins
def _parse_stitch(stitchPath: pathlib.Path,
                  timepointName: bool = False) -> dict:
    """ Load and parse image stitching vectors

    This function parses the data from a stitching vector, then extracts the
    relevant image sizes for each image in the stitching vector to obtain a
    stitched image size. This function also infers an output file name.

    Args:
        stitchPath: A path to stitching vectors
        timepointName: Use the vector timeslice as the image name
    Returns:
        Dictionary with keys (width, height, name, filePos)
    """

    # Initialize the output
    out_dict = {'width': 0,
                'height': 0,
                'name': '',
                'filePos': []}

    # Try to parse the stitching vector using the inferred file pattern
    if fp.pattern != '.*':
        vp = filepattern.VectorPattern(stitchPath,fp.pattern)
        unique_vals = {k.upper():v for k,v in vp.uniques.items() if len(v)==1}
        files = fp.get_matching(**unique_vals)

    else:

        # Try to infer a pattern from the stitching vector
        try:
            vector_files = filepattern.VectorPattern(stitchPath,'.*')
            pattern = filepattern.infer_pattern([v[0]['file'] for v in vector_files()])
            vp = filepattern.VectorPattern(stitchPath,pattern)

        # Fall back to universal filepattern
        except ValueError:
            vp = filepattern.VectorPattern(stitchPath,'.*')

        files = fp.files

    file_names = [f['file'].name for f in files]

    for file in vp():

        if file[0]['file'] not in file_names:
            continue

        stitch_groups = {k:get_number(v) for k,v in file[0].items()}
        stitch_groups['file'] = files[0]['file'].with_name(stitch_groups['file'])

        # Get the image size
        stitch_groups['width'], stitch_groups['height'] = BioReader.image_size(stitch_groups['file'])

        # Set the stitching vector values in the file dictionary
        out_dict['filePos'].append(stitch_groups)

    # Calculate the output image dimensions
    out_dict['width'] = max([f['width'] + f['posX'] for f in out_dict['filePos']])
    out_dict['height'] = max([f['height'] + f['posY'] for f in out_dict['filePos']])

    # Generate the output file name
    if timepointName:
        global_regex = r".*global-positions-([0-9]+)\.txt"
        name = re.match(global_regex,pathlib.Path(stitchPath).name).groups()[0]
        name += '.ome.tif'
        out_dict['name'] = name
        ProcessManager.job_name(out_dict['name'])
        ProcessManager.log('Setting output name to timepoint slice number.')
    else:
        # Try to infer a good filename
        try:
            out_dict['name'] = vp.output_name()
            ProcessManager.job_name(out_dict['name'])
            ProcessManager.log('Inferred output file name from vector.')

        # A file name couldn't be inferred, so default to the first image name
        except Exception:
            for file in vp():
                out_dict['name'] = file[0]['file']
                break
            ProcessManager.job_name(out_dict['name'])
            ProcessManager.log('Could not infer output file name from vector, using the first file name in the stitching vector as the output file name.')

    return out_dict
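
Note: the output canvas size computed at the end of _parse_stitch is simply the maximum of tile position plus tile size along each axis. A tiny worked example with hypothetical values:

# Two hypothetical 1024x1024 tiles with roughly 10% horizontal overlap
file_pos = [
    {'width': 1024, 'height': 1024, 'posX': 0,   'posY': 0},
    {'width': 1024, 'height': 1024, 'posX': 922, 'posY': 0},
]
width = max(f['width'] + f['posX'] for f in file_pos)    # 1946
height = max(f['height'] + f['posY'] for f in file_pos)  # 1024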
Code Example #3
File: basic.py  Project: Vishakha6/polus-plugins
def basic(files: typing.List[Path],
          out_dir: Path,
          metadata_dir: typing.Optional[Path] = None,
          darkfield: bool = False,
          photobleach: bool = False):

    # Try to infer an output filename from a filepattern
    try:
        pattern = infer_pattern([f['file'].name for f in files])
        fp = FilePattern(files[0]['file'].parent,pattern)
        base_output = fp.output_name()

    # Fall back to the first filename
    except Exception:
        base_output = files[0]['file'].name
        
    extension = ''.join(files[0]['file'].suffixes)

    with ProcessManager.process(base_output):

        # Load files and sort
        ProcessManager.log('Loading and sorting images...')
        img_stk,X,Y = _get_resized_image_stack(files)
        img_stk_sort = np.sort(img_stk)
        
        # Initialize options
        new_options = _initialize_options(img_stk_sort,darkfield,OPTIONS)

        # Initialize flatfield/darkfield matrices
        ProcessManager.log('Beginning flatfield estimation')
        flatfield_old = np.ones((new_options['size'],new_options['size']),dtype=np.float64)
        darkfield_old = np.random.normal(size=(new_options['size'],new_options['size'])).astype(np.float64)
        
        # Optimize until the change in values is below tolerance or a maximum number of iterations is reached
        for w in range(new_options['max_reweight_iterations']):
            # Optimize using the inexact augmented Lagrangian multiplier method with an L1 loss
            A, E1, A_offset = _inexact_alm_l1(copy.deepcopy(img_stk_sort),new_options)

            # Calculate the flatfield/darkfield images and update training weights
            flatfield, darkfield, new_options = _get_flatfield_and_reweight(A,E1,A_offset,new_options)

            # Calculate the change in flatfield and darkfield images between iterations
            mad_flat = np.sum(np.abs(flatfield-flatfield_old))/np.sum(np.abs(flatfield_old))
            temp_diff = np.sum(np.abs(darkfield - darkfield_old))
            if temp_diff < 10**-7:
                mad_dark = 0
            else:
                mad_dark = temp_diff / np.max(np.sum(np.abs(darkfield_old)), initial=10**-6)
            flatfield_old = flatfield
            darkfield_old = darkfield

            # Stop optimizing if the change in flatfield/darkfield is below threshold
            ProcessManager.log('Iteration {} loss: {}'.format(w+1,mad_flat))
            if np.max(mad_flat,initial=mad_dark) < new_options['reweight_tol']:
                break

        # Calculate photobleaching effects if specified
        if photobleach:
            pb = _get_photobleach(copy.deepcopy(img_stk),flatfield,darkfield)

        # Resize images back to original image size
        ProcessManager.log('Saving outputs...')
        flatfield = cv2.resize(flatfield,(Y,X),interpolation=cv2.INTER_CUBIC).astype(np.float32)
        if new_options['darkfield']:
            darkfield = cv2.resize(darkfield,(Y,X),interpolation=cv2.INTER_CUBIC).astype(np.float32)
        
        # Export the flatfield image as a tiled tiff
        flatfield_out = base_output.replace(extension,'_flatfield' + extension)
        
        with BioReader(files[0]['file'],max_workers=2) as br:
            metadata = br.metadata
        
        with BioWriter(out_dir.joinpath(flatfield_out),metadata=metadata,max_workers=2) as bw:
            bw.dtype = np.float32
            bw.x = X
            bw.y = Y
            bw[:] = np.reshape(flatfield,(Y,X,1,1,1))
        
        # Export the darkfield image as a tiled tiff
        if new_options['darkfield']:
            darkfield_out = base_output.replace(extension,'_darkfield' + extension)
            with BioWriter(out_dir.joinpath(darkfield_out),metadata=metadata,max_workers=2) as bw:
                bw.dtype = np.float32
                bw.x = X
                bw.y = Y
                bw[:] = np.reshape(darkfield,(Y,X,1,1,1))
            
        # Export the photobleaching components as csv
        if photobleach:
            offsets_out = base_output.replace(extension,'_offsets.csv')
            with open(metadata_dir.joinpath(offsets_out),'w') as fw:
                fw.write('file,offset\n')
                for f,o in zip(files,pb[0,:].tolist()):
                    fw.write("{},{}\n".format(f,o))
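
Note: the reweighting loop above stops when the L1 relative change between successive flatfield (and darkfield) estimates drops below reweight_tol. The same test, extracted into a standalone helper for clarity (the helper name is ours, not from the plugin):

import numpy as np

def l1_relative_change(new, old, eps=10**-6):
    # Relative L1 change between successive estimates, guarded against
    # division by (near-)zero in the same way as the loop above
    diff = np.sum(np.abs(new - old))
    return 0.0 if diff < 10**-7 else diff / max(np.sum(np.abs(old)), eps)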
Code Example #4
def main(stitch_dir: Path, collection_dir: Path, output_dir: Path,
         pattern: str):

    if pattern in [None, ".+", ".*"]:
        pattern = filepattern.infer_pattern(
            [f.name for f in collection_dir.iterdir()])
        logger.info(f"Inferred filepattern: {pattern}")

    # Parse files in the image collection
    fp = filepattern.FilePattern(collection_dir, pattern)

    # Get valid stitching vectors
    vectors = [
        v for v in Path(stitch_dir).iterdir()
        if Path(v).name.startswith("img-global-positions")
    ]
    """Get filepatterns for each stitching vector

    This section of code creates a filepattern for each stitching vector, and while
    traversing the stitching vectors analyzes the patterns to see which values in the
    filepattern are static or variable within a single stitching vector and across
    stitching vectors. The `singulars` variable determines which case each variable is:

    `singulars[v] == -1` when the variable, v, changes within a stitching vector.

    `singulars[v] == None` when the variable, v, changes across stitching vectors.

    Otherwise, `singulars[v]` holds the variable's single, unchanging value.

    The variables that change across stitching vectors are grouping variables for the
    filepattern iterator.

    """
    singulars = {}
    vps = {}
    for vector in vectors:
        vps[vector.name] = filepattern.VectorPattern(vector, pattern)
        for variable in vps[vector.name].variables:
            if variable not in singulars.keys():
                if len(vps[vector.name].uniques[variable]) == 1:
                    singulars[variable] = vps[vector.name].uniques[variable]
                else:
                    singulars[variable] = -1
            elif (variable in singulars.keys() and
                  vps[vector.name].uniques[variable] != singulars[variable]):
                singulars[variable] = None if singulars[variable] != -1 else -1

    group_by = "".join([k for k, v in singulars.items() if v == -1])

    vector_count = 1
    for vector in vectors:

        logger.info("Processing vector: {}".format(str(vector.absolute())))

        sp = vps[vector.name]

        # Define the variables used in the current vector pattern so that corresponding
        # files can be located from files in the image collection with filepattern.
        matching = {
            k.upper(): sp.uniques[k][0]
            for k, v in singulars.items() if v is None
        }
        vector_groups = [
            k for k, v in singulars.items() if v not in [None, -1]
        ]

        # Vector output dictionary
        vector_dict = {}

        # Loop through lines in the stitching vector, generate new vectors
        for v in sp():
            variables = {
                key.upper(): value
                for key, value in v[0].items() if key in group_by
            }
            variables.update(matching)

            for files in fp(**variables):

                for f in files:
                    # Get the file writer, create it if it doesn't exist
                    temp_dict = vector_dict
                    for key in vector_groups:
                        if f[key] not in temp_dict.keys():
                            if vector_groups[-1] != key:
                                temp_dict[f[key]] = {}
                            else:
                                fname = "img-global-positions-{}.txt".format(
                                    vector_count)
                                vector_count += 1
                                logger.info(
                                    "Creating vector: {}".format(fname))
                                temp_dict[f[key]] = open(
                                    str(
                                        Path(output_dir).joinpath(
                                            fname).absolute()),
                                    "w",
                                )
                        temp_dict = temp_dict[f[key]]

                    # temp_dict now references the leaf of the nested dictionary: the open file writer
                    fw = temp_dict

                    fw.write(
                        "file: {}; corr: {}; position: ({}, {}); grid: ({}, {});\n"
                        .format(
                            Path(f["file"]).name,
                            v[0]["correlation"],
                            v[0]["posX"],
                            v[0]["posY"],
                            v[0]["gridX"],
                            v[0]["gridY"],
                        ))

        # Close all open stitching vectors
        close_vectors(vector_dict)

    logger.info("Plugin completed all operations!")
Code Example #5
def test_alphanumeric_variable_width(self):

    pattern = infer_pattern(self.data['variable'])

    self.assertEqual(pattern, 'S1_R{r}_C1-C11_A1_y{t+}_x{c+}_c{z+}.ome.tif')
Code Example #6
def test_alphanumeric_fixed_width(self):

    pattern = infer_pattern(self.data['brain'])

    self.assertEqual(pattern, 'S1_R{r}_C1-C11_A1_y0{tt}_x0{cc}_c0{zz}.ome.tif')
Code Example #7
def test_numeric_fixed_width(self):

    pattern = infer_pattern(self.data['robot'])

    self.assertEqual(pattern, '00{r}0{tt}-{c}-00100100{z}.tif')
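
Note: the three tests above show how infer_pattern encodes digit widths: a {t+}-style variable matches a variable-width number, while repeated letters such as {tt} match a fixed width, with literal zero padding kept in the pattern. A quick hypothetical illustration; the exact variable letters and output depend on the library version:

from filepattern import infer_pattern

# Mixed digit widths should infer a variable-width '{..+}' pattern
print(infer_pattern(['img_c1.tif', 'img_c10.tif']))
# Zero-padded fixed-width digits should infer a fixed-width pattern
print(infer_pattern(['img_c01.tif', 'img_c02.tif']))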