Example #1
            if v in l:
                is_defined = True
                break
        if not is_defined:
            layout.append(v)

    # Layout dimensions, used to calculate positions later on
    layout_dimensions = {'grid_size':[[] for r in range(len(layout))],  # number of tiles in each dimension in the subgrid
                         'size':[[] for r in range(len(layout))],       # total size of subgrid in pixels
                         'tile_size':[[] for r in range(len(layout))]}  # dimensions of each tile in the grid

    # Get the size of each image
    logger.info('Get the size of every image...')
    grid_width = 0
    grid_height = 0
    for files in fp.iterate(group_by=layout[0]):
        # Determine number of rows and columns in the smallest subgrid
        grid_size = _get_xy_index(files,layout[0],layout)
        layout_dimensions['grid_size'][len(layout)-1].append(grid_size)

        # Get the height and width of each image
        for f in files:
            f['width'], f['height'] = BioReader.image_size(f['file'])

            if grid_width < f['width']:
                grid_width = f['width']
            if grid_height < f['height']:
                grid_height = f['height']
        logger.info('Got the size of {} images...'.format(len(files)))

        # Set the pixel and tile dimensions
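
The loop above tracks the largest width and height seen so far, so every tile
in the montage grid can be sized to fit the biggest image. A minimal standalone
sketch of that max-dimension pass, assuming Pillow-readable images instead of
BioReader (max_tile_size is a hypothetical helper, not part of the plugin):

from pathlib import Path
from PIL import Image

def max_tile_size(image_dir):
    """Return the largest (width, height) among the images in image_dir."""
    grid_width, grid_height = 0, 0
    for path in Path(image_dir).iterdir():
        if path.suffix.lower() not in {'.png', '.tif', '.tiff'}:
            continue
        with Image.open(path) as img:  # only the image header is read here
            width, height = img.size
        grid_width = max(grid_width, width)
        grid_height = max(grid_height, height)
    return grid_width, grid_height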
Example #2
    feature_list = _parse_features(features,fp,method)

    # Determine the min, max, and unique values for each data set
    logger.info('Setting feature scales...')
    feature_mins = {}
    feature_ranges = {}
    for key,val in feature_list.items():
        valid_vals = [v for v in val if v != 'NaN']
        if len(valid_vals) == 0:
            feature_mins[key] = 0
            feature_ranges[key] = 0
        else:
            feature_mins[key] = min(valid_vals)
            feature_ranges[key] = max(valid_vals)-feature_mins[key]
    unique_levels = set()
    for fl in fp.iterate():
        if 'line' not in fl:
            continue
        for ft in feature_list:
            try:
                if get_number(fl[ft]):
                    fl[ft] = round((fl[ft] - feature_mins[ft])/feature_ranges[ft] * 254 + 1)
                    unique_levels.update([fl[ft]])
                else:
                    fl[ft] = 0
                    unique_levels.update([0])
            except ZeroDivisionError:
                fl[ft] = 0
                unique_levels.update([0])
                
    # Start the javabridge with proper java logging
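
The scaling step above maps each finite feature value onto the integer range
1..255 and reserves 0 for missing values or features with no spread. The same
mapping as a self-contained sketch (scale_feature is a hypothetical name):

def scale_feature(value, fmin, frange):
    """Map a numeric feature onto 1..255; 0 is reserved for missing data."""
    if value == 'NaN' or frange == 0:
        return 0  # missing value, or a feature where max == min
    return round((value - fmin) / frange * 254 + 1)

# A feature spanning [2.0, 10.0]:
assert scale_feature(2.0, 2.0, 8.0) == 1     # minimum maps to 1
assert scale_feature(10.0, 2.0, 8.0) == 255  # maximum maps to 255
assert scale_feature('NaN', 2.0, 8.0) == 0   # missing data maps to 0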
Example #3
    ''' Start a process for each set of brightfield/darkfield/photobleach patterns '''
    # Create the FilePattern objects to handle file access
    ff_files = FilePattern(ffDir, brightPattern)
    if darkPattern is not None and darkPattern != '':
        dark_files = FilePattern(ffDir, darkPattern)
    if photoPattern is not None and photoPattern != '':
        photo_files = FilePattern(
            str(Path(ffDir).parents[0].joinpath('metadata').absolute()),
            photoPattern)

    # Initialize variables for process management
    processes = []
    process_timer = []
    pnum = 0
    total_jobs = sum(1 for _ in ff_files.iterate())

    # Loop through files in ffDir image collection and process
    base_pstring = "python3 apply_flatfield.py --inpDir {} --outDir {} --filepattern {}".format(
        imgDir, outDir, imgPattern)
    for f in ff_files.iterate():
        # If there are num_cores - 1 processes running, wait until one finishes
        if processes and len(processes) >= multiprocessing.cpu_count() - 1:
            free_process = -1
            while free_process < 0:
                for process in range(len(processes)):
                    if processes[process].poll() is not None:
                        free_process = process
                        break
                # Only check intermittently to free up processing power
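
The loop above is a simple process throttle: it spawns one subprocess per job
but keeps at most cpu_count() - 1 alive, using poll() to detect a free slot.
The same pattern as a reusable sketch (run_throttled is a hypothetical helper
built only on the standard library):

import multiprocessing
import subprocess
import time

def run_throttled(commands, max_procs=None):
    """Run shell commands with at most max_procs subprocesses alive at once."""
    if max_procs is None:
        max_procs = max(multiprocessing.cpu_count() - 1, 1)
    running = []
    for cmd in commands:
        while len(running) >= max_procs:
            # poll() returns None while a process is still running
            running = [p for p in running if p.poll() is None]
            time.sleep(1)  # check intermittently to free up processing power
        running.append(subprocess.Popen(cmd, shell=True))
    for p in running:  # wait for the remaining processes
        p.wait()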
Example #4
if darkfield is not None:
    dark_br = BioReader(darkfield)
    dark_image = np.squeeze(dark_br.read_image())
    del dark_br
else:
    dark_image = np.zeros(flat_image.shape, dtype=np.float32)
if photobleach is not None:
    with open(photobleach, 'r') as f:
        reader = csv.reader(f)
        photo_offset = {
            line[0]: float(line[1])
            for line in reader if line[0] != 'file'
        }
    offset = np.mean(list(photo_offset.values()))
''' Apply flatfield to images '''
for f in images.iterate(R=R, C=C, T=T):
    p = Path(f[0]['file'])
    logger.info("Applying flatfield to image: {}".format(p.name))
    br = BioReader(str(p.absolute()))
    image = br.read_image()
    if photobleach is not None:
        new_image = _unshade(np.squeeze(image),
                             flat_image,
                             dark_image,
                             photo_offset[p.name],
                             offset=offset)
    else:
        new_image = _unshade(np.squeeze(image), flat_image, dark_image)
    bw = BioWriter(str(Path(outDir).joinpath(p.name).absolute()),
                   metadata=br.read_metadata())
    bw.write_image(np.reshape(new_image, image.shape))
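
The _unshade function itself is not shown in this snippet. The classic
flatfield correction that its call signature suggests subtracts the darkfield
and divides by the flatfield; the sketch below is an assumption about that
math, not the plugin's actual implementation:

import numpy as np

def unshade_sketch(image, flat_image, dark_image, eps=1e-8):
    """Assumed flatfield correction: (raw - darkfield) / flatfield."""
    # eps guards against division by zero in dark regions of the flatfield
    return (image.astype(np.float32) - dark_image) / (flat_image + eps)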
Example #5
                        required=True)

    # Parse the arguments
    args = parser.parse_args()
    filePattern = args.filePattern
    logger.info('filePattern = {}'.format(filePattern))
    inpDir = args.inpDir
    logger.info('inpDir = {}'.format(inpDir))
    outDir = args.outDir
    logger.info('outDir = {}'.format(outDir))

    # Get all file names in inpDir image collection
    inpDir_files = FilePattern(inpDir, filePattern)

    # Loop through files in inpDir image collection and process
    for files in inpDir_files.iterate():
        if isinstance(files, list):
            # if a filename pattern is used, then files is a list of file dictionaries
            for f in files:
                input_path = Path(f['file'])
                output_path = Path(outDir).joinpath(input_path.name)
                logger.info('Copying file: {}'.format(input_path.name))
                shutil.copy(str(input_path.absolute()),
                            str(output_path.absolute()))
        else:
            # if no filename pattern was used, only a single file dictionary is returned
            input_path = Path(files['file'])
            output_path = Path(outDir).joinpath(input_path.name)
            logger.info('Copying file: {}'.format(input_path.name))
            shutil.copy(str(input_path.absolute()),
                        str(output_path.absolute()))
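
Both branches above repeat the same three copy statements; the only difference
is whether iterate() yielded a list or a single file dictionary. A sketch that
factors the duplication into one helper (copy_one is a hypothetical name; the
loop reuses inpDir_files and outDir from the example):

import shutil
from pathlib import Path

def copy_one(file_dict, out_dir):
    """Copy the file referenced by a FilePattern file dictionary into out_dir."""
    input_path = Path(file_dict['file'])
    output_path = Path(out_dir).joinpath(input_path.name)
    shutil.copy(str(input_path.absolute()), str(output_path.absolute()))

for files in inpDir_files.iterate():
    for f in (files if isinstance(files, list) else [files]):
        copy_one(f, outDir)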
Example #6
def main():
    """ Initialize argument parser """
    logger.info("Parsing arguments...")
    parser = argparse.ArgumentParser(
        prog='main',
        description='Calculate flatfield information from an image collection.'
    )
    """ Define the arguments """
    parser.add_argument(
        '--inpDir',  # Input image collection
        dest='inpDir',
        type=str,
        help='Path to input images.',
        required=True)
    parser.add_argument(
        '--darkfield',  # Whether to calculate the darkfield image
        dest='darkfield',
        type=str,
        help='If true, calculate darkfield contribution.',
        required=False)
    parser.add_argument(
        '--photobleach',  # Whether to calculate a photobleaching scalar
        dest='photobleach',
        type=str,
        help='If true, calculates a photobleaching scalar.',
        required=False)
    parser.add_argument(
        '--inpRegex',  # Input file name pattern
        dest='inp_regex',
        type=str,
        help='Input file name pattern.',
        required=False)
    parser.add_argument(
        '--outDir',  # Output directory
        dest='output_dir',
        type=str,
        help='The output directory for the flatfield images.',
        required=True)
    """ Get the input arguments """
    args = parser.parse_args()
    fpath = args.inpDir
    """Checking if there is images subdirectory"""
    if (Path.is_dir(Path(args.inpDir).joinpath('images'))):
        fpath = Path(args.inpDir).joinpath('images')
    get_darkfield = str(args.darkfield).lower() == 'true'
    output_dir = Path(args.output_dir).joinpath('images')
    output_dir.mkdir(exist_ok=True)
    metadata_dir = Path(args.output_dir).joinpath('metadata_files')
    metadata_dir.mkdir(exist_ok=True)
    inp_regex = args.inp_regex
    get_photobleach = str(args.photobleach).lower() == 'true'

    logger.info('input_dir = {}'.format(fpath))
    logger.info('get_darkfield = {}'.format(get_darkfield))
    logger.info('get_photobleach = {}'.format(get_photobleach))
    logger.info('inp_regex = {}'.format(inp_regex))
    logger.info('output_dir = {}'.format(output_dir))
    # Set up lists for tracking processes
    processes = []
    process_timer = []
    pnum = 0
    # Iterator that groups files with constant r, t, and c values
    file = FilePattern(fpath, inp_regex)
    total_no = len(list(file.iterate(group_by='xyz')))
    for i in file.iterate(group_by='xyz'):
        if len(processes) >= multiprocessing.cpu_count() - 1:
            free_process = -1
            while free_process < 0:
                for process in range(len(processes)):
                    if processes[process].poll() is not None:
                        free_process = process
                        break
                # Wait between checks to free up some processing power
                time.sleep(3)
            pnum += 1
            logger.info("Finished process {} of {} in {}s!".format(
                pnum, total_no,
                time.time() - process_timer[free_process]))
            del processes[free_process]
            del process_timer[free_process]

        logger.info("Starting process [r,t,c]: [{},{},{}]".format(
            i[0]['r'], i[0]['t'], i[0]['c']))
        processes.append(
            subprocess.Popen(
                "python3 basic.py --inpDir {} --outDir {} --darkfield {} --photobleach {} --inpRegex {} --R {} --T {} --C {}"
                .format(fpath, args.output_dir, get_darkfield, get_photobleach,
                        inp_regex, i[0]['r'], i[0]['t'], i[0]['c']),
                shell=True))
        process_timer.append(time.time())
    while len(processes) > 1:
        free_process = -1
        while free_process < 0:
            for process in range(len(processes)):
                if processes[process].poll() is not None:
                    free_process = process
                    break
            # Wait between checks to free up some processing power
            time.sleep(3)
        pnum += 1
        logger.info("Finished process {} of {} in {}s!".format(
            pnum, total_no,
            time.time() - process_timer[free_process]))
        del processes[free_process]
        del process_timer[free_process]

    if processes:
        processes[0].wait()
        logger.info("Finished process {} of {} in {}s!".format(
            total_no, total_no,
            time.time() - process_timer[0]))
    logger.info("Finished all processes!")