Example #1
def caller(inputs, inputs_labels, output, functions, params):
    make_dirs(output)
    inputs = inputs[FRAME_REVTRACK:0:-1] + inputs
    inputs_labels = inputs_labels[FRAME_REVTRACK:0:-1] + inputs_labels

    img0, labels0 = imread(inputs[0]), lbread(inputs_labels[0]).astype(
        np.int16)
    labels0 = neg2poslabels(labels0)
    imsave(labels0, output, basename(inputs[0]), dtype=np.int16)
    for holder.frame, (path,
                       pathl) in enumerate(zip(inputs[1:], inputs_labels[1:])):
        img1, labels1 = imread(path), lbread(pathl)
        labels1 = -labels1
        for function, param in zip(functions, params):
            func = getattr(track_operation, function)
            if not (labels1 < 0).any():
                continue
            labels0, labels1 = func(img0, img1, labels0, labels1, **param)
            logger.debug('\t{0} with {1}: {2}'.format(
                function, param, len(set(labels1[labels1 < 0]))))
        logger.info("\tframe {0}: {1} objects linked and {2} unlinked.".format(
            holder.frame, len(set(labels1[labels1 > 0])),
            len(set(labels1[labels1 < 0]))))
        labels0 = neg2poslabels(labels1)
        img0 = img1
        imsave(labels0, output, path, dtype=np.int16)
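A note on the loop setup above: the slice inputs[FRAME_REVTRACK:0:-1] prepends the first few frames in reverse order, so the tracker warms up by running backwards over them before the normal forward pass. A minimal sketch of that slice, using a made-up FRAME_REVTRACK value and file names (not taken from the library):

FRAME_REVTRACK = 3  # assumed value for illustration only
inputs = ['f0.tif', 'f1.tif', 'f2.tif', 'f3.tif', 'f4.tif']

# lst[k:0:-1] walks from index k down to 1 (index 0 excluded),
# so the earliest frames are replayed in reverse before the forward pass.
extended = inputs[FRAME_REVTRACK:0:-1] + inputs
print(extended)
# ['f3.tif', 'f2.tif', 'f1.tif', 'f0.tif', 'f1.tif', 'f2.tif', 'f3.tif', 'f4.tif']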
Example #2
def caller(inputs_list, inputs_labels_list, output, primary, secondary):
    make_dirs(dirname(abspath(output)))

    inputs_list = [inputs_list, ] if isinstance(inputs_list[0], str) else inputs_list
    inputs_labels_list = [inputs_labels_list, ] if isinstance(inputs_labels_list[0], str) else inputs_labels_list

    obj_names = [basename(dirname(i[0])) for i in inputs_labels_list] if primary is None else primary
    ch_names = [basename(dirname(i[0])) for i in inputs_list] if secondary is None else secondary

    for inputs, ch in zip(inputs_list, ch_names):
        for inputs_labels, obj in zip(inputs_labels_list, obj_names):
            logger.info("Channel {0}: {1} applied...".format(ch, obj))
            store = []  # collect cells for this (channel, object) pair only
            for frame, (path, pathl) in enumerate(zip(inputs, inputs_labels)):
                img, labels = imread(path), lbread(pathl, nonneg=False)
                cells = regionprops(labels, img)
                if (labels < 0).any():
                    cells = add_parent(cells, labels)
                for cell in cells:
                    cell.frame = frame
                cells = [Cell(cell) for cell in cells]
                store.append(cells)

            logger.info("\tmaking dataframe...")
            df = multi_index([i for ii in store for i in ii], obj, ch)
            if exists(join(output, 'df.csv')):
                ex_df = pd.read_csv(join(output, 'df.csv'), index_col=['object', 'ch', 'prop', 'frame'])
                ex_df.columns = pd.to_numeric(ex_df.columns)
                ex_df = ex_df.astype(np.float32)
                df = pd.concat([df, ex_df])
            df.to_csv(join(output, 'df.csv'))
    larr = df2larr(df)
    larr.save(join(output, 'df.npz'))
    logger.info("\tdf.npz saved.")
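The CSV handling above appends each (object, channel) block onto whatever was saved previously in df.csv. A minimal pandas sketch of that round trip, with made-up object, channel and property names (not from the library):

import numpy as np
import pandas as pd

index = pd.MultiIndex.from_product(
    [['nuc'], ['YFP'], ['area', 'mean_intensity'], range(3)],
    names=['object', 'ch', 'prop', 'frame'])
df = pd.DataFrame(np.random.rand(len(index), 2).astype(np.float32),
                  index=index, columns=[0, 1])  # columns are cell ids

df.to_csv('df.csv')

# Reading the CSV back turns the integer cell-id columns into strings,
# hence the pd.to_numeric step before concatenating with new results.
ex_df = pd.read_csv('df.csv', index_col=['object', 'ch', 'prop', 'frame'])
ex_df.columns = pd.to_numeric(ex_df.columns)
ex_df = ex_df.astype(np.float32)

combined = pd.concat([df, ex_df])  # new block stacked on the previously saved one
print(combined.shape)              # (12, 2)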
Example #3
def align(img, CROP=0.05):
    """
    CROP (float): crop images beforehand. When set to 0.05, 5% of each edges are cropped.
    """
    if not hasattr(holder, "align"):
        if isinstance(holder.inputs[0], list) or isinstance(
                holder.inputs[0], tuple):
            inputs = [i[0] for i in holder.inputs]
        else:
            inputs = holder.inputs

        img0 = imread(inputs[0])

        (ch, cw) = [int(CROP * i) for i in img0.shape[:2]]
        ch = None if ch == 0 else ch
        cw = None if cw == 0 else cw

        jitters = calc_jitters_multiple(inputs, ch, cw)
        holder.align = calc_crop_coordinates(jitters, img0.shape)
        logger.debug('holder.align set to {0}'.format(holder.align))
    jt = holder.align[holder.frame]
    logger.debug('Jitter: {0}'.format(jt))
    if img.ndim == 2:
        return img[jt[0]:jt[1], jt[2]:jt[3]]
    if img.ndim == 3:
        return img[jt[0]:jt[1], jt[2]:jt[3], :]
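A minimal sketch (not from the library) of how a per-frame crop window like holder.align[holder.frame] slices an image; the (y0, y1, x0, x1) ordering and the numbers are assumptions for illustration:

import numpy as np

img = np.arange(100 * 120).reshape(100, 120)
jt = (2, 98, 3, 117)  # hypothetical crop window for this frame

cropped = img[jt[0]:jt[1], jt[2]:jt[3]]
print(cropped.shape)  # (96, 114): every frame is cut to the same size,
                      # so the aligned stack overlays cleanly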
Example #4
def caller(inputs_list, inputs_labels_list, output, primary, secondary):
    make_dirs(dirname(abspath(output)))

    inputs_list = [
        inputs_list,
    ] if isinstance(inputs_list[0], str) else inputs_list
    inputs_labels_list = [
        inputs_labels_list,
    ] if isinstance(inputs_labels_list[0], str) else inputs_labels_list

    obj_names = [basename(dirname(i[0]))
                 for i in inputs_labels_list] if primary is None else primary
    ch_names = [basename(dirname(i[0]))
                for i in inputs_list] if secondary is None else secondary

    for inputs, ch in zip(inputs_list, ch_names):
        for inputs_labels, obj in zip(inputs_labels_list, obj_names):
            logger.info("Channel {0}: {1} applied...".format(ch, obj))
            arr = np.full((MAX_NUMCELL, len(PROP_SAVE), len(inputs)),
                          np.nan, dtype=np.float32)
            for frame, (path, pathl) in enumerate(zip(inputs, inputs_labels)):
                img, labels = imread(path), lbread(pathl, nonneg=False)
                cells = regionprops(labels, img)
                if (labels < 0).any():
                    cells = add_parent(cells, labels)
                for cell in cells:
                    cell.frame = frame
                cells = [Cell(cell) for cell in cells]
                tarr = _cells2array(cells)
                index = tarr[:, 1].astype(np.int32)
                arr[index, :, frame] = tarr

            logger.info("\tmaking dataframe...")
            cellids = np.where(~np.isnan(arr[:, 0, :]).all(axis=1))[0]
            marr = np.zeros((len(cellids), arr.shape[1], arr.shape[2]))
            for pn, i in enumerate(cellids):
                marr[pn] = arr[i]
            sarr = np.swapaxes(marr, 0, 2)
            narr = sarr.reshape((sarr.shape[0] * sarr.shape[1], sarr.shape[2]),
                                order='F')
            index = pd.MultiIndex.from_product(
                [[obj], [ch], PROP_SAVE, range(arr.shape[-1])],
                names=['object', 'ch', 'prop', 'frame'])
            df = pd.DataFrame(narr, index=index, columns=cellids)

            if exists(join(output, FILE_NAME + '.csv')):
                ex_df = pd.read_csv(
                    join(output, FILE_NAME + '.csv'),
                    index_col=['object', 'ch', 'prop', 'frame'])
                ex_df.columns = pd.to_numeric(ex_df.columns)
                ex_df = ex_df.astype(np.float32)
                df = pd.concat([df, ex_df])
            df.to_csv(join(output, FILE_NAME + '.csv'))
    larr = df2larr(df)
    larr.save(join(output, FILE_NAME + '.npz'))
    logger.info("\t" + FILE_NAME + ".npz saved.")
Example #5
def caller(inputs, output, functions, params):
    holder.inputs = inputs
    make_dirs(output)

    logger.info("Functions {0} for {1} images.".format(functions, len(inputs)))

    for holder.frame, holder.path in enumerate(inputs):
        img = imread(holder.path)
        for function, param in zip(functions, params):
            func = getattr(preprocess_operation, function)
            img = func(img, **param)
        imsave(img, output, holder.path)
        logger.info("\tframe {0} done.".format(holder.frame))
Example #6
def correct_uneven_illumination(img, bkgimg='Pos1/min_stack.tif'):
    '''
    Correct uneven illumination using a background image.
    bkgimg is a background image, such as a blank image or a minimum-projection result.
    '''
    img = img.astype(np.float64)
    bg = gaussian_blur(imread(bkgimg), 3)
    bg = bg.astype(np.float64)
    d0 = img - bg
    d0[d0 < 0] = 0
    m_bg = 1 / (bg / bg.max())  # inverse of the normalized background (flat-field gain)
    img = d0 * m_bg
    return img
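A small numeric sketch (made-up values, not from the library) of the correction above: subtract the blurred background, then multiply by the inverse of the normalized background so that dim regions are boosted relative to the brightest one:

import numpy as np

img = np.array([[110., 120.],
                [130., 160.]])
bg = np.array([[10., 20.],
               [30., 60.]])          # hypothetical smoothed background, brightest at 60

d0 = np.clip(img - bg, 0, None)      # background-subtracted signal
gain = 1 / (bg / bg.max())           # 1.0 where the background is brightest, >1 elsewhere
print(d0 * gain)
# [[600. 300.]
#  [200. 100.]]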
Example #7
def flatfield_references(img,
                         ff_paths=['Pos0/img00.tif', 'Pos1/img01.tif'],
                         exp_corr=False):
    """
    Use empty images for background subtraction and illumination bias correction.
    Given multiple reference images, it will calculate median profile and use it for subtraction.
    If flatfield image has the same illumination pattern but different exposure to the img,  
    turning on bg_align would calculate correction factor.

    ff_paths (str or List(str)): image path for flat fielding references.
                                 It can be single, multiple or path with wildcards.
        e.g.    ff_paths = "FF/img_000000000_YFP*"
                ff_paths = ["FF/img_01.tif", "FF/img_02.tif"]

    """
    store = []
    if isinstance(ff_paths, str):
        ff_paths = [
            ff_paths,
        ]
    for i in ff_paths:
        for ii in glob(i):
            store.append(ii)
    ff_store = []
    for path in store:
        ff_store.append(imread(path))
    ff = np.median(np.dstack(ff_store), axis=2)

    if exp_corr:
        """If a reference is taken at different exposure, or exposure is not stable over time,
        this will try to correct for it. Majority of image needs to be a backrgound.
        """
        def minimize_bg(img, ff, corr, perctile=50, weight=10):
            thres = np.percentile(img, perctile)
            res = img - corr * ff
            res = res[res < thres]
            return np.sum(res[res > 0]) - weight * np.sum(res[res < 0])

        """avoid having negative values yet suppress positive values in background region.
        """
        func = lambda x: minimize_bg(img, ff, x)
        if not hasattr(holder, 'bg_corr'):
            holder.bg_corr = 1.0
        ret = minimize(func, x0=holder.bg_corr, bounds=((0, None), ))
        holder.bg_corr = ret.x
        ff = ret.x * ff

    img = img - ff
    img[img < 0] = np.nan
    img = interpolate_nan(img)
    return img
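The exp_corr branch fits a single scale factor for the reference by penalizing negative residuals much more heavily than positive ones. A self-contained sketch of that idea on synthetic data (the arrays and the 0.7 factor are made up; scipy.optimize.minimize is the same optimizer used above):

import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
ff = 100 + 10 * rng.random((64, 64))        # synthetic flat-field reference
img = 0.7 * ff + 5 * rng.random((64, 64))   # image taken at roughly 0.7x the exposure


def minimize_bg(img, ff, corr, percentile=50, weight=10):
    # Penalize negative residuals heavily; keep positive background residuals small.
    thres = np.percentile(img, percentile)
    res = img - corr * ff
    res = res[res < thres]
    return np.sum(res[res > 0]) - weight * np.sum(res[res < 0])


ret = minimize(lambda x: minimize_bg(img, ff, x), x0=1.0, bounds=((0, None),))
print(ret.x)  # estimated exposure factor; roughly 0.7 for this synthetic data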
Example #8
def caller(inputs, output, functions, params, radius=3):
    # radius: cleanup radius passed to clean_labels below (the default of 3 is an assumption)
    make_dirs(output)
    logger.info("Functions {0} for {1} images.".format(functions, len(inputs)))

    for holder.frame, path in enumerate(inputs):
        img = imread(path)
        for function, param in zip(functions, params):
            func = getattr(segment_operation, function)
            img = func(img, **param)
        if isinstance(path, list) or isinstance(path, tuple):
            path = path[0]
        labels = clean_labels(img, radius)
        imsave(labels, output, path, dtype=np.int16)
        logger.info("\tframe {0}: {1} objects segmented.".format(
            holder.frame, len(np.unique(labels))))
Example #9
def caller(inputs, inputs_labels, output, functions, params):
    make_dirs(output)

    logger.info("Functions {0} for {1} images.".format(functions, len(inputs)))
    img = None
    for holder.frame, (path,
                       pathl) in enumerate(izip_longest(inputs,
                                                        inputs_labels)):
        if path is not None:
            img = imread(path)
        labels0 = lbread(pathl)
        for function, param in zip(functions, params):
            func = getattr(subdetect_operation, function)
            if img is not None:
                labels = func(labels0, img, **param)
            else:
                labels = func(labels0, **param)
        imsave(labels, output, pathl, dtype=np.int16)
        logger.info("\tframe {0} done.".format(holder.frame))
def flatfield_references(img,
                         ff_paths=['Pos0/img00.tif', 'Pos1/img01.tif'],
                         exp_corr=False,
                         per_frame=False):
    """
    Use empty images for background subtraction and illumination bias correction.
    Given multiple reference images, it will calculate median profile and use it for subtraction.
    If flatfield image has the same illumination pattern but different exposure to the img,
    turning on bg_align would calculate correction factor.

    if per_frame is True, each frame will be corrected by the reference from that frame. False = all frames used.

    ff_paths (str or List(str)): image path for flat fielding references.
                                 It can be single, multiple or path with wildcards.
        e.g.    ff_paths = "FF/img_000000000_YFP*"
                ff_paths = ["FF/img_01.tif", "FF/img_02.tif"]

    """
    if not hasattr(holder, 'ff_store'):
        if isinstance(ff_paths, str):
            ff_paths = [
                ff_paths,
            ]

        ff_store = []
        for i in ff_paths:
            temp_ff = []
            for ii in glob(i):
                temp_ff.append(imread(ii))
            ff_store.append(temp_ff)
        holder.ff_store = ff_store

        if per_frame:
            for sl in holder.ff_store:
                assert len(sl) == len(
                    holder.inputs
                ), 'Different number of reference frames than experimental frames'

    if per_frame:
        flattened = [sl[holder.frame] for sl in holder.ff_store]
    else:
        flattened = [l for sl in holder.ff_store for l in sl]

    ff = np.median(np.dstack(flattened), axis=2)

    if exp_corr:
        """If a reference is taken at different exposure, or exposure is not stable over time,
        this will try to correct for it. Majority of image needs to be a backrgound.
        """
        def minimize_bg(img, ff, corr, perctile=50, weight=10):
            thres = np.percentile(img, perctile)
            res = img - corr * ff
            res = res[res < thres]
            return np.sum(res[res > 0]) - weight * np.sum(res[res < 0])

        """avoid having negative values yet suppress positive values in background region.
        """
        func = lambda x: minimize_bg(img, ff, x)
        if not hasattr(holder, 'bg_corr'):
            holder.bg_corr = 1.0
        ret = minimize(func, x0=holder.bg_corr, bounds=((0, None), ))
        holder.bg_corr = ret.x
        ff = ret.x * ff

    img = img - ff
    img[img < 0] = np.nan
    img = interpolate_nan(img)

    # clear holder.ff_store after the last frame
    if holder.path == holder.inputs[-1]:
        del holder.ff_store

    return img
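A minimal sketch (made-up arrays, not from the library) of the per_frame selection above: ff_store holds one list of reference frames per ff_paths entry, and the median is taken over whichever references apply to the current frame:

import numpy as np

ff_store = [
    [np.full((4, 4), 10.0 + f) for f in range(3)],  # references from pattern 1, frames 0-2
    [np.full((4, 4), 20.0 + f) for f in range(3)],  # references from pattern 2, frames 0-2
]

frame, per_frame = 1, True
if per_frame:
    flattened = [sl[frame] for sl in ff_store]      # one reference per pattern, this frame
else:
    flattened = [ref for sl in ff_store for ref in sl]

ff = np.median(np.dstack(flattened), axis=2)
print(ff[0, 0])  # 16.0 with per_frame=True (median of 11 and 21)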