Example #1
# Assumed imports: glob and os.path from the standard library, io from picasso.
import glob as _glob
import os.path as _ospath

from picasso import io as _io


def locs_glob_map(func, pattern, args=None, kwargs=None, extension=''):
    '''
    Maps a function to localization files, specified by a unix-style path pattern.
    The function must take three arguments: locs, info and path. It may take additional args and kwargs,
    which are supplied to this map function and passed through.
    A new locs file will be saved if an extension is provided. In that case the mapped function must return
    new locs and a new info dict.
    '''
    args = [] if args is None else args  # Avoid mutable default arguments
    kwargs = {} if kwargs is None else kwargs
    paths = _glob.glob(pattern)
    for path in paths:
        locs, info = _io.load_locs(path)
        result = func(locs, info, path, *args, **kwargs)
        if extension:
            base, ext = _ospath.splitext(path)
            out_path = base + '_' + extension + '.hdf5'
            locs, info = result
            _io.save_locs(out_path, locs, info)
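A minimal usage sketch; the mapped function flip_x and the data path below are illustrative, not part of the original module:

def flip_x(locs, info, path):
    # Hypothetical mapped function: mirror the x coordinate and document the step in info.
    locs.x = info[0]['Width'] - 1 - locs.x
    return locs, info + [{'generatedby': 'flip_x'}]

# Process every matching file; since an extension is given, results are saved as *_flipx.hdf5.
locs_glob_map(flip_x, '/path/to/data/*_locs.hdf5', extension='flipx')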
Example #2
def main(locs, info, path, **params):
    '''
    Cluster detection (pick) in localizations by thresholding the number of localizations per cluster.
    Cluster centers are determined by rendering the localization list at the given oversampling using picasso.render.
    
    Args:
        locs(numpy.recarray):           (Undrifted) localization list as created by `picasso.render`_.
        info(list(dict)):               Info corresponding to the localization list when loaded with picasso.io.load_locs().
    
    Keyword Arguments:
        pick_diameter(float=1.2):       Pick diameter in original pixels.
        oversampling(int=5):            Oversampling for rendering of localization list, i.e. sub-pixels per pixel of original image.
        min_n_locs(float=0.2*NoFrames): Detection threshold for the number of localizations per cluster.
                                        The standard value is set for `spt`_.
                                        Set to a lower value for typical DNA-PAINT signals (see ``lbfcs``).
        fit_center(bool=False):         False = Center of mass. True = 2D Gaussian fitting of center.
        lbfcs(bool=False):              If True, overrides min_n_locs and sets it to 0.02*NoFrames.
        
    Returns:
        list:
            - [0] (dict):             kwargs passed to function.
            - [1] (pandas.DataFrame): Picked localizations, saved with extension _picked.hdf5.
            - [2] (pandas.DataFrame): Center positions and number of localizations per pick, saved with extension _autopick.yaml.
            - [3] (str):              Full save path.

    '''
    ### Path of file that is processed
    path = os.path.splitext(path)[0]

    ### Set standard conditions if not set as input
    NoFrames = info[0]['Frames']

    standard_params = {
        'pick_diameter': 1.2,
        'oversampling': 5,
        'min_n_locs': 0.2 * NoFrames,
        'fit_center': False,
    }
    ### If lbfcs is given, override min_n_locs
    if params.get('lbfcs', False):
        standard_params['min_n_locs'] = 0.02 * NoFrames
        standard_params['lbfcs'] = True

    ### Fill in standard parameters for any that were not passed (or passed as None)
    for key, value in standard_params.items():
        if params.get(key) is None:
            params[key] = value

    ### Remove keys in params that are not needed
    for key in [k for k in params if k not in standard_params]:
        del params[key]

    ### Processing marks: generatedby
    params['generatedby'] = 'picasso_addon.autopick.main()'

    print('Minimum number of localizations in pick set to %i' %
          (params['min_n_locs']))

    ### Check if locs is given as numpy.recarray as created by picasso.localize
    if not isinstance(locs, np.recarray):
        raise SystemExit('locs must be given as numpy.recarray')

    ### Render locs
    print('Rendering locs for pick detection ...(oversampling = %i)' %
          params['oversampling'])
    image = render.render(
        locs,
        info,
        oversampling=params['oversampling'],
    )[1]

    ### Get pick centers in image coordinates ...
    ### ... i.e. first define the correct box size for the given pick_diameter
    pick_box_half = params['pick_diameter'] * 0.5 * params['oversampling']  # Half the box in oversampled pixels
    pick_box_half = np.floor(pick_box_half - 0.5)
    pick_box = int(2 * pick_box_half + 1)  # Multiply half by 2 and add 1 to get an odd integer box size
    pick_box = max(3, pick_box)  # Ensure that box >= 3
    # e.g. pick_diameter=1.2, oversampling=5: half = floor(3.0 - 0.5) = 2, so box = 5
    print('Identifying valid picks ...(box = %i)' % pick_box)

    centers_image, fit = spotcenters_in_image(image,
                                              pick_box,
                                              params['min_n_locs'],
                                              fit=params['fit_center'])
    params['fit_center'] = fit  # Update if it was not successful!

    ### Convert image coordinate centers to original values as in locs
    centers = coordinate_convert(
        centers_image,
        (0, 0),
        params['oversampling'],
    )
    ### Save converted centers as picks.yaml for later usage in picasso.render
    addon_io.save_picks(
        centers,
        params['pick_diameter'],
        path + '_autopick.yaml',
    )

    ### Query locs for centers
    print('Building and querying KDTree ...(pick_diameter = %.1f)' %
          params['pick_diameter'])
    picks_idx = query_locs_for_centers(
        locs,
        centers[['x', 'y']].values,
        pick_radius=params['pick_diameter'] / 2,
    )
    ### Assign group ID to locs
    print('Assigning %i groups ...' % len(picks_idx))
    locs_picked = get_picked(
        locs,
        picks_idx,
    )

    ### Apply Chung-Kennedy local mean filter to photons of each group
    print('Applying Chung-Kennedy filter ...')
    tqdm.pandas()
    locs_picked = locs_picked.groupby('group').progress_apply(
        lambda df: df.assign(photons_ck=ck_nlf(df.photons.values).astype(
            np.float32)))

    ### Save locs_picked as .hdf5 and info+params as .yaml
    print('Saving _picked ...')
    info_picked = info.copy() + [params]  # Update info
    next_path = path + '_picked.hdf5'  # Update path
    io.save_locs(
        next_path,
        locs_picked.to_records(index=False),
        info_picked,
    )

    return [params, locs_picked, centers, next_path]
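A minimal usage sketch for this autopick entry point; the file path is illustrative and loading via picasso.io is assumed:

from picasso import io

locs, info = io.load_locs('/path/to/data_locs_render.hdf5')  # Undrifted localizations (numpy.recarray)
params, locs_picked, centers, next_path = main(
    locs,
    info,
    '/path/to/data_locs_render.hdf5',
    pick_diameter=1.2,
    oversampling=5,
)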
Example #3
def main(file, info, path, **params):
    '''
    Localize movie (least squares, GPU fitting if available) and undrift the resulting localizations using RCC.
     
    Args:
        file(picasso.io):          Either a raw movie loaded with picasso.io.load_movie() or a _locs.hdf5 loaded with picasso.io.load_locs()
        info(list(dict)):          Info corresponding to the raw movie/_locs.hdf5 loaded with picasso.io.load_movie() or picasso.io.load_locs()
    
    Keyword Arguments:
        use_maps(bool=False):       Pixel calibration of raw movie using offset and gain maps for CMOS sensors
        localize(bool=True):        Localize raw or calibrated movie (CMOS) (see picasso_addon.localize)
        box(int=5):                 Box length (odd!) of fitted spots (see picasso.localize)
        mng(int or str='auto'):     Minimal net-gradient spot detection threshold (see picasso.localize). If set to 'auto' the minimal net gradient is determined by autodetect_mng().
        baseline(int=113):          Camera spec. baseline (see picasso.localize).
        gain(float=1):              Camera spec. EM gain (see picasso.localize)
        sensitivity(float=0.49):    Camera spec. sensitivity (see picasso.localize)
        qe(float=1):                Camera spec. quantum efficiency (see picasso.localize), set to 1 to count generated photoelectrons.
        weight_fit(bool=False):     Passed through to localize_movie() (see picasso_addon.localize)
        
        undrift(bool=True):              Apply RCC drift correction (see picasso.postprocess)
        segments(int=450 or str='auto'): Segment length (frames) for undrifting by RCC (see picasso.render). If set to 'auto' segment length is ``int(ceil(NoFrames/10))``.
    
    Returns: 
        list:
        - [0](list(dict)):     Updated info, i.e. the input info with the parameters used appended.
        - [1](numpy.recarray): Undrifted localization list saved as _render.hdf5 (if ``undrift`` was ``True``), otherwise localization list (no undrifting) saved as _locs.hdf5
    '''
    ############################# Set standard parameters if not given with function call
    standard_params = {
        'use_maps': False,
        'localize': True,
        'box': 5,
        'mng': 'auto',
        'baseline': 113,
        'sensitivity': 0.49,
        'qe': 1,
        'gain': 1,
        'weight_fit': False,
        'undrift': True,
        'segments': 450,
    }
    for key, value in standard_params.items():
        if params.get(key) is None:
            params[key] = value

    #############################  Path of file that is processed
    print()
    path = os.path.splitext(path)[0]

    ############################# Localize
    if params['localize']:

        file = np.array(
            file,
            dtype=np.float32)  # Convert from io.TiffMultiMap to numpy.array

        if params['use_maps']:  ### Calibrate movie using px-maps (CMOS)
            offset = picasso_addon.load_offset_map()  # Load offset map
            gain = picasso_addon.load_gain_map()  # Load gain map
            print('Converting movie to e- using maps ...')
            file = (file - offset[np.newaxis, :, :]
                    ) / gain[np.newaxis, :, :]  # Calibrate movie

        else:  ### Just conversion to e- using standard settings
            offset = params['baseline']  # Equal offset for every px
            gain = (1 / params['sensitivity'])  # Equal gain for every px
            print('Converting movie to e- ...')
            file = (
                file - offset
            ) / gain  # Subtract constant offset and divide by constant gain (i.e. multiply by sensitivity)

        ### Autodetect mng
        if params['mng'] == 'auto':
            params['mng'] = autodetect_mng(file, info, params['box'])
            params['auto_mng'] = True
            print('Minimum net-gradient set to %i' % (params['mng']))

        ### Localize movie
        locs = localize_movie(
            file,
            params['box'],
            params['mng'],
            0,  # Baseline set to 0 since movie already converted to e-
            1,  # Sensitivity set to 1 since movie already converted to e-
            params['qe'],
            params['gain'],
            params['weight_fit'])

        ### Save _locs and yaml
        print('Saving _locs ...')
        params_localize = params.copy()
        for key in ['undrift', 'segments']:
            params_localize.pop(key)  # Remove keys for undrifting
        info = info + [params_localize]  # Update info
        next_path = path + '_locs.hdf5'  # Update path
        io.save_locs(
            next_path,
            locs,
            info,
        )

        ### Update path
        next_path = os.path.splitext(next_path)[0]  # Remove file extension

    else:
        locs = file
        next_path = path

    ############################## Undrift
    if params['undrift']:
        if params['segments'] == 'auto':
            NoFrames = info[0]['Frames']
            params['segments'] = int(np.ceil(NoFrames / 10))
        try:
            drift, locs_undrift = postprocess.undrift(locs,
                                                      info,
                                                      params['segments'],
                                                      display=False)
            ### Save _locs_render and yaml
            print('Saving _render ...')
            info = info + [{'segments': params['segments']}]  # Update info
            next_path = next_path + '_render.hdf5'  # Update path
            io.save_locs(
                next_path,
                locs_undrift,
                info,
            )
        except Exception:
            print('Undrifting by RCC was not possible')
            locs_undrift = locs

    else:
        print('No undrifting')
        locs_undrift = locs

    return [info, locs_undrift]
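A minimal usage sketch; the movie path is illustrative and loading via picasso.io is assumed:

from picasso import io

movie, movie_info = io.load_movie('/path/to/movie.ome.tif')
info_out, locs_undrift = main(
    movie,
    movie_info,
    '/path/to/movie.ome.tif',
    mng='auto',    # Autodetect minimal net gradient
    undrift=True,  # Apply RCC drift correction afterwards
)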
Example #4
def main(locs, info, path, **params):
    '''
    Link localizations using trackpy.link_df() (`trackpy`_) on localizations (`picasso.localize`_) with given search_range and 
    memory to get trajectories sorted by group and frame. All tracks of 10 frames or shorter are removed. Trajectories 
    will be saved as ``'_picked%02i%02i.hdf5'%(search_range,memory)`` with corresponding info as .yaml.
    
    Args:
        locs(pandas.DataFrame):    Localization list, i.e. _locs.hdf5 as in `picasso.localize`_
        info(list):                Info (_locs.yaml) corresponding to the _locs.hdf5 localizations, as a list of dictionaries.
        path(str):                 Path to _locs.hdf5 file.
        
    Keyword Args:
        search_range(int=5):           Localizations within search_range (spatial) will be connected to tracks (see trackpy.link_df)
        memory(int=3):                 Localizations within memory (temporal, frames) will be connected to tracks (see trackpy.link_df)
    
    Returns:
        list:
            
        - [0](dict):             Dict of keyword arguments passed to function.
        - [1](pandas.DataFrame): Trajectories by application of trackpy.link_df(). See above.
    '''

    ##################################### Params and file handling

    ### Path of file that is processed and number of frames
    path = os.path.splitext(path)[0]

    ### Define standard
    standard_params = {
        'search_range': 5,
        'memory': 3,
    }
    ### Fill in standard parameters for any that were not passed (or passed as None)
    for key, value in standard_params.items():
        if params.get(key) is None:
            params[key] = value

    ### Remove keys in params that are not needed
    for key in [k for k in params if k not in standard_params]:
        del params[key]

    ### Processing marks
    params['generatedby'] = 'spt.linklocs.get_link()'

    ### Prepare zero-padded file extension for saving, e.g. search_range=5, memory=3 -> '_picked0503'
    sr = '%02i' % params['search_range']
    mr = '%02i' % params['memory']

    ##################################### Link
    link = get_link(
        locs,
        search_range=params['search_range'],
        memory=params['memory'],
    )

    ##################################### Save
    #### Save complete link as _picked
    info_picked = info.copy() + [params]
    io.save_locs(
        path + '_picked%s%s.hdf5' % (sr, mr),
        link.to_records(index=False),
        info_picked,
    )

    ### Save reduced version (only first 500 groups!) of link for viewing in render
    try:
        max_group = link.group.unique()[500]
    except IndexError:  # Fewer than 501 groups: keep them all
        max_group = link.group.unique()[-1]
    link_view = link[link.group <= max_group]

    io.save_locs(
        path + '_picked%s%s' % (sr, mr) + 'g500.hdf5',
        link_view.to_records(index=False),
        info_picked,
    )

    return [params, link]
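A minimal usage sketch; the path is illustrative, localizations are loaded via picasso.io and converted to the pandas.DataFrame this function expects:

import pandas as pd
from picasso import io

locs, info = io.load_locs('/path/to/data_locs.hdf5')
link_params, link = main(pd.DataFrame(locs), info, '/path/to/data_locs.hdf5', search_range=5, memory=3)
# Trajectories are saved as *_picked0503.hdf5 (plus a reduced g500 preview file).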
Example #5
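### Tail of an image display call (matplotlib imshow-style keyword arguments); the opening of the call is not part of this snippet.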
                cmap='magma',
                vmin=c_min,
                vmax=c_max,
                interpolation='nearest',
                origin='lower')
### Reference lines
ax_ref.axvline(x_center)
ax_ref.axhline(y_center)
ax_shift.axvline(x_center)
ax_shift.axhline(y_center)
### Limits
ax_ref.set_xlim(x_center - x_width / 2, x_center + x_width / 2)
ax_ref.set_ylim(y_center - y_width / 2, y_center + y_width / 2)
ax_shift.set_xlim(x_center - x_width / 2, x_center + x_width / 2)
ax_shift.set_ylim(y_center - y_width / 2, y_center + y_width / 2)

#%%
############################################# Save
info_shift = info.copy()
addDict = {
    'reference': path[0],
    'dx': dx,
    'dy': dy,
    'extension': '_locs_render'
}
io.save_locs(
    path[1].replace('.hdf5', '_prealign.hdf5'),
    locs_shift,
    info_shift + [addDict],
)