Example no. 1
def infer_smooth_DD(cells,
                    diffusivity_prior=None,
                    jeffreys_prior=False,
                    min_diffusivity=None,
                    max_iter=None,
                    epsilon=None,
                    rgrad=None,
                    **kwargs):

    # initial values
    index, reverse_index, n, dt_mean, D_initial, min_diffusivity, D_bounds, _ = \
        smooth_infer_init(cells, min_diffusivity=min_diffusivity, jeffreys_prior=jeffreys_prior)
    initial_drift = np.zeros((len(index), cells.dim), dtype=D_initial.dtype)
    drift_bounds = [(None, None)] * initial_drift.size  # no bounds
    dd = ChainArray('D', D_initial, 'drift', initial_drift)

    # gradient options
    grad_kwargs = get_grad_kwargs(epsilon=epsilon, **kwargs)

    # parametrize the optimization algorithm
    if min_diffusivity is not None:
        kwargs['bounds'] = D_bounds + drift_bounds
    if max_iter:
        options = kwargs.get('options', {})
        options['maxiter'] = max_iter
        kwargs['options'] = options

    # posterior function
    if rgrad in ('delta', 'delta1'):
        fun = dd_neg_posterior1
    else:
        if rgrad not in (None, 'grad', 'grad1', 'gradn'):
            warn('unsupported rgrad: {}'.format(rgrad), RuntimeWarning)
        fun = smooth_dd_neg_posterior

    # run the optimization
    #cell.cache = None # no cache needed
    localization_error = cells.get_localization_error(kwargs, 0.03, True)
    args = (dd, cells, localization_error, diffusivity_prior, jeffreys_prior,
            dt_mean, min_diffusivity, index, reverse_index, grad_kwargs)
    result = minimize(fun,
                      dd.combined,
                      args=args,
                      **kwargs)

    # collect the result
    dd.update(result.x)
    D, drift = dd['D'], dd['drift']
    DD = pd.DataFrame(np.hstack((D[:, np.newaxis], drift)), index=index,
                      columns=['diffusivity'] +
                              ['drift ' + col for col in cells.space_cols])

    return DD
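
For reference, the single vector handed to the optimizer packs the per-cell diffusivities together with the drift components; `ChainArray` takes care of the packing and unpacking. The following is a minimal sketch of that idea on toy NumPy data, not the library's `ChainArray` implementation, and the exact memory layout is an assumption:

import numpy as np

# Toy setting: 3 cells in 2 dimensions, mirroring the shapes used above.
n_cells, dim = 3, 2
D_initial = np.full(n_cells, 0.5)         # one diffusivity per cell
drift_initial = np.zeros((n_cells, dim))  # one drift vector per cell

# Pack both blocks into one flat vector, as the optimizer expects.
combined = np.concatenate((D_initial.ravel(), drift_initial.ravel()))

# Unpack after optimization: the first n_cells entries are D, the rest is drift.
D = combined[:n_cells]
drift = combined[n_cells:].reshape(n_cells, dim)
print(D.shape, drift.shape)  # (3,) (3, 2)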
Example no. 2
def infer_smooth_DF(cells,
                    diffusivity_prior=None,
                    potential_prior=None,
                    jeffreys_prior=False,
                    min_diffusivity=None,
                    max_iter=None,
                    epsilon=None,
                    **kwargs):

    # initial values
    index, reverse_index, n, dt_mean, D_initial, min_diffusivity, D_bounds, _ = \
        smooth_infer_init(cells, min_diffusivity=min_diffusivity, jeffreys_prior=jeffreys_prior)
    F_initial = np.zeros((len(index), cells.dim), dtype=D_initial.dtype)
    F_bounds = [(None, None)] * F_initial.size  # no bounds
    df = ChainArray('D', D_initial, 'F', F_initial)

    # gradient options
    grad_kwargs = {}
    if epsilon is not None:
        # note: setting epsilon breaks backward compatibility with InferenceMAP
        grad_kwargs['eps'] = epsilon

    # parametrize the optimization algorithm
    if min_diffusivity is not None:
        kwargs['bounds'] = D_bounds + F_bounds
    if max_iter:
        options = kwargs.get('options', {})
        options['maxiter'] = max_iter
        kwargs['options'] = options

    # run the optimization
    #cell.cache = None # no cache needed
    localization_error = cells.get_localization_error(kwargs, 0.03, True)
    args = (df, cells, localization_error, diffusivity_prior, potential_prior,
            jeffreys_prior, dt_mean, min_diffusivity, index, reverse_index,
            grad_kwargs)
    result = minimize(smooth_df_neg_posterior,
                      df.combined,
                      args=args,
                      **kwargs)

    # collect the result
    df.update(result.x)
    D, F = df['D'], df['F']
    DF = pd.DataFrame(np.concatenate((D[:, np.newaxis], F), axis=1), index=index,
                      columns=['diffusivity'] +
                              ['force ' + col for col in cells.space_cols])

    return DF
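
The returned map is a plain `pandas.DataFrame` with one row per cell: the diffusivity first, then one force (or drift) column per spatial coordinate. A small self-contained illustration with made-up values, where `x` and `y` stand in for `cells.space_cols`:

import numpy as np
import pandas as pd

index = [2, 5, 7]              # cell indices kept by the inference
space_cols = ['x', 'y']        # stands in for cells.space_cols
D = np.array([0.4, 0.6, 0.5])  # toy diffusivities
F = np.array([[0.1, -0.2],
              [0.0,  0.3],
              [-0.1, 0.1]])    # toy force vectors

DF = pd.DataFrame(np.concatenate((D[:, np.newaxis], F), axis=1),
                  index=index,
                  columns=['diffusivity'] + ['force ' + col for col in space_cols])
print(DF)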
Example no. 3
def infer_smooth_DD(cells,
                    diffusivity_prior=None,
                    drift_prior=None,
                    jeffreys_prior=False,
                    min_diffusivity=None,
                    max_iter=None,
                    epsilon=None,
                    rgrad=None,
                    verbose=False,
                    **kwargs):

    # initial values
    localization_error = cells.get_localization_error(kwargs, 0.03, True)
    index, reverse_index, n, dt_mean, D_initial, min_diffusivity, D_bounds, _ = \
        smooth_infer_init(cells, min_diffusivity=min_diffusivity, jeffreys_prior=jeffreys_prior,
        sigma2=localization_error)
    initial_drift = np.zeros((len(index), cells.dim), dtype=D_initial.dtype)
    dd = ChainArray('D', D_initial, 'drift', initial_drift)

    # gradient options
    grad_kwargs = get_grad_kwargs(epsilon=epsilon, **kwargs)

    # parametrize the optimization algorithm
    default_lBFGSb_options = dict(maxiter=1e3, maxfun=1e10, ftol=1e-6)
    # in L-BFGS-B the number of iterations is usually very low (~10-100) while the number of
    # function evaluations is much higher (~1e4-1e5);
    # once maxfun is reached, the current iteration is cut short and the optimization
    # terminates with a non-success message
    if min_diffusivity is None:
        options = {}
    else:
        drift_bounds = [(None, None)] * initial_drift.size  # no bounds
        kwargs['bounds'] = D_bounds + drift_bounds
        options = dict(default_lBFGSb_options)
    options.update(kwargs.pop('options', {}))
    if max_iter:
        options['maxiter'] = max_iter
    if verbose:
        options['disp'] = verbose
    if options:
        kwargs['options'] = options

    # posterior function
    if rgrad in ('delta', 'delta0', 'delta1'):
        fun = dd_neg_posterior1
    else:
        if rgrad not in (None, 'grad', 'grad1', 'gradn'):
            warn('unsupported rgrad: {}'.format(rgrad), RuntimeWarning)
        fun = smooth_dd_neg_posterior

    # run the optimization
    #cell.cache = None # no cache needed
    args = (dd, cells, localization_error, diffusivity_prior, drift_prior, jeffreys_prior,
            dt_mean, min_diffusivity, index, reverse_index, grad_kwargs)
    result = minimize(fun, dd.combined, args=args, **kwargs)
    if not (result.success or verbose):
        warn('{}'.format(result.message), OptimizationWarning)

    # collect the result
    dd.update(result.x)
    D, drift = dd['D'], dd['drift']
    DD = pd.DataFrame(np.hstack((D[:, np.newaxis], drift)), index=index,
                      columns=['diffusivity'] +
                              ['drift ' + col for col in cells.space_cols])

    return DD
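
The options dictionary assembled above feeds straight into `scipy.optimize.minimize`; when bounds are given and no method is specified, SciPy falls back to L-BFGS-B, which is why the defaults target that solver. Below is a self-contained sketch on a toy bounded problem with illustrative option values; only the plumbing mirrors the code above:

import numpy as np
from warnings import warn
from scipy.optimize import minimize

def toy_objective(x):
    # simple convex stand-in for the negative log-posterior
    return float(np.sum((x - 1.0) ** 2))

x0 = np.zeros(4)
bounds = [(0.01, None)] * 2 + [(None, None)] * 2  # e.g. bounded D's, unbounded drifts
options = dict(maxiter=1000, maxfun=int(1e10), ftol=1e-6, disp=False)

result = minimize(toy_objective, x0, method='L-BFGS-B', bounds=bounds, options=options)
if not result.success:
    warn(str(result.message), RuntimeWarning)
print(result.x)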
Example no. 4
def infer_smooth_DF(cells,
                    diffusivity_prior=None,
                    force_prior=None,
                    potential_prior=None,
                    jeffreys_prior=False,
                    min_diffusivity=None,
                    max_iter=None,
                    epsilon=None,
                    rgrad=None,
                    verbose=False,
                    **kwargs):
    """
    Argument `potential_prior` is an alias for `force_prior` which penalizes the large force amplitudes.
    """

    # initial values
    localization_error = cells.get_localization_error(kwargs, 0.03, True)
    (
        index,
        reverse_index,
        n,
        dt_mean,
        D_initial,
        min_diffusivity,
        D_bounds,
        _,
    ) = smooth_infer_init(
        cells,
        min_diffusivity=min_diffusivity,
        jeffreys_prior=jeffreys_prior,
        sigma2=localization_error,
    )
    F_initial = np.zeros((len(index), cells.dim), dtype=D_initial.dtype)
    df = ChainArray("D", D_initial, "F", F_initial)

    # gradient options
    grad_kwargs = get_grad_kwargs(epsilon=epsilon, **kwargs)

    # parametrize the optimization algorithm
    default_lBFGSb_options = dict(maxiter=1e3, maxfun=1e10, ftol=1e-6)
    # in L-BFGS-B the number of iterations is usually very low (~10-100) while the number of
    # function evaluations is much higher (~1e4-1e5);
    # once maxfun is reached, the current iteration is cut short and the optimization
    # terminates with a non-success message
    if min_diffusivity is None:
        options = {}
    else:
        F_bounds = [(None, None)] * F_initial.size  # no bounds
        kwargs["bounds"] = D_bounds + F_bounds
        options = dict(default_lBFGSb_options)
    options.update(kwargs.pop("options", {}))
    if max_iter:
        options["maxiter"] = max_iter
    if verbose:
        options["disp"] = verbose
    if options:
        kwargs["options"] = options

    # posterior function
    if rgrad in ("delta", "delta0", "delta1"):
        fun = df_neg_posterior1
    else:
        if rgrad not in (None, "grad", "grad1", "gradn"):
            warn("unsupported rgrad: {}".format(rgrad), RuntimeWarning)
        fun = smooth_df_neg_posterior

    if force_prior is None:
        if potential_prior is not None:
            warn(
                "please use `force_prior` instead of `potential_prior`",
                PendingDeprecationWarning,
            )
        force_prior = potential_prior

    # run the optimization
    # cell.cache = None # no cache needed
    args = (
        df,
        cells,
        localization_error,
        diffusivity_prior,
        force_prior,
        jeffreys_prior,
        dt_mean,
        min_diffusivity,
        index,
        reverse_index,
        grad_kwargs,
    )
    result = interruptible_minimize(fun, df.combined, args=args, **kwargs)
    if not (result.success or verbose):
        warn("{}".format(result.message), OptimizationWarning)

    # collect the result
    df.update(result.x)
    D, F = df["D"], df["F"]
    DF = pd.DataFrame(
        np.concatenate((D[:, np.newaxis], F), axis=1),
        index=index,
        columns=["diffusivity"] + ["force " + col for col in cells.space_cols],
    )

    return DF
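
One detail worth keeping in mind: the bounds list must follow the same ordering as the flat parameter vector, i.e. all diffusivities first, then all force components, otherwise the wrong parameters get constrained. A minimal layout check with toy sizes; the `(min_diffusivity, None)` pair merely stands in for whatever `smooth_infer_init` actually returns in `D_bounds`:

import numpy as np

n_cells, dim = 4, 2
min_diffusivity = 0.0

D_initial = np.full(n_cells, 0.2)
F_initial = np.zeros((n_cells, dim))

# flat vector: [D_0, ..., D_{n-1}, F_00, F_01, F_10, ...]
combined = np.concatenate((D_initial, F_initial.ravel()))

# bounds in the same order: diffusivities bounded below, force components free
D_bounds = [(min_diffusivity, None)] * D_initial.size
F_bounds = [(None, None)] * F_initial.size
bounds = D_bounds + F_bounds

assert len(bounds) == combined.size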