Example 1
def __init__(self, diffusivity, potential, diffusivity_prior=None, potential_prior=None,
             minimum_diffusivity=None, positive_diffusivity=None, prior_include=None):
    # positive_diffusivity is for backward compatibility
    ChainArray.__init__(self, 'D', diffusivity, 'V', potential)
    self._diffusivity_prior = diffusivity_prior
    self._potential_prior = potential_prior
    self.minimum_diffusivity = minimum_diffusivity
    if minimum_diffusivity is None and positive_diffusivity is True:
        self.minimum_diffusivity = 0
    self.prior_include = prior_include
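As the snippets below show, ChainArray is a helper from the surrounding library that packs several named arrays into the single flat vector that scipy.optimize.minimize expects (.combined) and splits the optimizer's result back out by name. A minimal self-contained sketch of the idea follows; the NamedChain class is hypothetical, not the library's API:

import numpy as np

class NamedChain:
    """Minimal sketch of a ChainArray-like container (hypothetical, not the library's API)."""
    def __init__(self, *name_array_pairs):
        names, arrays = name_array_pairs[0::2], name_array_pairs[1::2]
        self._shapes = {n: np.asarray(a).shape for n, a in zip(names, arrays)}
        self._slices = {}
        offset = 0
        for n, a in zip(names, arrays):
            size = np.asarray(a).size
            self._slices[n] = slice(offset, offset + size)
            offset += size
        # the flat vector handed to scipy.optimize.minimize
        self.combined = np.concatenate([np.ravel(a) for a in arrays])

    def update(self, x):
        # refresh the flat vector from the optimizer's result
        self.combined = np.asarray(x)

    def __getitem__(self, name):
        # slice a named block back out of the flat vector
        return self.combined[self._slices[name]].reshape(self._shapes[name])

chain = NamedChain('D', np.ones(3), 'drift', np.zeros((3, 2)))
assert chain.combined.size == 9
chain.update(np.arange(9.0))
print(chain['D'], chain['drift'], sep='\n')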
Example 2
def infer_smooth_DD(cells,
                    diffusivity_prior=None,
                    jeffreys_prior=False,
                    min_diffusivity=None,
                    max_iter=None,
                    epsilon=None,
                    rgrad=None,
                    **kwargs):

    # initial values
    index, reverse_index, n, dt_mean, D_initial, min_diffusivity, D_bounds, _ = \
        smooth_infer_init(cells, min_diffusivity=min_diffusivity, jeffreys_prior=jeffreys_prior)
    initial_drift = np.zeros((len(index), cells.dim), dtype=D_initial.dtype)
    drift_bounds = [(None, None)] * initial_drift.size  # no bounds
    dd = ChainArray('D', D_initial, 'drift', initial_drift)

    # gradient options
    grad_kwargs = get_grad_kwargs(epsilon=epsilon, **kwargs)

    # parametrize the optimization algorithm
    if min_diffusivity is not None:
        kwargs['bounds'] = D_bounds + drift_bounds
    if max_iter:
        options = kwargs.get('options', {})
        options['maxiter'] = max_iter
        kwargs['options'] = options

    # posterior function
    if rgrad in ('delta', 'delta1'):
        fun = dd_neg_posterior1
    else:
        if rgrad not in (None, 'grad', 'grad1', 'gradn'):
            warn('unsupported rgrad: {}'.format(rgrad), RuntimeWarning)
        fun = smooth_dd_neg_posterior

    # run the optimization
    #cell.cache = None # no cache needed
    localization_error = cells.get_localization_error(kwargs, 0.03, True)
    args = (dd, cells, localization_error, diffusivity_prior, jeffreys_prior,
            dt_mean, min_diffusivity, index, reverse_index, grad_kwargs)
    result = minimize(fun,
                      dd.combined,
                      args=args,
                      **kwargs)

    # collect the result
    dd.update(result.x)
    D, drift = dd['D'], dd['drift']
    DD = pd.DataFrame(np.hstack((D[:,np.newaxis], drift)), index=index, \
        columns=[ 'diffusivity' ] + \
            [ 'drift ' + col for col in cells.space_cols ])

    return DD
Example 3
def infer_smooth_DF(cells,
                    diffusivity_prior=None,
                    potential_prior=None,
                    jeffreys_prior=False,
                    min_diffusivity=None,
                    max_iter=None,
                    epsilon=None,
                    **kwargs):

    # initial values
    index, reverse_index, n, dt_mean, D_initial, min_diffusivity, D_bounds, _ = \
        smooth_infer_init(cells, min_diffusivity=min_diffusivity, jeffreys_prior=jeffreys_prior)
    F_initial = np.zeros((len(index), cells.dim), dtype=D_initial.dtype)
    F_bounds = [(None, None)] * F_initial.size  # no bounds
    df = ChainArray('D', D_initial, 'F', F_initial)

    # gradient options
    grad_kwargs = {}
    if epsilon is not None:
        # note: epsilon should be None for backward compatibility with InferenceMAP
        grad_kwargs['eps'] = epsilon

    # parametrize the optimization algorithm
    if min_diffusivity is not None:
        kwargs['bounds'] = D_bounds + F_bounds
    if max_iter:
        options = kwargs.get('options', {})
        options['maxiter'] = max_iter
        kwargs['options'] = options

    # run the optimization
    #cell.cache = None # no cache needed
    localization_error = cells.get_localization_error(kwargs, 0.03, True)
    args = (df, cells, localization_error, diffusivity_prior, potential_prior,
            jeffreys_prior, dt_mean, min_diffusivity, index, reverse_index,
            grad_kwargs)
    result = minimize(smooth_df_neg_posterior,
                      df.combined,
                      args=args,
                      **kwargs)

    # collect the result
    df.update(result.x)
    D, F = df['D'], df['F']
    DF = pd.DataFrame(np.concatenate((D[:,np.newaxis], F), axis=1), index=index, \
        columns=[ 'diffusivity' ] + \
            [ 'force ' + col for col in cells.space_cols ])

    return DF
Example 4
def infer_DD(cells,
             localization_error=None,
             jeffreys_prior=False,
             min_diffusivity=None,
             **kwargs):
    if isinstance(cells, Distributed):  # multiple cells
        localization_error = cells.get_localization_error(kwargs, 0.03, True, \
                localization_error=localization_error)
        if min_diffusivity is None:
            if jeffreys_prior:
                min_diffusivity = 0.01
            else:
                min_diffusivity = 0
        elif min_diffusivity is False:
            min_diffusivity = None
        args = (localization_error, jeffreys_prior, min_diffusivity)
        index, inferred = [], []
        for i in cells:
            cell = cells[i]
            index.append(i)
            inferred.append(infer_DD(cell, *args, **kwargs))
        any_cell = cell
        inferred = pd.DataFrame(np.stack(inferred, axis=0), \
            index=index, \
            columns=[ 'diffusivity' ] + \
                [ 'drift ' + col for col in any_cell.space_cols ])
        return inferred
    else:  # single cell
        cell = cells
        # sanity checks
        if not bool(cell):
            raise ValueError('empty cells')
        if cell.dr.shape[1] == 0:
            raise ValueError('translocation array has no column')
        if cell.dt.shape[1:]:
            raise ValueError(
                'time deltas are structured in multiple dimensions')
        # ensure that translocations are properly oriented in time
        if not np.all(0 < cell.dt):
            warn('translocation dts are non-positive', RuntimeWarning)
            cell.dr[cell.dt < 0] *= -1.
            cell.dt[cell.dt < 0] *= -1.
        #
        dt_mean = np.mean(cell.dt)
        D_initial = np.mean(cell.dr * cell.dr) / (2. * dt_mean)
        initial_drift = np.zeros(cell.dim, dtype=D_initial.dtype)
        dd = ChainArray('D', D_initial, 'drift', initial_drift)
        if min_diffusivity is not None:
            kwargs['bounds'] = [(min_diffusivity, None)] + [(None, None)] * cell.dim
        #cell.cache = None # no cache needed
        result = minimize(dd_neg_posterior, dd.combined, \
            args=(dd, cell, localization_error, jeffreys_prior, dt_mean, min_diffusivity), \
            **kwargs)
        #dd.update(result.x)
        #return (dd['D'], dd['drift'])
        return result.x  # needless to split dd.combined into D and drift
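The starting point D_initial = np.mean(cell.dr * cell.dr) / (2. * dt_mean) is the standard mean-squared-displacement estimator: for Brownian motion, each coordinate of a translocation has variance 2*D*dt. A self-contained sanity check on simulated steps; the simulation parameters below are illustrative only:

import numpy as np

rng = np.random.default_rng(0)
D_true, dt, n_steps, dim = 0.2, 0.05, 100000, 2

# Brownian translocations: each coordinate has variance 2*D*dt
dr = rng.normal(scale=np.sqrt(2.0 * D_true * dt), size=(n_steps, dim))
dt_arr = np.full(n_steps, dt)

dt_mean = np.mean(dt_arr)
D_initial = np.mean(dr * dr) / (2.0 * dt_mean)  # same estimator as above
print(D_initial)  # close to D_true = 0.2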
Example 5
def infer_DF(cells, localization_error=None, jeffreys_prior=False, min_diffusivity=None, debug=False, \
        **kwargs):
    if isinstance(cells, Distributed): # multiple cells
        localization_error = cells.get_localization_error(kwargs, 0.03, True, \
                localization_error=localization_error)
        args = (localization_error, jeffreys_prior, min_diffusivity)
        index, inferred = [], []
        for i in cells:
            cell = cells[i]
            # sanity checks
            if not bool(cell):
                raise ValueError('empty cells')
            if cell.dr.shape[1] == 0:
                raise ValueError('translocation array has no column')
            if cell.dt.shape[1:]:
                raise ValueError('time deltas are structured in multiple dimensions')
            # ensure that translocations are properly oriented in time
            if not np.all(0 < cell.dt):
                warn('translocation dts are non-positive', RuntimeWarning)
                cell.dr[cell.dt < 0] *= -1.
                cell.dt[cell.dt < 0] *= -1.
            index.append(i)
            inferred.append(infer_DF(cell, *args, **kwargs))
        inferred = np.stack(inferred, axis=0)
        #D = inferred[:,0]
        #gradD = []
        #for i in index:
        #       gradD.append(cells.grad(i, D))
        #gradD = np.stack(gradD, axis=0)
        inferred = pd.DataFrame(inferred, \
            index=index, \
            columns=[ 'diffusivity' ] + \
                [ 'force ' + col for col in cells.space_cols ])
        #for j, col in enumerate(cells.space_cols):
        #       inferred['gradD '+col] = gradD[:,j]
        if debug:
            xy = np.vstack([ cells[i].center for i in index ])
            inferred = inferred.join(pd.DataFrame(xy, index=index, \
                columns=cells.space_cols))
        return inferred
    else: # single cell
        cell = cells
        dt_mean = np.mean(cell.dt)
        D_initial = np.mean(cell.dr * cell.dr) / (2. * dt_mean)
        F_initial = np.zeros(cell.dim, dtype=D_initial.dtype)
        df = ChainArray('D', D_initial, 'F', F_initial)
        if min_diffusivity is not False:
            if min_diffusivity is None:
                noise_dt = localization_error
                min_diffusivity = (1e-16 - noise_dt) / np.max(cell.dt)
            kwargs['bounds'] = [(min_diffusivity, None)] + [(None, None)] * cell.dim
        #cell.cache = None # no cache needed
        result = minimize(df_neg_posterior, df.combined, \
            args=(df, cell, localization_error, jeffreys_prior, dt_mean, min_diffusivity), \
            **kwargs)
        #df.update(result.x)
        #return (df['D'], df['F'])
        return result.x # needless to split df.combined into D and F
Example 6
def infer_smooth_DD(cells,
                    diffusivity_prior=None,
                    drift_prior=None,
                    jeffreys_prior=False,
                    min_diffusivity=None,
                    max_iter=None,
                    epsilon=None,
                    rgrad=None,
                    verbose=False,
                    **kwargs):

    # initial values
    localization_error = cells.get_localization_error(kwargs, 0.03, True)
    index, reverse_index, n, dt_mean, D_initial, min_diffusivity, D_bounds, _ = \
        smooth_infer_init(cells, min_diffusivity=min_diffusivity, jeffreys_prior=jeffreys_prior,
        sigma2=localization_error)
    initial_drift = np.zeros((len(index), cells.dim), dtype=D_initial.dtype)
    dd = ChainArray('D', D_initial, 'drift', initial_drift)

    # gradient options
    grad_kwargs = get_grad_kwargs(epsilon=epsilon, **kwargs)

    # parametrize the optimization algorithm
    default_lBFGSb_options = dict(maxiter=1e3, maxfun=1e10, ftol=1e-6)
    # in L-BFGS-B the number of iterations is usually very low (~10-100) while the number of
    # function evaluations is much higher (~1e4-1e5);
    # with maxfun defined, an iteration can stop anytime and the optimization may terminate
    # with an error message
    if min_diffusivity is None:
        options = {}
    else:
        drift_bounds = [(None, None)] * initial_drift.size  # no bounds
        kwargs['bounds'] = D_bounds + drift_bounds
        options = dict(default_lBFGSb_options)
    options.update(kwargs.pop('options', {}))
    if max_iter:
        options['maxiter'] = max_iter
    if verbose:
        options['disp'] = verbose
    if options:
        kwargs['options'] = options

    # posterior function
    if rgrad in ('delta', 'delta0', 'delta1'):
        fun = dd_neg_posterior1
    else:
        if rgrad not in (None, 'grad', 'grad1', 'gradn'):
            warn('unsupported rgrad: {}'.format(rgrad), RuntimeWarning)
        fun = smooth_dd_neg_posterior

    # run the optimization
    #cell.cache = None # no cache needed
    args = (dd, cells, localization_error, diffusivity_prior, drift_prior, jeffreys_prior, \
            dt_mean, min_diffusivity, index, reverse_index, grad_kwargs)
    result = minimize(fun, dd.combined, args=args, **kwargs)
    if not (result.success or verbose):
        warn('{}'.format(result.message), OptimizationWarning)

    # collect the result
    dd.update(result.x)
    D, drift = dd['D'], dd['drift']
    DD = pd.DataFrame(np.hstack((D[:,np.newaxis], drift)), index=index, \
        columns=[ 'diffusivity' ] + \
            [ 'drift ' + col for col in cells.space_cols ])

    return DD
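The bounds and options wiring above follows the L-BFGS-B conventions of scipy.optimize.minimize: one (low, high) pair per scalar in the flat parameter vector, and solver settings such as maxiter, maxfun, ftol and disp passed through options. A toy, self-contained illustration of the same wiring; the quadratic objective is made up for the demo:

import numpy as np
from scipy.optimize import minimize

def toy_neg_posterior(x):
    # made-up convex objective standing in for the smoothed negative posterior
    return np.sum((x - np.array([0.5, -1.0, 2.0])) ** 2)

x0 = np.zeros(3)
# one (low, high) pair per scalar parameter; the first component is bounded
# below, like the diffusivity, and the rest are unbounded, like the drift
bounds = [(0.0, None)] + [(None, None)] * 2
options = dict(maxiter=1000, maxfun=int(1e10), ftol=1e-6, disp=False)

# with bounds given and no method specified, scipy selects L-BFGS-B
result = minimize(toy_neg_posterior, x0, bounds=bounds, options=options)
print(result.x)  # ~ [0.5, -1.0, 2.0], with the first component clipped at >= 0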
Example 7
def infer_DD(
    cells, localization_error=None, jeffreys_prior=False, min_diffusivity=None, **kwargs
):
    if isinstance(cells, Distributed):  # multiple cells
        localization_error = cells.get_localization_error(
            kwargs, 0.03, True, localization_error=localization_error
        )
        args = (localization_error, jeffreys_prior, min_diffusivity)
        index, inferred = [], []
        for i in cells:
            cell = cells[i]
            index.append(i)
            inferred.append(infer_DD(cell, *args, **kwargs))
        any_cell = cell
        inferred = pd.DataFrame(
            np.stack(inferred, axis=0),
            index=index,
            columns=["diffusivity"] + ["drift " + col for col in any_cell.space_cols],
        )
        return inferred
    else:  # single cell
        cell = cells
        # sanity checks
        if not bool(cell):
            raise ValueError("empty cells")
        if cell.dr.shape[1] == 0:
            raise ValueError("translocation array has no column")
        if cell.dt.shape[1:]:
            raise ValueError("time deltas are structured in multiple dimensions")
        # ensure that translocations are properly oriented in time
        if not np.all(0 < cell.dt):
            warn("translocation dts are non-positive", RuntimeWarning)
            cell.dr[cell.dt < 0] *= -1.0
            cell.dt[cell.dt < 0] *= -1.0
        #
        dt_mean = np.mean(cell.dt)
        D_initial = np.mean(cell.dr * cell.dr) / (2.0 * dt_mean)
        initial_drift = np.zeros(cell.dim, dtype=D_initial.dtype)
        dd = ChainArray("D", D_initial, "drift", initial_drift)
        if min_diffusivity is not False:
            if min_diffusivity is None:
                noise_dt = localization_error
                min_diffusivity = (1e-16 - noise_dt) / np.max(cell.dt)
            kwargs["bounds"] = [(min_diffusivity, None)] + [(None, None)] * cell.dim
        # cell.cache = None # no cache needed
        result = minimize(
            dd_neg_posterior,
            dd.combined,
            args=(
                dd,
                cell,
                localization_error,
                jeffreys_prior,
                dt_mean,
                min_diffusivity,
            ),
            **kwargs
        )
        # dd.update(result.x)
        # return (dd['D'], dd['drift'])
        return result.x  # needless to split dd.combined into D and drift
Example 8
def infer_smooth_DF(cells,
                    diffusivity_prior=None,
                    force_prior=None,
                    potential_prior=None,
                    jeffreys_prior=False,
                    min_diffusivity=None,
                    max_iter=None,
                    epsilon=None,
                    rgrad=None,
                    verbose=False,
                    **kwargs):
    """
    Argument `potential_prior` is a deprecated alias for `force_prior`, which penalizes large force amplitudes.
    """

    # initial values
    localization_error = cells.get_localization_error(kwargs, 0.03, True)
    (
        index,
        reverse_index,
        n,
        dt_mean,
        D_initial,
        min_diffusivity,
        D_bounds,
        _,
    ) = smooth_infer_init(
        cells,
        min_diffusivity=min_diffusivity,
        jeffreys_prior=jeffreys_prior,
        sigma2=localization_error,
    )
    F_initial = np.zeros((len(index), cells.dim), dtype=D_initial.dtype)
    df = ChainArray("D", D_initial, "F", F_initial)

    # gradient options
    grad_kwargs = get_grad_kwargs(epsilon=epsilon, **kwargs)

    # parametrize the optimization algorithm
    default_lBFGSb_options = dict(maxiter=1e3, maxfun=1e10, ftol=1e-6)
    # in L-BFGS-B the number of iterations is usually very low (~10-100) while the number of
    # function evaluations is much higher (~1e4-1e5);
    # with maxfun defined, an iteration can stop anytime and the optimization may terminate
    # with an error message
    if min_diffusivity is None:
        options = {}
    else:
        F_bounds = [(None, None)] * F_initial.size  # no bounds
        kwargs["bounds"] = D_bounds + F_bounds
        options = dict(default_lBFGSb_options)
    options.update(kwargs.pop("options", {}))
    if max_iter:
        options["maxiter"] = max_iter
    if verbose:
        options["disp"] = verbose
    if options:
        kwargs["options"] = options

    # posterior function
    if rgrad in ("delta", "delta0", "delta1"):
        fun = df_neg_posterior1
    else:
        if rgrad not in (None, "grad", "grad1", "gradn"):
            warn("unsupported rgrad: {}".format(rgrad), RuntimeWarning)
        fun = smooth_df_neg_posterior

    if force_prior is None:
        if potential_prior is not None:
            warn(
                "please use `force_prior` instead of `potential_prior`",
                PendingDeprecationWarning,
            )
        force_prior = potential_prior

    # run the optimization
    # cell.cache = None # no cache needed
    args = (
        df,
        cells,
        localization_error,
        diffusivity_prior,
        force_prior,
        jeffreys_prior,
        dt_mean,
        min_diffusivity,
        index,
        reverse_index,
        grad_kwargs,
    )
    result = interruptible_minimize(fun, df.combined, args=args, **kwargs)
    if not (result.success or verbose):
        warn("{}".format(result.message), OptimizationWarning)

    # collect the result
    df.update(result.x)
    D, F = df["D"], df["F"]
    DF = pd.DataFrame(
        np.concatenate((D[:, np.newaxis], F), axis=1),
        index=index,
        columns=["diffusivity"] + ["force " + col for col in cells.space_cols],
    )

    return DF
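The final assembly simply concatenates the scalar diffusivity column with the per-axis force columns read back out of the chain. A self-contained sketch with made-up values; space_cols here stands in for cells.space_cols:

import numpy as np
import pandas as pd

n, space_cols = 4, ['x', 'y']           # illustrative cell count and axes
D = np.array([0.1, 0.2, 0.15, 0.3])     # one diffusivity per cell
F = np.zeros((n, len(space_cols)))      # one force vector per cell

DF = pd.DataFrame(
    np.concatenate((D[:, np.newaxis], F), axis=1),
    index=range(n),
    columns=['diffusivity'] + ['force ' + col for col in space_cols],
)
print(DF)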