Example #1
ts = {}
for solver in solvers:  # time solvers on the unconstrained problem with fixed lam
    ts[solver] = np.nan * np.zeros(N)
    for i, y in enumerate(Y):
        if solver == 'OASIS':
            ts[solver][i] = Timer(
                lambda: oasisAR1(y, g, lam=2.4)).timeit(number=runs) / runs
        else:
            try:
                ts[solver][i] = Timer(
                    lambda: foopsi(y, g=[g], lam=2.4, solver=solver)).timeit(
                        number=runs) / runs
            except SolverError:
                print("The solver " + solver +
                      " is not installed; skipping it.")
                break
constrained_ts = {}
for solver in solvers[:-1]:  # GUROBI failed
    constrained_ts[solver] = np.nan * np.zeros(N)
    print(
        'running %7s with p=1 and optimizing lambda such that noise constraint is tight'
        % solver)
    for i, y in enumerate(Y):
        if solver == 'OASIS':
            constrained_ts[solver][i] = Timer(lambda: constrained_oasisAR1(
                y, g=g, sn=sn)).timeit(number=runs) / runs
        else:
            try:
                constrained_ts[solver][i] = Timer(lambda: constrained_foopsi(
                    y, g=[g], sn=sn, solver=solver)).timeit(number=runs) / runs
            except SolverError:
                print("The solver " + solver +
                      " is actually not installed, hence skipping it.")
                break
constrained_ts['GUROBI'] = np.zeros(N) * np.nan  # GUROBI failed

# plot
fig = plt.figure(figsize=(7, 5))
fig.add_axes([.14, .17, .79, .82])
plt.errorbar(range(len(solvers)), [np.mean(ts[s]) for s in solvers],
             [np.std(ts[s]) / np.sqrt(N) for s in solvers])
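For a self-contained view of the timing pattern used throughout this example, here is a minimal sketch; `dummy_solver` and the data are stand-ins, not part of the benchmark script.

# minimal sketch of the per-solver timing pattern (all names are stand-ins)
from timeit import Timer
import numpy as np

runs = 5
y = np.random.randn(3000)       # placeholder fluorescence trace

def dummy_solver(y):            # stand-in for foopsi / oasisAR1
    return np.maximum(y, 0)

avg = Timer(lambda: dummy_solver(y)).timeit(number=runs) / runs
print('average runtime per call: %.2e s' % avg)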
Example #2
def deconvolve(y,
               g=(None, ),
               sn=None,
               b=None,
               optimize_g=0,
               penalty=1,
               **kwargs):
    """Infer the most likely discretized spike train underlying an fluorescence trace

    Solves the noise constrained sparse non-negative deconvolution problem
    min |s|_q subject to |c-y|^2 = sn^2 T and s = Gc >= 0
    where q is either 1 or 0, rendering the problem convex or non-convex.

    Parameters
    ----------
    y : array, shape (T,)
        Fluorescence trace.
    g : tuple of float, optional, default (None,)
        Parameters of the autoregressive model, cardinality equivalent to p.
        Estimated from the autocovariance of the data if no value is given.
    sn : float, optional, default None
        Standard deviation of the noise distribution. If no value is given,
        sn is estimated from the power spectral density of the data.
    b : float, optional, default None
        Fluorescence baseline value. If no value is given, then b is optimized.
    optimize_g : int, optional, default 0
        Number of large, isolated events to consider for optimizing g.
        If optimize_g=0 the provided or estimated g is not further optimized.
    penalty : int, optional, default 1
        Sparsity penalty. 1: min |s|_1  0: min |s|_0
    kwargs : dict
        Further keywords passed on to constrained_oasisAR1 or constrained_onnlsAR2.

    Returns
    -------
    c : array, shape (T,)
        The inferred denoised fluorescence signal at each time-bin.
    s : array, shape (T,)
        Discretized deconvolved neural activity (spikes).
    b : float
        Fluorescence baseline value.
    g : tuple of float
        Parameters of the AR process that models the fluorescence impulse response.
    lam: float
        Optimal Lagrange multiplier for noise constraint under L1 penalty
    """

    if g[0] is None or sn is None:
        est = estimate_parameters(y, p=len(g), fudge_factor=.98)
        if g[0] is None:
            g = est[0]
        if sn is None:
            sn = est[1]
    if len(g) == 1:
        return constrained_oasisAR1(y,
                                    g[0],
                                    sn,
                                    optimize_b=True if b is None else False,
                                    optimize_g=optimize_g,
                                    penalty=penalty,
                                    **kwargs)
    elif len(g) == 2:
        if optimize_g > 0:
            raise NotImplementedError(
                'Optimization of AR parameters is currently only supported for AR(1)'
            )
        return constrained_onnlsAR2(y,
                                    g,
                                    sn,
                                    optimize_b=True if b is None else False,
                                    optimize_g=optimize_g,
                                    penalty=penalty,
                                    **kwargs)
    else:
        raise ValueError(
            'g must have length 1 or 2; only AR(1) and AR(2) are currently implemented')
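For context, a minimal usage sketch of deconvolve on a synthetic AR(1) trace; the import path oasis.functions and the simulation parameters are assumptions for illustration, not part of the example above.

# hedged usage sketch (assumes the package layout oasis.functions.deconvolve)
import numpy as np
from oasis.functions import deconvolve

rng = np.random.RandomState(0)
T, gamma, noise = 3000, .95, .3
spikes = (rng.rand(T) < .01).astype(float)   # sparse ground-truth spikes
calcium = np.zeros(T)
for t in range(1, T):                        # AR(1) calcium dynamics
    calcium[t] = gamma * calcium[t - 1] + spikes[t]
y = calcium + noise * rng.randn(T)

# g=(None,) and sn=None trigger estimation from the data
c, s, b, g, lam = deconvolve(y, g=(None, ), penalty=1)
print('estimated g: %s  baseline: %.3f  lambda: %.3f' % (g, b, lam))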
Example #3
def constrained_onnlsAR2(y,
                         g,
                         sn,
                         optimize_b=True,
                         optimize_g=0,
                         decimate=5,
                         shift=100,
                         window=200,
                         tol=1e-9,
                         max_iter=1,
                         penalty=1):
    """ Infer the most likely discretized spike train underlying an AR(2) fluorescence trace

    Solves the noise constrained sparse non-negative deconvolution problem
    min |s|_1 subject to |c-y|^2 = sn^2 T and s_t = c_t-g1 c_{t-1}-g2 c_{t-2} >= 0

    Parameters
    ----------
    y : array of float
        One dimensional array containing the fluorescence intensities (with baseline
        already subtracted) with one entry per time-bin.
    g : (float, float)
        Parameters of the AR(2) process that models the fluorescence impulse response.
    sn : float
        Standard deviation of the noise distribution.
    optimize_b : bool, optional, default True
        Optimize baseline if True else it is set to 0, see y.
    optimize_g : int, optional, default 0
        Number of large, isolated events to consider for optimizing g.
        No optimization if optimize_g=0.
    decimate : int, optional, default 5
        Decimation factor for estimating hyper-parameters faster on decimated data.
    shift : int, optional, default 100
        Number of frames by which to shift window from one run of NNLS to the next.
    window : int, optional, default 200
        Window size.
    tol : float, optional, default 1e-9
        Tolerance parameter.
    max_iter : int, optional, default 1
        Maximal number of iterations.
    penalty : int, optional, default 1
        Sparsity penalty. 1: min |s|_1  0: min |s|_0

    Returns
    -------
    c : array of float
        The inferred denoised fluorescence signal at each time-bin.
    s : array of float
        Discretized deconvolved neural activity (spikes).
    b : float
        Fluorescence baseline value.
    (g1, g2) : tuple of float
        Parameters of the AR(2) process that models the fluorescence impulse response.
    lam : float
        Sparsity penalty parameter lambda of dual problem.

    References
    ----------
    * Friedrich J and Paninski L, NIPS 2016
    * Friedrich J, Zhou P, and Paninski L, arXiv 2016
    """

    T = len(y)
    d = (g[0] + sqrt(g[0] * g[0] + 4 * g[1])) / 2
    r = (g[0] - sqrt(g[0] * g[0] + 4 * g[1])) / 2
    if not optimize_g:
        g11 = (np.exp(log(d) * np.arange(1, T + 1)) -
               np.exp(log(r) * np.arange(1, T + 1))) / (d - r)
        g12 = np.append(0, g[1] * g11[:-1])
        g11g11 = np.cumsum(g11 * g11)
        g11g12 = np.cumsum(g11 * g12)
        Sg11 = np.cumsum(g11)
        f_lam = 1 - g[0] - g[1]
    elif decimate == 0:  # need to run AR1 anyway to estimate the AR coefficients
        decimate = 1
    thresh = sn * sn * T
    # get initial estimate of b and lam on downsampled data using AR1 model
    if decimate > 0:
        _, s, b, aa, lam = constrained_oasisAR1(
            y[:len(y) // decimate * decimate].reshape(-1, decimate).mean(1),
            d**decimate,
            sn / sqrt(decimate),
            optimize_b=optimize_b,
            optimize_g=optimize_g)
        if optimize_g > 0:
            d = aa**(1. / decimate)
            g[0] = d + r
            g[1] = -d * r
            g11 = (np.exp(log(d) * np.arange(1, T + 1)) -
                   np.exp(log(r) * np.arange(1, T + 1))) / (d - r)
            g12 = np.append(0, g[1] * g11[:-1])
            g11g11 = np.cumsum(g11 * g11)
            g11g12 = np.cumsum(g11 * g12)
            Sg11 = np.cumsum(g11)
            f_lam = 1 - g[0] - g[1]
        lam *= (1 - d**decimate) / f_lam
        ff = np.hstack([
            a * decimate + np.arange(-decimate, decimate)
            for a in np.where(s > 1e-6)[0]
        ])  # this window size seems necessary and sufficient
        ff = np.unique(ff[(ff >= 0) * (ff < T)])
        mask = np.zeros(T, dtype=bool)
        mask[ff] = True
    else:
        b = np.percentile(y, 15) if optimize_b else 0
        lam = 2 * sn * np.linalg.norm(g11)
        mask = None
    # run ONNLS
    c, s = onnls(y - b, g, lam=lam, mask=mask)

    if not optimize_b:  # don't optimize b, just the dual variable lambda
        for i in range(max_iter - 1):
            res = y - c
            RSS = res.dot(res)
            if np.abs(RSS - thresh) < 1e-4:
                break
            # calc shift dlam, here attributed to sparsity penalty
            tmp = np.empty(T)
            ls = np.append(np.where(s > 1e-6)[0], T)
            l = ls[0]
            tmp[:l] = (1 + d) / (1 + d**l) * np.exp(
                log(d) * np.arange(l))  # first pool
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f - 1
                # if and elif correct last 2 time points for |s|_1 instead |c|_1
                if i == len(ls) - 2:  # last pool
                    tmp[f] = (1. / f_lam if l == 0 else
                              (Sg11[l] + g[1] / f_lam * g11[l - 1] +
                               (g[0] + g[1]) / f_lam * g11[l] -
                               g11g12[l] * tmp[f - 1]) / g11g11[l])
                # secondlast pool if last one has length 1
                elif i == len(ls) - 3 and ls[-2] == T - 1:
                    tmp[f] = (Sg11[l] + g[1] / f_lam * g11[l] -
                              g11g12[l] * tmp[f - 1]) / g11g11[l]
                else:  # all other pools
                    tmp[f] = (Sg11[l] - g11g12[l] * tmp[f - 1]) / g11g11[l]
                l += 1
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]

            aa = tmp.dot(tmp)
            bb = res.dot(tmp)
            cc = RSS - thresh
            try:
                dlam = (-bb + sqrt(bb * bb - aa * cc)) / aa
            except ValueError:  # sqrt of a negative: no real root, fall back
                dlam = -bb / aa
            # perform shift
            lam += dlam / f_lam
            c, s = onnls(y - b, g, lam=lam, mask=mask)

    else:  # optimize b
        db = np.mean(y - c) - b
        b += db
        lam -= db / (1 - g[0] - g[1])
        for i in range(max_iter - 1):
            res = y - c - b
            RSS = res.dot(res)
            if np.abs(RSS - thresh) < 1e-4:
                break
            # calc shift db, here attributed to baseline
            tmp = np.empty(T)
            ls = np.append(np.where(s > 1e-6)[0], T)
            l = ls[0]
            tmp[:l] = (1 + d) / (1 + d**l) * np.exp(
                log(d) * np.arange(l))  # first pool
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f
                tmp[f] = (Sg11[l - 1] -
                          g11g12[l - 1] * tmp[f - 1]) / g11g11[l - 1]
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            tmp -= tmp.mean()
            aa = tmp.dot(tmp)
            bb = res.dot(tmp)
            cc = RSS - thresh
            try:
                db = (-bb + sqrt(bb * bb - aa * cc)) / aa
            except ValueError:  # sqrt of a negative: no real root, fall back
                db = -bb / aa
            # perform shift
            b += db
            c, s = onnls(y - b, g, lam=lam, mask=mask)
            db = np.mean(y - c) - b
            b += db
            lam -= db / f_lam

    if penalty == 0:  # get (locally optimal) L0 solution

        def c4smin(y, s, s_min):
            ls = np.append(np.where(s > s_min)[0], T)
            tmp = np.zeros_like(s)
            l = ls[0]  # first pool
            tmp[:l] = max(
                0,
                np.exp(log(d) * np.arange(l)).dot(y[:l]) * (1 - d * d) /
                (1 - d**(2 * l))) * np.exp(log(d) * np.arange(l))
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f
                tmp[f] = (g11[:l].dot(y[f:f + l]) -
                          g11g12[l - 1] * tmp[f - 1]) / g11g11[l - 1]
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            return tmp

        spikesizes = np.sort(s[s > 1e-6])
        i = len(spikesizes) // 2
        l = 0
        u = len(spikesizes) - 1
        while u - l > 1:
            s_min = spikesizes[i]
            tmp = c4smin(y - b, s, s_min)
            res = y - b - tmp
            RSS = res.dot(res)
            if RSS < thresh or i == 0:
                l = i
                i = (l + u) // 2
                res0 = tmp
            else:
                u = i
                i = (l + u) // 2
        if i > 0:
            c = res0
            s = np.append([0, 0], c[2:] - g[0] * c[1:-1] - g[1] * c[:-2])

    return c, s, b, g, lam
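The penalty == 0 branch above searches over candidate spike sizes by bisection; as a stripped-down sketch of that search, with a hypothetical rss_for standing in for the c4smin-based residual computation:

# sketch of the bisection over sorted spike sizes used for the L0 solution;
# rss_for(s_min) is a hypothetical callable returning the RSS of the
# solution obtained with spike-size threshold s_min
def bisect_smin(spikesizes, rss_for, thresh):
    l, u = 0, len(spikesizes) - 1
    i = len(spikesizes) // 2
    while u - l > 1:
        if rss_for(spikesizes[i]) < thresh or i == 0:
            l = i          # constraint still satisfied: raise the threshold
        else:
            u = i          # constraint violated: lower the threshold
        i = (l + u) // 2
    return spikesizes[l]   # largest threshold keeping RSS below thresh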
Example #4
def constrained_onnlsAR2(y,
                         g,
                         sn,
                         optimize_b=True,
                         b_nonneg=True,
                         optimize_g=0,
                         decimate=5,
                         shift=100,
                         window=None,
                         tol=1e-9,
                         max_iter=1,
                         penalty=1):
    """ Infer the most likely discretized spike train underlying an AR(2) fluorescence trace

    Solves the noise constrained sparse non-negative deconvolution problem
    min |s|_1 subject to |c-y|^2 = sn^2 T and s_t = c_t-g1 c_{t-1}-g2 c_{t-2} >= 0

    Parameters
    ----------
    y : array of float
        One dimensional array containing the fluorescence intensities (with baseline
        already subtracted) with one entry per time-bin.
    g : (float, float)
        Parameters of the AR(2) process that models the fluorescence impulse response.
    sn : float
        Standard deviation of the noise distribution.
    optimize_b : bool, optional, default True
        Optimize baseline if True else it is set to 0, see y.
    b_nonneg : bool, optional, default True
        Enforce strictly non-negative baseline if True.
    optimize_g : int, optional, default 0
        Number of large, isolated events to consider for optimizing g.
        No optimization if optimize_g=0.
    decimate : int, optional, default 5
        Decimation factor for estimating hyper-parameters faster on decimated data.
    shift : int, optional, default 100
        Number of frames by which to shift window from one run of NNLS to the next.
    window : int, optional, default None (200 or larger depending on g)
        Window size.
    tol : float, optional, default 1e-9
        Tolerance parameter.
    max_iter : int, optional, default 1
        Maximal number of iterations.
    penalty : int, optional, default 1
        Sparsity penalty. 1: min |s|_1  0: min |s|_0

    Returns
    -------
    c : array of float
        The inferred denoised fluorescence signal at each time-bin.
    s : array of float
        Discretized deconvolved neural activity (spikes).
    b : float
        Fluorescence baseline value.
    (g1, g2) : tuple of float
        Parameters of the AR(2) process that models the fluorescence impulse response.
    lam : float
        Sparsity penalty parameter lambda of dual problem.

    References
    ----------
    * Friedrich J and Paninski L, NIPS 2016
    * Friedrich J, Zhou P, and Paninski L, PLOS Computational Biology 2017
    """

    T = len(y)
    d = (g[0] + sqrt(g[0] * g[0] + 4 * g[1])) / 2
    r = (g[0] - sqrt(g[0] * g[0] + 4 * g[1])) / 2
    if window is None:
        window = int(min(T, max(200, -5 / log(d))))
    if not optimize_g:
        g11 = (np.exp(log(d) * np.arange(1, T + 1)) * np.arange(1, T + 1)) \
            if d == r else \
            (np.exp(log(d) * np.arange(1, T + 1)) -
             np.exp(log(r) * np.arange(1, T + 1))) / (d - r)
        g12 = np.append(0, g[1] * g11[:-1])
        g11g11 = np.cumsum(g11 * g11)
        g11g12 = np.cumsum(g11 * g12)
        Sg11 = np.cumsum(g11)
        f_lam = 1 - g[0] - g[1]
    elif decimate == 0:  # need to run AR1 anyways for estimating AR coeffs
        decimate = 1
    thresh = sn * sn * T
    # get initial estimate of b and lam on downsampled data using AR1 model
    if decimate > 0:
        _, s, b, aa, lam = constrained_oasisAR1(
            y[:len(y) // decimate * decimate].reshape(-1, decimate).mean(1),
            d**decimate,
            sn / sqrt(decimate),
            optimize_b=optimize_b,
            b_nonneg=b_nonneg,
            optimize_g=optimize_g)
        if optimize_g:
            d = aa**(1. / decimate)
            if decimate > 1:
                s = oasisAR1(y - b, d, lam=lam * (1 - aa) / (1 - d))[1]
            r = estimate_time_constant(s, 1, fudge_factor=.98)[0]
            g[0] = d + r
            g[1] = -d * r
            g11 = (np.exp(log(d) * np.arange(1, T + 1)) -
                   np.exp(log(r) * np.arange(1, T + 1))) / (d - r)
            g12 = np.append(0, g[1] * g11[:-1])
            g11g11 = np.cumsum(g11 * g11)
            g11g12 = np.cumsum(g11 * g12)
            Sg11 = np.cumsum(g11)
            f_lam = 1 - g[0] - g[1]
        elif decimate > 1:
            s = oasisAR1(y - b, d, lam=lam * (1 - aa) / (1 - d))[1]
        lam *= (1 - d**decimate) / f_lam
        # s = oasisAR1(s, r)[1]
        # this window size seems necessary and sufficient
        ff = np.ravel(
            [a + np.arange(-2, 2) for a in np.where(s > s.max() / 10.)[0]])
        ff = np.unique(ff[(ff >= 0) * (ff < T)]).astype(int)
        mask = np.zeros(T, dtype=bool)
        mask[ff] = True
    else:
        b = np.percentile(y, 15) if optimize_b else 0
        lam = 2 * sn * np.linalg.norm(g11)
        mask = None
    if b_nonneg:
        b = max(b, 0)
    # run ONNLS
    c, s = onnls(y - b,
                 g,
                 lam=lam,
                 mask=mask,
                 shift=shift,
                 window=window,
                 tol=tol)
    g_converged = False
    if not optimize_b:  # don't optimize b, just the dual variable lambda and g if optimize_g
        for i in range(max_iter - 1):
            res = y - c
            RSS = res.dot(res)
            if np.abs(RSS - thresh) < 1e-4:
                break
            # calc shift dlam, here attributed to sparsity penalty
            tmp = np.empty(T)
            ls = np.append(np.where(s > 1e-6)[0], T)
            l = ls[0]
            tmp[:l] = (1 + d) / (1 + d**l) * np.exp(
                log(d) * np.arange(l))  # first pool
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f - 1
                # if and elif correct last 2 time points for |s|_1 instead |c|_1
                if i == len(ls) - 2:  # last pool
                    tmp[f] = (1. / f_lam if l == 0 else
                              (Sg11[l] + g[1] / f_lam * g11[l - 1] +
                               (g[0] + g[1]) / f_lam * g11[l] -
                               g11g12[l] * tmp[f - 1]) / g11g11[l])
                # secondlast pool if last one has length 1
                elif i == len(ls) - 3 and ls[-2] == T - 1:
                    tmp[f] = (Sg11[l] + g[1] / f_lam * g11[l] -
                              g11g12[l] * tmp[f - 1]) / g11g11[l]
                else:  # all other pools
                    tmp[f] = (Sg11[l] - g11g12[l] * tmp[f - 1]) / g11g11[l]
                l += 1
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]

            aa = tmp.dot(tmp)
            bb = res.dot(tmp)
            cc = RSS - thresh
            try:
                dlam = (-bb + sqrt(bb * bb - aa * cc)) / aa
            except ValueError:  # sqrt of a negative: no real root, fall back
                dlam = -bb / aa
            # perform shift
            lam += dlam / f_lam
            c, s = onnls(y,
                         g,
                         lam=lam,
                         mask=mask,
                         shift=shift,
                         window=window,
                         tol=tol)

            # update g
            if optimize_g and (not g_converged):

                def getRSS(y, opt):
                    ld, lr = opt
                    if ld < lr:
                        return 1e3 * thresh
                    d, r = exp(ld), exp(lr)
                    g1, g2 = d + r, -d * r
                    tmp = onnls(y, [g1, g2], lam,
                                mask=(s > 1e-2 * s.max()))[0] - y
                    return tmp.dot(tmp)

                result = minimize(lambda x: getRSS(y, x), (log(d), log(r)),
                                  bounds=((None, -1e-4), (None, -1e-3)),
                                  method='L-BFGS-B',
                                  options={
                                      'gtol': 1e-04,
                                      'maxiter': 10,
                                      'ftol': 1e-05
                                  })
                if abs(result['x'][1] - log(d)) < 1e-4:
                    g_converged = True
                ld, lr = result['x']
                d, r = exp(ld), exp(lr)
                g = (d + r, -d * r)
                c, s = onnls(y,
                             g,
                             lam=lam,
                             mask=mask,
                             shift=shift,
                             window=window,
                             tol=tol)

    else:  # optimize b
        db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
        b += db
        lam -= db / (1 - g[0] - g[1])
        for i in range(max_iter - 1):
            res = y - c - b
            RSS = res.dot(res)
            if np.abs(RSS - thresh) < 1e-4:
                break
            # calc shift db, here attributed to baseline
            tmp = np.empty(T)
            ls = np.append(np.where(s > 1e-6)[0], T)
            l = ls[0]
            tmp[:l] = (1 + d) / (1 + d**l) * np.exp(
                log(d) * np.arange(l))  # first pool
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f
                tmp[f] = (Sg11[l - 1] -
                          g11g12[l - 1] * tmp[f - 1]) / g11g11[l - 1]
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            tmp -= tmp.mean()
            aa = tmp.dot(tmp)
            bb = res.dot(tmp)
            cc = RSS - thresh
            try:
                db = (-bb + sqrt(bb * bb - aa * cc)) / aa
            except ValueError:  # sqrt of a negative: no real root, fall back
                db = -bb / aa
            # perform shift
            if b_nonneg:
                db = max(db, -b)
            b += db
            c, s = onnls(y - b,
                         g,
                         lam=lam,
                         mask=mask,
                         shift=shift,
                         window=window,
                         tol=tol)
            # update b and lam
            db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
            b += db
            lam -= db / f_lam

            # update g and b
            if optimize_g and (not g_converged):

                def getRSS(y, opt):
                    b, ld, lr = opt
                    if ld < lr:
                        return 1e3 * thresh
                    d, r = exp(ld), exp(lr)
                    g1, g2 = d + r, -d * r
                    tmp = b + onnls(
                        y - b, [g1, g2], lam, mask=(s > 1e-2 * s.max()))[0] - y
                    return tmp.dot(tmp)

                result = minimize(lambda x: getRSS(y, x), (b, log(d), log(r)),
                                  bounds=((0 if b_nonneg else None, None),
                                          (None, -1e-4), (None, -1e-3)),
                                  method='L-BFGS-B',
                                  options={
                                      'gtol': 1e-04,
                                      'maxiter': 10,
                                      'ftol': 1e-05
                                  })
                if abs(result['x'][1] - log(d)) < 1e-3:
                    g_converged = True
                b, ld, lr = result['x']
                d, r = exp(ld), exp(lr)
                g = (d + r, -d * r)
                c, s = onnls(y - b,
                             g,
                             lam=lam,
                             mask=mask,
                             shift=shift,
                             window=window,
                             tol=tol)
                # update b and lam
                db = max(np.mean(y - c), 0 if b_nonneg else -np.inf) - b
                b += db
                lam -= db

    if penalty == 0:  # get (locally optimal) L0 solution

        def c4smin(y, s, s_min):
            ls = np.append(np.where(s > s_min)[0], T)
            tmp = np.zeros_like(s)
            l = ls[0]  # first pool
            tmp[:l] = max(
                0,
                np.exp(log(d) * np.arange(l)).dot(y[:l]) * (1 - d * d) /
                (1 - d**(2 * l))) * np.exp(log(d) * np.arange(l))
            for i, f in enumerate(ls[:-1]):  # all other pools
                l = ls[i + 1] - f
                tmp[f] = (g11[:l].dot(y[f:f + l]) -
                          g11g12[l - 1] * tmp[f - 1]) / g11g11[l - 1]
                tmp[f + 1:f + l] = g11[1:l] * tmp[f] + g12[1:l] * tmp[f - 1]
            return tmp

        spikesizes = np.sort(s[s > 1e-6])
        i = len(spikesizes) // 2
        l = 0
        u = len(spikesizes) - 1
        while u - l > 1:
            s_min = spikesizes[i]
            tmp = c4smin(y - b, s, s_min)
            res = y - b - tmp
            RSS = res.dot(res)
            if RSS < thresh or i == 0:
                l = i
                i = (l + u) // 2
                res0 = tmp
            else:
                u = i
                i = (l + u) // 2
        if i > 0:
            c = res0
            s = np.append([0, 0], c[2:] - g[0] * c[1:-1] - g[1] * c[:-2])

    return c, s, b, g, lam
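A minimal usage sketch of constrained_onnlsAR2 on a synthetic AR(2) trace; the coefficients, noise level, and simulation are illustrative assumptions.

# hedged usage sketch (synthetic data; AR(2) coefficients chosen so the
# characteristic roots are real, i.e. g1**2 + 4*g2 > 0)
import numpy as np

rng = np.random.RandomState(0)
T, g1, g2, sn = 3000, 1.7, -.712, .3
s_true = (rng.rand(T) < .01).astype(float)
c_true = s_true.copy()
for t in range(2, T):                       # AR(2) calcium dynamics
    c_true[t] += g1 * c_true[t - 1] + g2 * c_true[t - 2]
y = c_true + sn * rng.randn(T)

c, s, b, g, lam = constrained_onnlsAR2(y, [g1, g2], sn, optimize_b=False)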
Example #5
plot_trace(n)

# plot result after rerunning oasis to fix violations
solution, active_set = foo(active_set, g, ll)
ax = fig.add_axes([ax1, .31, 1 - ax1, .12])
plot_trace(n)

# do few more iterations
for _ in range(3):
    solution, active_set, lam = update_lam(y, solution, active_set, g, lam,
                                           sn * sn * len(y))
    solution, active_set, g = update_g(y, active_set, g, lam)

# plot converged results with comparison traces
ax = fig.add_axes([ax1, .07, 1 - ax1, .12])
sol_given_g = constrained_oasisAR1(y, .95, sn)[0]
estimated_g = estimate_parameters(y, p=1)[0][0]
print('estimated gamma via autocorrelation: ', estimated_g)
print('optimized gamma                    : ', g)
sol_PSD_g = oasisAR1(y, estimated_g, 0)[0]
# print((sol_PSD_g - y).dot(sol_PSD_g - y), sn * sn * T)  # renders constraint problem infeasible
plt.plot(sol_given_g, '--', c=col[6], label=r'true $\gamma$', zorder=11)
plt.plot(sol_PSD_g, c=col[5], label=r'$\gamma$ from autocovariance', zorder=10)
plt.legend(frameon=False, loc=(.1, .62), ncol=2)
plot_trace(n)
plt.xticks([300, 600, 900, 1200], [10, 20, 30, 40])
plt.xlabel('Time [s]', labelpad=-10)
plt.show()

print('correlation with ground truth calcium for   given   gamma ',
      np.corrcoef(sol_given_g, trueC[n])[0, 1])
Example #6
results = {}
for opt in [
        '-', 'l', 'lb', 'lbg', 'lbg10', 'lbg5', 'lbg_ds', 'lbg10_ds', 'lbg5_ds'
]:
    results[opt] = {}
    results[opt]['time'] = []
    results[opt]['distance'] = []
    results[opt]['correlation'] = []
    for i, y in enumerate(Y):
        g, sn = estimate_parameters(y, p=1, fudge_factor=.99)
        lam = 2 * sn * (1 - g * g)**(-.5)
        b = np.percentile(y, 15)
        if opt == '-':
            foo = lambda y: oasisAR1(y - b, g, lam)
        elif opt == 'l':
            foo = lambda y: constrained_oasisAR1(y - b, g, sn)
        elif opt == 'lb':
            foo = lambda y: constrained_oasisAR1(y, g, sn, optimize_b=True)
        elif opt == 'lbg':
            foo = lambda y: constrained_oasisAR1(
                y, g, sn, optimize_b=True, optimize_g=len(y))
        elif opt == 'lbg10':
            foo = lambda y: constrained_oasisAR1(
                y, g, sn, optimize_b=True, optimize_g=10)
        elif opt == 'lbg5':
            foo = lambda y: constrained_oasisAR1(
                y, g, sn, optimize_b=True, optimize_g=5)
        elif opt == 'lbg_ds':
            foo = lambda y: constrained_oasisAR1(
                y, g, sn, optimize_b=True, optimize_g=len(y), decimate=10)
        elif opt == 'lbg10_ds':
            foo = lambda y: constrained_oasisAR1(  # inferred from the 'lbg10' and 'lbg_ds' patterns above
                y, g, sn, optimize_b=True, optimize_g=10, decimate=10)
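The snippet is cut off before the measurement code; what follows is a hedged, hypothetical sketch of how the three metrics declared above might be filled in inside the inner loop (the ground-truth traces trueC and the metric choices are assumptions, not recovered from the original script).

# hypothetical sketch: time each option's solver `foo` and score the
# denoised trace against an assumed ground-truth trace trueC[i]
from timeit import Timer
t = Timer(lambda: foo(y)).timeit(number=1)
c = foo(y)[0]                                # denoised trace
results[opt]['time'].append(t)
results[opt]['distance'].append(np.linalg.norm(c - trueC[i]))
results[opt]['correlation'].append(np.corrcoef(c, trueC[i])[0, 1])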
Example #7
def denoise(s):
    # estimate AR(1) coefficient and noise level, then denoise with OASIS
    tmp = cse.deconvolution.estimate_parameters(s, 1, fudge_factor=.97)
    return constrained_oasisAR1(s, tmp[0][0], tmp[1], optimize_b=True)
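A short call sketch; the trace is synthetic and assumes the imports used by the snippet are in scope.

# illustrative call on a hypothetical fluorescence trace
import numpy as np
trace = 5. + np.random.randn(2000)
c, s, b, g, lam = denoise(trace)   # (c, s, b, g, lam) from constrained_oasisAR1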
Example #8
def constrained_foopsi(fluor, bl=None, c1=None, g=None, sn=None, p=None, method='cvxpy', bas_nonneg=True,
                       noise_range=[.25, .5], noise_method='logmexp', lags=5, fudge_factor=1.,
                       verbosity=False, solvers=None, optimize_g=0, penalty=1, **kwargs):
    """ Infer the most likely discretized spike train underlying a fluorescence trace

    It relies on a noise-constrained deconvolution approach.

    Parameters
    ----------
    fluor: np.ndarray
        One dimensional array containing the fluorescence intensities with
        one entry per time-bin.
    bl: [optional] float
        Fluorescence baseline value. If no value is given, then bl is estimated
        from the data.
    c1: [optional] float
        value of calcium at time 0
    g: [optional] list,float
        Parameters of the AR process that models the fluorescence impulse response.
        Estimated from the data if no value is given
    sn: float, optional
        Standard deviation of the noise distribution.  If no value is given,
        then sn is estimated from the data.
    p: int
        order of the autoregressive model
    method: [optional] string
        solution method for the constrained problem: 'cvx', 'cvxpy', or 'oasis'
    bas_nonneg: bool
        baseline strictly non-negative
    noise_range: list of two floats
        frequency range for averaging noise PSD
    noise_method: string
        method of averaging noise PSD
    lags: int
        number of lags for estimating time constants
    fudge_factor: float
        fudge factor for reducing time constant bias
    verbosity: bool
         display optimization details
    solvers: list of string
        primary and secondary (if the problem is infeasible for an approximate
        solution) solvers to be used with cvxpy; default is ['ECOS', 'SCS']
    optimize_g: int, optional, default 0
        number of large, isolated events to consider for optimizing g ('oasis' method only)
    penalty: int, optional, default 1
        sparsity penalty. 1: min |s|_1  0: min |s|_0 ('oasis' method only)

    Returns
    -------
    c: np.ndarray float
        The inferred denoised fluorescence signal at each time-bin.
    bl, c1, g, sn : As explained above
    sp: ndarray of float
        Discretized deconvolved neural activity (spikes)

    References
    ----------
    * Pnevmatikakis et al. 2016. Neuron 89(2):285-299, http://dx.doi.org/10.1016/j.neuron.2015.11.037
    * Machado et al. 2015. Cell 162(2):338-350
    """

    if p is None:
        raise Exception("You must specify the value of p")

    if g is None or sn is None:
        g, sn = estimate_parameters(fluor, p=p, sn=sn, g=g, range_ff=noise_range,
                                    method=noise_method, lags=lags, fudge_factor=fudge_factor)
    if p == 0:
        c1 = 0
        g = np.array(0)
        bl = 0
        c = np.maximum(fluor, 0)
        sp = c.copy()
    else:
        if method == 'cvx':
            c, bl, c1, g, sn, sp = cvxopt_foopsi(
                fluor, b=bl, c1=c1, g=g, sn=sn, p=p, bas_nonneg=bas_nonneg, verbosity=verbosity)

        elif method == 'cvxpy':

            c, bl, c1, g, sn, sp = cvxpy_foopsi(
                fluor,  g, sn, b=bl, c1=c1, bas_nonneg=bas_nonneg, solvers=solvers)

        elif method == 'oasis':
            from oasis import constrained_oasisAR1
            if p == 1:
                if bl is None:
                    c, sp, bl, g, _ = constrained_oasisAR1(
                        fluor, g[0], sn, optimize_b=True, b_nonneg=bas_nonneg,
                        optimize_g=optimize_g, penalty=penalty)
                else:
                    c, sp, _, g, _ = constrained_oasisAR1(
                        fluor - bl, g[0], sn, optimize_b=False, penalty=1)
                c1 = c[0]
                # remove initial calcium to align with the other foopsi methods;
                # it is added back in function constrained_foopsi_parallel of temporal.py
                c -= c1 * g**np.arange(len(fluor))
            elif p == 2:
                if bl is None:
                    c, sp, bl, g, _ = constrained_oasisAR2(
                        fluor, g, sn, optimize_b=True, b_nonneg=bas_nonneg,
                        optimize_g=optimize_g, penalty=penalty)
                else:
                    c, sp, _, g, _ = constrained_oasisAR2(
                        fluor - bl, g, sn, optimize_b=False, penalty=1)
                c1 = c[0]
                d = (g[0] + sqrt(g[0] * g[0] + 4 * g[1])) / 2
                c -= c1 * d**np.arange(len(fluor))
            else:
                raise Exception('OASIS is currently only implemented for p=1 and p=2')
            g = np.ravel(g)

        else:
            raise Exception('Undefined Deconvolution Method')

    return c, bl, c1, g, sn, sp
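Finally, a minimal usage sketch of constrained_foopsi with the 'oasis' backend; the synthetic trace and parameter choices are illustrative assumptions.

# hedged usage sketch: synthetic AR(1) trace deconvolved with method='oasis'
import numpy as np

rng = np.random.RandomState(0)
T, gamma = 2000, .95
calcium = np.zeros(T)
for t in range(1, T):                 # AR(1) calcium dynamics
    calcium[t] = gamma * calcium[t - 1] + (rng.rand() < .01)
fluor = calcium + .3 * rng.randn(T)

c, bl, c1, g, sn, sp = constrained_foopsi(fluor, p=1, method='oasis')
print('baseline %.3f  noise %.3f  g %s' % (bl, sn, g))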