def fnn_deconvolution(F, C0=None, theta0=None, dt=0.02,
                      learn_theta=(0, 0, 0, 0), params_tol=1E-3,
                      spikes_tol=1E-3, params_maxiter=10, spikes_maxiter=100,
                      verbosity=1, plot=False):
    """
    Infer spike trains from fluorescence using Fast Non-Negative Deconvolution
    ---------------------------------------------------------------------------

    This function uses an interior point method to solve the following
    optimization problem:

        n_best = argmax_{n >= 0} P(n | F)

    where n_best is a maximum a posteriori (MAP) estimate of the spike train,
    given the fluorescence signal F, and the model:

    C_{t} = gamma * C_{t-1} + n_t,          n_t ~ Poisson(lambda * dt)
    F_{t} = C_t + beta + epsilon,           epsilon ~ N(0, sigma)

    It is also possible to estimate the model parameters sigma, beta and lambda
    from the data using pseudo-EM updates.

    Arguments:
    ---------------------------------------------------------------------------
    F: ndarray, [nt]
        measured fluorescence values

    C0: ndarray, [nt]
        initial estimate of the calcium concentration for each time bin.

    theta0: len(4) sequence
        initial estimates of the model parameters (sigma, beta, lambda, gamma).

    dt: float scalar
        duration of each time bin (s)

    learn_theta: len(4) bool sequence
        specifies which of the model parameters to attempt to learn via
        pseudo-EM iterations. currently gamma cannot be learned, and
        requesting it will raise an error.

    spikes_tol: float scalar
        termination condition for interior point spike train estimation:
            spikes_tol > abs((LL_prev - LL) / LL)

    params_tol: float scalar
        as above, but for the model parameter pseudo-EM estimation

    spikes_maxiter: int scalar
        maximum number of interior point iterations to estimate MAP spike train

    params_maxiter: int scalar
        maximum number of pseudo-EM iterations to estimate model parameters

    verbosity: int scalar
        0: no convergence messages
        1: convergence messages for model parameters (default)
        2: convergence messages for model parameters & MAP spike train

    plot: bool scalar
        live plot of n and (C + beta), updated during parameter estimation

    Returns:
    ---------------------------------------------------------------------------
    n_best: ndarray, [nt]
        MAP estimate of the spike train

    C_best: ndarray, [nt]
        estimated intracellular calcium concentration (A.U.)

    LL_best: float scalar
        posterior log-likelihood of F given n_best and theta_best

    theta_best: ndarray, [4]
        model parameters, updated according to learn_theta

    Reference:
    ---------------------------------------------------------------------------
    Vogelstein, J. T., Packer, A. M., Machado, T. A., Sippy, T., Babadi, B.,
    Yuste, R., & Paninski, L. (2010). Fast nonnegative deconvolution for spike
    train inference from population calcium imaging. Journal of
    Neurophysiology, 104(6), 3691-3704. doi:10.1152/jn.01073.2009

    """

    tstart = time.time()

    nt = F.shape[0]

    if theta0 is None:
        theta_best = _init_theta(F, dt, hz=0.3, tau=0.5)
    else:
        theta_best = theta0

    # scale F to be between 0 and 1
    offset = F.min()
    scale = F.max() - offset
    F = (F - offset) / scale

    sigma, beta, lamb, gamma = theta_best

    # apply scale and offset to beta and sigma
    beta = (beta - offset) / scale
    sigma = sigma / scale

    theta_best = np.vstack((sigma, beta, lamb, gamma))

    if C0 is None:
        C = _init_C(F, dt)
    else:
        C = C0

    if plot:
        fig, ax = plt.subplots(2, 1, sharex=True, figsize=(10, 10))
        xmax = min(2000, nt)
        fr_t = np.arange(xmax) * dt
        ax[0].set_ylim(F.min(), F.max())
        ax[0].set_xlim(0, fr_t[-1])
        ax[1].set_ylim(0, 1)
        fig.canvas.draw()
        C_bg = fig.canvas.copy_from_bbox(ax[0].bbox)
        nt_bg = fig.canvas.copy_from_bbox(ax[1].bbox)
        F_line, = ax[0].plot(fr_t, F[:xmax], '-b')
        C_line, = ax[0].plot(fr_t, C[:xmax] + theta_best[1], '-r',
                             scalex=False, scaley=False)
        nt_line, = ax[1].plot(fr_t[1:xmax], np.ones(xmax - 1) * lamb * dt,
                              '-r', scalex=False, scaley=False)
        fig.canvas.draw()
        time.sleep(0.1)

    # if we're not learning the parameters, this step is all we need to do
    n_best, C_best, LL_best = estimate_MAP_spikes(
        F, C, theta_best, dt, spikes_tol, spikes_maxiter,
        verbosity
    )

    if plot:
        F_line.set_data(fr_t, F[:xmax])
        C_line.set_data(fr_t, C_best[:xmax] + theta_best[1])
        nt_line.set_data(fr_t[1:], n_best[:xmax - 1])
        fig.canvas.restore_region(C_bg)
        fig.canvas.restore_region(nt_bg)
        ax[0].draw_artist(C_line)
        ax[0].draw_artist(F_line)
        ax[1].draw_artist(nt_line)
        fig.canvas.blit(ax[0].bbox)
        fig.canvas.blit(ax[1].bbox)

    # pseudo-EM iterations to optimize the model parameters
    if np.any(learn_theta):

        if verbosity >= 1:
            sigma, beta, lamb, gamma = theta_best
            print('Params: iter=%3i; sigma=%6.4f, beta=%6.4f, '
                  'lambda=%6.4f, gamma=%6.4f; LL=%12.2f; delta_LL= N/A'
                  % (0, sigma, beta, lamb, gamma, LL_best))

        n = n_best
        C = C_best
        LL = LL_best
        theta = theta_best
        nloop_params = 1
        done = False

        while not done:

            # update the parameter estimates
            theta1 = _update_theta(n, C, F, theta, dt, learn_theta)

            # get the new n, C, and LL
            n1, C1, LL1 = estimate_MAP_spikes(
                F, C, theta1, dt, spikes_tol,
                spikes_maxiter, verbosity
            )

            # test for convergence
            delta_LL = -((LL1 - LL) / LL)

            if verbosity >= 1:
                sigma, beta, lamb, gamma = theta1

                print('Params: iter=%3i; sigma=%6.4f, beta=%6.4f, '
                      'lambda=%6.4f, gamma=%6.4f; LL=%12.2f; delta_LL= %8.4g'
                      % (nloop_params, sigma, beta, lamb, gamma, LL1,
                          delta_LL))

            if plot:
                C_line.set_data(fr_t, C1[:xmax] + theta1[1])
                nt_line.set_data(fr_t[1:xmax], n1[:xmax - 1])
                fig.canvas.restore_region(C_bg)
                fig.canvas.restore_region(nt_bg)
                ax[0].draw_artist(F_line)
                ax[0].draw_artist(C_line)
                ax[1].draw_artist(nt_line)
                fig.canvas.blit(ax[0].bbox)
                fig.canvas.blit(ax[1].bbox)
                time.sleep(0.1)

            # if the LL improved, keep these parameters
            if LL1 > LL_best:
                n_best, C_best, LL_best, theta_best = (
                    n1, C1, LL1, theta1)

            if (np.abs(delta_LL) < params_tol):
                if verbosity >= 1:
                    print("Parameters converged after %i iterations"
                          % (nloop_params))
                    print "Last delta log-likelihood:\t%8.4g" % delta_LL
                    print "Best posterior log-likelihood:\t%11.4f" % (
                        LL_best)
                done = True

            elif delta_LL < 0:
                if verbosity >= 1:
                    print('Terminating because solution is diverging')
                done = True

            elif nloop_params > params_maxiter:
                if verbosity >= 1:
                    print('Solution failed to converge before params_maxiter')
                done = True

            n, C, LL, theta = n1, C1, LL1, theta1
            nloop_params += 1

    if verbosity >= 1:
        time_taken = time.time() - tstart
        print "Completed: %s" % s2h(time_taken)

    sigma, beta, lamb, gamma = theta_best

    # correct for the offset and scaling we originally applied to F
    C_best *= scale
    beta *= scale
    beta += offset
    sigma *= scale

    # since we can't use FNND to estimate the spike probability in the 0th
    # time bin, for convenience we prepend the prior expectation (lamb * dt)
    # to n so that it has the same shape as F and C
    n_best = np.concatenate((np.atleast_1d(lamb * dt), n_best))

    theta_best = np.hstack((sigma, beta, lamb, gamma))

    return n_best, C_best, LL_best, theta_best
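

# A minimal usage sketch (an illustrative assumption, not part of the
# original API surface): simulate the generative model described in the
# fnn_deconvolution docstring with known parameters, then recover the spike
# train. The parameter values and the _demo_* name are hypothetical.
def _demo_fnn_deconvolution():
    rng = np.random.RandomState(0)
    nt, dt = 1000, 0.02
    sigma, beta, lamb, gamma = 0.05, 0.1, 1.0, 0.98

    # C_t = gamma * C_{t-1} + n_t,  n_t ~ Poisson(lambda * dt)
    n_true = rng.poisson(lamb * dt, size=nt).astype(float)
    C_true = np.zeros(nt)
    for t in range(1, nt):
        C_true[t] = gamma * C_true[t - 1] + n_true[t]

    # F_t = C_t + beta + epsilon,  epsilon ~ N(0, sigma)
    F = C_true + beta + sigma * rng.randn(nt)

    # learn sigma, beta and lambda, but not gamma (learning gamma raises)
    n_hat, C_hat, LL, theta = fnn_deconvolution(
        F, dt=dt, theta0=(sigma, beta, lamb, gamma),
        learn_theta=(1, 1, 1, 0), verbosity=1)
    return n_hat, C_hat, LL, theta

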
def run_simulation(adjacency_matrix=None,
                   weight=None,
                   noise_rate=NOISE_RATE,
                   noise_weight=NOISE_WEIGHT,
                   resolution=RESOLUTION,
                   simtime=SIMTIME_RUN,
                   save=False, output_path='data/', basename='nest_sim_',
                   overwrite=False, verbose=True, print_time=False):

    if adjacency_matrix is None:
        # construct a network according to defaults
        adjacency_matrix = fake_network.construct_network()

    if weight is None:
        # if unspecified, find the weight automatically according to defaults
        weight, _ = adjust_weight(adjacency_matrix, noise_weight=noise_weight,
                                  noise_rate=noise_rate, resolution=resolution,
                                  verbose=verbose, print_time=print_time)

    # convert into network_object
    network_obj = adjacency2netobj(adjacency_matrix)

    ncells, ncons, neuronsE, espikes, noise, GIDoffset = create_network(
        network_obj, weight, noise_weight, noise_rate, resolution=resolution,
        verbose=verbose, print_time=print_time,
    )

    if verbose:
        print('Simulating %s of activity for %i neurons'
              % (s2h(simtime / 1000.), ncells))

    startsimulate = time.time()
    nest.Simulate(simtime)
    endsimulate = time.time()

    sim_elapsed = endsimulate - startsimulate

    totalspikes = nest.GetStatus(espikes, "n_events")[0]
    events = nest.GetStatus(espikes, "events")[0]

    # NEST increments the GID whenever a new node is created. we therefore
    # subtract the GID offset, so that the output cell indices are correct
    # regardless of when the corresponding nodes were created in NEST
    cell_indices = events["senders"] - GIDoffset
    spike_times = events["times"]

    burst_rate = determine_burst_rate(spike_times, simtime, ncells)

    if verbose:
        print "\n" + "-" * 60
        print "Number of neurons: ", ncells
        print "Number of spikes recorded: ", totalspikes
        print "Avg. spike rate of neurons: %.2f Hz" % (
            totalspikes / (ncells * simtime / 1000.))
        print "Network burst rate: %.2f Hz" % burst_rate
        print "Simulation time: %s" % s2h(sim_elapsed)
        print "-" * 60

    # resample into 20 ms bins (50 Hz) to make an [ncells, ntimesteps] array
    # of spike counts
    resampled = resample_spikes(spike_times, cell_indices,
                                output_resolution=20, simtime=simtime,
                                ncells=ncells)

    if save:

        today = str(datetime.date.today())
        # include the extension up front so the existence check matches the
        # file that np.savez actually writes
        fname = '%s%s.npz' % (basename, today)
        fullpath = os.path.join(output_path, fname)
        if os.path.exists(fullpath) and not overwrite:
            suffix = 0
            while os.path.exists(fullpath):
                suffix += 1
                fname = '%s%s_%i.npz' % (basename, today, suffix)
                fullpath = os.path.join(output_path, fname)
        np.savez(fullpath, spike_times=spike_times, cell_indices=cell_indices,
                 resampled=resampled)

        if verbose:
            print("Saved output in '%s'" % fullpath)

    return spike_times, cell_indices, resampled
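

# A minimal usage sketch (assumed): drive run_simulation with an explicit
# random network rather than the fake_network defaults. The connectivity,
# the simtime value (NEST uses milliseconds) and the _demo_* name are
# illustrative assumptions; leaving weight=None means adjust_weight will
# first calibrate the synaptic weight, which runs extra short simulations.
def _demo_run_simulation():
    rng = np.random.RandomState(0)
    ncells = 50

    # sparse random directed connectivity (~10% connection probability),
    # with self-connections removed
    adjacency = (rng.rand(ncells, ncells) < 0.1).astype(float)
    np.fill_diagonal(adjacency, 0.)

    spike_times, cell_indices, resampled = run_simulation(
        adjacency_matrix=adjacency, simtime=10000., verbose=True)
    return spike_times, cell_indices, resampled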