Example #1
def pdf(x, a, b, mu=0, sigma=1):
    """ Probability density function of the truncated normal distribution
    on the interval [a, b].

    Args:
        x: a value between a and b at which to evaluate the pdf.
        a: the left boundary of the truncated normal distribution.
        b: the right boundary of the truncated normal distribution.
        mu: the mean value.
        sigma: the standard deviation.

    Returns:
        The probability density at point x.

    Raises:
        ValueError: raised when the values of x, a, or b are invalid.
    """
    if a == np.inf:
        raise ValueError("The interval's left boundary can not be np.inf")
    if b == -np.inf:
        raise ValueError("The interval's right boundary can not be -np.inf")
    if a >= b:
        raise ValueError("The interval's left boundary must be smaller "
                         "than its right boundary")
    if x < a or x > b:
        raise ValueError("The query position is outside of the interval")
            
    # Standardize the query point and the boundaries.
    x = (x - mu) / sigma
    a = (a - mu) / sigma
    b = (b - mu) / sigma

    if a * b <= 0:
        # The interval straddles zero, so ndtr(b) - ndtr(a) is well conditioned.
        area = special.ndtr(b) - special.ndtr(a)
        p = 1 / np.sqrt(2*np.pi) * np.exp(-0.5*x**2) / area
        return p
    else:
        # Both boundaries lie on the same side of zero; reflect everything into
        # the left tail (the normal pdf is symmetric) and work in log space.
        x = -np.abs(x)
        if a > 0:
            low = -np.abs(b)
            up = -np.abs(a)
        else:
            low = -np.abs(a)
            up = -np.abs(b)

        low_log_area = special.log_ndtr(low)
        up_log_area = special.log_ndtr(up)

        # Shift both log areas by 0.5 * up**2 so that exponentiating them does
        # not underflow; the shift cancels in the final ratio.
        low_log_area += 0.5 * up**2
        up_log_area += 0.5 * up**2

        area = np.exp(up_log_area) - np.exp(low_log_area)
        p = 1 / np.sqrt(2*np.pi) * np.exp(-0.5*(x**2 - up**2)) / area
        return p
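A quick sanity check for the function above (a sketch: with mu=0 and sigma=1 the boundaries are already in standard-normal units, so the result should agree with scipy.stats.truncnorm):

import numpy as np
from scipy import special, stats

print(pdf(0.5, a=0, b=2))               # ~0.7377
print(stats.truncnorm(0, 2).pdf(0.5))   # same value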
Example #2
def _compute_rdp_with_order(sample_rate, noise_stddev, order):
    """
    Compute the RDP value at a given order.

    Args:
        sample_rate (float): Sampling probability.
        noise_stddev (float): Standard deviation of the Gaussian noise (the noise multiplier).
        order: The order used for computing rdp.

    Returns:
        float, rdp value.
    """
    if float(order).is_integer():
        log_integrate = -np.inf
        for k in range(int(order) + 1):
            term_k = (np.log(special.binom(order, k)) +
                      k * np.log(sample_rate) +
                      (order - k) * np.log(1 - sample_rate)
                      ) + (k * k - k) / (2 * (noise_stddev**2))
            log_integrate = _log_add(log_integrate, term_k)
        return float(log_integrate) / (order - 1)
    log_part_0, log_part_1 = -np.inf, -np.inf
    k = 0
    z0 = noise_stddev**2 * np.log(1 / sample_rate - 1) + 1 / 2
    while True:
        bi_coef = special.binom(order, k)
        log_coef = np.log(abs(bi_coef))
        j = order - k

        term_k_part_0 = log_coef + k * np.log(sample_rate) + j * np.log(
            1 - sample_rate) + (k * k -
                                k) / (2 *
                                      (noise_stddev**2)) + special.log_ndtr(
                                          (z0 - k) / noise_stddev)

        term_k_part_1 = log_coef + j * np.log(sample_rate) + k * np.log(
            1 - sample_rate) + (j * j -
                                j) / (2 *
                                      (noise_stddev**2)) + special.log_ndtr(
                                          (j - z0) / noise_stddev)

        if bi_coef > 0:
            log_part_0 = _log_add(log_part_0, term_k_part_0)
            log_part_1 = _log_add(log_part_1, term_k_part_1)
        else:
            log_part_0 = _log_subtract(log_part_0, term_k_part_0)
            log_part_1 = _log_subtract(log_part_1, term_k_part_1)

        k += 1
        if np.max([term_k_part_0, term_k_part_1]) < -30:
            break

    return _log_add(log_part_0, log_part_1) / (order - 1)
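_compute_rdp_with_order relies on two helpers, _log_add and _log_subtract, that are not shown here. A minimal sketch consistent with how they are used (stable addition and subtraction of probabilities in log space):

import math
import numpy as np

def _log_add(logx, logy):
    # log(exp(logx) + exp(logy)) without leaving log space.
    a, b = min(logx, logy), max(logx, logy)
    if a == -np.inf:
        return b
    return math.log1p(math.exp(a - b)) + b

def _log_subtract(logx, logy):
    # log(exp(logx) - exp(logy)); assumes logx > logy.
    if logy == -np.inf:
        return logx
    return math.log(math.expm1(logx - logy)) + logy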
Example #3
def _log_erfc(x: float) -> float:
    r"""Computes :math:`log(erfc(x))` with high accuracy for large ``x``.

    Helper function used in computation of :math:`log(A_\alpha)`
    for a fractional alpha.
    """
    return math.log(2) + special.log_ndtr(-x * 2**0.5)
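Because erfc(x) = 2*ndtr(-x*sqrt(2)), the stable form can be spot-checked against a direct computation at moderate x, where erfc does not yet underflow (a sketch):

import math
from scipy import special

for x in (0.5, 2.0, 5.0):
    direct = math.log(math.erfc(x))
    stable = math.log(2) + special.log_ndtr(-x * 2**0.5)
    assert math.isclose(direct, stable, rel_tol=1e-12)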
Example #4
def logq(t, tau, sigma):
    """
    Logarithm of the exponential pdf with scale tau convolved with a normal of
    scale sigma, without the 1/tau factor.
    """
    return -t / tau + 1 / 2 * (sigma / tau)**2 + special.log_ndtr(t / sigma -
                                                                  sigma / tau)
Example #5
def test_log_ndtr_values_gt31(self):
    x = np.array([31.6, 32.8, 34.9, 37.1])
    expected = [
        -1.846036234858162e-219, -2.9440539964066835e-236,
        -3.71721649450857e-267, -1.4047119663106221e-301
    ]
    y = sc.log_ndtr(x)
    assert_allclose(y, expected, rtol=3e-13)
Example #6
def tobit_neg_log_likelihood_der(xs, ys, params):
    x_left, x_mid, x_right = xs
    y_left, y_mid, y_right = ys

    b = params[:-1]
    # s = math.exp(params[-1]) # in censReg, not using chain rule as below; they optimize in terms of log(s)
    s = params[-1]

    beta_jac = np.zeros(len(b))
    sigma_jac = 0

    if y_left is not None:
        left_stats = (y_left - np.dot(x_left, b)) / s
        l_pdf = scipy.stats.norm.logpdf(left_stats)
        l_cdf = log_ndtr(left_stats)
        left_frac = np.exp(l_pdf - l_cdf)
        beta_left = np.dot(left_frac, x_left / s)
        beta_jac -= beta_left

        left_sigma = np.dot(left_frac, left_stats)
        sigma_jac -= left_sigma

    if y_right is not None:
        right_stats = (np.dot(x_right, b) - y_right) / s
        r_pdf = scipy.stats.norm.logpdf(right_stats)
        r_cdf = log_ndtr(right_stats)
        right_frac = np.exp(r_pdf - r_cdf)
        beta_right = np.dot(right_frac, x_right / s)
        beta_jac += beta_right

        right_sigma = np.dot(right_frac, right_stats)
        sigma_jac -= right_sigma

    if y_mid is not None:
        mid_stats = (y_mid - np.dot(x_mid, b)) / s
        beta_mid = np.dot(mid_stats, x_mid / s)
        beta_jac += beta_mid

        mid_sigma = (np.square(mid_stats) - 1).sum()
        sigma_jac += mid_sigma

    # By the chain rule, since the expression above is dloglik/dlogsigma.
    combo_jac = np.append(beta_jac, sigma_jac / s)

    return -combo_jac
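The exp(logpdf - log_ndtr) pattern above computes the inverse Mills ratio pdf(x)/cdf(x) stably; the naive ratio degenerates to 0/0 deep in the left tail (a sketch):

import numpy as np
import scipy.stats
from scipy.special import log_ndtr

x = -40.0
naive = scipy.stats.norm.pdf(x) / scipy.stats.norm.cdf(x)   # nan: 0.0 / 0.0
stable = np.exp(scipy.stats.norm.logpdf(x) - log_ndtr(x))   # ~40.02, finite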
Example #7
def test_log_ndtr_values_16_31(self):
    x = np.array([16.15, 20.3, 21.4, 26.2, 30.9])
    expected = [
        -5.678084565148492e-59, -6.429244467698346e-92,
        -6.680402412553295e-102, -1.328698078458869e-151,
        -5.972288641838264e-210
    ]
    y = sc.log_ndtr(x)
    assert_allclose(y, expected, rtol=2e-13)
Example #9
def test_log_ndtr_values_8_16(self):
    x = np.array([8.001, 8.06, 8.15, 8.5, 10, 12, 14, 16])
    expected = [
        -6.170639424817055e-16, -3.814722443652823e-16,
        -1.819621363526629e-16, -9.479534822203318e-18,
        -7.619853024160525e-24, -1.776482112077679e-33,
        -7.7935368191928e-45, -6.388754400538087e-58
    ]
    y = sc.log_ndtr(x)
    assert_allclose(y, expected, rtol=5e-14)
Example #10
def test_log_ndtr_moderate_le8(self):
    x = np.array([-0.75, -0.25, 0, 0.5, 1.5, 2.5, 3, 4, 5, 7, 8])
    expected = np.array([
        -1.4844482299196562, -0.9130617648111351, -0.6931471805599453,
        -0.3689464152886564, -0.06914345561223398, -0.006229025485860002,
        -0.0013508099647481938, -3.167174337748927e-05,
        -2.866516129637636e-07, -1.279812543886654e-12,
        -6.220960574271786e-16
    ])
    y = sc.log_ndtr(x)
    assert_allclose(y, expected, rtol=1e-14)
Example #11
def _log_erfc(x):
    """Compute log(erfc(x)) with high accuracy for large x."""
    try:
        return math.log(2) + special.log_ndtr(-x * 2**.5)
    except NameError:
        # If log_ndtr is not available, approximate as follows:
        r = special.erfc(x)
        if r == 0.0:
            return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
                    .625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
        else:
            return math.log(r)
Example #12
def _log_erfc(x):
    try:
        return math.log(2) + special.log_ndtr(-x * 2**.5)
    except NameError:
        # If log_ndtr is not available, approximate as follows:
        r = special.erfc(x)
        if r == 0.0:
            # Using the Laurent series at infinity for the tail of the erfc function:
            #     erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
            # To verify in Mathematica:
            #     Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
            return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
                    .625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
        else:
            return math.log(r)
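The Laurent-series fallback only triggers once erfc underflows (around x > 26.5); both branches can be compared at such an x, where the series is already very accurate and log_ndtr is still exact (a sketch):

import math
from scipy import special

x = 30.0
series = (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
          .625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
via_ndtr = math.log(2) + special.log_ndtr(-x * 2**0.5)
assert math.isclose(series, via_ndtr, rel_tol=1e-12)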
Example #13
  def _test_grid_log(self, dtype, grid_spec, error_spec):
    with self.test_session():
      grid = _make_grid(dtype, grid_spec)
      actual = sm.log_ndtr(grid).eval()

      # Basic tests.
      self.assertTrue(np.isfinite(actual).all())
      # On the grid, -inf < log_cdf(x) < 0.  In this case, we should be able
      # to use a huge grid because we have used tricks to escape numerical
      # difficulties.
      self.assertTrue((actual < 0).all())
      _check_strictly_increasing(actual)

      # Versus scipy.
      expected = special.log_ndtr(grid)
      # Scipy prematurely goes to zero at some places that we don't.  So don't
      # include these in the comparison.
      self.assertAllClose(expected.astype(np.float64)[expected < 0],
                          actual.astype(np.float64)[expected < 0],
                          rtol=error_spec.rtol, atol=error_spec.atol)
Example #15
def _logcdf(x):
    # if x > 0: return math.log1p(-math.exp(TrueSkill.logcdf(-x)))
    return log_ndtr(x)
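The commented-out line computes log(1 - Phi(x)) via log1p; by the symmetry of the normal cdf, the same quantity is simply log_ndtr(-x), which also stays accurate for large positive x (a sketch):

import math
from scipy.special import log_ndtr

x = 3.0
assert math.isclose(log_ndtr(-x),
                    math.log1p(-math.exp(log_ndtr(x))), rel_tol=1e-12)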
Example #16
def main():
    pass  # For compatibility between running under Spyder and the CLI

    #%% load data

    fname = os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')
    Y = cm.load(fname).astype(np.float32)
    # used as a background image
    Cn = cm.local_correlations(Y.transpose(1, 2, 0))
    #%% set up some parameters

    # frame rate (Hz)
    fr = 10
    # approximate length of transient event in seconds
    decay_time = 0.5
    # expected half size of neurons
    gSig = [6, 6]
    # order of AR indicator dynamics
    p = 1
    # minimum SNR for accepting new components
    min_SNR = 3.5
    # correlation threshold for new component inclusion
    rval_thr = 0.90
    # number of background components
    gnb = 3

    # set up some additional supporting parameters needed for the algorithm (these are default values but change according to dataset characteristics)

    # number of shapes to be updated each time (put this to a finite small value to increase speed)
    max_comp_update_shape = np.inf
    # maximum number of expected components used for memory pre-allocation (exaggerate here)
    expected_comps = 50
    # number of timesteps to consider when testing new neuron candidates
    N_samples = np.ceil(fr * decay_time)
    # exceptionality threshold
    thresh_fitness_raw = log_ndtr(-min_SNR) * N_samples
    # total length of file
    T1 = Y.shape[0]

    # set up CNMF initialization parameters

    # merging threshold, max correlation allowed
    merge_thresh = 0.8
    # number of frames for initialization (presumably from the first file)
    initbatch = 400
    # size of patch
    patch_size = 32
    # amount of overlap between patches
    stride = 3
    # max number of components in each patch
    K = 4

    #%% obtain initial batch file used for initialization
    # memory map file (not needed)
    fname_new = Y[:initbatch].save(os.path.join(caiman_datadir(),
                                                'example_movies', 'demo.mmap'),
                                   order='C')
    Yr, dims, T = cm.load_memmap(fname_new)
    images = np.reshape(Yr.T, [T] + list(dims), order='F')
    Cn_init = cm.local_correlations(np.reshape(Yr, dims + (T, ), order='F'))

    #%% RUN (offline) CNMF algorithm on the initial batch
    pl.close('all')
    cnm_init = cnmf.CNMF(2,
                         k=K,
                         gSig=gSig,
                         merge_thresh=merge_thresh,
                         fr=fr,
                         p=p,
                         rf=patch_size // 2,
                         stride=stride,
                         skip_refinement=False,
                         normalize_init=False,
                         options_local_NMF=None,
                         minibatch_shape=100,
                         minibatch_suff_stat=5,
                         update_num_comps=True,
                         rval_thr=rval_thr,
                         thresh_fitness_delta=-50,
                         gnb=gnb,
                         decay_time=decay_time,
                         thresh_fitness_raw=thresh_fitness_raw,
                         batch_update_suff_stat=False,
                         max_comp_update_shape=max_comp_update_shape,
                         expected_comps=expected_comps,
                         dview=None,
                         min_SNR=min_SNR)

    cnm_init = cnm_init.fit(images)

    print(('Number of components:' + str(cnm_init.estimates.A.shape[-1])))

    pl.figure()
    crd = plot_contours(cnm_init.estimates.A.tocsc(), Cn_init, thr=0.9)

    #%% run (online) OnACID algorithm

    cnm = deepcopy(cnm_init)
    cnm.params.data['dims'] = (60, 80)
    cnm._prepare_object(np.asarray(Yr), T1)

    t = initbatch

    Y_ = cm.load(fname)[initbatch:].astype(np.float32)
    for frame_count, frame in enumerate(Y_):
        cnm.fit_next(t, frame.copy().reshape(-1, order='F'))
        t += 1

#%% extract the results

    C, f = cnm.estimates.C_on[gnb:cnm.M], cnm.estimates.C_on[:gnb]
    A, b = cnm.estimates.Ab[:, gnb:cnm.M], cnm.estimates.Ab[:, :gnb]
    print(('Number of components:' + str(A.shape[-1])))

    #%% pass through the CNN classifier with a low threshold (keeps clearer neuron shapes and excludes processes)
    use_CNN = True
    if use_CNN:
        # threshold for CNN classifier
        thresh_cnn = 0.1
        from caiman.components_evaluation import evaluate_components_CNN
        predictions, final_crops = evaluate_components_CNN(
            A,
            dims,
            gSig,
            model_name=os.path.join(caiman_datadir(), 'model', 'cnn_model'))
        keep = predictions[:, 1] >= thresh_cnn
        A_exclude, C_exclude = A[:, ~keep], C[~keep]
        A, C = A[:, keep], C[keep]
        noisyC = cnm.estimates.noisyC[gnb:cnm.M]
        YrA = noisyC[keep] - C
    else:
        YrA = cnm.estimates.noisyC[gnb:cnm.M] - C

#%% plot results
    pl.figure()
    crd = cm.utils.visualization.plot_contours(A, Cn, thr=0.9)

    view_patches_bar(Yr, A, C, b, f, dims[0], dims[1], YrA, img=Cn)
Example #17
def log_p_exp_gauss(t, tau, sigma):
    """
    (Logarithm of) the exponential pdf with scale tau convolved with a normal
    of scale sigma.
    """
    return -np.log(tau) - t/tau + 1/2 * (sigma / tau) ** 2 + special.log_ndtr(t / sigma - sigma / tau)
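The closed form can be spot-checked by integrating the exponential-normal convolution numerically (a sketch, assuming numpy and scipy as above):

import numpy as np
from scipy import integrate, stats

t, tau, sigma = 1.3, 2.0, 0.5
closed = np.exp(log_p_exp_gauss(t, tau, sigma))
numeric, _ = integrate.quad(
    lambda s: (1 / tau) * np.exp(-s / tau) * stats.norm.pdf(t - s, scale=sigma),
    0, np.inf)
assert np.isclose(closed, numeric, rtol=1e-6)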
Example #18
    #%% set up some parameters

    fr = 10                                                             # frame rate (Hz)
    decay_time = 0.5                                                    # approximate length of transient event in seconds
    gSig = [6,6]                                                        # expected half size of neurons
    p = 1                                                               # order of AR indicator dynamics
    min_SNR = 3.5                                                       # minimum SNR for accepting new components
    rval_thr = 0.90                                                     # correlation threshold for new component inclusion
    gnb = 3                                                             # number of background components

    # set up some additional supporting parameters needed for the algorithm (these are default values but change according to dataset characteristics)

    max_comp_update_shape = np.inf                                      # number of shapes to be updated each time (put this to a finite small value to increase speed)
    expected_comps = 250                                                 # maximum number of expected components used for memory pre-allocation (exaggerate here)
    N_samples = np.ceil(fr*decay_time)                                  # number of timesteps to consider when testing new neuron candidates
    thresh_fitness_raw = log_ndtr(-min_SNR)*N_samples                   # exceptionality threshold
    T1 = Y.shape[0]                                                     # total length of file

    # set up CNMF initialization parameters 

    merge_thresh = 0.8                                                  # merging threshold, max correlation allowed
    initbatch = 20000                                                     # number of frames for initialization (presumably from the first file)
    patch_size = 32                                                     # size of patch
    stride = 3                                                          # amount of overlap between patches
    K = 4                                                               # max number of components in each patch


#********************************************************************************************************************************
#********************************************************************************************************************************
#********************************************************************************************************************************
#%% obtain initial batch file used for initialization
Example #19
min_SNR = 3.5
# correlation threshold for new component inclusion
rval_thr = 0.90
# number of background components
gnb = 3

# set up some additional supporting parameters needed for the algorithm (these are default values but change according to dataset characteristics)

# number of shapes to be updated each time (put this to a finite small value to increase speed)
max_comp_update_shape = np.inf
# maximum number of expected components used for memory pre-allocation (exaggerate here)
expected_comps = 50
# number of timesteps to consider when testing new neuron candidates
N_samples = np.ceil(fr * decay_time)
# exceptionality threshold
thresh_fitness_raw = log_ndtr(-min_SNR) * N_samples
# total length of file
T1 = Y.shape[0]

# set up CNMF initialization parameters

# merging threshold, max correlation allowed
merge_thresh = 0.8
# number of frames for initialization (presumably from the first file)
initbatch = 400
# size of patch
patch_size = 32
# amount of overlap between patches
stride = 3
# max number of components in each patch
K = 4
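For the defaults in this snippet the exceptionality threshold works out to roughly -41.8: log_ndtr(-3.5) is about -8.37 and N_samples = ceil(10 * 0.5) = 5 (a standalone sketch of the computation above):

import numpy as np
from scipy.special import log_ndtr

N_samples = np.ceil(10 * 0.5)        # 5 timesteps
print(log_ndtr(-3.5) * N_samples)    # ~ -41.8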
Example #20
def _log_ndtr_cpu(x, dtype):
    from scipy import special
    return special.log_ndtr(x).astype(dtype)
Example #21
def log_ndtr_ndtri_exp(y):
    return log_ndtr(ndtri_exp(y))
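ndtri_exp (available in SciPy >= 1.9) is the inverse of log_ndtr, so this composition is a round trip that stays accurate even where the plain cdf would underflow (a sketch):

import numpy as np
from scipy.special import log_ndtr, ndtri_exp

y = np.array([-1e-3, -10.0, -1e4])
assert np.allclose(log_ndtr(ndtri_exp(y)), y, rtol=1e-9)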
Example #22
def normcdfln(x): # pragma: no cover
    return special.log_ndtr(x)
Example #24
def Caiman_online(root):
    for k, ele in enumerate(root.winfo_children()):
        if k > 0: ele.destroy()

    #print '...text************'

    root.minsize(width=1000, height=600)
    root.data = emptyObject()
    #root.data_folder root.data.root_dir =  '/media/cat/4TB/in_vivo/rafa/alejandro/G2M5/20170511/000/'
    root.data.file_name = ''

    #root.caiman_folder = np.loadtxt('caiman_folder_location.txt',dtype=str)

    #******** Filename Selector
    def button0():
        print("...selecting file...")
        root.data.file_name = tkFileDialog.askopenfilename(
            initialdir=root.data_folder,
            defaultextension=".tif",
            filetypes=(("tif", "*.tif"), ("npy", "*.npy"), ("All Files",
                                                            "*.*")))

        print(root.data.file_name)
        root.data_folder = os.path.split(root.data.file_name)[0]
        np.savetxt('data_folder_location.txt', [root.data_folder], fmt="%s")
        e.delete(0, END)
        e.insert(0, root.data.file_name)
        root.title(os.path.split(root.data.file_name)[1])

    b0 = Button(root, text="Filename:", anchor="w",
                command=button0)  #Label(root, text="Filename: ").grid(row=0)
    b0.place(x=0, y=0)

    e = Entry(root, justify='left')  #text entry for the filename
    e.delete(0, END)
    e.insert(0, root.data.file_name)
    e.place(x=110, y=4, width=600)

    x_offset = 0
    y_offset = 30

    l00 = Label(root, text='_' * 200)
    l00.place(x=x_offset, y=y_offset, height=30, width=1000)

    #******** CNMF Parameters ******************
    #
    x_offset = 0
    y_offset = 55
    l0 = Label(root,
               text='CNMF Initialization Parameters',
               fg="red",
               justify='left')
    l0.place(x=x_offset, y=y_offset, height=30, width=190)

    #Param 1
    x_offset = 10
    y_offset = +80
    l1 = Label(root, text='Merge Threshold')
    l1.place(x=x_offset, y=y_offset, height=30, width=100)

    e1 = Entry(root, justify='left', width=4)  #text entry for the filename
    e1.delete(0, END)
    e1.insert(0, 0.8)
    e1.place(x=x_offset + 103, y=y_offset + 5)
    x_offset += 140

    ##Param 2
    #l2 = Label(root, text='Autoregress order')
    #l2.place(x=x_offset,y=y_offset, height=30,width=130)

    #e2 = Entry(root, justify='left', width=3)       #text entry for the filename
    #e2.delete(0, END)
    #e2.insert(0, 1)
    #e2.place(x=x_offset+120,y=y_offset+5)
    #x_offset+=150

    #Param 3
    l3 = Label(root, text='Initial Batch')
    l3.place(x=x_offset, y=y_offset, height=30, width=100)

    e3 = Entry(root, justify='left', width=5)  #text entry for the filename
    e3.delete(0, END)
    e3.insert(0, 20000)
    e3.place(x=x_offset + 88, y=y_offset + 5)
    x_offset += 145

    #Param 4
    l4 = Label(root, text='patch_size')
    l4.place(x=x_offset, y=y_offset, height=30, width=100)

    e4 = Entry(root, justify='left', width=3)  #text entry for the filename
    e4.delete(0, END)
    e4.insert(0, 32)
    e4.place(x=x_offset + 85, y=y_offset + 5)
    x_offset += 160

    #Param 5
    l5 = Label(root, text='stride')
    l5.place(x=x_offset, y=y_offset, height=30, width=40)

    e5 = Entry(root, justify='left', width=3)  #text entry for the filename
    e5.delete(0, END)
    e5.insert(0, 3)
    e5.place(x=x_offset + 38, y=y_offset + 5)
    x_offset += 100

    #Param 6
    l6 = Label(root, text='K')
    l6.place(x=x_offset, y=y_offset, height=30, width=15)

    e6 = Entry(root, justify='left', width=3)  #text entry for the filename
    e6.delete(0, END)
    e6.insert(0, 4)
    e6.place(x=x_offset + 15, y=y_offset + 5)

    #***************************************************
    #Recording Defaults
    #NEW LINE
    x_offset = 0
    y_offset += 50
    print(x_offset, y_offset)
    l_1 = Label(root, text='Recording Defaults', fg="blue", justify='left')
    l_1.place(x=x_offset, y=y_offset, height=30, width=120)

    y_offset += 25
    #Param 2
    l7 = Label(root, text='frame_rate (hz)')
    l7.place(x=x_offset, y=y_offset, height=30, width=110)

    x_offset += 105
    e7 = Entry(root, justify='left', width=4)  #text entry for the filename
    e7.delete(0, END)
    e7.insert(0, 10)
    e7.place(x=x_offset, y=y_offset + 5)

    #Param 3
    x_offset += 30
    l8 = Label(root, text='decay_time (s)')
    l8.place(x=x_offset, y=y_offset, height=30, width=110)

    x_offset += 100
    e8 = Entry(root, justify='left', width=4)  #text entry for the filename
    e8.delete(0, END)
    e8.insert(0, 0.5)
    e8.place(x=x_offset, y=y_offset + 5)

    #Param 3
    x_offset += 50
    l9 = Label(root, text='neuron (pixels)')
    l9.place(x=x_offset, y=y_offset, height=30, width=100)

    x_offset += 100
    e9 = Entry(root, justify='left', width=4)  #text entry for the filename
    e9.delete(0, END)
    e9.insert(0, '6, 6')
    e9.place(x=x_offset, y=y_offset + 5)

    #Param 3
    x_offset += 40
    l10 = Label(root, text='order AR dynamics')
    l10.place(x=x_offset, y=y_offset, height=30, width=145)

    x_offset += 135
    e10 = Entry(root, justify='left', width=3)  #text entry for the filename
    e10.delete(0, END)
    e10.insert(0, 1)
    e10.place(x=x_offset, y=y_offset + 5)

    #Param
    x_offset += 40
    l11 = Label(root, text='min_SNR')
    l11.place(x=x_offset, y=y_offset, height=30, width=65)

    x_offset += 62
    e11 = Entry(root, justify='left', width=4)  #text entry for the filename
    e11.delete(0, END)
    e11.insert(0, 3.5)
    e11.place(x=x_offset, y=y_offset + 5)

    #Param
    x_offset += 40
    l12 = Label(root, text='rval_thr')
    l12.place(x=x_offset, y=y_offset, height=30, width=65)

    x_offset += 60
    e12 = Entry(root, justify='left', width=4)  #text entry for the filename
    e12.delete(0, END)
    e12.insert(0, 0.90)
    e12.place(x=x_offset, y=y_offset + 5)

    #Param
    x_offset += 40
    l13 = Label(root, text='# bkgr comp')
    l13.place(x=x_offset, y=y_offset, height=30, width=105)

    x_offset += 95
    e13 = Entry(root, justify='left', width=4)  #text entry for the filename
    e13.delete(0, END)
    e13.insert(0, 3)
    e13.place(x=x_offset, y=y_offset + 5)

    #***************************************************
    #Temporary Initialization Defaults
    #NEW LINE
    x_offset = 0
    y_offset += 50
    print(x_offset, y_offset)
    l_1 = Label(root,
                text='Initialization Defaults',
                fg="green",
                justify='left')
    l_1.place(x=x_offset, y=y_offset, height=30, width=140)

    y_offset += 30
    #Param
    x_offset = 0
    x_width = 120
    l14 = Label(root, text='# updated shapes')
    l14.place(x=x_offset, y=y_offset, height=30, width=x_width)

    x_offset += x_width
    e14 = Entry(root, justify='left', width=4)  #text entry for the filename
    e14.delete(0, END)
    e14.insert(0, 'inf')
    e14.place(x=x_offset, y=y_offset + 5)

    #Param
    x_offset += 45
    x_width = 125
    l15 = Label(root, text='# expected shapes')
    l15.place(x=x_offset, y=y_offset, height=30, width=x_width)

    x_offset += x_width
    e15 = Entry(root, justify='left', width=4)  #text entry for the filename
    e15.delete(0, END)
    e15.insert(0, 2000)
    e15.place(x=x_offset, y=y_offset + 5)

    #Param
    x_offset += 45
    x_width = 80
    l16 = Label(root, text='# timesteps')
    l16.place(x=x_offset, y=y_offset, height=30, width=x_width)

    x_offset += x_width
    e16 = Entry(root, justify='left', width=4)  #text entry for the filename
    e16.delete(0, END)
    N_samples = np.ceil(float(e7.get()) * float(e8.get()))
    e16.insert(0, N_samples)
    e16.place(x=x_offset, y=y_offset + 5)

    #Param
    from scipy.special import log_ndtr
    x_offset += 45
    x_width = 140
    l17 = Label(root, text='exceptionality thresh')
    l17.place(x=x_offset, y=y_offset, height=30, width=x_width)

    x_offset += x_width
    e17 = Entry(root, justify='left', width=5)  #text entry for the filename
    e17.delete(0, END)
    e17.insert(0, log_ndtr(-float(e11.get())) * N_samples)
    e17.place(x=x_offset, y=y_offset + 5)

    #Param
    x_offset += 55
    x_width = 105
    l18 = Label(root, text='total len of file')
    l18.place(x=x_offset, y=y_offset, height=30, width=x_width)

    x_offset += x_width
    e18 = Entry(root, justify='left', width=5)  #text entry for the filename
    e18.delete(0, END)
    e18.insert(0, 'all')
    e18.place(x=x_offset, y=y_offset + 5)

    y_offset += 30
    x_offset = 0
    l000 = Label(root, text='_' * 200)
    l000.place(x=x_offset, y=y_offset, height=30, width=1000)

    #********** COMMAND LINE OUTPUT BOX **********
    tkinter_window = False  #Redirect command line outputs to text box in tkinter;
    if tkinter_window:
        t = Text(root, wrap='word', height=20, width=100)
        t.place(x=10, y=250, in_=root)

    #********* DEMO_ONACID BUTTON **********************
    def button1(l):
        l.config(foreground='red')
        root.update()

        #Save existing config file
        np.savez(str(root.data.file_name)[:-4] + "_runtime_params",
            merge_thr=e1.get(),                    # merge threshold
            initbatch=e3.get(),                    # initial batch
            patch_size=e4.get(),                   # patch size
            stride=e5.get(),                       # stride
            K=e6.get(),                            # K
            frame_rate=e7.get(),                   # frame rate
            decay_time=e8.get(),                   # decay time
            neuron_size=e9.get(),                  # neuron size, in pixels
            AR_dynamics=e10.get(),                 # AR dynamics order
            min_SNR=e11.get(),                     # min SNR
            rval_threshold=e12.get(),              # rval threshold
            no_bkgr_components=e13.get(),          # number of background components
            no_updated_shapes=e14.get(),           # number of updated shapes
            no_expected_shapes=e15.get(),          # number of expected shapes
            no_timesteps=e16.get(),                # number of timesteps
            exceptionality_threshold=e17.get(),    # exceptionality threshold
            total_len_file=e18.get(),              # total length of file
            caiman_location=str(root.caiman_folder))
        print(type(str(root.caiman_folder)))
        print(type(root.caiman_folder))

        if tkinter_window:
            import io, subprocess
            #proc = subprocess.Popen(["python", "-u", "/home/cat/code/CaImAn/demo_OnACID.py", root.data.file_name], stdout=subprocess.PIPE)
            proc = subprocess.Popen([
                "python", "-u",
                str(root.caiman_folder) + "/demo_OnACID_2.py",
                root.data.file_name
            ],
                                    stdout=subprocess.PIPE)

            while True:
                line = proc.stdout.readline()
                if line != '':
                    t.insert(END, '%s\n' % line.rstrip())
                    t.see(END)
                    t.update_idletasks()
                    sys.stdout.flush()
                else:
                    break
        else:
            print("python -u ")
            print(root.caiman_folder)
            #p = os.system("python -u "+str(root.caiman_folder)+"/demo_OnACID_2.py "+root.data.file_name)
            print("python -u " + str(root.caiman_folder) +
                  "/demo_OnACID_2.py " + str(root.data.file_name))
            p = os.system("python -u " + str(root.caiman_folder) +
                          "/demo_OnACID_2.py " + str(root.data.file_name))

    l = Label(root, textvariable='green', fg='red')
    b1 = Button(root,
                text="demo_OnACID",
                foreground='blue',
                command=lambda: button1(l))
    b1.place(x=0, y=y_offset + 50, in_=root)