Example #1
0
    def func(abg):
        # Negative log-likelihood of the binned spike data as a function of
        # the parameter vector abg = (alpha, beta, gamma).

        # Re-discretize the solver grid for this parameter guess:
        x_lower = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg, S._theta)
        time_step = FPMultiPhiSolver.calculate_dt(S._dx, abg, x_lower, factor = 2.)
        S.rediscretize(S._dx, time_step, S.getTf(), x_lower)

        # Forward solve and locate the observed spike times on the grid:
        Fs = S.c_solve(abg)
        spike_t_indexes = mleBinnedTrain.getTindxs(S._ts)

        # ISI density = -d/dt of the survivor function at the threshold
        # slice; blend the two bracketing phase rows with their weights:
        density = -diff(Fs[:, :, -1], axis=1) / S._dt
        likelihoods = (density[minus_idxs, spike_t_indexes] * minus_weights
                       + density[plus_idxs, spike_t_indexes] * plus_weights)

        # Floor non-positive entries so the log below stays finite:
        if amin(likelihoods) <= .0:
            likelihoods[likelihoods <= .0] = 1e-8

        # Objective to minimize:
        return -sum(log(likelihoods))
    def func(abg):
        xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg)
        dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin)
        S.rediscretize(S._dx, dt, S.getTf(), xmin)
        
        'Solve it:'
        Fss = S.c_solve(abg);
        Fs = Fss[:,:,-1];
        Ss = S.transformSurvivorData(binnedTrain)
        Ls = Fs - Ss
        
        'Return '
        G = .5*sum(Ls*Ls)*S._dt 
        
        print 'abg = ', abg, '; G=', G
#        G = .0;
#        for phi, phi_idx in zip(S._phis, xrange(S._num_phis() )):
#            unique_Is = bins[phi]['unique_Is']
#            SDF = bins[phi]['SDF']
#            Ls = SDF - interp(unique_Is, ts, Fs[phi_idx,:])
#            G += sum(Ls*Ls) 
        
        del Fss, Fs, Ss, Ls;
        
        return G
    def func(abgt):
        # L2 misfit objective for the augmented parameter vector abgt;
        # the first three entries are (alpha, beta, gamma), the fourth is
        # presumably a time constant consumed by solve_tau -- confirm.
        abg = abgt[:3]

        # The grid depends only on the first three parameters:
        x_lower = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg)
        time_step = FPMultiPhiSolver.calculate_dt(S._dx, abg, x_lower)
        S.rediscretize(S._dx, time_step, S.getTf(), x_lower)

        # Solve the tau-augmented forward problem; threshold slice only:
        model_survivors = S.solve_tau(abgt, visualize=False)[:, :, -1]
        residuals = model_survivors - S.transformSurvivorData(binnedTrain)

        # Squared-error objective, scaled by the time step:
        return .5 * sum(residuals * residuals) * S._dt
def obtainRenewalDensities(phis, abg, theta, Tf):
    '''Compute the first-passage (renewal) densities for each phase in phis.

    Parameters:
        phis  -- phases to solve for
        abg   -- parameter vector (alpha, beta, gamma)
        theta -- modulation parameter passed to the solver
        Tf    -- final time of the solve

    Returns (g, ts, t_mids): the finite-difference densities g, the solver
    time grid ts, and the grid midpoints t_mids where g is centered.
    '''
    xmin = FPMultiPhiSolver.calculate_xmin(Tf, abg)
    dx = .025
    dt = FPMultiPhiSolver.calculate_dt(dx, abg, xmin, factor = 5.)
    S = FPMultiPhiSolver(theta, phis,
                             dx, dt, Tf, xmin)

    Fs = S.solve(abg)

    # Survivor function at the threshold slice, one row per phase:
    iG = squeeze(Fs[:, :, -1])

    # Density = -d/dt of the survivor function (finite differences):
    g = -diff(iG) / S._dt

    # Midpoints of the time grid, where the finite differences live:
    t_mids = (S._ts[1:] + S._ts[:-1]) / 2.0

    return g, S._ts, t_mids
Example #5
0
            def loglikelihood(abg):
                # Negated log-likelihood of the binned spikes for the
                # candidate parameters abg = (alpha, beta, gamma).

                # Re-discretize the solver for this guess:
                x_lb = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg, S._theta)
                step = FPMultiPhiSolver.calculate_dt(S._dx, abg, x_lb)
                S.rediscretize(S._dx, step, S.getTf(), x_lb)

                # Forward solve; pick out the observed spike-time bins:
                Fs = S.c_solve(abg)
                spike_t_indexes = mleBinnedTrain.getTindxs(S._ts)

                # ISI density = -d/dt of the threshold-slice survivor
                # function, blended between the bracketing phase rows:
                density = -diff(Fs[:, :, -1], axis=1) / S._dt
                likelihoods = (density[minus_idxs, spike_t_indexes] * minus_weights
                               + density[plus_idxs, spike_t_indexes] * plus_weights)

                # NOTE(review): no positivity floor here, so log() yields
                # nan/-inf if any likelihood is non-positive -- confirm
                # this is intended for the calling optimizer.
                return -sum(log(likelihoods))
    def func(abg):
        xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg, S._theta)
        dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin)
        S.rediscretize(S._dx, dt, S.getTf(), xmin)
        
        'Solve it:'
        Fss = S.c_solve(abg)
        Fs = Fss[:,:,-1]
        Ss = S.transformSurvivorData(binnedTrain)
        lSups = amax(abs(Fs - Ss) , axis = 1)

        'Return '
        G = dot(weight_vector, lSups)
        
        if verbose:
            print 'abg = ', abg, '; G=', G
        return G
    def func(abg):
        xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg)
        dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin)
        S.rediscretize(S._dx, dt, S.getTf(), xmin)
        
        'Solve it:'
        Fss = S.c_solve(abg)
        Fs = Fss[:,:,-1]
        Ss = S.transformSurvivorData(binnedTrain)
        Ls = Fs - Ss

        'Return '
        G = .5*sum( dot(weight_vector, Ls*Ls) )*S._dt / sum(weight_vector)
        
#        'clean up'
#        del Fss, Fs, Ss, Ls;
        
        print 'abg = ', abg, '; G=', G
        return G
Example #8
0
def calculateExactSDFs():
    N_phi = 4;
    print 'N_phi = ', N_phi
    
    phis =  2*pi*array([0,.25, .5, .75])
       
    
    for regime_idx, regime_name in enumerate(['superT', 'superSin', 'crit','subT']):
    #        for regime_name in ['superT']:
        regime_label = 'sinusoidal_spike_train_N=1000_' + regime_name + '_12' 
        binnedTrain = BinnedSpikeTrain.initFromFile(regime_label, phis)
        Tf = binnedTrain.getTf()
        print 'Tf = ', Tf
        
        theta = binnedTrain.theta;
        print 'theta = ', theta
        ps = binnedTrain._Train._params
        abg_true = array((ps._alpha, ps._beta, ps._gamma))
        abg = abg_true
        xmin = FPMultiPhiSolver.calculate_xmin(Tf, abg, theta)
#        dx = .0125#        dx = .0125; ;
        dx = .0125; 
        dt = FPMultiPhiSolver.calculate_dt(dx, abg, xmin, factor = 5.)
        print 'xmin = ', xmin, ', dx, dt = ', dx, dt
        
        S = FPMultiPhiSolver(theta, phis,
                         dx, dt, Tf, xmin)
   
        S.setTf(Tf)
        Fs = S.c_solve(abg)
        ts = S._ts;
         
        
        filename= RESULTS_DIR + '/Fs_%s'%regime_name
        print 'saving Fs to ', filename
        savez(filename, ts=ts,
                         Gs=squeeze(Fs[:,:,-1]),
                         phis=phis,
                         Tf = Tf);
def MixedEstimator(abg_init, binnedTrain, dp_tol = 1e-2):
    phis = binnedTrain.bins.keys();
    theta = binnedTrain.theta

    dp = dp_tol*2.0;
    abg = abg_init
    
    while dp > dp_tol:
        Tf = binnedTrain.getTf()
   
        xmin = FPMultiPhiSolver.calculate_xmin(Tf, abg)
        dx = FPMultiPhiSolver.calculate_dx(abg, xmin)
        dt = FPMultiPhiSolver.calculate_dt(dx, abg, xmin, factor = 8.)

        S = FPMultiPhiSolver(theta, phis,
                             dx, dt, Tf, xmin)

        Fs = S.solve(abg, visualize=False)
        Ss = S.transformSurvivorData(binnedTrain)
        Ls = Fs[:,:,-1] - Ss
        Nus = S.solveAdjoint(abg, Ls)
    
        dGdp = S.estimateParameterGradient(abg, Fs, Nus)

        from numpy.linalg.linalg import norm
        
        dG_normalized = dGdp/ norm(dGdp) 
        
        dp = FortetLineEstimator(binnedTrain, abg, dG_normalized, dp_tol)
        
        abg = abg - dp*dG_normalized

        print 'dG = ', dG_normalized
        print 'dp = ', dp
        print 'abg = (%.3g, %.3g, %.3g)'%(abg[0],abg[1],abg[2])
        print '-'

    return abg
Example #10
0
def adapted_sandbox(save_figs=False):
    '''Experiment: accuracy of ISI-density approximations across regimes.

    For each of four spiking regimes, draws N_samples random
    (phi, interval) pairs, solves the FP system at the true parameters and
    compares the "exact" density at phi_star against (a) the nearest grid
    phase phi_m and (b) a weighted blend of the bracketing grid phases.
    Plots per-regime error traces and saves the raw densities/errors as
    .npy files.

    NOTE(review): save_figs is unused -- figures are shown, never saved;
    confirm intended.
    '''
#    file_name = 'sinusoidal_spike_train_N=1000_subT_11'
#    file_name = 'sinusoidal_spike_train_N=1000_superSin_13'
    N_samples = 16;

    N_phi_per_quarter = 4;
    N_phi = 4*N_phi_per_quarter;   # NOTE(review): N_phi is unused below
    normalized_phis =  getAdaptedPhis(N_phi_per_quarter) 
    # Result banks: 4 regimes x {exact, nearest, weighted} x samples:
    diFs = empty((4, 3, N_samples))
    errors = empty((4, 2, N_samples))
    
    seed(2013)  # fixed seed for reproducible interval sampling
    for regime_idx, tag in enumerate(['superSin', 'crit', 'superT', 'subT']):
        file_name = 'sinusoidal_spike_train_N=1000_%s_13'%tag
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, normalized_phis)
        theta = binnedTrain.theta;
        # Phases are stored normalized to [0,1); rescale via 2*pi/theta:
        real_phis = normalized_phis * 2.0 * pi / theta;
                
        for sample_idx in xrange(N_samples): 
            # Draw a random observed (phase, interspike-interval) pair:
            phi_star,I_star = binnedTrain.getRandomPhiInterval()
            
            print 'phi_star, I_star: ', phi_star, I_star
            
            # Nearest grid phase and the two bracketing grid phases:
            phi_m, phi_minus, phi_plus = getApproximatePhis(phi_star, real_phis,theta)
            delta_phi_minus_weight, delta_phi_plus_weight = getDeltaPhiWeights(phi_star, phi_minus, phi_plus)
            
            solver_phis = [phi_star, phi_m, phi_minus, phi_plus]
            
            # True generating parameters:
            ps = binnedTrain._Train._params
            abg = array((ps._alpha, ps._beta, ps._gamma))
            
            # Solve just past the sampled interval:
            Tf = I_star + .2;
            
            dx = .025; 
            x_min = FPMultiPhiSolver.calculate_xmin(Tf, abg, theta)
            dt = FPMultiPhiSolver.calculate_dt(dx,abg, x_min, factor = 1.0)
            
            S = FPMultiPhiSolver(theta, solver_phis,
                                    dx, dt, Tf, x_min)
            S.setTf(Tf)
            
            # Survivor functions at the threshold slice, one row per phi:
            Fth = S.c_solve(abg)[:,:,-1]
            ts = S._ts;
            
            # Grid indices bracketing I_star:
            tm_idx, tp_idx = gettsIndex(ts, I_star)
            
            # Finite-difference densities -dF/dt around I_star:
            diF_star = -diff(Fth[0, [tm_idx, tp_idx]]) / (S._dt)
            diF_m    = -diff(Fth[1, [tm_idx, tp_idx]]) / (S._dt)
            diF_plus_minus    = -(diff(Fth[2, [tm_idx, tp_idx]])*delta_phi_minus_weight + \
                                  diff(Fth[3, [tm_idx, tp_idx]])*delta_phi_plus_weight)/ (S._dt)
            
#            print 'di_F: %.4f,%.4f,%.4f' %(diF_star, diF_m, diF_plus_minus)
#            print 'error: %.4f,%.4f' %(abs(diF_star - diF_m), abs(diF_star-diF_plus_minus) )
            
            diFs[regime_idx, :, sample_idx] = r_[diF_star, diF_m, diF_plus_minus]
            errors[regime_idx, :, sample_idx] = r_[abs(diF_star - diF_m), abs(diF_star-diF_plus_minus)]
            
        figure()
        plot(errors[regime_idx, 0,:], 'b', label='F_m')
        plot(errors[regime_idx, 1,:], 'r', label='F_min + F_plus')
        legend(); title(tag, fontsize = 32)
    
    from numpy import save
    save('Ls_adapted',diFs)
    save('errors_adapted', errors)
Example #11
0
def thetas_sandbox(save_figs=False):
    '''Experiment: likelihood-approximation errors as a function of theta.

    For each theta in {10, 20}, loads one pre-generated critical-regime
    spike train and, for N_samples random (phi, interval) pairs, compares
    the "exact" likelihood L_star against the nearest-phase (L_m),
    weighted bracketing-phase (L_plus_minus) and gradient-corrected
    (L_gradphi -- currently aliased to L_plus_minus) approximations.
    Saves the raw likelihoods and errors as .npy files.

    NOTE(review): save_figs is unused -- figures are shown, never saved.
    '''
    N_samples = 100;
    N_phi = 64;
    # Phase grid: midpoints of N_phi equal bins of [0, 1):
    normalized_phis =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    
    #results banks:
#    seed(2013)
    thetas = [10, 20]
    likelihoods = empty((len(thetas), 4, N_samples))
    errors = empty((len(thetas), 3, N_samples))
    
    base_name = 'sinusoidal_spike_train_N=1000_critical_theta='
    for regime_idx, theta in enumerate(thetas):    
        sample_id = 17
        regime_name = 'theta%d'%theta   # NOTE(review): unused below
        regime_label = base_name + '%d'%theta            
        file_name = regime_label + '_%d'%sample_id
        print file_name
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, normalized_phis)
        # NOTE(review): overwrites the loop variable theta with the train's
        # stored value -- presumably equal to the requested one; confirm.
        theta = binnedTrain.theta;
        real_phis = normalized_phis * 2.0 * pi / theta;
        
        for sample_idx in xrange(N_samples): 
            # Draw a random observed (phase, interspike-interval) pair:
            phi_star,I_star = binnedTrain.getRandomPhiInterval()
            print 'phi_star_normalized, I_star: %.3f, %.3f' %(phi_star/ (2*pi/theta), I_star)
            
            # Nearest grid phase and the two bracketing grid phases:
            phi_m, phi_minus, phi_plus = getApproximatePhis(phi_star, real_phis,theta)
            delta_phi_minus_weight, delta_phi_plus_weight = getDeltaPhiWeights(phi_star, phi_minus, phi_plus)
            
            #phi_star_idx = 0; phi_m_idx = 1; etc...
            solver_phis = [phi_star, phi_m, phi_minus, phi_plus]
#            print 'solver_phis = ', solver_phis
#            print 'weights = %.3f, %.3f'%(delta_phi_minus_weight, delta_phi_plus_weight)   
            
            # True generating parameters:
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            
            abg = abg_true
            
            # Solve just past the sampled interval:
            Tf = I_star + .2;
            
            dx = .025; 
            x_min = FPMultiPhiSolver.calculate_xmin(Tf, abg, theta)
            dt = FPMultiPhiSolver.calculate_dt(dx,abg, x_min, factor = 1.0)
            
            S = FPMultiPhiSolver(theta, solver_phis,
                                    dx, dt, Tf, x_min)
            S.setTf(Tf)
            
            # Survivor functions at the threshold slice, one row per phi:
            Fs =  S.c_solve(abg)
            Fth = Fs[:,:,-1]
#            Fth_phis = S.solveFphi(abg, Fs)[:,:,-1]
            ts = S._ts;
            
            # Grid indices bracketing I_star:
            tm_idx, tp_idx = gettsIndex(ts, I_star)
            delta_t = S._dt
            delta_phi = phi_star - phi_m   # NOTE(review): unused (gradphi path is commented out)
            
            #various approximations to the likelihood, L
            L_star = -diff(Fth[0, [tm_idx, tp_idx]]) / (delta_t)
            L_m    = -diff(Fth[1, [tm_idx, tp_idx]]) / (delta_t)
            L_plus_minus    = -(diff(Fth[2, [tm_idx, tp_idx]])*delta_phi_minus_weight + \
                                  diff(Fth[3, [tm_idx, tp_idx]])*delta_phi_plus_weight)/ (delta_t)
#            gradphi_g = -diff(Fth_phis[1, [tm_idx, tp_idx]]) / (delta_t);
#            logL_gradphi = log(L_m) +  delta_phi * gradphi_g / L_m
#            L_gradphi = exp(logL_gradphi)
            # Gradient-corrected variant is disabled above; alias it so
            # the result banks keep a fixed 4-row layout:
            L_gradphi = L_plus_minus
              
            
#            print 'di_F: %.4f,%.4f,%.4f' %(diF_star, diF_m, diF_plus_minus)
#            print 'error: %.4f,%.4f' %(abs(diF_star - diF_m), abs(diF_star-diF_plus_minus) )
            
            likelihoods[regime_idx, :, sample_idx] = r_[L_star,
                                                         L_m,
                                                          L_plus_minus,
                                                           L_gradphi]
            errors[regime_idx, :, sample_idx] = r_[abs(L_star - L_m),
                                                    abs(L_star-L_plus_minus),
                                                    abs(L_star - L_gradphi)]
            
        figure()
        plot(errors[regime_idx, 0,:], 'b', label='F_m')
        plot(errors[regime_idx, 1,:], 'r', label='F_min + F_plus')
        legend(); title('theta= %.2f'%theta, fontsize = 32)
    
    from numpy import save
    save('likelihoods_thetas',likelihoods)
    save('errors_thetas', errors)
Example #12
0
def supersin_sandbox(N_phi = 32):
    '''Experiment: likelihood-approximation errors in the superSin regime.

    Draws N_samples random (phi, interval) pairs, rotating to a freshly
    chosen random train every N_sub_samples draws, and compares the
    "exact" likelihood L_star against the nearest-phase (L_m), weighted
    bracketing-phase (L_plus_minus) and phi-gradient-corrected
    (L_gradphi) approximations.  Saves the raw likelihoods and errors as
    .npy files and plots the first two error traces.
    '''
    N_sub_samples = 10
    N_samples = N_sub_samples*5;

    # Phase grid: midpoints of N_phi equal bins of [0, 1):
    normalized_phis =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    
    #results banks:
    # NOTE(review): sized for 4 regimes, but only one tag is iterated below.
    likelihoods = empty((4, 4, N_samples))
    errors = empty((4, 3, N_samples))
    
    seed(2013)  # reproducible train choice and interval sampling
    for regime_idx, tag in enumerate(['superSin']):
        file_name = 'sinusoidal_spike_train_N=1000_%s_22'%(tag)
        print file_name
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, normalized_phis)
        theta = binnedTrain.theta;
        # Phases are stored normalized to [0,1); rescale via 2*pi/theta:
        real_phis = normalized_phis * 2.0 * pi / theta;
    
        for sample_idx in xrange(N_samples):
            # Every N_sub_samples draws, switch to a random training file:
            if 0 == mod(sample_idx, N_sub_samples):
                train_id = randint(1,101) 
                file_name = 'sinusoidal_spike_train_N=1000_%s_%d'%(tag, train_id)
                print file_name
                binnedTrain = BinnedSpikeTrain.initFromFile(file_name, normalized_phis)
                
            # Draw a random observed (phase, interspike-interval) pair:
            phi_star,I_star = binnedTrain.getRandomPhiInterval()
            print 'phi_star_normalized, I_star: %.3f, %.3f' %(phi_star/ (2*pi/theta), I_star)
            
            # Nearest grid phase and the two bracketing grid phases:
            phi_m, phi_minus, phi_plus = getApproximatePhis(phi_star, real_phis,theta)
            delta_phi_minus_weight, delta_phi_plus_weight = getDeltaPhiWeights(phi_star, phi_minus, phi_plus)
            
            #phi_star_idx = 0; phi_m_idx = 1; etc...
            solver_phis = [phi_star, phi_m, phi_minus, phi_plus]
#            print 'solver_phis = ', solver_phis
#            print 'weights = %.3f, %.3f'%(delta_phi_minus_weight, delta_phi_plus_weight)   
            
            # True generating parameters:
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            
            abg = abg_true
            
            # Solve just past the sampled interval:
            Tf = I_star + .2;
            
            dx = .025; 
            x_min = FPMultiPhiSolver.calculate_xmin(Tf, abg, theta)
            dt = FPMultiPhiSolver.calculate_dt(dx,abg, x_min, factor = 1.0)
            
            S = FPMultiPhiSolver(theta, solver_phis,
                                    dx, dt, Tf, x_min)
            S.setTf(Tf)
            
            Fs =  S.c_solve(abg)
            # Threshold-slice survivor functions and their phi-sensitivities:
            Fth = Fs[:,:,-1]
            Fth_phis = S.solveFphi(abg, Fs)[:,:,-1]
            ts = S._ts;
            
            # Grid indices bracketing I_star:
            tm_idx, tp_idx = gettsIndex(ts, I_star)
            delta_t = S._dt
            delta_phi = phi_star - phi_m
            
            #various approximations to the likelihood, L
            L_star = -diff(Fth[0, [tm_idx, tp_idx]]) / (delta_t)
            L_m    = -diff(Fth[1, [tm_idx, tp_idx]]) / (delta_t)
            L_plus_minus    = -(diff(Fth[2, [tm_idx, tp_idx]])*delta_phi_minus_weight + \
                                  diff(Fth[3, [tm_idx, tp_idx]])*delta_phi_plus_weight)/ (delta_t)
            # First-order correction of log L_m in the phi direction:
            gradphi_g = -diff(Fth_phis[1, [tm_idx, tp_idx]]) / (delta_t);
            logL_gradphi = log(L_m) +  delta_phi * gradphi_g / L_m
            L_gradphi = exp(logL_gradphi)  
            
            'sanity check'
            # Compare the finite-difference phi-sensitivity against the
            # adjoint-computed one; report only sign disagreements:
            approx_Fth_phi = .5 * sum(Fth[0, [tm_idx, tp_idx]] - Fth[1, [tm_idx, tp_idx]]) / (phi_star - phi_m)
            lFth_phi = .5* (sum(Fth_phis[1, [tm_idx, tp_idx]]))  
            if (approx_Fth_phi * lFth_phi < .0):
                print 'sanity inverse: approx:%.3f , adjoint_calc: %.3f'%(approx_Fth_phi,lFth_phi) 
            
            if (.0 >= L_star*L_m*L_plus_minus*L_gradphi):
                print 'negative likelihood encountered'
#            print 'di_F: %.4f,%.4f,%.4f' %(diF_star, diF_m, diF_plus_minus)
#            print 'error: %.4f,%.4f' %(abs(diF_star - diF_m), abs(diF_star-diF_plus_minus) )
            
            likelihoods[regime_idx, :, sample_idx] = r_[L_star,
                                                         L_m,
                                                          L_plus_minus,
                                                          L_gradphi]
            errors[regime_idx, :, sample_idx] = r_[abs(L_star - L_m),
                                                    abs(L_star-L_plus_minus),
                                                    abs(L_star - L_gradphi)]
            
        figure()
        plot(errors[regime_idx, 0,:], 'b', label='F_m')
        plot(errors[regime_idx, 1,:], 'r', label='F_min + F_plus')
        legend(); title(tag, fontsize = 32)
    
    from numpy import save
    save('likelihoods_supersin',likelihoods)
    save('errors_supersin', errors)
def AdjointEstimator():
    '''Driver: set up the FP solver for one critical-regime spike train,
    discretized using the train's true parameters, then run a BFGS-based
    estimator from a data-driven initial guess.  Prints timing and the
    resulting estimate.  Alternative estimators (TNC, Nelder-Mead,
    COBYLA, CG) are kept below, commented out.
    '''
    N_phi = 20;
    print 'N_phi = ', N_phi
    
    # Phase grid: midpoints of N_phi equal bins of [0, 1):
    phis =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    
    file_name = 'sinusoidal_spike_train_N=1000_crit_1'
    print file_name
    
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phis)
    
    # Drop under-populated / too-long bins before estimating:
    phi_omit = None
    binnedTrain.pruneBins(phi_omit, N_thresh = 100, T_thresh = 10.0)
    print 'N_bins = ', len(binnedTrain.bins.keys())
    
    Tf = binnedTrain.getTf()
    print 'Tf = ', Tf
        
    # Keep only the phases that survived pruning:
    phis = binnedTrain.bins.keys();
    theta = binnedTrain.theta
    
    
    # True generating parameters (used here to pick the discretization):
    ps = binnedTrain._Train._params
    abg_true = array((ps._alpha, ps._beta, ps._gamma))
    print 'abg_true = ', abg_true
    
    abg = abg_true
    xmin = FPMultiPhiSolver.calculate_xmin(Tf, abg)
    dx = FPMultiPhiSolver.calculate_dx(abg, xmin)
    dt = FPMultiPhiSolver.calculate_dt(dx, abg, xmin, factor = 8.)
    print 'xmin, dx, dt = ', xmin, dx, dt
    S = FPMultiPhiSolver(theta, phis,
                     dx, dt, Tf, xmin)

    # Data-driven initial guess for the optimizer:
    abg_init = initialize5(binnedTrain)
    print 'abg_init = ', abg_init
        
#    start = time.clock()
#    abg_est = TNCEstimator(S, binnedTrain, abg_init)
#    print 'Est. time = ', time.clock() - start
#    print 'abg_est = ', abg_est
    
#    start = time.clock()
#    abg_est = NMEstimator(S, binnedTrain, abg_init)
#    print 'Est. time = ', time.clock() - start
#    print 'abg_est = ', abg_est
#
#    start = time.clock()
#    abg_est = COBYLAEstimator(S, binnedTrain, abg_init)
#    print 'Est. time = ', time.clock() - start
#    print 'abg_est = ', abg_est

#    start = time.clock()
#    abg_est = CGEstimator(S, binnedTrain, abg_init)
#    print 'Est. time = ', time.clock() - start
#    print 'abg_est = ', abg_est

    start = time.clock()
    abg_est = BFGSEstimator(S, binnedTrain, abg_init)
    print 'Est. time = ', time.clock() - start
    print 'abg_est = ', abg_est