예제 #1
0
def calculateInitSDFs():
    N_phi = 16;
    print 'N_phi = ', N_phi
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    
    for regime_idx, regime_name in enumerate(['superT', 'superSin', 'crit','subT']):
    #        for regime_name in ['superT']:
        filename = RESULTS_DIR + '/Fs_%s.npz'%regime_name 
        npzfile = load(filename)
        ts = npzfile['ts']
        phis = npzfile['phis']
        
        regime_label = 'sinusoidal_spike_train_N=1000_' + regime_name + '_12' 
        binnedTrain = BinnedSpikeTrain.initFromFile(regime_label, phi_norms)
        binnedTrain.pruneBins(None, N_thresh = 16, T_thresh=128.)
           
        ps = binnedTrain._Train._params
        abg = initialize_right_2std(binnedTrain)
#        abg = array((ps._alpha, ps._beta, ps._gamma))
        
        GsInit = calcG_U_Const(abg,ts,phis);
        
        filename= RESULTS_DIR + '/Gs_%s'%regime_name
        print 'saving Gs to ', filename
        savez(filename, GsInit=GsInit);
예제 #2
0
def FP_L2_vs_Sup(N_spikes = 1000, N_trains=20):
    """Batch comparison of the Fokker-Planck L2 vs sup-norm estimators.

    For each of the four regimes and each sample train, records the
    initializer estimate plus the 'FP_L2' and 'FP_Sup' estimates (with
    timings) through a DataHarvester, then prints total batch time in hours.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    
    # Bin centres: N_phi equally spaced points in (0, 1).
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

    batch_start = time.clock()    
    base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes

    D = DataHarvester('FPvsWFP_4x%d_N=%d'%(N_trains,N_spikes))
    # The same T_thresh (64.) is applied to every regime here.
    for regime_name, T_thresh in zip(['subT','superT', 'crit', 'superSin'],
                                     4*[64.]):
        regime_label = base_name + regime_name
            
        for sample_id in xrange(1,N_trains +1):
            file_name = regime_label + '_' + str(sample_id)
            print file_name
            
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            
            #### N_thresh = 10
            binnedTrain.pruneBins(None, N_thresh = 10, T_thresh=T_thresh)
            D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
                        
            # Initializer estimate; clamp beta >= .1 and gamma >= 0.
            abg_init = initialize_right_2std(binnedTrain)
            abg_init[1] = amax([.1, abg_init[1]])
            abg_init[2] = amax([.0, abg_init[2]])
            D.addEstimate(sample_id, 'init_N10', abg_init,.0) 


            # Reload the full train (pruned only at N_thresh = 1) for F-P.
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
                       
            # NOTE(review): dt is derived from abg_true, not abg_init --
            # presumably intentional so both estimators share a fair grid;
            # confirm.
            dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0)            
            theta = binnedTrain.theta            
            binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)

            phis = binnedTrain.bins.keys();

            S = FPMultiPhiSolver(theta, phis,
                                 dx, dt,
                                 binnedTrain.getTf(), X_min = -1.0)            
            
            # Timed L2 estimate:
            start = time.clock()
            abg_est = FPL2Estimator(S,binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP_L2', abg_est, finish-start)
            
            # Timed sup-norm estimate:
            start = time.clock()
            abg_est = FPSupEstimator(S,binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP_Sup', abg_est, finish-start)
                    
    D.closeFile() 
   
    print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
예제 #3
0
def ThetaEstimate(N_spikes = 1000, N_trains=100, N_phi=20, 
                  thetas = [1, 5, 10, 20]):
    """Batch-estimate across driving periods (thetas) in the critical regime.

    For each sample id and each theta, records the initializer estimate,
    the Fortet (sup-norm) estimate and the Fokker-Planck (sup-norm)
    estimate -- with timings and warnflags -- through a DataHarvester.
    Prints the total batch time in hours.
    """
    # NOTE(review): `thetas` is a mutable default argument; harmless here
    # because it is only iterated, never mutated.
    print 'N_phi = ', N_phi
    
    # Bin centres: N_phi equally spaced points in (0, 1).
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

    batch_start = time.clock()
    base_name = 'sinusoidal_spike_train_N=%d_critical_theta='%N_spikes

    T_thresh = 64.
    
    D = DataHarvester('ThetaEstimate_%dx%d_N=%d'%(len(thetas),N_trains,N_spikes))
    for sample_id in xrange(1,N_trains +1):
        for theta in thetas:    
            regime_name = 'theta%d'%theta
            regime_label = base_name + '%d'%theta            
            file_name = regime_label + '_%d'%sample_id
            print file_name
            
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            
            binnedTrain.pruneBins(None, N_thresh = 5, T_thresh=T_thresh)
            D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
                        
            # Initializer estimate; clamp beta >= .1 and gamma >= 0.
            abg_init = initialize_right_2std(binnedTrain)
            abg_init[1] = amax([.1, abg_init[1]])
            abg_init[2] = amax([.0, abg_init[2]])
            D.addEstimate(sample_id, 'Initializer', abg_init,.0, warnflag = 0) 
                       
            #RELOAD ALL DATA (re-bin from file, keep everything at N_thresh=1):
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
            
            #Weighted Fortet (timed):            
            start = time.clock()
            abg_est, warnflag = FortetEstimatorSup(binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'Fortet', abg_est, finish-start, warnflag)
            
            #Weighted F-P (timed); dt is derived from abg_true:
            dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0) 
            phis = binnedTrain.bins.keys();
            S = FPMultiPhiSolver(binnedTrain.theta, phis,
                                 dx, dt,
                                 binnedTrain.getTf(), X_min = -1.0)            
            
            start = time.clock()
            abg_est, warnflag = FPSupEstimator(S, binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP', abg_est, finish-start, warnflag)
                    
    D.closeFile() 
   
    print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
예제 #4
0
def CustomEstimate(spike_trains):   
    D = DataPrinter('')
    for spike_train in spike_trains:
        regime_name = spike_train[0];
        sample_id = spike_train[1]
        N_spikes = spike_train[2];
        N_phi = spike_train[3];
    
        phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

        base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes

        T_thresh = 128.0;
        regime_label = base_name + regime_name
            
        file_name = regime_label + '_' + str(sample_id)
        print file_name
        
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        ps = binnedTrain._Train._params
        abg_true = array((ps._alpha, ps._beta, ps._gamma))
        
        D.setRegime(regime_name,abg_true, Tsim=-1.0)
        
        binnedTrain.pruneBins(None, N_thresh = 5, T_thresh=T_thresh)
        D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
                    
        abg_init = initialize_right_2std(binnedTrain)
        abg_init[1] = amax([.1, abg_init[1]])
        abg_init[2] = amax([.0, abg_init[2]])
        D.addEstimate(sample_id, 'Initializer', abg_init,.0, warnflag = 0) 
                   
        #RELOAD ALL DATA:               
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
        
        #Weighted Fortet:            
        start = time.clock()
        abg_est, warnflag = FortetEstimatorSup(binnedTrain, abg_init, verbose = True)
        finish = time.clock()
        D.addEstimate(sample_id, 'Fortet', abg_est, finish-start, warnflag)
        
        #Weighted F-P:
        dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0) 
        phis = binnedTrain.bins.keys();
        S = FPMultiPhiSolver(binnedTrain.theta, phis,
                             dx, dt,
                             binnedTrain.getTf(), X_min = -1.0)            
        
        start = time.clock()
        abg_est, warnflag = FPSupEstimator(S, binnedTrain, abg_init, verbose = True)
        finish = time.clock()
        D.addEstimate(sample_id, 'FP', abg_est, finish-start, warnflag)
                    
    del D  
예제 #5
0
def TestEstimate(N_spikes = 100, N_trains=5, N_phi=8):
    """Small smoke-test batch: Fortet and F-P estimators on 'superT' samples.

    Same pipeline as the full batch drivers but restricted to one regime
    and reporting through a DataPrinter instead of a DataHarvester.
    Prints the total batch time in hours.
    """
    print 'N_phi = ', N_phi
    
    # Bin centres: N_phi equally spaced points in (0, 1).
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

    batch_start = time.clock()    
    base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes
    
    D = DataPrinter('')

    for regime_name, T_thresh in zip(['superT'],
                                     [64]):
        regime_label = base_name + regime_name
            
        for sample_id in xrange(1,N_trains +1):
            file_name = regime_label + '_' + str(sample_id)
            print file_name
            
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            
            binnedTrain.pruneBins(None, N_thresh = 5, T_thresh=T_thresh)
            D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
                        
            # Initializer estimate; clamp beta >= .1 and gamma >= 0.
            abg_init = initialize_right_2std(binnedTrain)
            abg_init[1] = amax([.1, abg_init[1]])
            abg_init[2] = amax([.0, abg_init[2]])
            D.addEstimate(sample_id, 'Initializer', abg_init,.0, warnflag = 0) 
                       
            #RELOAD ALL DATA (re-bin from file, keep everything at N_thresh=1):
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
            
            #Weighted Fortet (timed):            
            start = time.clock()
            abg_est, warnflag = FortetEstimatorSup(binnedTrain, abg_init, verbose = True)
            finish = time.clock()
            D.addEstimate(sample_id, 'Fortet', abg_est, finish-start, warnflag)
            
            #Weighted F-P (timed); dt is derived from abg_true:
            dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0) 
            phis = binnedTrain.bins.keys();
            S = FPMultiPhiSolver(binnedTrain.theta, phis,
                                 dx, dt,
                                 binnedTrain.getTf(), X_min = -1.0)            
            
            start = time.clock()
            abg_est, warnflag = FPSupEstimator(S, binnedTrain, abg_init, verbose = True)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP', abg_est, finish-start, warnflag)
                    
    del D  
    print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
예제 #6
0
def CvsPyEstimate():
    """Compare the C and pure-Python Fokker-Planck Nelder-Mead estimators.

    Runs both cNMEstimator ('FP-C') and NMEstimator ('FP-PY') on samples
    of the 'subT' and 'superSin' regimes, recording results and timings
    through a DataHarvester.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    
    # Bin centres: N_phi equally spaced points in (0, 1).
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

    # NOTE(review): batch_start is set but never reported in this function.
    batch_start = time.clock()    
    base_name = 'sinusoidal_spike_train_N=1000_'

    # NOTE(review): harvester is named 'CvsPY_2x4' but xrange(1,4) below
    # runs only samples 1..3 per regime -- confirm the intended count.
    D = DataHarvester('CvsPY_2x4')
    for regime_name, T_thresh in zip(['subT', 'superSin'],
                                                       [32, 16.]):
        regime_label = base_name + regime_name
            
        for sample_id in xrange(1,4):
            file_name = regime_label + '_' + str(sample_id)
            print file_name
            
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            
            phi_omit = None
            binnedTrain.pruneBins(phi_omit, N_thresh = 64, T_thresh=T_thresh)
            Tf = binnedTrain.getTf()
            D.addSample(sample_id, Tf, binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
             
            # Timed initializer estimate:
            start = time.clock()
            abg_init = initialize_right_2std(binnedTrain)
            finish = time.clock()
            D.addEstimate(sample_id, 'Initializer', abg_init, finish-start) 
             
            # Solver grid; dt is derived from the true parameters.
            dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0)
            
            phis = binnedTrain.bins.keys();
            theta = binnedTrain.theta
            
            S = FPMultiPhiSolver(theta, phis,
                                 dx, dt,
                                 Tf, X_min = -1.0)

            # C implementation (timed):
            start = time.clock()
            abg_est = cNMEstimator(S, binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP-C', abg_est, finish-start)
               
            # Pure-Python implementation (timed):
            start = time.clock()
            abg_est = NMEstimator(S, binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP-PY', abg_est, finish-start) 
            
        
    D.closeFile()
예제 #7
0
def FortetVsWeightedFortet():
    """Compare plain Fortet (two pruning levels) against weighted Fortet.

    For each regime/sample, records the initializer, 'Fortet10'
    (N_thresh=10 pruning), 'WeghtedFortet' and 'Fortet64' (N_thresh=64
    pruning) estimates with timings via a DataHarvester.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    
    # Bin centres: N_phi equally spaced points in (0, 1).
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

    batch_start = time.clock()    
    base_name = 'sinusoidal_spike_train_N=1000_'

    D = DataHarvester('FvsWF_4x16')
    for regime_name, T_thresh in zip(['superT', 'subT', 'crit', 'superSin'],
                                                       [6., 64, 32., 32.]):
        regime_label = base_name + regime_name
            
        for sample_id in xrange(1,17):
            file_name = regime_label + '_' + str(sample_id)
            print file_name
            
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            
            phi_omit = None
            binnedTrain.pruneBins(phi_omit, N_thresh = 10, T_thresh=T_thresh)
            Tf = binnedTrain.getTf()
            D.addSample(sample_id, Tf, binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
             
            # Timed initializer estimate:
            start = time.clock()
            abg_init = initialize_right_2std(binnedTrain)
            finish = time.clock()
            D.addEstimate(sample_id, 'Initializer', abg_init, finish-start) 
            
            # Plain Fortet on the N_thresh=10 pruning:
            start = time.clock()
            abg_est = FortetEstimator(binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'Fortet10', abg_est, finish-start)
            
            # NOTE(review): the estimate label 'WeghtedFortet' is misspelled;
            # left as-is since downstream analysis may key on this string.
            start = time.clock()
            abg_est = WeightedFortetEstimator(binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'WeghtedFortet', abg_est, finish-start)
            
            # Plain Fortet again on a stricter pruning (N_thresh=64):
            binnedTrain.pruneBins(phi_omit, N_thresh = 64, T_thresh=T_thresh)
            start = time.clock()
            abg_est = FortetEstimator(binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'Fortet64', abg_est, finish-start)
            
        
    D.closeFile() 
   
    print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
예제 #8
0
def MixedDriver():
    """Single-train comparison: Mixed estimator (+Fortet refinement) vs Fortet.

    Loads one hard-coded 'subT' spike train, prints the true and
    initializer parameters, then prints estimates and timings for the
    Mixed estimator, Mixed followed by Fortet, and Fortet alone.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    
    # Bin centres: N_phi equally spaced points in (0, 1).
    phis =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    
#    file_name = 'sinusoidal_spike_train_N=1000_superT_13'
    file_name = 'sinusoidal_spike_train_N=1000_subT_1'
#    file_name = 'sinusoidal_spike_train_N=1000_crit_5'

#    intervalStats(file_name)
    
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phis)

    phi_omit = None
#    phi_omit = r_[(linspace(.15, .45, 4),
#                   linspace(.55,.95, 5) )]  *2*pi/ binnedTrain.theta
    binnedTrain.pruneBins(phi_omit, N_thresh = 80, T_thresh= 16.)
    print 'N_bins = ', len(binnedTrain.bins.keys())
    
    Tf = binnedTrain.getTf() #/ 2.
    print 'Tf = ', Tf

    params = binnedTrain._Train._params
    abg_true = (params._alpha, params._beta, params._gamma)
    print 'true = ',     abg_true

    abg_init = initialize_right_2std(binnedTrain)
    print 'init = ',     abg_init

    
    # Mixed estimator (timed):
    start = time.clock()
    abg_est = MixedEstimator(abg_init, binnedTrain)
    finish = time.clock()
    print 'Mixed est = ', abg_est
    mixed_time = finish-start
    print 'Mixed time = ', mixed_time
    
    # Refine the Mixed result with Fortet; report cumulative time.
    # NOTE(review): 'MIxed+Fortet' below has a capitalization typo in the
    # printed label; left untouched (runtime output string).
    from AdjointEstimator import FortetEstimator 
    start = time.clock()
    abg_est = FortetEstimator(binnedTrain, abg_est)
    finish = time.clock()
    print 'Mixed+Fortet est = ', abg_est
    print 'MIxed+Fortet time = ', finish-start + mixed_time

    #Compare with straight up Fortet:
    start = time.clock()
    abg_est = FortetEstimator(binnedTrain, abg_init)
    finish = time.clock()
    print 'Fortet est = ', abg_est
    print 'Fortet time = ', finish-start
예제 #9
0
def estimateTau(regime = 'crit', number=11,
                     N_thresh = 64, T_thresh = 16. ):
    """Estimate (alpha, beta, gamma, tau) with the tau-characteristic estimator.

    Loads one spike train, runs TaucharEstimator from an initializer guess
    augmented with tau=.5, then runs the tau-free NMEstimator for
    comparison.  Prints both estimates with compute times.

    Returns the 4-component TaucharEstimator result.
    """
    file_name = 'sinusoidal_spike_train_N=1000_' + regime + '_' + str(number)
    print file_name
    
    N_phi = 20;
    print 'N_phi = ', N_phi
    
    # Bin centres: N_phi equally spaced points in (0, 1).
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
    binnedTrain.pruneBins(None, N_thresh, T_thresh)
    print 'N_bins = ', len(binnedTrain.bins.keys())
    
    Tf = binnedTrain.getTf()
    print 'Tf = ', Tf 
    abg_init = initialize_right_2std(binnedTrain)

    
    phis = binnedTrain.bins.keys();
    theta = binnedTrain.theta

    # NOTE(review): dt is computed with x_min=-1.0 but the solver below is
    # built with X_min=-2. -- confirm this mismatch is intentional.
    dx = .02; dt = FPMultiPhiSolver.calculate_dt(dx, abg_init,x_min= -1.0)
                
    S = FPMultiPhiSolver(theta, phis, dx, dt,
                       Tf, X_min = -2.)
        
    # Append an initial tau guess of .5 to the (alpha, beta, gamma) guess.
    abgt_init = [abg_init[0],
                 abg_init[1],
                 abg_init[2],                     
                 .5]
    
    print 'abgt_init = ', abgt_init
    
    # Tau-characteristic estimate (timed):
    start = time.clock()
    abgt_est = TaucharEstimator(S, binnedTrain, abgt_init)
    finish = time.clock()
    
    print 'abgt_est = ', abgt_est
    print 'compute time = ', finish-start
    
    print 'No tau comparison:'
    start = time.clock()
    abg_est = NMEstimator(S, binnedTrain, abg_init)
    finish = time.clock()
    
    print 'abg_est = ', abg_est
    print 'compute time = ', finish-start
    
    return abgt_est
예제 #10
0
def Fortet_SupVsL2(N_spikes = 1000, N_trains = 16):
    """Compare Fortet estimators under L2 vs sup-norm objectives.

    For each regime/sample, records 'FortetL2' and 'FortetSup' estimates
    with timings through a DataHarvester; prints total batch time in hours.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    
    # Bin centres: N_phi equally spaced points in (0, 1).
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

    batch_start = time.clock()    
    base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes

    # NOTE(review): harvester name says '4x...' but only 3 regimes are
    # active below ('subT' variant is commented out) -- confirm.
    D = DataHarvester('Fortet_SupVsL2_4x%d'%N_trains)
#    for regime_name, T_thresh in zip(['subT', 'crit', 'superSin', 'superT'],
#                                     [64., 64, 32., 32.]):
    for regime_name, T_thresh in zip(['crit', 'superSin', 'superT'],
                                     [64, 32., 32.]):
        regime_label = base_name + regime_name
        
            
        for sample_id in xrange(1,N_trains+1):
            file_name = regime_label + '_' + str(sample_id)
            print file_name
            
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            
            phi_omit = None
            binnedTrain.pruneBins(phi_omit, N_thresh = 10, T_thresh=T_thresh)
            D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
                
            abg_init = initialize_right_2std(binnedTrain)
            
            # Reload the full (unpruned) train for the estimators.
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            
            # L2-objective Fortet (timed):
            start = time.clock()
            abg_est = FortetEstimatorL2(binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FortetL2', abg_est, finish-start)
            print abg_est, ' | %.2f'%(finish-start)
            
            # Sup-norm Fortet (timed):
            start = time.clock()
            abg_est = FortetEstimatorSup(binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FortetSup', abg_est, finish-start)
            print abg_est, ' | %.2f'%(finish-start)
        
    D.closeFile() 
   
    print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
예제 #11
0
def ThetaBox(thetas, sample_id = 1):   
    """Run initializer, Fortet (sup) and F-P (sup) estimates for each theta.

    Loads one critical-regime spike train per theta value (fixed
    sample_id) and reports estimates, timings and warnflags through a
    DataPrinter.
    """
    D = DataPrinter('')
    for theta in thetas:
        # Bin centres: N_phi equally spaced points in (0, 1).
        N_phi = 20;
        phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

        file_name = 'sinusoidal_spike_train_N=1000_critical_theta=%d_%d'%(theta, sample_id)
        print file_name
                    
        T_thresh = 128.0;
        
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        ps = binnedTrain._Train._params
        abg_true = array((ps._alpha, ps._beta, ps._gamma))
        print 'ps = ', ps.getParams()
        
        regime_name = 'theta=%d'%int(ps._theta)
        print regime_name
        D.setRegime(regime_name,abg_true, Tsim=-1.0)
        
        binnedTrain.pruneBins(None, N_thresh = 5, T_thresh=T_thresh)
        D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
                    
        # Initializer with built-in beta/gamma capping (cf. manual clamps
        # used by the other drivers in this file).
        abg_init = initialize_right_2std(binnedTrain, cap_beta_gamma=True)
        D.addEstimate(sample_id, 'Initializer', abg_init,.0, warnflag = 0) 
                   
        #RELOAD ALL DATA (re-bin from file, keep everything at N_thresh=1):
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
        
        #Weighted Fortet (timed):            
        start = time.clock()
        abg_est, warnflag = FortetEstimatorSup(binnedTrain, abg_init, verbose = False)
        finish = time.clock()
        D.addEstimate(sample_id, 'Fortet', abg_est, finish-start, warnflag)
        
        #Weighted F-P (timed); dt is derived from abg_true:
        dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0) 
        phis = binnedTrain.bins.keys();
        S = FPMultiPhiSolver(binnedTrain.theta, phis,
                             dx, dt,
                             binnedTrain.getTf(), X_min = -1.0)            
        
        start = time.clock()
        abg_est, warnflag = FPSupEstimator(S, binnedTrain, abg_init, verbose = False)
        finish = time.clock()
        D.addEstimate(sample_id, 'FP', abg_est, finish-start, warnflag)
                    
    del D  
예제 #12
0
def estimateSubT(N_spikes=100, sample_id =4,  T_thresh = 32.):
    N_phi = 20;
    print 'N_phi = ', N_phi
    
    phi_norms =  linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)

    base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes
    regime_name = 'subT'
    regime_label = base_name + regime_name
    file_name = regime_label + '_' + str(sample_id)
    print file_name
            
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
    ps = binnedTrain._Train._params
    print array((ps._alpha, ps._beta, ps._gamma))
    
    #### N_thresh = 10
    binnedTrain.pruneBins(None, N_thresh = 10, T_thresh=T_thresh)
                
    abg_init = initialize_right_2std(binnedTrain)
    print abg_init
    abg_init[1] = amax([.1, abg_init[1]])
    abg_init[2] = amax([.0, abg_init[2]])
            
    dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_init, -1.0)            
    print dx, dt
    theta = binnedTrain.theta            
      
    ##### N_thresh = 1
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
    binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
    phis = binnedTrain.bins.keys();
    S = FPMultiPhiSolver(theta, phis,
                         dx, dt,
                         binnedTrain.getTf(), X_min = -1.0)            
    abg_est = WeightedFPEstimator(S,binnedTrain, abg_init)
    print abg_est
예제 #13
0
def MLEBox(N_spikes = 1000, N_trains=5, N_phi=16):
    print 'N_phi = ', N_phi
    
    N_phi_init = 8;
    phi_norms_init =  linspace(1/(2.*N_phi_init), 1. - 1/ (2.*N_phi_init), N_phi_init)

#    base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes
#    output_file = open('mlebox_output.txt', 'w')
#    for regime_name, T_thresh in zip(['subT', 'crit', 'superSin','superT'],
#                                     [128., 64., 64., 64.]):
    output_file = open('mlebox_output_thetas.txt', 'w')
    thetas = [20]
    base_name = 'sinusoidal_spike_train_N=%d_critical_theta='%N_spikes
    T_thresh = 64.
    for theta in thetas:    
        for sample_id in xrange(1,N_trains +1):
            regime_name = 'theta%d'%theta
            regime_label = base_name + '%d'%theta            
            file_name = regime_label + '_%d'%sample_id
            print file_name
                        
#        regime_label = base_name + regime_name
#            file_name = regime_label + '_' + str(sample_id)
#            print file_name
            
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms_init)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
        
            binnedTrain.pruneBins(None, N_thresh = 8, T_thresh=T_thresh)
            abg_init = initialize_right_2std(binnedTrain)
            abg_init[1] = amax([.1, abg_init[1]])
            abg_init[2] = amax([.0, abg_init[2]])

            abg_fortet, warnflag = FortetEstimatorSup(binnedTrain, abg_init)
            
            #RELOAD ALL DATA:               
            mleBinnedTrain = MLEBinnedSpikeTrain.initFromFile(file_name,
                                                              N_phi)
            
            #MLE F-P:
            dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0) 
            phis = mleBinnedTrain.phi_ms;
            S = FPMultiPhiSolver(binnedTrain.theta, phis,
                                 dx, dt,
                                 binnedTrain.getTf(), X_min = -1.0)
            
            minus_idxs = mleBinnedTrain.phi_minus_indxs
            plus_idxs  = mleBinnedTrain.phi_plus_indxs
            minus_weights = mleBinnedTrain.phi_minus_weights
            plus_weights  = mleBinnedTrain.phi_plus_weights
            
            def loglikelihood(abg):     
                'rediscretize:'
                xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg, S._theta)
                dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin)
                S.rediscretize(S._dx, dt, S.getTf(), xmin)
                'Solve it:'
                Fs = S.c_solve(abg)
                spike_t_indexes = mleBinnedTrain.getTindxs(S._ts)
                'form (approximate) likelihood:'
                pdf = -diff(Fs[:,:,-1], axis = 1) / S._dt;
                likelihoods = pdf[minus_idxs, spike_t_indexes]*minus_weights +\
                              pdf[plus_idxs, spike_t_indexes]*plus_weights
#                if amin(likelihoods) <= .0:
#                    likelihoods[likelihoods<=.0] = 1e-8
                normalized_log_likelihood = sum(log(likelihoods))
                'Return '
                return -normalized_log_likelihood
            #MLE F-P:
#            abg_tnc, abg_cobyla, abg_neldermead = MLEEstimator(S,
#                                                             mleBinnedTrain, abg_init)
            abg_neldermead = MLEEstimator(S,mleBinnedTrain, abg_init)
            #OUTPUTs
            output_file.write('\n' +file_name + ':\n')
            for tag, abg in zip(['init', 'fortet', 'nelder_mead', 'true'],
                                [abg_init, abg_fortet, abg_tnc,
                                 abg_cobyla, abg_neldermead , abg_true]):
                output_file.write(tag + ':' + str(loglikelihood(abg)) + ':' + str(abg) + '\n');