def func(abg): xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg) dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin) S.rediscretize(S._dx, dt, S.getTf(), xmin) 'Solve it:' Fss = S.c_solve(abg); Fs = Fss[:,:,-1]; Ss = S.transformSurvivorData(binnedTrain) Ls = Fs - Ss 'Return ' G = .5*sum(Ls*Ls)*S._dt print 'abg = ', abg, '; G=', G # G = .0; # for phi, phi_idx in zip(S._phis, xrange(S._num_phis() )): # unique_Is = bins[phi]['unique_Is'] # SDF = bins[phi]['SDF'] # Ls = SDF - interp(unique_Is, ts, Fs[phi_idx,:]) # G += sum(Ls*Ls) del Fss, Fs, Ss, Ls; return G
def func(abg):
    """Negative log-likelihood objective (approximate, with clamping).

    For the current iterate abg: re-grids the solver, solves the forward
    F-P problem, differences the survivor function in time to get a pdf,
    and evaluates it at each spike's bracketing phi-bins via the
    precomputed index/weight arrays from the enclosing scope.
    Returns -sum(log(likelihoods)).
    """
    'rediscretize:'
    xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg, S._theta)
    # factor=2. coarsens dt relative to the stability-derived step —
    # presumably a speed/accuracy trade-off; TODO confirm against calculate_dt.
    dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin, factor = 2.)
    # print abg, dt, S._dx, xmin
    S.rediscretize(S._dx, dt, S.getTf(), xmin)
    'Solve it:'
    Fs = S.c_solve(abg)
    # Map each observed spike time onto the solver's time grid:
    spike_t_indexes = mleBinnedTrain.getTindxs(S._ts)
    'form (approximate) likelihood:'
    # pdf = -d/dt of the survivor function (last x-slice), per phi-bin:
    pdf = -diff(Fs[:,:,-1], axis = 1) / S._dt;
    # Linear interpolation in phi between the two bracketing bins:
    likelihoods = pdf[minus_idxs, spike_t_indexes]*minus_weights + \
                  pdf[plus_idxs, spike_t_indexes]*plus_weights
    # Clamp non-positive values so log() below stays finite:
    if amin(likelihoods) <= .0:
        likelihoods[likelihoods<=.0] = 1e-8
    'Return '
    return -sum(log(likelihoods))
def func(abgt):
    """L2 objective over (alpha, beta, gamma, tau): compares the
    tau-augmented model survivor functions against the binned data."""
    # Only the first three entries drive the grid geometry:
    abg = abgt[:3]
    xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg)
    dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin)
    S.rediscretize(S._dx, dt, S.getTf(), xmin)
    # Forward solve with the full 4-parameter vector:
    model_sdf = S.solve_tau(abgt, visualize=False)[:, :, -1]
    empirical_sdf = S.transformSurvivorData(binnedTrain)
    residual = model_sdf - empirical_sdf
    return .5 * sum(residual * residual) * S._dt
def GradedNMEstimator(file_name, phi_norms, abg_est, T_max, N_thresh_final):
    """Graded (coarse-to-fine) Nelder-Mead estimation of (alpha, beta, gamma).

    Runs fmin over a schedule of increasing time horizons T_thresh and
    decreasing bin-count thresholds N_thresh, warm-starting each stage
    from the previous stage's estimate.  Returns the final abg estimate.
    """
    # Schedule: horizons 1/8..1 of T_max; thresholds 2x..1x of the final one;
    # max_iters=None lets the last fmin run to its own default limit.
    for T_thresh, N_thresh, max_iters in zip(array([1./8., 1./4., 1./2., 1.])*T_max,
                                             array([2,2,2,1])*N_thresh_final,
                                             [50,50,100,None]):
        # Re-load and re-prune the data for this stage:
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        binnedTrain.pruneBins(None, N_thresh, T_thresh)
        print 'N_bins = ', len(binnedTrain.bins.keys())
        Tf = binnedTrain.getTf()
        print 'Tf = ', Tf
        # Fixed grid: dt derived from hard-coded speed bounds (5., 10.):
        dx = .02;
        dt = FPMultiPhiSolver.calculate_dt(dx, 5., 10.)
        phis = binnedTrain.bins.keys();
        theta = binnedTrain.theta
        S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, X_MIN = -.5)
        from scipy.optimize import fmin
        def func(abg):
            # Stage objective: L2 distance between model and empirical SDFs.
            'Solve it:'
            Fs = S.solve(abg, visualize=False)[:,:,-1]
            Ss = S.transformSurvivorData(binnedTrain)
            Ls = Fs - Ss
            'Return'
            G = .5*sum(Ls*Ls)*S._dt
            return G
        # ftol scales with the stage horizon, so early stages are looser:
        abg_est = fmin(func, abg_est, ftol = 1e-2*T_thresh, maxiter=max_iters)
        print 'current_estimate = ', abg_est
    return abg_est
def obtainRenewalDensities(phis, abg, theta, Tf):
    """Solve the F-P system for parameters abg and return the renewal
    (first-passage) density.

    Returns (g, ts, t_mids): the density g on the mid-point grid t_mids,
    plus the solver's full time grid ts.
    """
    xmin = FPMultiPhiSolver.calculate_xmin(Tf, abg)
    dx = .025
    dt = FPMultiPhiSolver.calculate_dt(dx, abg, xmin, factor = 5.)
    solver = FPMultiPhiSolver(theta, phis, dx, dt, Tf, xmin)
    Fs = solver.solve(abg)
    # Survivor function = last x-slice; density = -dF/dt:
    survivor = squeeze(Fs[:, :, -1])
    g = -diff(survivor) / solver._dt
    # Densities live naturally on interval midpoints:
    t_mids = (solver._ts[1:] + solver._ts[:-1]) / 2.0
    return g, solver._ts, t_mids
def func(abg): xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg, S._theta) dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin) S.rediscretize(S._dx, dt, S.getTf(), xmin) 'Solve it:' Fss = S.c_solve(abg) Fs = Fss[:,:,-1] Ss = S.transformSurvivorData(binnedTrain) lSups = amax(abs(Fs - Ss) , axis = 1) 'Return ' G = dot(weight_vector, lSups) if verbose: print 'abg = ', abg, '; G=', G return G
def loglikelihood(abg):
    """Negative log-likelihood of the observed spikes under parameters abg.

    Re-grids the solver, solves the forward F-P problem, differences the
    survivor function in time to get a pdf, and interpolates it in phi at
    each spike via the precomputed bracketing indices/weights from the
    enclosing scope.
    """
    'rediscretize:'
    xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg, S._theta)
    dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin)
    S.rediscretize(S._dx, dt, S.getTf(), xmin)
    'Solve it:'
    Fs = S.c_solve(abg)
    # Map each observed spike time onto the solver's time grid:
    spike_t_indexes = mleBinnedTrain.getTindxs(S._ts)
    'form (approximate) likelihood:'
    # pdf = -d/dt of the survivor function (last x-slice), per phi-bin:
    pdf = -diff(Fs[:,:,-1], axis = 1) / S._dt;
    likelihoods = pdf[minus_idxs, spike_t_indexes]*minus_weights +\
                  pdf[plus_idxs, spike_t_indexes]*plus_weights
    # NOTE(review): the clamp below is deliberately disabled here (the sibling
    # objective keeps it); log() will produce nan/inf if a likelihood is <= 0
    # — confirm the optimizer tolerates that.
    # if amin(likelihoods) <= .0:
    #     likelihoods[likelihoods<=.0] = 1e-8
    normalized_log_likelihood = sum(log(likelihoods))
    'Return '
    return -normalized_log_likelihood
def _resetSolver(self):
    """Re-grid the attached solver from the current parameter iterate.

    Derives dx/dt from the fastest drift speed implied by the current
    (alpha, beta, gamma) and the solver's left grid edge, keeping Tf fixed.
    """
    abg = self._abg_current
    # Largest advection speed on the domain: drive + |gamma| minus the
    # (negative) left edge of the x-grid.
    top_speed = abg[0] + abs(abg[2]) - self._Solver._xs[0]
    # Spatial step scaled by the noise amplitude beta over that speed:
    grid_dx = abg[1] / top_speed / 1e2
    grid_dt = FPMultiPhiSolver.calculate_dt(grid_dx, top_speed, 1e2)
    horizon = self._Solver.getTf()
    self._Solver.rediscretize(grid_dx, grid_dt, horizon)
def FP_L2_vs_Sup(N_spikes = 1000, N_trains=20):
    """Batch comparison of the FP L2 estimator vs the FP sup-norm estimator.

    For each regime and each sample train: loads the spike train, records
    the true parameters, computes a moment-based initializer, then runs
    both estimators and logs estimates + wall-clock times to a DataHarvester.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    # Phi-bin centers, uniformly spaced on (0, 1):
    phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    batch_start = time.clock()
    base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes
    D = DataHarvester('FPvsWFP_4x%d_N=%d'%(N_trains,N_spikes))
    for regime_name, T_thresh in zip(['subT','superT', 'crit', 'superSin'], 4*[64.]):
        regime_label = base_name + regime_name
        for sample_id in xrange(1,N_trains +1):
            file_name = regime_label + '_' + str(sample_id)
            print file_name
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            #### N_thresh = 10
            # Initializer is computed on a pruned (well-populated) binning:
            binnedTrain.pruneBins(None, N_thresh = 10, T_thresh=T_thresh)
            D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
            abg_init = initialize_right_2std(binnedTrain)
            # Floor beta at .1 and gamma at 0 to keep the start point feasible:
            abg_init[1] = amax([.1, abg_init[1]])
            abg_init[2] = amax([.0, abg_init[2]])
            D.addEstimate(sample_id, 'init_N10', abg_init,.0)
            # Reload everything and keep all bins (N_thresh=1) for fitting:
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            dx = .025;
            dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0)
            theta = binnedTrain.theta
            binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
            phis = binnedTrain.bins.keys();
            S = FPMultiPhiSolver(theta, phis, dx, dt, binnedTrain.getTf(), X_min = -1.0)
            start = time.clock()
            abg_est = FPL2Estimator(S,binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP_L2', abg_est, finish-start)
            start = time.clock()
            abg_est = FPSupEstimator(S,binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP_Sup', abg_est, finish-start)
    D.closeFile()
    print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
def ThetaEstimate(N_spikes = 1000, N_trains=100, N_phi=20, thetas = [1, 5, 10, 20]): print 'N_phi = ', N_phi phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi) batch_start = time.clock() base_name = 'sinusoidal_spike_train_N=%d_critical_theta='%N_spikes T_thresh = 64. D = DataHarvester('ThetaEstimate_%dx%d_N=%d'%(len(thetas),N_trains,N_spikes)) for sample_id in xrange(1,N_trains +1): for theta in thetas: regime_name = 'theta%d'%theta regime_label = base_name + '%d'%theta file_name = regime_label + '_%d'%sample_id print file_name binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms) ps = binnedTrain._Train._params abg_true = array((ps._alpha, ps._beta, ps._gamma)) D.setRegime(regime_name,abg_true, Tsim=-1.0) binnedTrain.pruneBins(None, N_thresh = 5, T_thresh=T_thresh) D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount()) abg_init = initialize_right_2std(binnedTrain) abg_init[1] = amax([.1, abg_init[1]]) abg_init[2] = amax([.0, abg_init[2]]) D.addEstimate(sample_id, 'Initializer', abg_init,.0, warnflag = 0) #RELOAD ALL DATA: binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms) binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh) #Weighted Fortet: start = time.clock() abg_est, warnflag = FortetEstimatorSup(binnedTrain, abg_init) finish = time.clock() D.addEstimate(sample_id, 'Fortet', abg_est, finish-start, warnflag) #Weighted F-P: dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0) phis = binnedTrain.bins.keys(); S = FPMultiPhiSolver(binnedTrain.theta, phis, dx, dt, binnedTrain.getTf(), X_min = -1.0) start = time.clock() abg_est, warnflag = FPSupEstimator(S, binnedTrain, abg_init) finish = time.clock() D.addEstimate(sample_id, 'FP', abg_est, finish-start, warnflag) D.closeFile() print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
def func(abg): xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg) dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin) S.rediscretize(S._dx, dt, S.getTf(), xmin) 'Solve it:' Fss = S.c_solve(abg) Fs = Fss[:,:,-1] Ss = S.transformSurvivorData(binnedTrain) Ls = Fs - Ss 'Return ' G = .5*sum( dot(weight_vector, Ls*Ls) )*S._dt / sum(weight_vector) # 'clean up' # del Fss, Fs, Ss, Ls; print 'abg = ', abg, '; G=', G return G
def TestEstimate(N_spikes = 100, N_trains=5, N_phi=8):
    """Small smoke-test driver: runs the weighted-Fortet and weighted-FP
    estimators (verbose) on a few 'superT' sample trains, printing results
    through a DataPrinter instead of persisting them.
    """
    print 'N_phi = ', N_phi
    phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    batch_start = time.clock()
    base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes
    D = DataPrinter('')
    for regime_name, T_thresh in zip(['superT'], [64]):
        regime_label = base_name + regime_name
        for sample_id in xrange(1,N_trains +1):
            file_name = regime_label + '_' + str(sample_id)
            print file_name
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            # Initializer uses only reasonably-populated bins:
            binnedTrain.pruneBins(None, N_thresh = 5, T_thresh=T_thresh)
            D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
            abg_init = initialize_right_2std(binnedTrain)
            # Floor beta at .1 and gamma at 0 to keep the start point feasible:
            abg_init[1] = amax([.1, abg_init[1]])
            abg_init[2] = amax([.0, abg_init[2]])
            D.addEstimate(sample_id, 'Initializer', abg_init,.0, warnflag = 0)
            #RELOAD ALL DATA:
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
            #Weighted Fortet:
            start = time.clock()
            abg_est, warnflag = FortetEstimatorSup(binnedTrain, abg_init, verbose = True)
            finish = time.clock()
            D.addEstimate(sample_id, 'Fortet', abg_est, finish-start, warnflag)
            #Weighted F-P:
            dx = .025;
            dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0)
            phis = binnedTrain.bins.keys();
            S = FPMultiPhiSolver(binnedTrain.theta, phis, dx, dt, binnedTrain.getTf(), X_min = -1.0)
            start = time.clock()
            abg_est, warnflag = FPSupEstimator(S, binnedTrain, abg_init, verbose = True)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP', abg_est, finish-start, warnflag)
    del D
    print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
def GradedDriver():
    """Driver for the graded (coarse-to-fine) Nelder-Mead estimator on a
    hard-coded list of subT spike-train files.

    Each file gets an initializer pass, then four fmin stages with
    increasing T_thresh and decreasing N_thresh, warm-starting each stage
    from the previous estimate.
    """
    from scipy.optimize import fmin_bfgs
    N_phi = 10;
    print 'N_phi = ', N_phi
    phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    print 'GradedEstimator'
    for file_name in ['sinusoidal_spike_train_T=20000_subT_3.path',
                      'sinusoidal_spike_train_T=20000_subT_8.path',
                      'sinusoidal_spike_train_T=20000_subT_13.path']:
        print file_name
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        binnedTrain.pruneBins(None, N_thresh = 32, T_thresh = 32.)
        # abs() forces the initializer into the positive orthant:
        abg_est = abs( initialize5(binnedTrain))
        print 'abg_init = ',abg_est
        theta = binnedTrain.theta
        # Stage schedule: growing horizon, shrinking bin-count threshold:
        for T_thresh, N_thresh, max_iters in zip([32/8., 32/4., 32/2., 32.], [128, 128, 64, 32], [50,50,100,None]):
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            binnedTrain.pruneBins(None, N_thresh, T_thresh)
            print 'N_bins = ', len(binnedTrain.bins.keys())
            Tf = binnedTrain.getTf()
            print 'Tf = ', Tf
            dx = .02;
            dt = FPMultiPhiSolver.calculate_dt(dx, 4., 10.)
            phis = binnedTrain.bins.keys();
            S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, X_MIN = -.5)
            from scipy.optimize import fmin
            def func(abg):
                # Stage objective: L2 distance between model and empirical SDFs.
                'Solve it:'
                Fs = S.solve(abg, visualize=False)[:,:,-1]
                Ss = S.transformSurvivorData(binnedTrain)
                Ls = Fs - Ss
                'Return'
                G = .5*sum(Ls*Ls)*S._dt
                return G
            # ftol scales with the stage horizon, so early stages are looser:
            abg_est = fmin(func, abg_est, ftol = 1e-2*T_thresh, maxiter=max_iters)
            print 'current_estimate = ', abg_est
        print 'final estimate = ', abg_est
def CustomEstimate(spike_trains):
    """Run the Fortet and FP estimators on an explicit list of spike trains.

    spike_trains -- iterable of tuples (regime_name, sample_id, N_spikes,
    N_phi); results go through a DataPrinter (console) rather than a file.
    """
    D = DataPrinter('')
    for spike_train in spike_trains:
        regime_name = spike_train[0];
        sample_id = spike_train[1]
        N_spikes = spike_train[2];
        N_phi = spike_train[3];
        phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
        base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes
        T_thresh = 128.0;
        regime_label = base_name + regime_name
        file_name = regime_label + '_' + str(sample_id)
        print file_name
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        ps = binnedTrain._Train._params
        abg_true = array((ps._alpha, ps._beta, ps._gamma))
        D.setRegime(regime_name,abg_true, Tsim=-1.0)
        # Initializer uses only reasonably-populated bins:
        binnedTrain.pruneBins(None, N_thresh = 5, T_thresh=T_thresh)
        D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
        abg_init = initialize_right_2std(binnedTrain)
        # Floor beta at .1 and gamma at 0 to keep the start point feasible:
        abg_init[1] = amax([.1, abg_init[1]])
        abg_init[2] = amax([.0, abg_init[2]])
        D.addEstimate(sample_id, 'Initializer', abg_init,.0, warnflag = 0)
        #RELOAD ALL DATA:
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
        #Weighted Fortet:
        start = time.clock()
        abg_est, warnflag = FortetEstimatorSup(binnedTrain, abg_init, verbose = True)
        finish = time.clock()
        D.addEstimate(sample_id, 'Fortet', abg_est, finish-start, warnflag)
        #Weighted F-P:
        dx = .025;
        dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0)
        phis = binnedTrain.bins.keys();
        S = FPMultiPhiSolver(binnedTrain.theta, phis, dx, dt, binnedTrain.getTf(), X_min = -1.0)
        start = time.clock()
        abg_est, warnflag = FPSupEstimator(S, binnedTrain, abg_init, verbose = True)
        finish = time.clock()
        D.addEstimate(sample_id, 'FP', abg_est, finish-start, warnflag)
    del D
def CvsPyEstimate():
    """Benchmark the C-backed Nelder-Mead estimator against the pure-Python
    one on two regimes x three samples, logging estimates and timings.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    batch_start = time.clock()
    base_name = 'sinusoidal_spike_train_N=1000_'
    D = DataHarvester('CvsPY_2x4')
    for regime_name, T_thresh in zip(['subT', 'superSin'], [32, 16.]):
        regime_label = base_name + regime_name
        for sample_id in xrange(1,4):
            file_name = regime_label + '_' + str(sample_id)
            print file_name
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, Tsim=-1.0)
            phi_omit = None
            binnedTrain.pruneBins(phi_omit, N_thresh = 64, T_thresh=T_thresh)
            Tf = binnedTrain.getTf()
            D.addSample(sample_id, Tf, binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
            start = time.clock()
            abg_init = initialize_right_2std(binnedTrain)
            finish = time.clock()
            D.addEstimate(sample_id, 'Initializer', abg_init, finish-start)
            dx = .025;
            dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0)
            phis = binnedTrain.bins.keys();
            theta = binnedTrain.theta
            S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, X_min = -1.0)
            # C implementation:
            start = time.clock()
            abg_est = cNMEstimator(S, binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP-C', abg_est, finish-start)
            # Pure-Python implementation (same solver, same start point):
            start = time.clock()
            abg_est = NMEstimator(S, binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'FP-PY', abg_est, finish-start)
    D.closeFile()
def NelderMeadSubTEstimator():
    """Run the Nelder-Mead FP estimator on 16 subT spike-train files
    (T=20000 simulations), logging initializer and NM estimates with timings.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    batch_start = time.clock()
    base_name = 'sinusoidal_spike_train_T='
    D = DataHarvester('SubT_NMx16_refined_sim_dt')
    for regime_name, T_sim, T_thresh in zip(['subT'], [20000], [32.]):
        regime_label = base_name + str(T_sim)+ '_' + regime_name
        for sample_id in xrange(1,17):
            file_name = regime_label + '_' + str(sample_id) + '.path'
            print file_name
            binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            D.setRegime(regime_name,abg_true, T_sim)
            phi_omit = None
            binnedTrain.pruneBins(phi_omit, N_thresh = 64, T_thresh=T_thresh)
            Tf = binnedTrain.getTf()
            D.addSample(sample_id, Tf, binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
            # Coarser grid than the other drivers (dx=.04), with dt from
            # hard-coded speed bounds (4., 2.):
            dx = .04;
            dt = FPMultiPhiSolver.calculate_dt(dx, 4., 2.)
            phis = binnedTrain.bins.keys();
            theta = binnedTrain.theta
            S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, X_MIN = -.5)
            start = time.clock()
            abg_init = initialize5(binnedTrain)
            finish = time.clock()
            D.addEstimate(sample_id, 'Initializer', abg_init, finish-start)
            # abs() forces the initializer into the positive orthant:
            abg_init = abs(abg_init)
            start = time.clock()
            abg_est = NMEstimator(S, binnedTrain, abg_init)
            finish = time.clock()
            D.addEstimate(sample_id, 'Nelder-Mead', abg_est, finish-start)
    D.closeFile()
    print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'
def estimateTau(regime = 'crit', number=11, N_thresh = 64, T_thresh = 16. ):
    """Estimate (alpha, beta, gamma, tau_char) on one spike train and compare
    against the tau-free Nelder-Mead fit; returns the 4-parameter estimate.
    """
    file_name = 'sinusoidal_spike_train_N=1000_' + regime + '_' + str(number)
    print file_name
    N_phi = 20;
    print 'N_phi = ', N_phi
    phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
    binnedTrain.pruneBins(None, N_thresh, T_thresh)
    print 'N_bins = ', len(binnedTrain.bins.keys())
    Tf = binnedTrain.getTf()
    print 'Tf = ', Tf
    abg_init = initialize_right_2std(binnedTrain)
    phis = binnedTrain.bins.keys();
    theta = binnedTrain.theta
    dx = .02;
    # NOTE(review): keyword 'x_min' here, while other call sites pass the
    # third argument positionally — confirm calculate_dt's signature.
    dt = FPMultiPhiSolver.calculate_dt(dx, abg_init,x_min= -1.0)
    S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, X_min = -2.)
    # Seed tau_char at .5 on top of the 3-parameter initializer:
    abgt_init = [abg_init[0], abg_init[1], abg_init[2], .5]
    print 'abgt_init = ', abgt_init
    start = time.clock()
    abgt_est = TaucharEstimator(S, binnedTrain, abgt_init)
    finish = time.clock()
    print 'abgt_est = ', abgt_est
    print 'compute time = ', finish-start
    print 'No tau comparison:'
    start = time.clock()
    abg_est = NMEstimator(S, binnedTrain, abg_init)
    finish = time.clock()
    print 'abg_est = ', abg_est
    print 'compute time = ', finish-start
    return abgt_est
def ThetaBox(thetas, sample_id = 1):
    """Run the Fortet and FP estimators for one sample id across several
    driving frequencies theta, printing results through a DataPrinter.
    """
    D = DataPrinter('')
    for theta in thetas:
        N_phi = 20;
        phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
        file_name = 'sinusoidal_spike_train_N=1000_critical_theta=%d_%d'%(theta, sample_id)
        print file_name
        T_thresh = 128.0;
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        ps = binnedTrain._Train._params
        abg_true = array((ps._alpha, ps._beta, ps._gamma))
        print 'ps = ', ps.getParams()
        # Regime label is derived from the file's own theta, not the loop var:
        regime_name = 'theta=%d'%int(ps._theta)
        print regime_name
        D.setRegime(regime_name,abg_true, Tsim=-1.0)
        # Initializer uses only reasonably-populated bins:
        binnedTrain.pruneBins(None, N_thresh = 5, T_thresh=T_thresh)
        D.addSample(sample_id, binnedTrain.getTf(), binnedTrain.getBinCount(), binnedTrain.getSpikeCount())
        abg_init = initialize_right_2std(binnedTrain, cap_beta_gamma=True)
        D.addEstimate(sample_id, 'Initializer', abg_init,.0, warnflag = 0)
        #RELOAD ALL DATA:
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
        binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
        #Weighted Fortet:
        start = time.clock()
        abg_est, warnflag = FortetEstimatorSup(binnedTrain, abg_init, verbose = False)
        finish = time.clock()
        D.addEstimate(sample_id, 'Fortet', abg_est, finish-start, warnflag)
        #Weighted F-P:
        dx = .025;
        dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0)
        phis = binnedTrain.bins.keys();
        S = FPMultiPhiSolver(binnedTrain.theta, phis, dx, dt, binnedTrain.getTf(), X_min = -1.0)
        start = time.clock()
        abg_est, warnflag = FPSupEstimator(S, binnedTrain, abg_init, verbose = False)
        finish = time.clock()
        D.addEstimate(sample_id, 'FP', abg_est, finish-start, warnflag)
    del D
def MultiTrainEstimator():
    """Compare the TNC (gradient-based) and Nelder-Mead estimators on a
    hard-coded list of spike-train files, printing estimates and errors.
    """
    phis = linspace(.05, .95, 10)
    for file_name in ['sinusoidal_spike_train_T=1000.path',
                      'sinusoidal_spike_train_T=6000.path',
                      'sinusoidal_spike_train_T=10000_superSin.path',
                      'sinusoidal_spike_train_T=10000_crit.path']:
        print '#'*64
        print file_name
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phis)
        phi_omit = None
        binnedTrain.pruneBins(phi_omit, N_thresh = 64, T_thresh = 10.)
        print 'N_bins = ', len(binnedTrain.bins.keys())
        Tf = binnedTrain.getTf()
        print 'Tf = ', Tf
        dx = .05;
        dt = FPMultiPhiSolver.calculate_dt(dx, 5., 2.)
        binphis = binnedTrain.bins.keys();
        theta = binnedTrain.theta
        S = FPMultiPhiSolver(theta, binphis, dx, dt, Tf)
        ps = binnedTrain._Train._params
        abg_true = array((ps._alpha, ps._beta, ps._gamma))
        print 'abg_true = ', abg_true
        # Start point: random multiplicative perturbation of the truth,
        # folded into the positive orthant:
        abg_init = abs(abg_true + abg_true * randn(3)) ;
        print 'abg_init = ', abg_init
        # TNC estimator:
        start = time.clock()
        abg_est = TNCEstimator(S, binnedTrain, abg_init)
        print 'Est. time = ', time.clock() - start
        print 'abg_est = ', abg_est
        print 'error = ', abg_true - abg_est
        # Nelder-Mead estimator from the same start point:
        start = time.clock()
        abg_est = NMEstimator(S, binnedTrain, abg_init)
        print 'Est. time = ', time.clock() - start
        print 'abg_est = ', abg_est
        print 'error = ', abg_true - abg_est
def MixedEstimator(abg_init, binnedTrain, dp_tol = 1e-2): phis = binnedTrain.bins.keys(); theta = binnedTrain.theta dp = dp_tol*2.0; abg = abg_init while dp > dp_tol: Tf = binnedTrain.getTf() xmin = FPMultiPhiSolver.calculate_xmin(Tf, abg) dx = FPMultiPhiSolver.calculate_dx(abg, xmin) dt = FPMultiPhiSolver.calculate_dt(dx, abg, xmin, factor = 8.) S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, xmin) Fs = S.solve(abg, visualize=False) Ss = S.transformSurvivorData(binnedTrain) Ls = Fs[:,:,-1] - Ss Nus = S.solveAdjoint(abg, Ls) dGdp = S.estimateParameterGradient(abg, Fs, Nus) from numpy.linalg.linalg import norm dG_normalized = dGdp/ norm(dGdp) dp = FortetLineEstimator(binnedTrain, abg, dG_normalized, dp_tol) abg = abg - dp*dG_normalized print 'dG = ', dG_normalized print 'dp = ', dp print 'abg = (%.3g, %.3g, %.3g)'%(abg[0],abg[1],abg[2]) print '-' return abg
def postVisualizer():
    """Post-hoc visualization: reload stored Fortet and FP estimates for one
    theta=20 sample and plot them against the data and the true parameters.
    """
    N_phi = 20
    print 'N_phi = ', N_phi
    phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    theta = 20
    base_name = 'sinusoidal_spike_train_N=1000_critical_theta=%d'%theta
    T_thresh = 64.
    analyzer = DataAnalyzer('ThetaEstimate_4x100_N=1000')
    sample_id = 32
    regime_label = base_name + '%d'%theta
    file_name = base_name + '_%d'%sample_id
    print file_name
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
    binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
    regime_name = 'theta%d'%theta
    abg_true = analyzer.getTrueParamValues(regime_name);
    print abg_true
    # Fortet: stored estimate vs truth:
    abg_fortet = analyzer.getEstimates(sample_id, regime_name, 'Fortet')[0]
    print abg_fortet
    visualizeData_vs_Fortet(abg_fortet, binnedTrain, theta,title_tag = 'Fortet: estimates', save_fig_name='theta20_Fortet_estimates')
    visualizeData_vs_Fortet(abg_true, binnedTrain, theta,title_tag = 'Fortet: true', save_fig_name='theta20_Fortet_true')
    # FP: stored estimate vs truth (needs a solver instance):
    abg_fp = analyzer.getEstimates(sample_id, regime_name, 'FP')[0]
    print abg_fp
    dx = .025;
    dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0)
    phis = binnedTrain.bins.keys();
    S = FPMultiPhiSolver(binnedTrain.theta, phis, dx, dt, binnedTrain.getTf(), X_min = -1.0)
    visualizeData_vs_FP(S, abg_fp, binnedTrain,title_tag = 'FP: estimates', save_fig_name='theta20_FP_estimates')
    visualizeData_vs_FP(S, abg_true, binnedTrain,title_tag = 'FP: true', save_fig_name='theta20_FP_true')
def NMSuperSinEstimator():
    """Single-file Nelder-Mead estimation on the superSin T=10000 train,
    from a hard-coded start point, printing the estimate and its error.
    """
    phis = linspace(.05, .95, 40)
    file_name = 'sinusoidal_spike_train_T=10000_superSin.path'
    print '#'*64
    print file_name
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phis)
    phi_omit = None
    binnedTrain.pruneBins(phi_omit, N_thresh = 128, T_thresh = 8.)
    print 'N_bins = ', len(binnedTrain.bins.keys())
    Tf = binnedTrain.getTf()
    print 'Tf = ', Tf
    dx = .025;
    dt = FPMultiPhiSolver.calculate_dt(dx, 5., 2.)
    binphis = binnedTrain.bins.keys();
    theta = binnedTrain.theta
    S = FPMultiPhiSolver(theta, binphis, dx, dt, Tf)
    ps = binnedTrain._Train._params
    abg_true = array((ps._alpha, ps._beta, ps._gamma))
    print 'abg_true = ', abg_true
    # Hard-coded start point (presumably from an earlier initializer run):
    abg_init = [0.716 , 0.199 , 0.51]
    print 'abg_init = ', abg_init
    start = time.clock()
    abg_est = NMEstimator(S, binnedTrain, abg_init)
    print 'Est. time = ', time.clock() - start
    print 'abg_est = ', abg_est
    print 'error = ', abg_true - abg_est
def estimateSubT(N_spikes=100, sample_id =4, T_thresh = 32.):
    """Run the weighted-FP estimator on one subT sample: initializer on a
    pruned binning (N_thresh=10), then the fit on the full binning
    (N_thresh=1); prints the resulting estimate.
    """
    N_phi = 20;
    print 'N_phi = ', N_phi
    phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes
    regime_name = 'subT'
    regime_label = base_name + regime_name
    file_name = regime_label + '_' + str(sample_id)
    print file_name
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
    ps = binnedTrain._Train._params
    print array((ps._alpha, ps._beta, ps._gamma))
    #### N_thresh = 10
    binnedTrain.pruneBins(None, N_thresh = 10, T_thresh=T_thresh)
    abg_init = initialize_right_2std(binnedTrain)
    print abg_init
    # Floor beta at .1 and gamma at 0 to keep the start point feasible:
    abg_init[1] = amax([.1, abg_init[1]])
    abg_init[2] = amax([.0, abg_init[2]])
    dx = .025;
    dt = FPMultiPhiSolver.calculate_dt(dx, abg_init, -1.0)
    print dx, dt
    theta = binnedTrain.theta
    ##### N_thresh = 1
    binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms)
    binnedTrain.pruneBins(None, N_thresh = 1, T_thresh=T_thresh)
    phis = binnedTrain.bins.keys();
    S = FPMultiPhiSolver(theta, phis, dx, dt, binnedTrain.getTf(), X_min = -1.0)
    abg_est = WeightedFPEstimator(S,binnedTrain, abg_init)
    print abg_est
def calculateExactSDFs():
    """Compute 'exact' survivor functions (model SDFs at the true parameters)
    for four regimes on a fine grid and save them with savez to RESULTS_DIR.
    """
    N_phi = 4;
    print 'N_phi = ', N_phi
    # Four quarter-cycle phases:
    phis = 2*pi*array([0,.25, .5, .75])
    for regime_idx, regime_name in enumerate(['superT', 'superSin', 'crit','subT']):
        regime_label = 'sinusoidal_spike_train_N=1000_' + regime_name + '_12'
        binnedTrain = BinnedSpikeTrain.initFromFile(regime_label, phis)
        Tf = binnedTrain.getTf()
        print 'Tf = ', Tf
        theta = binnedTrain.theta;
        print 'theta = ', theta
        ps = binnedTrain._Train._params
        abg_true = array((ps._alpha, ps._beta, ps._gamma))
        abg = abg_true
        xmin = FPMultiPhiSolver.calculate_xmin(Tf, abg, theta)
        # Fine spatial grid for the reference solution:
        dx = .0125;
        dt = FPMultiPhiSolver.calculate_dt(dx, abg, xmin, factor = 5.)
        print 'xmin = ', xmin, ', dx, dt = ', dx, dt
        S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, xmin)
        S.setTf(Tf)
        Fs = S.c_solve(abg)
        ts = S._ts;
        filename= RESULTS_DIR + '/Fs_%s'%regime_name
        print 'saving Fs to ', filename
        # Store the survivor functions (last x-slice) with their grids:
        savez(filename, ts=ts, Gs=squeeze(Fs[:,:,-1]), phis=phis, Tf = Tf);
def supersin_sandbox(N_phi = 32):
    """Sandbox comparing four approximations of the spike likelihood in the
    superSin regime: exact phi (L_star), nearest-bin phi (L_m), a weighted
    two-bin interpolation (L_plus_minus), and a gradient-in-phi correction
    (L_gradphi).  Saves the likelihoods and approximation errors to .npy.
    """
    N_sub_samples = 10
    # 5 groups of sub-samples; a fresh random train is drawn per group:
    N_samples = N_sub_samples*5;
    normalized_phis = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi)
    #results banks:
    likelihoods = empty((4, 4, N_samples))
    errors = empty((4, 3, N_samples))
    seed(2013)
    for regime_idx, tag in enumerate(['superSin']):
        file_name = 'sinusoidal_spike_train_N=1000_%s_22'%(tag)
        print file_name
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, normalized_phis)
        theta = binnedTrain.theta;
        real_phis = normalized_phis * 2.0 * pi / theta;
        for sample_idx in xrange(N_samples):
            # Every N_sub_samples draws, switch to a random train:
            if 0 == mod(sample_idx, N_sub_samples):
                train_id = randint(1,101)
                file_name = 'sinusoidal_spike_train_N=1000_%s_%d'%(tag, train_id)
                print file_name
                binnedTrain = BinnedSpikeTrain.initFromFile(file_name, normalized_phis)
            phi_star,I_star = binnedTrain.getRandomPhiInterval()
            print 'phi_star_normalized, I_star: %.3f, %.3f' %(phi_star/ (2*pi/theta), I_star)
            # Nearest bin phi_m and bracketing bins phi_minus/phi_plus:
            phi_m, phi_minus, phi_plus = getApproximatePhis(phi_star, real_phis,theta)
            delta_phi_minus_weight, delta_phi_plus_weight = getDeltaPhiWeights(phi_star, phi_minus, phi_plus)
            #phi_star_idx = 0; phi_m_idx = 1; etc...
            solver_phis = [phi_star, phi_m, phi_minus, phi_plus]
            # print 'solver_phis = ', solver_phis
            # print 'weights = %.3f, %.3f'%(delta_phi_minus_weight, delta_phi_plus_weight)
            ps = binnedTrain._Train._params
            abg_true = array((ps._alpha, ps._beta, ps._gamma))
            abg = abg_true
            # Solve just past the observed interval:
            Tf = I_star + .2;
            dx = .025;
            x_min = FPMultiPhiSolver.calculate_xmin(Tf, abg, theta)
            dt = FPMultiPhiSolver.calculate_dt(dx,abg, x_min, factor = 1.0)
            S = FPMultiPhiSolver(theta, solver_phis, dx, dt, Tf, x_min)
            S.setTf(Tf)
            Fs = S.c_solve(abg)
            Fth = Fs[:,:,-1]
            # Phi-sensitivity of the survivor function (adjoint solve):
            Fth_phis = S.solveFphi(abg, Fs)[:,:,-1]
            ts = S._ts;
            tm_idx, tp_idx = gettsIndex(ts, I_star)
            delta_t = S._dt
            delta_phi = phi_star - phi_m
            #various approximations to the likelihood, L
            L_star = -diff(Fth[0, [tm_idx, tp_idx]]) / (delta_t)
            L_m = -diff(Fth[1, [tm_idx, tp_idx]]) / (delta_t)
            L_plus_minus = -(diff(Fth[2, [tm_idx, tp_idx]])*delta_phi_minus_weight + \
                             diff(Fth[3, [tm_idx, tp_idx]])*delta_phi_plus_weight)/ (delta_t)
            # First-order correction of L_m using the phi-gradient:
            gradphi_g = -diff(Fth_phis[1, [tm_idx, tp_idx]]) / (delta_t);
            logL_gradphi = log(L_m) + delta_phi * gradphi_g / L_m
            L_gradphi = exp(logL_gradphi)
            'sanity check'
            # Finite-difference estimate of dF/dphi vs the adjoint value:
            approx_Fth_phi = .5 * sum(Fth[0, [tm_idx, tp_idx]] - Fth[1, [tm_idx, tp_idx]]) / (phi_star - phi_m)
            lFth_phi = .5* (sum(Fth_phis[1, [tm_idx, tp_idx]]))
            if (approx_Fth_phi * lFth_phi < .0):
                print 'sanity inverse: approx:%.3f , adjoint_calc: %.3f'%(approx_Fth_phi,lFth_phi)
            if (.0 >= L_star*L_m*L_plus_minus*L_gradphi):
                print 'negative likelihood encountered'
            # print 'di_F: %.4f,%.4f,%.4f' %(diF_star, diF_m, diF_plus_minus)
            # print 'error: %.4f,%.4f' %(abs(diF_star - diF_m), abs(diF_star-diF_plus_minus) )
            likelihoods[regime_idx, :, sample_idx] = r_[L_star, L_m, L_plus_minus, L_gradphi]
            errors[regime_idx, :, sample_idx] = r_[abs(L_star - L_m), abs(L_star-L_plus_minus), abs(L_star - L_gradphi)]
        # Per-regime error traces:
        figure()
        plot(errors[regime_idx, 0,:], 'b', label='F_m')
        plot(errors[regime_idx, 1,:], 'r', label='F_min + F_plus')
        legend();
        title(tag, fontsize = 32)
    from numpy import save
    save('likelihoods_supersin',likelihoods)
    save('errors_supersin', errors)
def thetas_sandbox(save_figs=False): N_samples = 100; N_phi = 64; normalized_phis = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi) #results banks: # seed(2013) thetas = [10, 20] likelihoods = empty((len(thetas), 4, N_samples)) errors = empty((len(thetas), 3, N_samples)) base_name = 'sinusoidal_spike_train_N=1000_critical_theta=' for regime_idx, theta in enumerate(thetas): sample_id = 17 regime_name = 'theta%d'%theta regime_label = base_name + '%d'%theta file_name = regime_label + '_%d'%sample_id print file_name binnedTrain = BinnedSpikeTrain.initFromFile(file_name, normalized_phis) theta = binnedTrain.theta; real_phis = normalized_phis * 2.0 * pi / theta; for sample_idx in xrange(N_samples): phi_star,I_star = binnedTrain.getRandomPhiInterval() print 'phi_star_normalized, I_star: %.3f, %.3f' %(phi_star/ (2*pi/theta), I_star) phi_m, phi_minus, phi_plus = getApproximatePhis(phi_star, real_phis,theta) delta_phi_minus_weight, delta_phi_plus_weight = getDeltaPhiWeights(phi_star, phi_minus, phi_plus) #phi_star_idx = 0; phi_m_idx = 1; etc... 
solver_phis = [phi_star, phi_m, phi_minus, phi_plus] # print 'solver_phis = ', solver_phis # print 'weights = %.3f, %.3f'%(delta_phi_minus_weight, delta_phi_plus_weight) ps = binnedTrain._Train._params abg_true = array((ps._alpha, ps._beta, ps._gamma)) abg = abg_true Tf = I_star + .2; dx = .025; x_min = FPMultiPhiSolver.calculate_xmin(Tf, abg, theta) dt = FPMultiPhiSolver.calculate_dt(dx,abg, x_min, factor = 1.0) S = FPMultiPhiSolver(theta, solver_phis, dx, dt, Tf, x_min) S.setTf(Tf) Fs = S.c_solve(abg) Fth = Fs[:,:,-1] # Fth_phis = S.solveFphi(abg, Fs)[:,:,-1] ts = S._ts; tm_idx, tp_idx = gettsIndex(ts, I_star) delta_t = S._dt delta_phi = phi_star - phi_m #various approximations to the likelihood, L L_star = -diff(Fth[0, [tm_idx, tp_idx]]) / (delta_t) L_m = -diff(Fth[1, [tm_idx, tp_idx]]) / (delta_t) L_plus_minus = -(diff(Fth[2, [tm_idx, tp_idx]])*delta_phi_minus_weight + \ diff(Fth[3, [tm_idx, tp_idx]])*delta_phi_plus_weight)/ (delta_t) # gradphi_g = -diff(Fth_phis[1, [tm_idx, tp_idx]]) / (delta_t); # logL_gradphi = log(L_m) + delta_phi * gradphi_g / L_m # L_gradphi = exp(logL_gradphi) L_gradphi = L_plus_minus # print 'di_F: %.4f,%.4f,%.4f' %(diF_star, diF_m, diF_plus_minus) # print 'error: %.4f,%.4f' %(abs(diF_star - diF_m), abs(diF_star-diF_plus_minus) ) likelihoods[regime_idx, :, sample_idx] = r_[L_star, L_m, L_plus_minus, L_gradphi] errors[regime_idx, :, sample_idx] = r_[abs(L_star - L_m), abs(L_star-L_plus_minus), abs(L_star - L_gradphi)] figure() plot(errors[regime_idx, 0,:], 'b', label='F_m') plot(errors[regime_idx, 1,:], 'r', label='F_min + F_plus') legend(); title('theta= %.2f'%theta, fontsize = 32) from numpy import save save('likelihoods_thetas',likelihoods) save('errors_thetas', errors)
def adapted_sandbox(save_figs=False):
    """Compare likelihood-derivative approximations using an adapted phi grid.

    For each of the four regimes (superSin, crit, superT, subT), loads a fixed
    sample train, draws N_samples random (phi, interval) pairs, and compares the
    exact-phi finite-difference likelihood (diF_star) against the nearest-grid
    value (diF_m) and a weighted two-neighbour interpolation (diF_plus_minus).
    Plots per-regime errors and saves arrays to 'Ls_adapted.npy' /
    'errors_adapted.npy'.

    NOTE(review): save_figs is accepted but never used in this body.
    """
    # file_name = 'sinusoidal_spike_train_N=1000_subT_11'
    # file_name = 'sinusoidal_spike_train_N=1000_superSin_13'
    N_samples = 16;
    N_phi_per_quarter = 4;
    N_phi = 4*N_phi_per_quarter;
    # Non-uniform phase grid built by the project helper (4 phis per quarter).
    normalized_phis = getAdaptedPhis(N_phi_per_quarter)
    # diFs: (regime, approximation-kind, sample); errors: vs. diF_star.
    diFs = empty((4, 3, N_samples))
    errors = empty((4, 2, N_samples))
    # Fixed seed so the random interval draws are reproducible.
    seed(2013)
    for regime_idx, tag in enumerate(['superSin', 'crit', 'superT', 'subT']):
        file_name = 'sinusoidal_spike_train_N=1000_%s_13'%tag
        binnedTrain = BinnedSpikeTrain.initFromFile(file_name, normalized_phis)
        theta = binnedTrain.theta;
        # Convert normalized phases [0,1) to real phases on the theta-period.
        real_phis = normalized_phis * 2.0 * pi / theta;
        for sample_idx in xrange(N_samples):
            phi_star,I_star = binnedTrain.getRandomPhiInterval()
            print 'phi_star, I_star: ', phi_star, I_star
            # Nearest grid phase and its two bracketing neighbours.
            phi_m, phi_minus, phi_plus = getApproximatePhis(phi_star, real_phis,theta)
            delta_phi_minus_weight, delta_phi_plus_weight = getDeltaPhiWeights(phi_star, phi_minus, phi_plus)
            solver_phis = [phi_star, phi_m, phi_minus, phi_plus]
            ps = binnedTrain._Train._params
            # True generating parameters — no estimation in this sandbox.
            abg = array((ps._alpha, ps._beta, ps._gamma))
            # Solve slightly past the sampled interval end so the diff below fits.
            Tf = I_star + .2;
            dx = .025;
            x_min = FPMultiPhiSolver.calculate_xmin(Tf, abg, theta)
            dt = FPMultiPhiSolver.calculate_dt(dx,abg, x_min, factor = 1.0)
            S = FPMultiPhiSolver(theta, solver_phis, dx, dt, Tf, x_min)
            S.setTf(Tf)
            Fth = S.c_solve(abg)[:,:,-1]
            ts = S._ts;
            # Time indices bracketing I_star on the solver grid.
            tm_idx, tp_idx = gettsIndex(ts, I_star)
            # Likelihood ~ -dF/dt via finite difference across [tm_idx, tp_idx].
            diF_star = -diff(Fth[0, [tm_idx, tp_idx]]) / (S._dt)
            diF_m = -diff(Fth[1, [tm_idx, tp_idx]]) / (S._dt)
            diF_plus_minus = -(diff(Fth[2, [tm_idx, tp_idx]])*delta_phi_minus_weight + \
                               diff(Fth[3, [tm_idx, tp_idx]])*delta_phi_plus_weight)/ (S._dt)
            # print 'di_F: %.4f,%.4f,%.4f' %(diF_star, diF_m, diF_plus_minus)
            # print 'error: %.4f,%.4f' %(abs(diF_star - diF_m), abs(diF_star-diF_plus_minus) )
            diFs[regime_idx, :, sample_idx] = r_[diF_star, diF_m, diF_plus_minus]
            errors[regime_idx, :, sample_idx] = r_[abs(diF_star - diF_m), abs(diF_star-diF_plus_minus)]
        # One error plot per regime.
        figure()
        plot(errors[regime_idx, 0,:], 'b', label='F_m')
        plot(errors[regime_idx, 1,:], 'r', label='F_min + F_plus')
        legend();
        title(tag, fontsize = 32)
    # Persist raw results for offline analysis.
    from numpy import save
    save('Ls_adapted',diFs)
    save('errors_adapted', errors)
def AdjointEstimator(): N_phi = 20; print 'N_phi = ', N_phi phis = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi) file_name = 'sinusoidal_spike_train_N=1000_crit_1' print file_name binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phis) phi_omit = None binnedTrain.pruneBins(phi_omit, N_thresh = 100, T_thresh = 10.0) print 'N_bins = ', len(binnedTrain.bins.keys()) Tf = binnedTrain.getTf() print 'Tf = ', Tf phis = binnedTrain.bins.keys(); theta = binnedTrain.theta ps = binnedTrain._Train._params abg_true = array((ps._alpha, ps._beta, ps._gamma)) print 'abg_true = ', abg_true abg = abg_true xmin = FPMultiPhiSolver.calculate_xmin(Tf, abg) dx = FPMultiPhiSolver.calculate_dx(abg, xmin) dt = FPMultiPhiSolver.calculate_dt(dx, abg, xmin, factor = 8.) print 'xmin, dx, dt = ', xmin, dx, dt S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, xmin) abg_init = initialize5(binnedTrain) print 'abg_init = ', abg_init # start = time.clock() # abg_est = TNCEstimator(S, binnedTrain, abg_init) # print 'Est. time = ', time.clock() - start # print 'abg_est = ', abg_est # start = time.clock() # abg_est = NMEstimator(S, binnedTrain, abg_init) # print 'Est. time = ', time.clock() - start # print 'abg_est = ', abg_est # # start = time.clock() # abg_est = COBYLAEstimator(S, binnedTrain, abg_init) # print 'Est. time = ', time.clock() - start # print 'abg_est = ', abg_est # start = time.clock() # abg_est = CGEstimator(S, binnedTrain, abg_init) # print 'Est. time = ', time.clock() - start # print 'abg_est = ', abg_est start = time.clock() abg_est = BFGSEstimator(S, binnedTrain, abg_init) print 'Est. time = ', time.clock() - start print 'abg_est = ', abg_est
def MLEBox(N_spikes = 1000, N_trains=5, N_phi=16): print 'N_phi = ', N_phi N_phi_init = 8; phi_norms_init = linspace(1/(2.*N_phi_init), 1. - 1/ (2.*N_phi_init), N_phi_init) # base_name = 'sinusoidal_spike_train_N=%d_'%N_spikes # output_file = open('mlebox_output.txt', 'w') # for regime_name, T_thresh in zip(['subT', 'crit', 'superSin','superT'], # [128., 64., 64., 64.]): output_file = open('mlebox_output_thetas.txt', 'w') thetas = [20] base_name = 'sinusoidal_spike_train_N=%d_critical_theta='%N_spikes T_thresh = 64. for theta in thetas: for sample_id in xrange(1,N_trains +1): regime_name = 'theta%d'%theta regime_label = base_name + '%d'%theta file_name = regime_label + '_%d'%sample_id print file_name # regime_label = base_name + regime_name # file_name = regime_label + '_' + str(sample_id) # print file_name binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms_init) ps = binnedTrain._Train._params abg_true = array((ps._alpha, ps._beta, ps._gamma)) binnedTrain.pruneBins(None, N_thresh = 8, T_thresh=T_thresh) abg_init = initialize_right_2std(binnedTrain) abg_init[1] = amax([.1, abg_init[1]]) abg_init[2] = amax([.0, abg_init[2]]) abg_fortet, warnflag = FortetEstimatorSup(binnedTrain, abg_init) #RELOAD ALL DATA: mleBinnedTrain = MLEBinnedSpikeTrain.initFromFile(file_name, N_phi) #MLE F-P: dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, abg_true, -1.0) phis = mleBinnedTrain.phi_ms; S = FPMultiPhiSolver(binnedTrain.theta, phis, dx, dt, binnedTrain.getTf(), X_min = -1.0) minus_idxs = mleBinnedTrain.phi_minus_indxs plus_idxs = mleBinnedTrain.phi_plus_indxs minus_weights = mleBinnedTrain.phi_minus_weights plus_weights = mleBinnedTrain.phi_plus_weights def loglikelihood(abg): 'rediscretize:' xmin = FPMultiPhiSolver.calculate_xmin(S.getTf(), abg, S._theta) dt = FPMultiPhiSolver.calculate_dt(S._dx, abg, xmin) S.rediscretize(S._dx, dt, S.getTf(), xmin) 'Solve it:' Fs = S.c_solve(abg) spike_t_indexes = mleBinnedTrain.getTindxs(S._ts) 'form (approximate) likelihood:' 
pdf = -diff(Fs[:,:,-1], axis = 1) / S._dt; likelihoods = pdf[minus_idxs, spike_t_indexes]*minus_weights +\ pdf[plus_idxs, spike_t_indexes]*plus_weights # if amin(likelihoods) <= .0: # likelihoods[likelihoods<=.0] = 1e-8 normalized_log_likelihood = sum(log(likelihoods)) 'Return ' return -normalized_log_likelihood #MLE F-P: # abg_tnc, abg_cobyla, abg_neldermead = MLEEstimator(S, # mleBinnedTrain, abg_init) abg_neldermead = MLEEstimator(S,mleBinnedTrain, abg_init) #OUTPUTs output_file.write('\n' +file_name + ':\n') for tag, abg in zip(['init', 'fortet', 'nelder_mead', 'true'], [abg_init, abg_fortet, abg_tnc, abg_cobyla, abg_neldermead , abg_true]): output_file.write(tag + ':' + str(loglikelihood(abg)) + ':' + str(abg) + '\n');
def BFGSItersComparison(): N_phi = 20; print 'N_phi = ', N_phi phi_norms = linspace(1/(2.*N_phi), 1. - 1/ (2.*N_phi), N_phi) batch_start = time.clock() base_name = 'sinusoidal_spike_train_T=' D = DataHarvester('BFGS_Iters') for regime_name, T_sim, T_thresh in zip(['crit', 'superSin'], [5000, 5000], [16., 16.]): regime_label = base_name + str(T_sim)+ '_' + regime_name for sample_id in xrange(1,4): file_name = regime_label + '_' + str(sample_id) + '.path' print file_name binnedTrain = BinnedSpikeTrain.initFromFile(file_name, phi_norms) ps = binnedTrain._Train._params abg_true = array((ps._alpha, ps._beta, ps._gamma)) D.setRegime(regime_name,abg_true, T_sim) phi_omit = None binnedTrain.pruneBins(phi_omit, N_thresh = 64, T_thresh=T_thresh) Tf = binnedTrain.getTf() D.addSample(sample_id, Tf, binnedTrain.getBinCount(), binnedTrain.getSpikeCount()) dx = .025; dt = FPMultiPhiSolver.calculate_dt(dx, 5., 2.) phis = binnedTrain.bins.keys(); theta = binnedTrain.theta S = FPMultiPhiSolver(theta, phis, dx, dt, Tf, X_MIN = -2.0) start = time.clock() abg_init = initialize5(binnedTrain) finish = time.clock() D.addEstimate(sample_id, 'Initializer', abg_init, finish-start) start = time.clock() abg_est = BFGSEstimator(S, binnedTrain, abg_init, max_iters = 8) finish = time.clock() D.addEstimate(sample_id, 'BFGS_8', abg_est, finish-start) start = time.clock() abg_est = BFGSEstimator(S, binnedTrain, abg_est,max_iters = 8) finish = time.clock() D.addEstimate(sample_id, 'BFGS_16', abg_est, finish-start) start = time.clock() abg_est = BFGSEstimator(S, binnedTrain, abg_est,max_iters = 8) finish = time.clock() D.addEstimate(sample_id, 'BFGS_24', abg_est, finish-start) D.closeFile() print 'batch time = ', (time.clock() - batch_start) / 3600.0, ' hrs'