def maximizeLikelihood(self, experiment, beta0, buildXmatrix, maxIter=10**3, stopCond=10**-6):

    ###
    ### THIS IMPLEMENTATION IS NOT SO COOL :(
    ### IN A NEW VERSION OF THE CODE, A DEDICATED CLASS SHOULD TAKE CARE OF MAXIMUM LIKELIHOOD ON THE lambda=exp(Xbeta) MODEL
    ###

    """
    Maximize likelihood. This function can be used to fit any model of the form lambda=exp(Xbeta).
    This function is used to fit both:
    - static threshold
    - dynamic threshold
    The difference between the two fits lies in the size of beta0 and of the returned beta,
    as well as in the function buildXmatrix.
    """

    # Precompute all the matrices used in the gradient ascent (see Eq. 20 in Pozzorini et al. 2015)
    ################################################################################################

    # Here X refers to the matrix made of the y vectors defined in Eq. 21 (Pozzorini et al. 2015).
    # Since the fit can be performed on multiple traces, we need lists.
    all_X = []

    # Similar to X, but only contains the temporal samples where experimental spikes have been observed.
    # Storing this matrix improves speed when computing the likelihood as well as its derivative.
    all_X_spikes = []

    # Sum of X_spikes over spikes. Precomputing this quantity improves speed when the gradient is evaluated.
    all_sum_X_spikes = []

    # Variables used to compute the log-likelihood of a Poisson process spiking at the experimental firing rate
    T_tot = 0.0
    N_spikes_tot = 0.0

    traces_nb = 0

    for tr in experiment.trainingset_traces:

        if tr.useTrace:

            traces_nb += 1

            # Simulate subthreshold dynamics
            (time, V_est, eta_sum_est) = self.simulateDeterministic_forceSpikes(tr.I, tr.V[0], tr.getSpikeTimes())

            # Precompute matrices used in the gradient ascent on the log-likelihood.
            # Depending on the model being fitted (static vs dynamic threshold), different buildXmatrix functions can be used.
            (X_tmp, X_spikes_tmp, sum_X_spikes_tmp, N_spikes, T) = buildXmatrix(tr, V_est)

            T_tot += T
            N_spikes_tot += N_spikes

            all_X.append(X_tmp)
            all_X_spikes.append(X_spikes_tmp)
            all_sum_X_spikes.append(sum_X_spikes_tmp)

    # Compute the log-likelihood of a Poisson process (this quantity is used to normalize the model log-likelihood)
    ################################################################################################

    logL_poisson = N_spikes_tot * (np.log(N_spikes_tot / T_tot) - 1)

    # Perform gradient ascent
    ################################################################################################

    print "Maximize log-likelihood (bit/spks)..."

    beta = beta0
    old_L = 1

    for i in range(maxIter):

        learning_rate = 1.0

        # In the first iterations, using a small learning rate makes the fit more stable
        if i <= 10:
            learning_rate = 0.1

        L = 0
        G = 0
        H = 0

        for trace_i in np.arange(traces_nb):
            # Compute the log-likelihood, gradient and Hessian on a specific trace (the fit is performed on multiple traces)
            (L_tmp, G_tmp, H_tmp) = self.computeLikelihoodGradientHessian(beta, all_X[trace_i], all_X_spikes[trace_i], all_sum_X_spikes[trace_i])

            # Since differentiation is linear: gradient of sum = sum of gradients; Hessian of sum = sum of Hessians
            L += L_tmp
            G += G_tmp
            H += H_tmp

        # Update the parameters (i.e., perform one Newton step), taking into account multiple traces
        beta = beta - learning_rate * np.dot(inv(H), G)

        if i > 0 and abs((L - old_L) / old_L) < stopCond:   # If converged
            print "\nConverged after %d iterations!\n" % (i + 1)
            break

        old_L = L

        # Compute the normalized likelihood (for printing).
        # The likelihood is normalized with respect to a Poisson process; units are bit/spks.
        L_norm = (L - logL_poisson) / np.log(2) / N_spikes_tot
        reprint(L_norm)

        if math.isnan(L_norm):
            print "Problem during gradient ascent. Optimization stopped."
            break

        if i == maxIter - 1:                                # If too many iterations
            print "\nNot converged after %d iterations.\n" % (maxIter)

    return beta
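# The Newton ascent above relies on computeLikelihoodGradientHessian and the
# precomputed X matrices. Below is a minimal, self-contained sketch of the same
# update on a generic point-process model lambda = exp(X beta), for reference
# only: the names (X, spike_idx, dt) and the discretization are illustrative
# assumptions, not the toolbox API.
import numpy as np
from numpy.linalg import inv

def newton_ascent_sketch(X, spike_idx, dt, beta0, max_iter=1000, stop_cond=1e-6):
    # X: (T, d) design matrix; spike_idx: indices of time bins containing a spike;
    # dt: bin size. Log-likelihood (up to a beta-independent constant):
    #   L(beta) = sum_spikes x_s.beta - dt * sum_t exp(x_t.beta)
    beta = beta0.copy()
    old_L = None
    for i in range(max_iter):
        lam = np.exp(np.dot(X, beta))                           # conditional intensity
        L = np.sum(np.dot(X[spike_idx], beta)) - dt * np.sum(lam)
        G = np.sum(X[spike_idx], axis=0) - dt * np.dot(lam, X)  # gradient of L
        H = -dt * np.dot(X.T * lam, X)                          # Hessian of L (negative definite)
        learning_rate = 0.1 if i <= 10 else 1.0                 # damped first steps, as above
        beta = beta - learning_rate * np.dot(inv(H), G)         # Newton step (ascent, since H < 0)
        if old_L is not None and abs((L - old_L) / old_L) < stop_cond:
            break
        old_L = L
    return beta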
def fitSubthresholdDynamics(self, experiment, DT_beforeSpike=5.0):

    """
    Implement Step 2 of the fitting procedure introduced in Pozzorini et al. PLOS Comp. Biol. 2015.
    The voltage reset is estimated by computing the spike-triggered average of the voltage.

    experiment: Experiment object on which the model is fitted.
    DT_beforeSpike: in ms, data right before spikes are excluded from the fit. This parameter can be used to define that time interval.
    """

    print "\nGIF MODEL - Fit subthreshold dynamics..."

    # Expand eta in basis functions
    self.dt = experiment.dt

    # Build X matrix and Y vector to perform linear regression (use all traces in the training set).
    # For each training set trace, an X matrix and a Y vector are built.
    ####################################################################################################
    X = []
    Y = []

    cnt = 0

    for tr in experiment.trainingset_traces:

        if tr.useTrace:

            cnt += 1
            reprint("Compute X matrix for repetition %d" % (cnt))

            # Compute the X matrix and the Y=\dot{V}_data vector used to perform the multilinear regression (see Eqs. 17-18 in Pozzorini et al. PLOS Comp. Biol. 2015)
            (X_tmp, Y_tmp) = self.fitSubthresholdDynamics_Build_Xmatrix_Yvector(tr, DT_beforeSpike=DT_beforeSpike)

            X.append(X_tmp)
            Y.append(Y_tmp)

    # Concatenate matrices associated with different traces to perform a single multilinear regression
    ####################################################################################################
    if cnt == 1:
        X = X[0]
        Y = Y[0]

    elif cnt > 1:
        X = np.concatenate(X, axis=0)
        Y = np.concatenate(Y, axis=0)

    else:
        print "\nError, at least one training set trace should be selected to perform the fit."

    # Perform the linear regression defined in Eq. 17 of Pozzorini et al. PLOS Comp. Biol. 2015
    ####################################################################################################
    print "\nPerform linear regression..."
    XTX = np.dot(np.transpose(X), X)
    XTX_inv = inv(XTX)
    XTY = np.dot(np.transpose(X), Y)
    b = np.dot(XTX_inv, XTY)
    b = b.flatten()

    # Extract explicit model parameters from the regression result b
    ####################################################################################################
    self.C = 1. / b[1]
    self.gl = -b[0] * self.C
    self.El = b[2] * self.C / self.gl
    self.eta.setFilter_Coefficients(-b[3:] * self.C)

    self.printParameters()

    # Compute the percentage of variance explained on dV/dt
    ####################################################################################################
    var_explained_dV = 1.0 - np.mean((Y - np.dot(X, b))**2) / np.var(Y)
    print "Percentage of variance explained (on dV/dt): %0.2f" % (var_explained_dV * 100.0)

    # Compute the percentage of variance explained on V (see Eq. 26 in Pozzorini et al. PLOS Comp. Biol. 2015)
    ####################################################################################################
    SSE = 0     # sum of squared errors
    VAR = 0     # variance of the data

    for tr in experiment.trainingset_traces:

        if tr.useTrace:

            # Simulate subthreshold dynamics
            (time, V_est, eta_sum_est) = self.simulateDeterministic_forceSpikes(tr.I, tr.V[0], tr.getSpikeTimes())

            indices_tmp = tr.getROI_FarFromSpikes(0.0, self.Tref)

            SSE += sum((V_est[indices_tmp] - tr.V[indices_tmp])**2)
            VAR += len(indices_tmp) * np.var(tr.V[indices_tmp])

    var_explained_V = 1.0 - SSE / VAR

    print "Percentage of variance explained (on V): %0.2f" % (var_explained_V * 100.0)
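# The parameter extraction above inverts the linear model fitted by the
# regression. Writing the GIF subthreshold equation
#   C dV/dt = -gl (V - El) - eta(t) + I(t)
# as  dV/dt = b[0] V + b[1] I + b[2] + (eta basis terms)  identifies
#   b[0] = -gl/C,   b[1] = 1/C,   b[2] = gl*El/C,   b[3:] = -eta_coeffs/C,
# which inverts to the assignments used above (C = 1/b[1], gl = -b[0]*C,
# El = b[2]*C/gl, eta = -b[3:]*C). A quick round-trip check with hypothetical
# values (not from the toolbox):
import numpy as np

C_true, gl_true, El_true = 300.0, 30.0, -70.0    # pF, nS, mV (illustrative only)
b0, b1, b2 = -gl_true / C_true, 1.0 / C_true, gl_true * El_true / C_true

C_est = 1.0 / b1
gl_est = -b0 * C_est
El_est = b2 * C_est / gl_est
assert np.allclose([C_est, gl_est, El_est], [C_true, gl_true, El_true])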
def fitSubthresholdDynamics(self, experiment, Ek_all, DT_beforeSpike=5.0, do_plot=False):

    print "\ngGIF MODEL - Fit subthreshold dynamics..."

    var_explained_dV_all = []
    b_all = []

    for Ek in Ek_all:

        print "\nTest Ek = %0.2f mV..." % (Ek)

        # Expand eta in basis functions
        self.dt = experiment.dt
        self.eta.computeBins()

        # Build X matrix and Y vector to perform linear regression (use all traces in the training set)
        X = []
        Y = []

        cnt = 0

        for tr in experiment.trainingset_traces:

            if tr.useTrace:

                cnt += 1
                reprint("Compute X matrix for repetition %d" % (cnt))

                (X_tmp, Y_tmp) = self.fitSubthresholdDynamics_Build_Xmatrix_Yvector(tr, Ek, DT_beforeSpike=DT_beforeSpike)

                X.append(X_tmp)
                Y.append(Y_tmp)

        # Concatenate matrices associated with different traces to perform a single multilinear regression
        if cnt == 1:
            X = X[0]
            Y = Y[0]

        elif cnt > 1:
            X = np.concatenate(X, axis=0)
            Y = np.concatenate(Y, axis=0)

        else:
            print "\nError, at least one training set trace should be selected to perform the fit."

        # Linear regression
        print "\nPerform linear regression..."
        XTX = np.dot(np.transpose(X), X)
        XTX_inv = inv(XTX)
        XTY = np.dot(np.transpose(X), Y)
        b = np.dot(XTX_inv, XTY)
        b = b.flatten()

        # Compute the percentage of variance explained on dV/dt
        var_explained_dV = 1.0 - np.mean((Y - np.dot(X, b))**2) / np.var(Y)
        print "Done! Percentage of variance explained (on dV/dt): %0.2f" % (var_explained_dV * 100.0)

        # Save results
        var_explained_dV_all.append(var_explained_dV)
        b_all.append(b)

    # Select the best Ek
    self.Ek_all = Ek_all
    self.variance_explained_all = var_explained_dV_all

    ind_opt = np.argmax(var_explained_dV_all)

    b = b_all[ind_opt]
    Ek_opt = Ek_all[ind_opt]
    var_explained_dV = var_explained_dV_all[ind_opt]

    # Update and print model parameters
    self.C = 1. / b[1]
    self.gl = -b[0] * self.C
    self.El = b[2] * self.C / self.gl
    self.Ek = Ek_opt
    self.eta.setFilter_Coefficients(-b[3:] * self.C)

    self.printParameters()

    # Compute the percentage of variance explained on V
    SSE = 0     # sum of squared errors
    VAR = 0     # variance of the data

    for tr in experiment.trainingset_traces:

        if tr.useTrace:

            # Simulate subthreshold dynamics
            (time, V_est, eta_sum_est) = self.simulateDeterministic_forceSpikes(tr.I, tr.V[0], tr.getSpikeTimes())

            indices_tmp = tr.getROI_FarFromSpikes(0.0, self.Tref)

            SSE += sum((V_est[indices_tmp] - tr.V[indices_tmp])**2)
            VAR += len(indices_tmp) * np.var(tr.V[indices_tmp])

    var_explained_V = 1.0 - SSE / VAR

    print "Percentage of variance explained (on V): %0.2f" % (var_explained_V * 100.0)
    print "Percentage of variance explained (on dV/dt): %0.2f" % (var_explained_dV * 100.0)

    if do_plot:
        plt.figure(figsize=(8, 8), facecolor='white')
        plt.plot(self.Ek_all, self.variance_explained_all, '.-', color='black')
        plt.plot([Ek_opt], [var_explained_dV], '.', color='red')
        plt.xlabel('Ek (mV)')
        plt.ylabel('Pct. variance explained on dV/dt (-)')
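# Usage sketch for the grid search above (grid values are hypothetical): the
# reversal potential Ek is not a linear parameter of the regression, so it is
# scanned over a user-supplied grid and the value maximizing the variance
# explained on dV/dt is retained. `model` and `experiment` are assumed to be
# an already-built gGIF model and Experiment object.
#
#   Ek_grid = np.arange(-95.0, -65.0, 2.5)    # mV
#   model.fitSubthresholdDynamics(experiment, Ek_grid, DT_beforeSpike=5.0, do_plot=True)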
def maximizeLikelihood_dynamicThreshold(self, experiment, beta0, theta_tau_all, maxIter=10**3, stopCond=10**-6, do_plot=False):

    beta_all = []
    L_all = []

    for theta_tau in theta_tau_all:

        print "\nTest tau_theta = %0.1f ms... \n" % (theta_tau)

        # Precompute all the matrices used in the gradient ascent
        all_X = []
        all_X_spikes = []
        all_sum_X_spikes = []

        T_tot = 0.0
        N_spikes_tot = 0.0

        traces_nb = 0

        for tr in experiment.trainingset_traces:

            if tr.useTrace:

                traces_nb += 1

                # Simulate subthreshold dynamics
                (time, V_est, eta_sum_est) = self.simulateDeterministic_forceSpikes(tr.I, tr.V[0], tr.getSpikeTimes())

                # Precompute matrices used in the gradient ascent on the log-likelihood
                (X_tmp, X_spikes_tmp, sum_X_spikes_tmp, N_spikes, T) = self.buildXmatrix_dynamicThreshold(tr, V_est, theta_tau)

                T_tot += T
                N_spikes_tot += N_spikes

                all_X.append(X_tmp)
                all_X_spikes.append(X_spikes_tmp)
                all_sum_X_spikes.append(sum_X_spikes_tmp)

        logL_poisson = N_spikes_tot * (np.log(N_spikes_tot / T_tot) - 1)

        # Perform gradient ascent
        print "Maximize log-likelihood (bit/spks)..."

        beta = beta0
        old_L = 1

        for i in range(maxIter):

            learning_rate = 1.0

            # Be careful in the first iterations (a small learning rate in the first steps makes the fit more stable)
            if i <= 10:
                learning_rate = 0.1

            L = 0
            G = 0
            H = 0

            for trace_i in np.arange(traces_nb):
                (L_tmp, G_tmp, H_tmp) = self.computeLikelihoodGradientHessian(beta, all_X[trace_i], all_X_spikes[trace_i], all_sum_X_spikes[trace_i])
                L += L_tmp
                G += G_tmp
                H += H_tmp

            beta = beta - learning_rate * np.dot(inv(H), G)

            if i > 0 and abs((L - old_L) / old_L) < stopCond:   # If converged
                print "\nConverged after %d iterations!\n" % (i + 1)
                break

            old_L = L

            # Compute the normalized likelihood (for printing).
            # The likelihood is normalized with respect to a Poisson process; units are bit/spks.
            L_norm = (L - logL_poisson) / np.log(2) / N_spikes_tot
            reprint(L_norm)

            if i == maxIter - 1:                                # If too many iterations
                print "\nNot converged after %d iterations.\n" % (maxIter)

        L_all.append(L_norm)
        beta_all.append(beta)

    ind_opt = np.argmax(L_all)

    theta_tau_opt = theta_tau_all[ind_opt]
    beta_opt = beta_all[ind_opt]
    L_norm_opt = L_all[ind_opt]

    print "\nOptimal timescale: %0.2f ms" % (theta_tau_opt)
    print "Log-likelihood: %0.2f bit/spike" % (L_norm_opt)

    self.fit_all_tau_theta = theta_tau_all
    self.fit_all_likelihood = L_all

    if do_plot:
        plt.figure(figsize=(6, 6), facecolor='white')
        plt.plot(theta_tau_all, L_all, '.-', color='black')
        plt.plot([theta_tau_opt], [L_norm_opt], '.', color='red')
        plt.xlabel('Threshold coupling timescale (ms)')
        plt.ylabel('Log-likelihood (bit/spike)')
        plt.show()

    return (beta_opt, theta_tau_opt)
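# Usage sketch (grid values are hypothetical): the threshold-coupling timescale
# theta_tau enters the likelihood non-linearly, so each candidate is fitted by
# Newton ascent and the (beta, theta_tau) pair with the highest normalized
# log-likelihood is returned.
#
#   theta_tau_grid = np.array([1.0, 2.0, 5.0, 10.0, 20.0, 50.0])   # ms
#   (beta_opt, theta_tau_opt) = model.maximizeLikelihood_dynamicThreshold(
#       experiment, beta0, theta_tau_grid, do_plot=True)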
def maximizeLikelihood_dynamicThreshold(self, experiment, ki, Vi, beta0, maxIter=10**3, stopCond=10**-6):

    # Precompute all the matrices used in the gradient ascent
    all_X = []
    all_X_spikes = []
    all_sum_X_spikes = []

    T_tot = 0.0
    N_spikes_tot = 0.0

    traces_nb = 0

    for tr in experiment.trainingset_traces:

        if tr.useTrace:

            traces_nb += 1

            # Simulate subthreshold dynamics
            (time, V_est, eta_sum_est) = self.simulateDeterministic_forceSpikes(tr.I, tr.V[0], tr.getSpikeTimes())

            # Precompute matrices used in the gradient ascent on the log-likelihood
            (X_tmp, X_spikes_tmp, sum_X_spikes_tmp, N_spikes, T) = self.buildXmatrix_dynamicThreshold(tr, V_est, ki, Vi)

            T_tot += T
            N_spikes_tot += N_spikes

            all_X.append(X_tmp)
            all_X_spikes.append(X_spikes_tmp)
            all_sum_X_spikes.append(sum_X_spikes_tmp)

    logL_poisson = N_spikes_tot * (np.log(N_spikes_tot / T_tot) - 1)

    # Perform gradient ascent
    print "Maximize log-likelihood (bit/spks)..."

    beta = beta0
    old_L = 1

    for i in range(maxIter):

        learning_rate = 1.0

        # Be careful in the first iterations (a small learning rate in the first steps makes the fit more stable)
        if i <= 10:
            learning_rate = 0.1

        L = 0
        G = 0
        H = 0

        for trace_i in np.arange(traces_nb):
            (L_tmp, G_tmp, H_tmp) = self.computeLikelihoodGradientHessian(beta, all_X[trace_i], all_X_spikes[trace_i], all_sum_X_spikes[trace_i])
            L += L_tmp
            G += G_tmp
            H += H_tmp

        beta = beta - learning_rate * np.dot(inv(H), G)

        if i > 0 and abs((L - old_L) / old_L) < stopCond:   # If converged
            print "\nConverged after %d iterations!\n" % (i + 1)
            break

        old_L = L

        # Compute the normalized likelihood (for printing).
        # The likelihood is normalized with respect to a Poisson process; units are bit/spks.
        L_norm = (L - logL_poisson) / np.log(2) / N_spikes_tot
        reprint(L_norm)

        if i == maxIter - 1:                                # If too many iterations
            print "\nNot converged after %d iterations.\n" % (maxIter)

    return (beta, L_norm)
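# Unlike the variant above, this version fits a single (ki, Vi) pair and
# returns the normalized likelihood, leaving the grid search over the
# nonlinear parameters to the caller. A minimal sketch (grid values and the
# `model` object are hypothetical):
#
#   best = (None, None, -np.inf)
#   for ki in [1.0, 3.0, 10.0]:
#       for Vi in [-60.0, -50.0, -40.0]:
#           (beta, L_norm) = model.maximizeLikelihood_dynamicThreshold(experiment, ki, Vi, beta0)
#           if L_norm > best[2]:
#               best = (ki, Vi, L_norm)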