def __convertToFloats__(self, signal, annotation, time):
    """
    Convert all string values in the signal and annotation arrays into
    floats; rows that cannot be converted are dropped. One assumption:
    the time array is already in float format.
    """
    floats = pl.ones(len(signal))
    if annotation is None:
        entities = zip(signal)
    else:
        entities = zip(signal, annotation)
    for idx, values in enumerate(entities):
        for value in values:
            try:
                pl.float64(value)  # check if it can be converted to float
            except ValueError:
                floats[idx] = 0  # the value is NOT float-like
                break
    true_floats = pl.nonzero(floats)  # get indexes of non-zero positions
    signal = signal[true_floats].astype(float)
    if annotation is not None:
        annotation = annotation[true_floats].astype(float)
    if time is not None:
        time = time[true_floats]
    return signal, annotation, time
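# A minimal, standalone sketch of the filter-by-nonzero pattern above, using
# hypothetical data and plain numpy in place of the pylab alias: build a 0/1
# keep-mask, then use nonzero() to index only the rows that convert cleanly.
import numpy as np

signal = np.array(["1.5", "2.0", "oops", "3.25"])
keep = np.ones(len(signal))
for idx, value in enumerate(signal):
    try:
        float(value)
    except ValueError:
        keep[idx] = 0               # row cannot be converted to float
rows = np.nonzero(keep)             # indices of the convertible rows
clean = signal[rows].astype(float)  # array([1.5, 2., 3.25])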
def find_gt_ranks(self, out_ranks, ground_truth_keys=None):
    """
    ::
        Return ranks matrix for ground-truth columns only.
    """
    r = out_ranks.argsort()
    lzt_keys, lzt_len = self.get_adb_lists()
    gt_idx = [lzt_keys.index(s) for s in ground_truth_keys]
    ranks = pylab.zeros((len(gt_idx), len(gt_idx)))
    for i in pylab.arange(len(gt_idx)):
        for j in pylab.arange(len(gt_idx)):
            ranks[i][j] = pylab.nonzero(r[i] == gt_idx[j])[0][0]
    return ranks
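# A small sketch of the rank-lookup idiom above, on hypothetical data: argsort
# orders the item indices by score, and nonzero() finds the position (rank) at
# which a given ground-truth index appears in that ordering.
import numpy as np

scores = np.array([0.9, 0.1, 0.4, 0.7])     # one row of a distance matrix
order = scores.argsort()                    # array([1, 2, 3, 0])
gt_index = 3
rank = np.nonzero(order == gt_index)[0][0]  # item 3 sits at position 2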
def obs_lb(value=value, N=N, Xa=Xa, Xb=Xb,
           alpha=alpha, beta=beta, gamma=gamma,
           bounds_func=vars['bounds_func'], delta=delta,
           age_indices=ai, age_weights=aw):
    # calculate study-specific rate function
    shifts = pl.exp(pl.dot(Xa, alpha) + pl.dot(Xb, pl.atleast_1d(beta)))
    exp_gamma = pl.exp(gamma)
    mu_i = [pl.dot(weights, bounds_func(s_i * exp_gamma[ages], ages))
            for s_i, ages, weights in zip(shifts, age_indices, age_weights)]
    # TODO: try vectorizing this loop to increase speed
    rate_param = mu_i * N
    violated_bounds = pl.nonzero(rate_param < value)
    logp = mc.negative_binomial_like(value[violated_bounds],
                                     rate_param[violated_bounds],
                                     delta)
    return logp
def get_pearson_corr(ch1, ch2, images, images_l, fn1, fn2):
    # compute correlation for non-manipulated images
    ch1fp = {}
    ch2fp = {}
    ch1l = {}
    ch2l = {}
    for ch_ind in range(1, 97):
        ch1fp[ch_ind] = []
        ch2fp[ch_ind] = []
        ch1l[ch_ind] = []
        ch2l[ch_ind] = []
        # ch1fp[ch_ind].append(ch1[ch_ind]['bl_mu'])
        # ch2fp[ch_ind].append(ch2[ch_ind]['bl_mu'])
        for imname in images:
            ch1fp[ch_ind].append(ch1[ch_ind][imname]["fr_mu"])  # -ch1[ch_ind]['bl_mu']
            ch2fp[ch_ind].append(ch2[ch_ind][imname]["fr_mu"])  # -ch1[ch_ind]['bl_mu']
        for imname in images_l:
            ch1l[ch_ind].append(ch1[ch_ind][imname]["fr_mu"])
            ch2l[ch_ind].append(ch2[ch_ind][imname]["fr_mu"])
    chrho = []
    for ch_ind in range(1, 97):
        # print ch1fp[ch_ind]
        # print np.array(ch2fp[ch_ind]).flatten()
        chrho.append(pearsonr(np.array(ch1fp[ch_ind]).flatten(),
                              np.array(ch2fp[ch_ind]).flatten())[0])
    # print np.array(chrho)
    # print pl.shape(np.array(chrho))
    fig_corr_a = pl.figure(5)
    s_p_a = fig_corr_a.add_subplot(1, 2, 1)
    s_p_a.imshow(np.reshape(np.array(chrho), (8, 12)), norm=None,
                 vmin=0, vmax=1, cmap=pl.cm.gray, interpolation=None)
    s_p_b = fig_corr_a.add_subplot(1, 2, 2)
    s_p_b.hist(np.array(chrho), bins=np.arange(-1.1, 1.1, 0.1))
    sig_channels = pl.nonzero(np.array(chrho) > 0.85)[0] + 1
    fig_corr_b = pl.figure(6)
    for n_plot in range(len(sig_channels)):
        s_p_temp = fig_corr_b.add_subplot(5, 5, n_plot + 1)
        s_p_temp.plot(np.array(ch1fp[sig_channels[n_plot]]).flatten(),
                      np.array(ch2fp[sig_channels[n_plot]]).flatten(), "k.")
        s_p_temp.plot(np.array(ch1l[sig_channels[n_plot]]).flatten(),
                      np.array(ch2l[sig_channels[n_plot]]).flatten(), "r.")
        s_p_temp.plot(ch1[sig_channels[n_plot]]["bl_mu"],
                      ch2[sig_channels[n_plot]]["bl_mu"], "kx")
        s_p_temp.plot([0, 100], [0, 100], "k-")
    t = fn1.split(".")[0] + "v" + fn2.split(".")[0].split("_")[-1]
    fig_corr_a.savefig("./plots/" + t + "corr_a.pdf")
    fig_corr_b.savefig("./plots/" + t + "corr_b.pdf")
    return sig_channels
def _on_select(self, epress, erelease):
    start = (int(epress.xdata), int(epress.ydata))
    stop = (int(erelease.xdata), int(erelease.ydata))
    self.__rect = start + (stop[0] - start[0], stop[1] - start[1])
    if self.mode == 'peak':
        ax = self.__axes
        data_matrix = ax.axes.get_images()[0].get_array()
        clip_matrix = data_matrix[start[1]:(stop[1] + 1), start[0]:(stop[0] + 1)]
        peak_pos = nonzero(clip_matrix == clip_matrix.max())
        peak_pos = (peak_pos[1][0] + start[0], peak_pos[0][0] + start[1])
        self.__peakpos = peak_pos
        circle = Circle(peak_pos, 4, **self.indicatorProps)
        ax.add_patch(circle)
        self.__canvas.draw()
    self.callback(self.__rect, self.__peakpos)
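# The peak-location step above in isolation, on a hypothetical array: nonzero()
# applied to a boolean equality mask returns (row_indices, col_indices), so the
# first hit gives the peak position; note the (column, row) -> (x, y) swap.
import numpy as np

clip = np.array([[1, 2, 3],
                 [4, 9, 5],
                 [6, 7, 8]])
rows, cols = np.nonzero(clip == clip.max())
peak_xy = (cols[0], rows[0])   # (1, 1): x is the column index, y is the row index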
def _generate_basis2(self):
    """
    Generate a basis of the linear manifold w.r.t. the canonical basis.
    Each basis vector lives in the ambient space, and the number of
    vectors equals the intrinsic dimension.
    """
    # First get the canonical basis vectors that are not needed to span the linear manifold
    nonzero_indexes = pl.nonzero(self.slope)  # this must be of length self.intrinsic_dimension
    assert len(nonzero_indexes) == self.intrinsic_dimension
    # Now pick one of the dimensions for the denominator; pick the biggest for stability
    denominator_index = abs(self.slope).argmax()
    denominator_value = self.slope[denominator_index]
    basis = [
        SparseVector([nonzero_indexes[i], self.ambiant_dimension - 1],
                     [1, -self.slope[nonzero_indexes[i]]],
                     self.ambiant_dimension)
        for i in set_minus(nonzero_indexes, [denominator_index])
    ]
    return basis
def sim():
    global V, Vlin, tao_e, Rar, Rmr
    Rar = pl.arange(Ras[0], Ras[1], Ras[2])
    Rmr = pl.arange(Rms[0], Rms[1], Rms[2])
    ns.mech.setcurrent(Ie * Ies, ns.dt)
    li = len(Rmr)
    lj = len(Rar)
    tao_e = pl.empty((li, lj))
    tao_l = pl.empty((li, lj))
    tao_n = pl.empty((li, lj))
    for i in range(li):
        for j in range(lj):
            # Special conditions
            if Rar[j] < 10.:
                sec.L(15000.)
            else:
                sec.L(7000.)
            if Rmr[i] > 5000.:
                ns.h.tstop = 50.
            else:
                ns.h.tstop = 20.
            sec.Rm(Rmr[i])
            sec.Ra(Rar[j])
            print Rmr[i], Rar[j]
            ns.sim()
            # Obtain the voltage and steady-state voltage, normalize and
            # take logarithmic values
            t = ns.t
            Vinf = sec.nrnV0[-1]
            V = 1 - pl.array(sec.nrnV0)[:-1] / Vinf
            Vlin = pl.log(V)
            print Vinf
            # Estimate the time constant by finding the point
            # at which the voltage reaches the value 1/e
            nz, = pl.nonzero(V > (1 / pl.e))
            # The time where V ~ 1/e is the point right after the last nz
            tao_e[i, j] = t[nz[-1] + 1] - tstart
            print 'tao_e', tao_e[i, j]
            # Define the least-squares data interval and
            # make the pulse starting time zero
            i0 = int(t0 / ns.dt)
            i1 = int(t1 / ns.dt)
            t01 = t[:i1 - i0]
            V01 = V[i0:i1]
            Vlin01 = Vlin[i0:i1]
            # Linear least squares
            A = pl.c_[t01, pl.ones_like(t01)]
            m, c = pl.lstsq(A, Vlin01.copy())[0]
            tao_l[i, j] = -1. / m - tstart
            print 'tao_l', tao_l[i, j], '(', m, c, pl.exp(c), ')'
            # Parametric function: p is the parameter vector and
            # t the independent variable
            fp = lambda p, t: p[0] * pl.exp(p[1] * t)
            #fp = lambda p, t: p[0]*pl.exp(p[1]*t) + p[2]*pl.exp(p[3]*t)
            #fp = lambda p, t: pl.exp(p[0]*t)
            # Error function
            e = lambda p, t, V: (fp(p, t) - V)
            # Initial parameter guess
            p0 = [1., -5.]
            #p0 = [1., -5., 1., -1.]
            #p0 = [-5.]
            # Fitting
            p, success = leastsq(e, p0, args=(t01, V01), maxfev=10000)
            tao_n[i, j] = -1. / p[1] - tstart
            print 'tao_n', tao_n[i, j], '(', p, success, ')'
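# An isolated sketch of the 1/e time-constant estimate used above, on a
# hypothetical synthetic trace: nonzero() returns the samples still above 1/e,
# and the estimate is read off right after the last such sample.
import numpy as np

dt = 0.1
t = np.arange(0, 10, dt)
V = np.exp(-t / 2.0)            # synthetic decay with tau = 2
nz, = np.nonzero(V > 1 / np.e)  # samples where V has not yet fallen to 1/e
tau_e = t[nz[-1] + 1]           # ~2.0 for this trace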
V_dend = np.diag(np.dot(PSP_mat, w))
for k in range(N):
    V_som[k] = (1.0 - dt / tau) * V_som[k] + g_d * (V_dend[k] - 1 * V_som[k]) + np.dot(-w_inh[k, :], f)
V_dend_list[:, i] = V_dend
for k in range(N):
    f[k] = g(V_som[k])  # * max_rate
f_list[:, i] = f  # / max_rate

chunk1_start = np.array(chunk_start[0])
chunk2_start = np.array(chunk_start[1])
chunk3_start = np.array(chunk_start[2])

tspk, nspk = pl.nonzero(id == 1)

###################
##
## Plotting
##
###################

max1 = np.zeros(N)
min1 = np.zeros(N)
for i in range(N):
    max1[i] = np.max(f_list[i, 0:sample_len])
    min1[i] = np.min(f_list[i, 0:sample_len])

avg_norm1 = np.zeros((N, sample_len))
for i in range(N):
def on_click(self, event):
    if not event.inaxes:
        self.xy = []
        return
    self.x, self.y = int(event.xdata), int(event.ydata)
    self.key = event.key
    self.xx.append([self.x])
    self.yy.append([self.y])
    self.xy.append([self.y, self.x])
    self.lc.set_data(self.xx, self.yy)
    if self.key == 'm':
        print 'masking'
        self.xx[-1] = self.xx[0]
        self.yy[-1] = self.yy[0]
        self.xy[-1] = self.xy[0]
        ind = p.nonzero(points_inside_poly(self.points, self.xy))
        self.mymask = self.mymask.reshape(self.lx*self.ly, 1)
        self.mymask[ind] = 1
        self.mymask = self.mymask.reshape(self.lx, self.ly)
        datamasked = masked_array(self.data, self.mymask+self.automask+self.anisotropic_mask)
        self.im.set_data(datamasked)
        self.xx = []
        self.yy = []
        self.xy = []
        self.lc.set_data(self.xx, self.yy)
        self.lm.set_data(self.xx, self.yy)
        # self.im.set_clim(vmax=(2*self.data.mean()))
        self.im.autoscale()
        p.draw()
        self.x = 0
        self.y = 0
    if self.key == 'u':
        print 'unmasking'
        self.xx[-1] = self.xx[0]
        self.yy[-1] = self.yy[0]
        self.xy[-1] = self.xy[0]
        ind = p.nonzero(points_inside_poly(self.points, self.xy))
        self.mymask = self.mymask.reshape(self.lx*self.ly, 1)
        self.mymask[ind] = 0
        self.mymask = self.mymask.reshape(self.lx, self.ly)
        datanew = masked_array(self.data, self.mymask+self.automask+self.anisotropic_mask)
        self.im.set_data(datanew)
        self.xx = []
        self.yy = []
        self.xy = []
        self.lc.set_data(self.xx, self.yy)
        self.lm.set_data(self.xx, self.yy)
        # self.im.set_clim(vmax=(2*self.data.mean()))
        self.im.autoscale()
        p.draw()
        self.x = 0
        self.y = 0
    if self.key == 'r':
        print 'unmasking all'
        self.mymask = 0*self.mymask
        datanew = masked_array(self.data, self.mymask+self.automask+self.anisotropic_mask)
        self.im.set_data(datanew)
        self.xx = []
        self.yy = []
        self.xy = []
        self.lc.set_data(self.xx, self.yy)
        self.lm.set_data(self.xx, self.yy)
        # self.im.set_clim(vmax=(2*self.data.mean()))
        self.im.autoscale()
        p.draw()
        self.x = 0
        self.y = 0
    if self.key == 'k':
        print 'save and exit'
        self.save_mask()
        print 'Mask saved in file:', self.mask_file
        # mask_f=EdfFile.EdfFile(self.mask_file)
        # mask_f.WriteImage({},self.mymask,0)
        # mask_f.WriteImage({},self.automask,1)
        # mask_f.WriteImage({},self.anisotropic_mask,2)
        # del(mask_f)
        p.close()
        return self.mymask+self.automask
    if self.key == 'q':
        print 'exit without saving'
        p.close()
        return self.old_mymask+self.old_automask
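# A small sketch of the flatten-mask-reshape idiom above, with a hypothetical
# 4x4 mask: a per-pixel "inside the polygon" boolean array is turned into flat
# indices with nonzero(), those pixels are set, and the mask is reshaped back.
import numpy as np

lx = ly = 4
mask = np.zeros((lx, ly))
inside = np.zeros(lx * ly, dtype=bool)
inside[[5, 6, 9, 10]] = True      # pretend these pixels fall inside the polygon
mask = mask.reshape(lx * ly, 1)
mask[np.nonzero(inside)] = 1
mask = mask.reshape(lx, ly)       # central 2x2 block is now masked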
# Program by Franco Benassi
# Project #3, Graphical Interfaces 2020
# Exercise 1
from pylab import e, figure, meshgrid, linspace, nonzero, title, show, sqrt, sin, cos
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as pt
import numpy as np

# 1A
x = linspace(-6, 4, 100)
y = linspace(-5, 4, 100)
u, v = meshgrid(x, y)
figura = figure()
z = sqrt(4 - (u**2) + (v**2))
z[nonzero(z < 0)] = 0
# specify that we are plotting in 3D
ax = figura.gca(projection='3d')
ax.plot_surface(u, v, z, color='blue')
title('Figura A')
show()

# 1B
figura = figure()
ax = Axes3D(figura)
x = linspace(-8, 4, 100)
y = linspace(-8, 4, 100)
x, y = np.meshgrid(x, y)
def on_click(self, event):
    if not event.inaxes:
        self.xy = []
        return
    self.x, self.y = int(event.xdata), int(event.ydata)
    self.key = event.key
    self.xx.append([self.x])
    self.yy.append([self.y])
    self.xy.append([self.y, self.x])
    self.lc.set_data(self.xx, self.yy)
    if self.key == 'm':
        print 'masking'
        self.xx[-1] = self.xx[0]
        self.yy[-1] = self.yy[0]
        self.xy[-1] = self.xy[0]
        previously_masked = self.mymask.sum()
        # ind=p.nonzero(points_inside_poly(self.points,self.xy))
        verts = []
        codes = []
        # print self.xy  # these are the masked vertices
        for xy in self.xy:
            verts.append(xy)
            codes.append(Path.LINETO)
        codes[0] = Path.MOVETO
        codes[-1] = Path.CLOSEPOLY  # need extra empty element?
        masked_path = Path(verts, codes)
        self.mymask = self.mymask.reshape(self.lx*self.ly, 1)
        inds = []
        # icnt = 0
        # print self.points  # these are the whole detector array
        for point in self.points:
            # ind = p.nonzero(masked_path.contains_point(point))
            inds.append(masked_path.contains_point(point))
            # icnt += 1
        self.mymask[p.nonzero(inds)] = 1
        self.mymask = self.mymask.reshape(self.lx, self.ly)
        # print icnt
        print "masked out %d pixels (%d already masked)" % (self.mymask.sum()-previously_masked, n.sum(inds)+previously_masked-self.mymask.sum())
        datamasked = masked_array(self.data, self.mymask+self.automask+self.anisotropic_mask)
        self.im.set_data(datamasked)
        self.xx = []
        self.yy = []
        self.xy = []
        self.lc.set_data(self.xx, self.yy)
        self.lm.set_data(self.xx, self.yy)
        # self.im.set_clim(vmax=(2*self.data.mean()))
        self.im.autoscale()
        p.draw()
        self.x = 0
        self.y = 0
    if self.key == 'u':
        print 'unmasking'
        self.xx[-1] = self.xx[0]
        self.yy[-1] = self.yy[0]
        self.xy[-1] = self.xy[0]
        previously_masked = self.mymask.sum()
        # ind=p.nonzero(points_inside_poly(self.points,self.xy))
        verts = []
        codes = []
        # print self.xy  # these are the masked vertices
        for xy in self.xy:
            verts.append(xy)
            codes.append(Path.LINETO)
        codes[0] = Path.MOVETO
        codes[-1] = Path.CLOSEPOLY  # need extra empty element?
        masked_path = Path(verts, codes)
        self.mymask = self.mymask.reshape(self.lx*self.ly, 1)
        inds = []
        # print self.points  # these are the whole detector array
        for point in self.points:
            inds.append(masked_path.contains_point(point))
        self.mymask[p.nonzero(inds)] = 0
        self.mymask = self.mymask.reshape(self.lx, self.ly)
        print "unmasked %d pixels (%d already unmasked)" % (previously_masked-self.mymask.sum(), n.sum(inds)-previously_masked+self.mymask.sum())
        datanew = masked_array(self.data, self.mymask+self.automask+self.anisotropic_mask)
        self.im.set_data(datanew)
        self.xx = []
        self.yy = []
        self.xy = []
        self.lc.set_data(self.xx, self.yy)
        self.lm.set_data(self.xx, self.yy)
        # self.im.set_clim(vmax=(2*self.data.mean()))
        self.im.autoscale()
        p.draw()
        self.x = 0
        self.y = 0
    if self.key == 'r':
        print 'unmasking all'
        self.mymask = 0*self.mymask
        datanew = masked_array(self.data, self.mymask+self.automask+self.anisotropic_mask)
        self.im.set_data(datanew)
        self.xx = []
        self.yy = []
        self.xy = []
        self.lc.set_data(self.xx, self.yy)
        self.lm.set_data(self.xx, self.yy)
        # self.im.set_clim(vmax=(2*self.data.mean()))
        self.im.autoscale()
        p.draw()
        self.x = 0
        self.y = 0
    if self.key == 'k':
        print 'save and exit'
        self.save_mask()
        print 'Mask saved in file:', self.mask_file
        # mask_f=EdfFile.EdfFile(self.mask_file)
        # mask_f.WriteImage({},self.mymask,0)
        # mask_f.WriteImage({},self.automask,1)
        # mask_f.WriteImage({},self.anisotropic_mask,2)
        # del(mask_f)
        p.close()
        return self.mymask+self.automask
    if self.key == 'q':
        print 'exit without saving'
        p.close()
        return self.old_mymask+self.old_automask
# resample zwift power onto edge
CrossPlotFig = plt.figure()
sc = plt.scatter(edge_power_x, zwift_power_r_r, s=5, c=base_t,
                 cmap=plt.get_cmap('brg'), edgecolors='face')
plt.colorbar(orientation='horizontal')
plt.title('Infocrank Vs PowerTap P1 Over Time (sec)\n(delay removed)')
plt.xlabel('PowerTap P1 (w)')
plt.ylabel('Infocrank via Zwift (w)')
plt.grid(b=True, which='major', axis='both')
a = plt.axis()
plt.axis([0, a[1], 0, a[3]])
plt.show()

#
# linear regression
#
from pylab import polyfit, average, ones, where, logical_and, nonzero
ii = nonzero(logical_and(base_t >= 0,
                         logical_and(edge_power_x > 50,
                                     edge_power_x < 1000)))
x = edge_power_x[ii]
y = zwift_power_r_r[ii]
coef = polyfit(x, y, deg=1)
slope = coef[0]
offset = coef[1]
print 'slope = %5.3f, offset = %i' % (slope, offset)
y_fit = slope * x + offset
color = average(edge_t[ii]) * ones(len(edge_t[ii]))
plt.plot(x, y_fit, 'k-')
plt.show()
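# A compact sketch of the select-then-fit pattern above, on hypothetical power
# arrays: logical_and builds the boolean condition, nonzero() turns it into
# indices, and polyfit runs the linear regression on the kept samples only.
import numpy as np

power_a = np.array([40.0, 120.0, 250.0, 900.0, 1500.0])
power_b = np.array([42.0, 118.0, 255.0, 910.0, 1600.0])
ii = np.nonzero(np.logical_and(power_a > 50, power_a < 1000))
slope, offset = np.polyfit(power_a[ii], power_b[ii], deg=1)
fit = slope * power_a[ii] + offset    # fitted line over the selected range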
I_syn = (1.0 - dt / tau_syn) * I_syn
I_syn[spike_mat[:, i]] += 1 / tau / tau_syn
PSP = (1.0 - dt / tau) * PSP + I_syn
PSP_unit = PSP * 25
for l in range(N):
    synaptic_input_matrix[l * n_in:(l + 1) * n_in, i] = PSP_unit * w[:, l]
V_dend = np.dot(w.T, PSP_unit)
V_som = (1.0 - dt * g_L) * V_som + g_d * (V_dend - V_som) + np.dot(-w_inh, f)
for k in range(N):
    f[k] = g(V_som[k])
f_list[:, i] = f

nspk_random, tspk_random = pl.nonzero(random_mat[0:200, :] == 1)
nspk1, tspk1 = pl.nonzero(pat1_mat[0:200, :] == 1)
nspk2, tspk2 = pl.nonzero(pat2_mat[0:200, :] == 1)
nspk3, tspk3 = pl.nonzero(pat3_mat[0:200, :] == 1)

fig = plt.figure(figsize=(7, 2))
ax = fig.add_subplot(111)
for i in pat1_start:
    plt.vlines([i], 0, n_in, "dodgerblue", linestyles='dashed', lw=1)
    if i + width < plot_len:
        plt.vlines([i + width], 0, n_in, "dodgerblue", linestyles='dashed', lw=1)
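# Minimal sketch of the spike-raster extraction above, on a hypothetical 0/1
# spike matrix (neurons x time bins): nonzero() returns matching (row, column)
# index arrays, i.e. neuron ids and the time bins in which they fired.
import numpy as np

spikes = np.zeros((3, 5), dtype=int)
spikes[0, 2] = 1
spikes[2, 4] = 1
neuron_ids, spike_times = np.nonzero(spikes == 1)  # ([0, 2], [2, 4])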