def plotSIFGI():
    a = eqtools.EqdskReader(gfile='/afs/ipp-garching.mpg.de/home/i/ianf/codes/python/general/g33669.3000')
    a.remapLCFS()
    b = eqtools.AUGDDData(33669)
    tok = TRIPPy.Tokamak(b)
    cont = (pow(scipy.linspace(0, 1, 11), 2)*(a.getFluxLCFS() - a.getFluxAxis())
            + a.getFluxAxis())
    print(a.getFluxAxis())
    print(a.getFluxLCFS())
    print(cont)
    temp = b.getMachineCrossSectionFull()
    plotEq.plot(a, 33669, lims=temp, contours=-1*cont)

    GI = genGRW(tok)
    r = GI.r()[0][-2:]
    z = GI.r()[2][-2:]
    print(r, z)
    plt.plot(r, z, color='#0E525A', linewidth=2.)

    SIF = genSIF(tok)
    r = SIF.r()[0][-2:]
    z = SIF.r()[2][-2:]
    print(r, z)
    plt.plot(r, z, color='#228B22', linewidth=2.)

    plt.subplots_adjust(left=.2, right=.95)
    plt.gca().set_xlim([scipy.nanmin(temp[0].astype(float)),
                        scipy.nanmax(temp[0].astype(float))])
    plt.gca().set_ylim([scipy.nanmin(temp[1].astype(float)),
                        scipy.nanmax(temp[1].astype(float))])
    plt.text(1.4, .9, 'CSXR', fontsize=22, zorder=10)
    plt.text(2.25, .1, 'GI', fontsize=22, zorder=10)
    limy = plt.gca().get_ylim()
    limx = plt.gca().get_xlim()
    plt.gca().text(1.0075*(limx[1] - limx[0]) + limx[0],
                   .97*(limy[1] - limy[0]) + limy[0],
                   str(33669), rotation=90, fontsize=14)
def get_hmf(dat, edat, vecs, nit=5, convergence=0.01):
    """
    dat should have the shape Nobs,Npix
    edat the same thing
    vecs should have the shape (npix, ncomp)
    returns the eigen vectors and the projections vector
    """
    ncomp = vecs.shape[1]
    ndat = len(dat)
    npix = len(dat[0])

    # As = np.matrix(np.zeros((ndat,ncomp)))
    # Gs = np.matrix(vecs)
    As = shared_zeros_matrix(ndat, ncomp)
    Gs = copy_as_shared(vecs)

    data_struct.dat = dat
    data_struct.edat = edat
    data_struct.Gs = Gs
    data_struct.As = As

    for i in range(nit):
        # a step
        deltas1 = mapper(doAstep, range(ndat))
        # g step
        deltas2 = mapper(doGstep, range(npix))
        curconv = scipy.nanmax([scipy.nanmax(deltas1), scipy.nanmax(deltas2)])
        print curconv
        if curconv < convergence:
            break

    # orthogonalize
    Gs, As = orthogonalize(Gs, As)
    return Gs, As
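# A minimal usage sketch for get_hmf (hedged: the shapes and values below are
# illustrative only, and the module-level helpers it relies on -- mapper,
# data_struct, shared_zeros_matrix, copy_as_shared, orthogonalize -- must be
# set up as elsewhere in this module).
import numpy as np
nobs, npix, ncomp = 100, 50, 3
dat = np.random.randn(nobs, npix)            # observations, Nobs x Npix
edat = np.ones((nobs, npix))                 # per-pixel uncertainties
vecs = np.random.randn(npix, ncomp)          # starting eigenvector guesses
Gs, As = get_hmf(dat, edat, vecs, nit=10, convergence=1e-3)
# With the matrix-typed returns used in the A/G steps, the low-rank
# reconstruction of the data is As * Gs.T (Nobs x Npix).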
def plot_map(outdict, e, savename, fig1=None, m=None):
    """
    This function will plot the output data in a scatter plot over a map of
    the satellite path.

    Args:
        outdict (dict[str, obj]): Output dictionary from analyzebeacons.
        e (dict[str, obj]): Output dictionary from ephem_doponly.
        savename (:obj:`str`): Name of the file the image will be saved to.
        fig1 (:obj:`matplotlib figure`): Figure.
        m (:obj:`basemap obj`): Basemap object.
    """
    t = outdict['time']
    slat = e['site_latitude']
    slon = e['site_longitude']
    if fig1 is None:
        fig1 = plt.figure()
    plt.figure(fig1.number)
    latlim = [math.floor(slat - 15.), math.ceil(slat + 15.)]
    lonlim = [math.floor(slon - 15.), math.ceil(slon + 15.)]
    if m is None:
        m = Basemap(lat_0=slat, lon_0=slon, llcrnrlon=lonlim[0],
                    llcrnrlat=latlim[0], urcrnrlon=lonlim[1],
                    urcrnrlat=latlim[1])

    m.drawcoastlines(color="gray")
    m.plot(slon, slat, "rx")
    scat = m.scatter(e["sublon"](t), e["sublat"](t), c=outdict['rTEC'],
                     cmap='viridis', vmin=0,
                     vmax=math.ceil(sp.nanmax(outdict['rTEC'])))
    plt.title('Map of TEC Over Satellite Path')
    cb = plt.colorbar(scat, label='rTEC in TECu')
    fig1.savefig(savename, dpi=300)
    #plt.draw()

    scat = m.scatter(e["sublon"](t), e["sublat"](t), c=outdict['rTEC_amp'],
                     cmap='viridis', vmin=0,
                     vmax=math.ceil(sp.nanmax(outdict['rTEC_amp'])))
    plt.title('Map of TEC_Amp Over Satellite Path')
    cb.set_clim(vmin=0, vmax=math.ceil(sp.nanmax(outdict['rTEC_amp'])))
    cb.draw_all()
    #plt.tight_layout()
    figpath, name = os.path.split(savename)
    savename = os.path.join(figpath, 'amp' + name)
    fig1.savefig(savename, dpi=300)
    plt.close(fig1)
def sky(self):
    if self.skyid is None:
        skymax = scipy.nanmax(self.varspec)
        scimax = scipy.nanmax(self.current)
        scimin = scipy.nanmin(self.current)
        spec = self.varspec * scimax / skymax - abs(scimin)
        self.skyid = len(self.axes.lines)
        pylab.plot(self.wave, spec, color='green')
    else:
        self.axes.lines.remove(self.axes.lines[self.skyid])
        self.skyid = None
    pylab.draw()
def CenteredLagProduct(rawbeams, numtype=sp.complex128, pulse=sp.ones(14),
                       lagtype='centered'):
    """
    This function will create a centered lag product for each range using the
    raw IQ given to it. It will form each lag for each pulse and then
    integrate all of the pulses.
    Inputs:
        rawbeams - This is a NpxNs complex numpy array where Ns is the number
        of samples per pulse and Np is the number of pulses.
        numtype - The type of numbers used to create the data. Default is
        sp.complex128.
        pulse - The pulse shape; its length sets the number of lags that will
        be created. Default is 14 lags.
        lagtype - Can be centered, forward or backward.
    Output:
        acf_cent - This is a NrxNl complex numpy array where Nr is the number
        of range gates and Nl is the number of lags.
    """
    N = len(pulse)
    # It will be assumed the data will be pulses vs range
    rawbeams = rawbeams.transpose()
    (Nr, Np) = rawbeams.shape

    # Make masks for each piece of data
    if lagtype == 'forward':
        arback = sp.zeros(N, dtype=int)
        arfor = sp.arange(N, dtype=int)
    elif lagtype == 'backward':
        arback = sp.arange(N, dtype=int)
        arfor = sp.zeros(N, dtype=int)
    else:
        # arex = sp.arange(0,N/2.0,0.5);
        arback = -sp.floor(sp.arange(0, N / 2.0, 0.5)).astype(int)
        arfor = sp.ceil(sp.arange(0, N / 2.0, 0.5)).astype(int)

    # figure out how much range space will be kept
    ap = sp.nanmax(abs(arback))
    ep = Nr - sp.nanmax(arfor)
    rng_ar_all = sp.arange(ap, ep)
    # wearr = (1./(N-sp.tile((arfor-arback)[:,sp.newaxis],(1,Np)))).astype(numtype)
    #acf_cent = sp.zeros((ep-ap,N))*(1+1j)
    acf_cent = sp.zeros((ep - ap, N), dtype=numtype)
    for irng, curange in enumerate(rng_ar_all):
        rng_ar1 = int(curange) + arback
        rng_ar2 = int(curange) + arfor
        # get all of the acfs across pulses
        acf_tmp = sp.conj(rawbeams[rng_ar1, :]) * rawbeams[rng_ar2, :]  # *wearr
        # sum along the pulses
        acf_ave = sp.sum(acf_tmp, 1)
        acf_cent[irng, :] = acf_ave  # might need to transpose this
    return acf_cent
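# A short sketch of calling CenteredLagProduct on synthetic IQ data; the
# pulse and sample counts here are arbitrary illustration values.
import scipy as sp
n_pulses, n_samples = 100, 64
iq = sp.random.randn(n_pulses, n_samples) + 1j*sp.random.randn(n_pulses, n_samples)
acf = CenteredLagProduct(iq, pulse=sp.ones(14), lagtype='centered')
# acf.shape == (n_samples - 13, 14): one 14-lag ACF estimate per kept range gate.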
def get_hmf_smooth(dat, edat, vecs, nit=5, eps=0.01, convergence=0.01):
    """
    dat should have the shape Nobs,Npix
    edat the same thing
    vecs should have the shape (npix, ncomp)
    returns the eigen vectors and the projections vector
    """
    ncomp = vecs.shape[1]
    ndat = len(dat)
    npix = len(dat[0])

    As = shared_zeros_matrix(ndat, ncomp)
    Gs = copy_as_shared(vecs)
    Gsold = shared_zeros_matrix(Gs.shape[0], Gs.shape[1])

    # arrays used for processing
    data_struct.Gs = Gs
    data_struct.Gsold = Gsold
    data_struct.As = As
    # input data
    data_struct.dat = dat
    data_struct.edat = edat
    # parameters
    data_struct.eps = eps
    data_struct.ncomp = ncomp
    data_struct.npix = npix

    for i in range(nit):
        # a step
        deltas1 = mapper(doAstep, range(ndat))
        data_struct.Gsold, data_struct.Gs = data_struct.Gs, data_struct.Gsold
        # swapping variables, because we are going to update Gs while still
        # using the original Gs from the A step
        # g step
        deltas2 = mapper(doGstepSmooth, range(npix))
        curconv = scipy.nanmax([scipy.nanmax(deltas1), scipy.nanmax(deltas2)])
        print curconv
        if curconv < convergence:
            break

    Gs, As = orthogonalize(Gs, As)
    return Gs, As
def CenteredLagProduct(rawbeams, numtype=sp.complex128, pulse=sp.ones(14),
                       lagtype='centered'):
    """
    This function will create a centered lag product for each range using the
    raw IQ given to it. It will form each lag for each pulse and then
    integrate all of the pulses.
    Inputs:
        rawbeams - This is a NpxNs complex numpy array where Ns is the number
        of samples per pulse and Np is the number of pulses.
        numtype - The type of numbers used to create the data. Default is
        sp.complex128.
        pulse - The pulse shape; its length sets the number of lags that will
        be created. Default is 14 lags.
        lagtype - Can be centered, forward or backward.
    Output:
        acf_cent - This is a NrxNl complex numpy array where Nr is the number
        of range gates and Nl is the number of lags.
    """
    n_pulse = len(pulse)
    # It will be assumed the data will be pulses vs range
    rawbeams = rawbeams.transpose()
    n_range = rawbeams.shape[0]

    # Make masks for each piece of data
    if lagtype == 'forward':
        arback = sp.zeros(n_pulse, dtype=int)
        arfor = sp.arange(n_pulse, dtype=int)
    elif lagtype == 'backward':
        arback = sp.arange(n_pulse, dtype=int)
        arfor = sp.zeros(n_pulse, dtype=int)
    else:
        # arex = sp.arange(0,N/2.0,0.5);
        arback = -sp.floor(sp.arange(0, n_pulse/2.0, 0.5)).astype(int)
        arfor = sp.ceil(sp.arange(0, n_pulse/2.0, 0.5)).astype(int)

    # figure out how much range space will be kept
    a_p = sp.nanmax(abs(arback))
    e_p = n_range - sp.nanmax(arfor)
    rng_ar_all = sp.arange(a_p, e_p)
    # wearr = (1./(N-sp.tile((arfor-arback)[:,sp.newaxis],(1,Np)))).astype(numtype)
    #acf_cent = sp.zeros((ep-ap,N))*(1+1j)
    acf_cent = sp.zeros((e_p-a_p, n_pulse), dtype=numtype)
    for irng, curange in enumerate(rng_ar_all):
        rng_ar1 = int(curange) + arback
        rng_ar2 = int(curange) + arfor
        # get all of the acfs across pulses
        acf_tmp = sp.conj(rawbeams[rng_ar1, :])*rawbeams[rng_ar2, :]  # *wearr
        # sum along the pulses
        acf_ave = sp.sum(acf_tmp, 1)
        acf_cent[irng, :] = acf_ave  # might need to transpose this
    return acf_cent
def print_verbose_message(self):
    """Method to print training statistics if Verbose is TRUE"""

    # Memory usage (does not work in Windows)
    # print('Peak memory usage: %.2f MB' % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / infer_platform() ))

    # Variance explained
    r2 = s.asarray(self.calculate_variance_explained(total=True)).mean(axis=0)
    r2[r2 < 0] = 0.
    print("- Variance explained: " + " ".join(
        ["View %s: %.2f%%" % (m, 100*r2[m]) for m in range(self.dim["M"])]))

    # Sparsity levels of the weights
    W = self.nodes["W"].getExpectation()
    foo = [s.mean(s.absolute(W[m]) < 1e-3) for m in range(self.dim["M"])]
    print("- Fraction of zero weights: " + " ".join(
        ["View %s: %.0f%%" % (m, 100*foo[m]) for m in range(self.dim["M"])]))

    # Correlation between factors
    Z = self.nodes["Z"].getExpectation()
    Z += s.random.normal(s.zeros(Z.shape), 1e-10)
    r = s.absolute(corr(Z.T, Z.T))
    s.fill_diagonal(r, 0)
    print("- Maximum correlation between factors: %.2f" % (s.nanmax(r)))

    # Factor norm
    bar = s.mean(s.square(Z), axis=0)
    print("- Factor norms: " + " ".join(["%.2f" % bar[k] for k in range(Z.shape[1])]))

    # Tau
    tau = self.nodes["Tau"].getExpectation()
    print("- Tau per view (average): " + " ".join(
        ["View %s: %.2f" % (m, tau[m].mean()) for m in range(self.dim["M"])]))

    print("\n")
def max_filter_bord(im, size=3):
    """The function performs a local max filter on a flat image. Border
    pixels are processed.
    Args:
        im: the image to process
        size: the size in pixels of the local square window. Default value is 3.

    Returns:
        out: the filtered image
    """

    ## Get the size of the image
    [nl, nc, d] = im.shape

    ## Get the half-size of the moving window
    s = (size - 1) // 2

    ## Initialization of the output
    out = sp.empty((nl, nc, d), dtype=im.dtype.name)
    # A temporary padded array is created; the NaN border lets nanmax
    # handle the edges.
    temp = sp.empty((nl + 2 * s, nc + 2 * s, d), dtype=im.dtype.name)
    temp[0:s, :, :] = sp.NaN
    temp[:, 0:s, :] = sp.NaN
    temp[-s:, :, :] = sp.NaN
    temp[:, -s:, :] = sp.NaN
    temp[s:s + nl, s:s + nc, :] = im

    ## Apply the max filter
    for i in range(s, nl + s):  # Shift the origin to remove border effect
        for j in range(s, nc + s):
            for k in range(d):
                out[i - s, j - s, k] = sp.nanmax(temp[i - s:i + 1 + s,
                                                      j - s:j + s + 1, k])

    return out.astype(im.dtype.name)
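# Illustrative call on a random three-band image; hedged in that the input
# must have a float dtype, since the border padding above uses NaN.
import scipy as sp
im = sp.random.rand(20, 30, 3).astype('float32')
filtered = max_filter_bord(im, size=3)
# filtered[i, j, k] is the maximum of the 3x3 window around (i, j) in band k.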
def doGstepSmooth(j):
    dat, edat = data_struct.dat, data_struct.edat
    Gsold, Gs, As = data_struct.Gsold, data_struct.Gs, data_struct.As
    npix = data_struct.npix
    ncomp = data_struct.ncomp
    eps = data_struct.eps
    if j > 0 and j < (npix - 1):
        mult = 2
    else:
        mult = 1
    # Covj = np.matrix(np.diag(1. / edat[:, j] ** 2))
    # Aj = As.T * Covj * As + mult * eps * np.identity(ncomp)
    # del Covj
    # rewrite of less performant code
    Aj = As.T * np.matrix((1.0 / edat[:, j] ** 2)[:, None] * np.asarray(As),
                          copy=False)
    Aj[np.arange(ncomp), np.arange(ncomp)] = np.asarray(
        Aj[np.arange(ncomp), np.arange(ncomp)] + (mult * eps) * np.ones(ncomp)
    ).flatten()
    if j > 0 and j < (npix - 1):
        Fj = As.T * np.matrix(dat[:, j] / (edat[:, j]) ** 2, copy=False).T + \
            eps * (Gsold[j - 1, :] + Gsold[j + 1, :]).T
    elif j == 0:
        Fj = As.T * np.matrix(dat[:, j] / (edat[:, j]) ** 2, copy=False).T + \
            eps * Gsold[1, :].T
    elif j == npix - 1:
        Fj = As.T * np.matrix(dat[:, j] / (edat[:, j]) ** 2, copy=False).T + \
            eps * Gsold[npix - 2, :].T
    Gj = scipy.linalg.solve(Aj, Fj, sym_pos=True)
    newGj = Gj.flatten()
    oldGj = Gsold[j, :]
    delta = scipy.nanmax(np.abs((newGj - oldGj) / (np.abs(oldGj).max() + 1e-100)))
    Gs[j, :] = newGj
    return delta
def initTau(self, pa, pb, qa, qb, qE):
    # Method to initialise the precision of the noise
    # Inputs:
    #  pa (float): 'a' parameter of the prior distribution
    #  pb (float): 'b' parameter of the prior distribution
    #  qa (float): initialisation of the 'a' parameter of the variational distribution
    #  qb (float): initialisation of the 'b' parameter of the variational distribution
    #  qE (float): initial expectation of the variational distribution
    tau_list = [None] * self.M
    for m in range(self.M):
        if self.lik[m] == "poisson":
            tmp = 0.25 + 0.17 * s.nanmax(self.data[m], axis=0)
            tau_list[m] = Constant_Node(dim=((self.N, self.D[m])),
                                        value=s.repeat(tmp[None, :], self.N, 0))
        elif self.lik[m] == "bernoulli":
            # seeger
            # tau_list[m] = Constant_Node(dim=((self.N,self.D[m])), value=0.25)
            # Jaakkola
            tau_list[m] = Tau_Jaakkola(dim=((self.N, self.D[m])), value=1.)
        elif self.lik[m] == "binomial":
            print("Not implemented")
            # tmp = 0.25*s.amax(self.data["tot"][m],axis=0)
            # tau_list[m] = Constant_Node(dim=(self.D[m],), value=tmp)
        elif self.lik[m] == "gaussian":
            tau_list[m] = Tau_Node(dim=(self.D[m],), pa=pa[m], pb=pb[m],
                                   qa=qa[m], qb=qb[m], qE=qE[m])
    self.Tau = Multiview_Mixed_Node(self.M, *tau_list)
    self.nodes["Tau"] = self.Tau
def maximum_out_degree_fraction(cover, weights=None):
    '''
    The Out Degree Fraction (ODF) of a node in a cluster is the ratio between
    its number of external (boundary) edges and its internal edges. Maximum
    ODF returns the maximum fraction for the cluster.
    '''
    odf = out_degree_fraction(cover, weights=weights)
    return [nanmax(ratios) for ratios in odf]
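# Hedged usage sketch: assumes `cover` is an igraph VertexCover (a graph plus
# a list of clusters), which is what the companion out_degree_fraction helper
# in this module operates on.
# modf = maximum_out_degree_fraction(cover)                    # one value per cluster
# modf_weighted = maximum_out_degree_fraction(cover, weights='weight')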
def veldist_1d_vrconvolve(plotfilename,phi=_DEFAULTPHI,R=_DEFAULTR,
                          ngrid=201,saveDir='../bar/1dvar/'):
    """
    NAME:
       veldist_1d_vrconvolve
    PURPOSE:
       make a plot showing the influence of the los velocity uncertainties
    INPUT:
       plotfilename - filename for figure
       phi - Galactocentric azimuth
       R - Galactocentric radius
       ngrid - number of grid-points to calculate the los velocity
               distribution on
       saveDir - save pickles here
    OUTPUT:
       Figure in plotfilename
    HISTORY:
       2010-09-11 - Written - Bovy (NYU)
    """
    convolves= [0.02,0.04,0.08] #0, 5, 10, 20 km/s

    vloslinspace= (-.9,.9,ngrid)
    vloss= sc.linspace(*vloslinspace)

    vlosds= []
    thissavefilename= os.path.join(saveDir,'convolve_')+'%.1f.sav' % 0.
    print "Restoring los-velocity distribution at distance uncertainties %.1f" % 0.
    savefile= open(thissavefilename,'r')
    vlosd= pickle.load(savefile)
    savefile.close()
    vlosds.append(vlosd)
    basesavefilename= os.path.join(saveDir,'vrconvolve_')
    for distsig in convolves:
        thissavefilename= os.path.join(saveDir,'convolve_')+'%.1f.sav' % 0.
        print "Restoring los-velocity distribution at distance uncertainties %.1f" % 0.
        savefile= open(thissavefilename,'r')
        vlosd= pickle.load(savefile)
        savefile.close()
        #Create Gaussian
        gauss= sc.exp(-0.5*vloss**2./distsig**2.)
        #gauss= gauss/sc.sum(gauss)/(vloss[1]-vloss[0])
        vlosd= signal.convolve(vlosd,gauss,mode='same')
        vlosd= vlosd/sc.sum(vlosd)/(vloss[1]-vloss[0])
        vlosds.append(vlosd)
    #Plot
    plot.bovy_print()
    plot.bovy_plot(vloss,vlosds[0],'k-',zorder=3,
                   xrange=[vloslinspace[0],vloslinspace[1]],
                   yrange=[0.,sc.nanmax(sc.array(vlosds).flatten())*1.1],
                   xlabel=r'$v_{\mathrm{los}} / v_0$')
    plot.bovy_plot(vloss,vlosds[1],ls='-',color='0.75',
                   overplot=True,zorder=2,lw=2.)
    plot.bovy_plot(vloss,vlosds[2],ls='-',color='0.6',
                   overplot=True,zorder=2,lw=2.)
    plot.bovy_plot(vloss,vlosds[3],ls='-',color='0.45',
                   overplot=True,zorder=2,lw=2.)
    kms= r' \mathrm{km\ s}^{-1}$'
    plot.bovy_text(r'$\mathrm{line-of-sight\ velocity\ uncertainties}$',title=True)
    plot.bovy_text(0.4,.65,r'$\sigma_v = 0\ '+kms+'\n'+
                   r'$\sigma_v = 5\ '+kms+'\n'+
                   r'$\sigma_v = 10\ '+kms+'\n'+
                   r'$\sigma_v = 20\ '+kms)
    plot.bovy_end_print(plotfilename)
def CalcStats(samples):
    return dict(zip(['mean', 'max', 'min', 'sdeviation'],
                    [float(sp.mean(samples)),
                     float(sp.nanmax(samples)),
                     float(sp.nanmin(samples)),
                     float(sp.std(samples))]))
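# Example: note the mixed NaN handling -- 'max' and 'min' use the nan-aware
# reductions and ignore the NaN, while 'mean' and 'sdeviation' do not and
# come back as NaN.
import scipy as sp
stats = CalcStats(sp.array([1.0, 2.0, sp.nan, 4.0]))
# {'mean': nan, 'max': 4.0, 'min': 1.0, 'sdeviation': nan}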
def defaultValueStoppingCriterion(nIter, currentVArray, newVArray,
                                  criterion=0.001):
    diff = newVArray - currentVArray
    pct = diff / currentVArray
    a = abs(pct)
    # when we allow zero utility, sometimes the pct will have NaNs
    maxdiff = scipy.nanmax(a)
    if scipy.isnan(maxdiff):
        assert False
    return ((maxdiff < criterion), maxdiff)
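# Sketch of how this criterion plugs into a value-iteration loop; the update
# rule below is a hypothetical stand-in for a real Bellman backup, not part
# of the original code.
import scipy
V = scipy.ones(10)
for nIter in range(100):
    newV = 0.9*V + 0.1                      # hypothetical backup step
    converged, maxdiff = defaultValueStoppingCriterion(nIter, V, newV)
    V = newV
    if converged:
        break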
def to_discrete_dataset(self, dfactor=36, dataset=None):
    # get maximum and minimum values
    print "getting feature summary"
    f_summary = self.get_feature_summary()

    print "min/max val"
    f_min_max = {}
    for name in f_summary.keys():
        #if(nan in f_summary[name]):
        #    print "f_summary[name]", fsummary[name]
        #try:
        #    f_summary[name].remove(nan)
        #except:
        #    print "not removed"
        #print f_summary, name
        #print list(f_summary[name])
        min_val = nanmin(list(f_summary[name]))
        max_val = nanmax(list(f_summary[name]))
        #if(isnan(min_val) or isnan(max_val)):
        #    print "name:", name, min_val, max_val
        #else:
        #    print "name:", name, min_val, max_val
        #    print "nan in summary:", nan in f_summary[name]
        #    print "set:", f_summary[name]
        #    print "dataset.py: error neither should be nan"
        #    exit(0)
        f_min_max[name] = [min_val, max_val]
        #print name, [min_val, max_val]
        #raw_input()

    # print "computed keys"
    # for key in f_min_max.keys():
    #     if "3d" in key and "w_pick" in key:
    #         print key, f_min_max[key]

    # convert other dataset if applicable
    if dataset is None:
        observations = self.observations
    else:
        observations = dataset

    obs_discrete = []
    print "converting observations"
    for o in observations:
        dobs = o.to_discrete_observation(f_min_max, dfactor=dfactor)
        dobs.annotation = o.annotation
        obs_discrete.append(dobs)

    print "constructing dataset"
    return DiscreteDataset(obs_discrete, dfactor, f_min_max=f_min_max,
                           feature_extractor_cls=self.feature_extractor_cls)
def compare_arrays(a, b, inc_time=False):
    diffAbs = sp.absolute(sp.subtract(a, b))
    diffMax = sp.nanmax(diffAbs)
    # we have to multiply by two as the Python epsilon is not correct
    diffEps = (diffMax / sys.float_info.epsilon) * 2
    if inc_time:
        return sp.column_stack((a[:, 0], diffAbs)), diffMax, diffEps
    else:
        return diffAbs, diffMax, diffEps
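# Quick sketch: compare two nearly identical arrays and express the worst
# deviation in units of machine epsilon.
import scipy as sp
a = sp.linspace(0., 1., 5)
b = a + 1e-12
diffAbs, diffMax, diffEps = compare_arrays(a, b)
# diffMax is ~1e-12; diffEps is that deviation over epsilon, doubled.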
def initTau(self, pa=1e-3, pb=1e-3, qa=1., qb=1., qE=None):
    """Method to initialise the precision of the noise

    PARAMETERS
    ----------
    pa: float
        'a' parameter of the prior distribution
    pb: float
        'b' parameter of the prior distribution
    qa: float
        initialisation of the 'a' parameter of the variational distribution
    qb: float
        initialisation of the 'b' parameter of the variational distribution
    qE: float
        initial expectation of the variational distribution
    """
    tau_list = [None] * self.M
    for m in range(self.M):

        # Poisson noise model for count data
        if self.lik[m] == "poisson":
            tmp = 0.25 + 0.17 * s.nanmax(self.data[m], axis=0)
            tmp = s.repeat(tmp[None, :], self.N, axis=0)
            tau_list[m] = Tau_Seeger(dim=(self.N, self.D[m]), value=tmp)

        # Bernoulli noise model for binary data
        elif self.lik[m] == "bernoulli":
            # tau_list[m] = Constant_Node(dim=(self.D[m],), value=s.ones(self.D[m])*0.25)
            # tau_list[m] = Tau_Jaakkola(dim=(self.D[m],), value=0.25)
            tau_list[m] = Tau_Jaakkola(dim=((self.N, self.D[m])), value=1.)

        elif self.lik[m] == "zero_inflated":
            # contains parameters to initialise both the normal and the Jaakkola tau
            tau_list[m] = Zero_Inflated_Tau_Jaakkola(dim=((self.G, self.D[m])),
                                                     value=1., pa=pa, pb=pb,
                                                     qa=qa, qb=qb,
                                                     groups=self.groups_ix, qE=qE)

        # Gaussian noise model for continuous data
        elif self.lik[m] == "gaussian":
            tau_list[m] = TauD_Node(dim=(self.G, self.D[m]), pa=pa, pb=pb,
                                    qa=qa, qb=qb, groups=self.groups_ix, qE=qE)

    self.nodes["Tau"] = Multiview_Mixed_Node(self.M, *tau_list)
def get_cb_ticks(values):
    min_tick = sp.nanmin(values)
    max_tick = sp.nanmax(values)
    med_tick = min_tick + (max_tick - min_tick) / 2.0
    if max_tick > 1.0:
        min_tick = sp.ceil(min_tick)
        max_tick = sp.floor(max_tick)
        med_tick = sp.around(med_tick)
    else:
        min_tick = sp.ceil(min_tick * 100.0) / 100.0
        max_tick = sp.floor(max_tick * 100.0) / 100.0
        med_tick = sp.around(med_tick, 2)
    return [min_tick, med_tick, max_tick]
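# Example: three ticks (min / mid / max) for a colorbar over a small value
# range; values at or below 1.0 are rounded to two decimals by the function.
import scipy as sp
ticks = get_cb_ticks(sp.array([0.013, 0.5, 0.871]))
# -> [0.02, 0.44, 0.87]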
def CenteredLagProduct(rawbeams, numtype=sp.complex128, pulse=sp.ones(14)):
    """
    This function will create a centered lag product for each range using the
    raw IQ given to it. It will form each lag for each pulse and then
    integrate all of the pulses.
    Inputs:
        rawbeams - This is a NpxNs complex numpy array where Ns is the number
        of samples per pulse and Np is the number of pulses.
        numtype - The type of numbers used to create the data. Default is
        sp.complex128.
        pulse - The pulse shape; its length sets the number of lags that will
        be created. Default is 14 lags.
    Output:
        acf_cent - This is a NrxNl complex numpy array where Nr is the number
        of range gates and Nl is the number of lags.
    """
    N = len(pulse)
    # It will be assumed the data will be pulses vs range
    rawbeams = rawbeams.transpose()
    (Nr, Np) = rawbeams.shape

    # Make masks for each piece of data
    arex = sp.arange(0, N/2.0, 0.5)
    arback = sp.array([-sp.int_(sp.floor(k)) for k in arex])
    arfor = sp.array([sp.int_(sp.ceil(k)) for k in arex])

    # figure out how much range space will be kept
    ap = sp.nanmax(abs(arback))
    ep = Nr - sp.nanmax(arfor)
    rng_ar_all = sp.arange(ap, ep)
    # wearr = (1./(N-sp.tile((arfor-arback)[:,sp.newaxis],(1,Np)))).astype(numtype)
    #acf_cent = sp.zeros((ep-ap,N))*(1+1j)
    acf_cent = sp.zeros((ep-ap, N), dtype=numtype)
    for irng in sp.arange(len(rng_ar_all)):
        rng_ar1 = sp.int_(rng_ar_all[irng]) + arback
        rng_ar2 = sp.int_(rng_ar_all[irng]) + arfor
        # get all of the acfs across pulses
        acf_tmp = sp.conj(rawbeams[rng_ar1, :])*rawbeams[rng_ar2, :]  # *wearr
        # sum along the pulses
        acf_ave = sp.sum(acf_tmp, 1)
        acf_cent[irng, :] = acf_ave  # might need to transpose this
    return acf_cent
def processDebug1(self, rawImagePath):
    logger.info("using standard Andor 0 process function")
    rawArray = self.read(rawImagePath)
    if not self.optionsDict["process?"]:
        return rawArray
    [atomsArray, lightArray] = self.fastKineticsCrop(rawArray, 2)
    if self.optionsDict["darkSubtraction"]:
        darkArray = self.loadDarkImage(self.darkImagePath)
        rawArray -= darkArray
        rawArray.clip(1)
    logger.info("atomsArray = %s" % atomsArray)
    logger.info("lightArray = %s" % lightArray)
    logger.info("atomsArray/lightArray = %s" % (atomsArray / lightArray))
    logger.info("min , max atoms = %s, %s" % (scipy.nanmin(atomsArray),
                                              scipy.nanmax(atomsArray)))
    logger.info("min , max light = %s, %s" % (scipy.nanmin(lightArray),
                                              scipy.nanmax(lightArray)))
    logger.info("min , max atoms/light = %s, %s" % (scipy.nanmin(atomsArray / lightArray),
                                                    scipy.nanmax(atomsArray / lightArray)))
    corrected = atomsArray / lightArray
    if self.optionsDict["rotate?"]:
        rotated = self.rotate(corrected, self.optionsDict["rotationAngle"])
        return rotated
    return corrected
def _statisticsButton_fired(self):
    from scipy.stats import pearsonr
    xs, ys = self.dataSets[self.selectedDataSet]
    mean = scipy.mean(ys)
    median = scipy.median(ys)
    std = scipy.std(ys)
    minimum = scipy.nanmin(ys)
    maximum = scipy.nanmax(ys)
    peakToPeak = maximum - minimum
    pearsonCorrelation = pearsonr(xs, ys)
    resultString = ("mean=%G , median=%G stdev =%G\n"
                    "min=%G,max=%G, pk-pk=%G\n"
                    "Pearson Correlation=(%G,%G)\n"
                    "(stdev/mean)=%G" % (mean, median, std, minimum, maximum,
                                         peakToPeak, pearsonCorrelation[0],
                                         pearsonCorrelation[1], std / mean))
    self.statisticsString = resultString
def print_verbose_message(self, i):
    """Method to print training statistics if Verbose is TRUE"""

    # Memory usage (does not work in Windows)
    # print('Peak memory usage: %.2f MB' % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / infer_platform() ))

    # Variance explained
    r2 = s.asarray(self.calculate_variance_explained(total=True)).mean(axis=0)
    r2[r2 < 0] = 0.
    print("- Variance explained: " + " ".join(
        ["View %s: %.2f%%" % (m, 100 * r2[m]) for m in range(self.dim["M"])]))

    # Sparsity levels of the weights
    W = self.nodes["W"].getExpectation()
    foo = [s.mean(s.absolute(W[m]) < 1e-3) for m in range(self.dim["M"])]
    print("- Fraction of zero weights: " + " ".join(
        ["View %s: %.0f%%" % (m, 100 * foo[m]) for m in range(self.dim["M"])]))

    # Correlation between factors
    Z = self.nodes["Z"].getExpectation()
    Z += s.random.normal(s.zeros(Z.shape), 1e-10)
    r = s.absolute(corr(Z.T, Z.T))
    s.fill_diagonal(r, 0)
    print("- Maximum correlation between factors: %.2f" % (s.nanmax(r)))

    # Factor norm
    bar = s.mean(s.square(Z), axis=0)
    print("- Factor norms: " + " ".join(["%.2f" % bar[k] for k in range(Z.shape[1])]))

    # Tau
    tau = self.nodes["Tau"].getExpectation()
    print("- Tau per view (average): " + " ".join(
        ["View %s: %.2f" % (m, tau[m].mean()) for m in range(self.dim["M"])]))

    # Sigma
    if 'Sigma' in self.nodes.keys():
        sigma = self.nodes["Sigma"]
        if i >= sigma.start_opt and i % sigma.opt_freq == 0:
            print('Sigma node has been optimised:\n- Lengthscales = %s \n- Scale = %s' %
                  (np.array2string(sigma.get_ls(), precision=2, separator=", "),
                   np.array2string(1 - sigma.get_zeta(), precision=2, separator=", ")))

    print("\n")
def getImageData(self, imageFile):
    logger.debug("pulling image data")
    # The xs and ys used for the image plot range need to be the
    # edges of the cells.
    self.imageFile = imageFile
    self.xs = scipy.linspace(0.0, self.pixelsX - 1, self.pixelsX)
    self.ys = scipy.linspace(0.0, self.pixelsY - 1, self.pixelsY)

    if not os.path.exists(imageFile):
        # if no file is defined the image is flat 0s of camera size
        logger.error("image file not found. filling with zeros")
        self.zs = scipy.zeros((self.pixelsX, self.pixelsY))
        print self.zs
        self.minZ = 0.0
        self.maxZ = 1.0
        self.model_changed = True
    else:
        try:
            self.rawImage = scipy.misc.imread(imageFile)
            self.zs = (self.rawImage - self.offset) / self.scale
            if self.ODCorrectionBool:
                logger.info("Correcting for OD saturation")
                self.zs = scipy.log(
                    (1.0 - scipy.exp(-self.ODSaturationValue)) /
                    (scipy.exp(-self.zs) - scipy.exp(-self.ODSaturationValue)))
                # we should account for the fact that if the ODSaturation
                # value is wrong or there is noise we can get complex numbers!
                self.zs[scipy.imag(self.zs) > 0] = scipy.nan
                self.zs = self.zs.astype(float)
            self.minZ = scipy.nanmin(self.zs)
            self.maxZ = scipy.nanmax(self.zs)
            self.model_changed = True
            for fit in self.fitList:
                fit.xs = self.xs
                fit.ys = self.ys
                fit.zs = self.zs
        except Exception as e:
            logger.error("error in setting data %s" % e.message)
            logger.debug("Sometimes we get an error unsupported operand "
                         "type(s) for -: 'instance' and 'float'.")
            logger.debug("checking for what could cause this. "
                         "Tell Tim if you see this error message!!!!")
            logger.debug("type(self.rawImage) -> %s" % type(self.rawImage))
            logger.debug("type(self.offset) -> %s" % type(self.offset))
def doAstep(i):
    # we use the fact that dat,edat aren't changed on the way,
    # so they shouldn't be copied to a different thread
    dat, edat = data_struct.dat, data_struct.edat
    Gs, As = data_struct.Gs, data_struct.As
    Fi = np.matrix(dat[i] / edat[i] ** 2, copy=False) * Gs
    # Covi = np.matrix(np.diag(1. / edat[i] ** 2), copy=False)
    # Gi = Gs.T * Covi * Gs
    # del Covi
    Gi = Gs.T * np.matrix((1.0 / edat[i] ** 2)[:, None] * np.asarray(Gs),
                          copy=False)
    Ai = scipy.linalg.solve(Gi, Fi.T, sym_pos=True)
    newAi = Ai.flatten()
    oldAi = As[i, :]
    delta = scipy.nanmax(np.abs((newAi - oldAi) / (np.abs(oldAi).max() + 1e-100)))
    As[i, :] = newAi
    return delta
def metrique_pheno_derivative(ndvi=sp.empty):
    """
    This method uses the second derivative of the NDVI to identify the dates
    of the beginning of the season, the end of the season and more.

    Parameters:
    ----------
    ndvi : the NDVI time series of one pixel over one year
    """
    try:
        ndviMin = ndvi.min()  # minimum value
        ndviMax = ndvi.max()  # maximum value
        #ndviMean = ndvi.mean()  # mean value
        indMin = int(sp.median(sp.where(ndvi == ndviMin)))  # index of the minimum
        indMax = int(sp.median(sp.where(ndvi == ndviMax)))  # index of the maximum

        d1 = sp.convolve(ndvi, [1, -1], 'same')  # first derivative approximation
        d2 = sp.convolve(ndvi, [1, -2, 1], 'same')  # second derivative approximation
        k1 = d1[:-1] * d1[1:]  # to find inflection points
        k2 = d2[:-1] * d2[1:]  # to find inflection points

        ind0d1 = (sp.where(k1[:indMax] < 0))[0][-1]
        ind0d2 = (sp.where(k2[:indMax] < 0))[0][-1]
        sos = sp.nanmax([ind0d1, ind0d2])

        ind0d22 = (sp.where(k2[indMax + 1:] < 0))[0][0]
        eos = ind0d22 + indMax
        los = eos - sos
        out = [sos, eos + 1, los, indMin + 1, indMax + 1, ndviMin, ndviMax]
        # +1 because indices start at 0
    except:
        out = [-1, -1, -1, -1, -1, -1, -1]
    return out
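# Illustrative call on a synthetic one-year NDVI series: a small early bump
# followed by the main season, which provides the first- and second-derivative
# sign changes the method looks for. Purely monotonic or flat curves trigger
# the except branch and return the [-1]*7 failure sentinel instead.
import scipy as sp
t = sp.linspace(0., 1., 36)
ndvi = 0.3 + 0.3*sp.exp(-(t - 0.15)**2/0.004) + 0.6*sp.exp(-(t - 0.60)**2/0.010)
sos, eos, los, imin, imax, vmin, vmax = metrique_pheno_derivative(ndvi)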
def doGstep(j):
    dat, edat = data_struct.dat, data_struct.edat
    Gs, As = data_struct.Gs, data_struct.As
    # Covj = np.matrix(np.diag(1. / edat[:, j] ** 2), copy=False)
    # Aj = As.T * Covj * As
    # del Covj
    # the rewrite uses the fact that a diagonal matrix times a matrix
    # can be rewritten as
    # np.matrix(xs[:,None]*np.asarray(Gs)) == np.matrix(np.diag(xs))*Gs
    Aj = As.T * np.matrix((1.0 / edat[:, j] ** 2)[:, None] * np.asarray(As),
                          copy=False)
    Fj = As.T * np.matrix((dat[:, j] / (edat[:, j]) ** 2), copy=False).T
    Gj = scipy.linalg.solve(Aj, Fj, sym_pos=True)
    newGj = Gj.flatten()
    oldGj = Gs[j, :]
    delta = scipy.nanmax(np.abs((newGj - oldGj) / (np.abs(oldGj).max() + 1e-100)))
    Gs[j, :] = newGj
    return delta
def get_ls_ls_market_price_dispersion(ls_ls_market_ids, master_price, series):
    # if numpy.version.version is '1.8' or above => switch from scipy to numpy
    # checks nb of prices (non nan) per period (must be 2 prices at least)
    ls_ls_market_price_dispersion = []
    for ls_market_ids in ls_ls_market_ids:
        list_market_prices = [master_price[series][master_price['ids'].index(indiv_id)]
                              for indiv_id in ls_market_ids]
        arr_market_prices = np.array(list_market_prices, dtype=np.float32)
        arr_nb_market_prices = (~np.isnan(arr_market_prices)).sum(0)
        arr_bool_enough_market_prices = np.where(arr_nb_market_prices > 1, 1, np.nan)
        arr_market_prices = arr_bool_enough_market_prices * arr_market_prices
        range_price_array = scipy.nanmax(arr_market_prices, 0) - \
            scipy.nanmin(arr_market_prices, axis=0)
        std_price_array = scipy.stats.nanstd(arr_market_prices, 0)
        coeff_var_price_array = scipy.stats.nanstd(arr_market_prices, 0) / \
            scipy.stats.nanmean(arr_market_prices, 0)
        gain_from_search_array = scipy.stats.nanmean(arr_market_prices, 0) - \
            scipy.nanmin(arr_market_prices, axis=0)
        ls_ls_market_price_dispersion.append((ls_market_ids,
                                              len(ls_market_ids),
                                              range_price_array,
                                              std_price_array,
                                              coeff_var_price_array,
                                              gain_from_search_array))
    return ls_ls_market_price_dispersion
def plotbeamparametersv2(times, configfile, maindir, fitdir='Fitted',
                         params=['Ne'], filetemplate='params',
                         suptitle='Parameter Comparison', werrors=False,
                         nelog=True):
    """
    This function will plot the desired parameters for each beam along range.
    The values of the input and measured parameters will be plotted.
    Inputs
        times - A list of times that will be plotted.
        configfile - The INI file with the simulation parameters that will be used.
        maindir - The directory the images will be saved in.
        params - List of parameter names that will be plotted. These need to
            match the names in the ionocontainer.
        filetemplate - The first part of the file names.
        suptitle - The supertitle for the plots.
        werrors - A bool that determines if the errors will be plotted.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    # rc('text', usetex=True)
    maindir = Path(maindir)
    ffit = maindir/fitdir/'fitteddata.h5'
    inputfiledir = maindir/'Origparams'
    (sensdict, simparams) = readconfigfile(configfile)

    paramslower = [ip.lower() for ip in params]
    Nt = len(times)
    Np = len(params)

    # Read in fitted data
    Ionofit = IonoContainer.readh5(str(ffit))
    dataloc = Ionofit.Sphere_Coords
    pnames = Ionofit.Param_Names
    pnameslower = sp.array([ip.lower() for ip in pnames.flatten()])
    p2fit = [sp.argwhere(ip == pnameslower)[0][0]
             if ip in pnameslower else None for ip in paramslower]
    time2fit = [None]*Nt
    # Have to fix this because of time offsets
    if times[0] == 0:
        times += Ionofit.Time_Vector[0, 0]

    for itn, itime in enumerate(times):
        filear = sp.argwhere(Ionofit.Time_Vector[:, 0] >= itime)
        if len(filear) == 0:
            filenum = len(Ionofit.Time_Vector)-1
        else:
            filenum = sp.argmin(sp.absolute(Ionofit.Time_Vector[:, 0]-itime))
        time2fit[itn] = filenum
    times_int = [Ionofit.Time_Vector[i] for i in time2fit]

    # determine the beams
    angles = dataloc[:, 1:]
    rng = sp.unique(dataloc[:, 0])
    b_arr = np.ascontiguousarray(angles).view(
        np.dtype((np.void, angles.dtype.itemsize * angles.shape[1])))
    _, idx, invidx = np.unique(b_arr, return_index=True, return_inverse=True)

    beamlist = angles[idx]
    Nb = beamlist.shape[0]

    # Determine which input files are to be used.
    dirlist = sorted(inputfiledir.glob('*.h5'))
    dirliststr = [str(i) for i in dirlist]
    sortlist, outime, outfilelist, timebeg, timelist_s = IonoContainer.gettimes(dirliststr)
    timelist = timebeg.copy()
    time2file = [None]*Nt

    time2intime = [None]*Nt
    # go through times, find the files and then the times within the files
    for itn, itime in enumerate(times):
        filear = sp.argwhere(timelist >= itime)
        if len(filear) == 0:
            filenum = [len(timelist)-1]
        else:
            filenum = filear[0]

        flist1 = []
        timeinflist = []
        for ifile in filenum:
            filetimes = timelist_s[ifile]
            log1 = (filetimes[:, 0] >= times_int[itn][0]) & (filetimes[:, 0] < times_int[itn][1])
            log2 = (filetimes[:, 1] > times_int[itn][0]) & (filetimes[:, 1] <= times_int[itn][1])
            log3 = (filetimes[:, 0] <= times_int[itn][0]) & (filetimes[:, 1] > times_int[itn][1])
            log4 = (filetimes[:, 0] > times_int[itn][0]) & (filetimes[:, 1] < times_int[itn][1])
            curtimes1 = sp.where(log1 | log2 | log3 | log4)[0].tolist()
            flist1 = flist1 + [ifile]*len(curtimes1)
            timeinflist = timeinflist + curtimes1
        time2intime[itn] = timeinflist
        time2file[itn] = flist1

    nfig = int(sp.ceil(Nt*Nb))

    imcount = 0
    curfilenum = -1
    # Loop for the figures
    for i_fig in range(nfig):
        lines = [None]*2
        labels = [None]*2
        (figmplf, axmat) = plt.subplots(int(sp.ceil(Np/2)), 2,
                                        figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        # loop that goes through each axis; loops through each parameter,
        # beam, then time.
        for ax in axvec:
            if imcount >= Nt*Nb*Np:
                break
            imcount_f = float(imcount)
            itime = int(sp.floor(imcount_f/Nb/Np))
            iparam = int(imcount_f/Nb - Np*itime)
            ibeam = int(imcount_f - (itime*Np*Nb + iparam*Nb))
            curbeam = beamlist[ibeam]

            altlist = sp.sin(curbeam[1]*sp.pi/180.)*rng

            curparm = paramslower[iparam]
            # Use Ne from input to compare to the ne derived from the power.
            if curparm == 'nepow':
                curparm_in = 'ne'
            else:
                curparm_in = curparm

            curcoord = sp.zeros(3)
            curcoord[1:] = curbeam

            for iplot, filenum in enumerate(time2file[itime]):

                if curfilenum != filenum:
                    curfilenum = filenum
                    datafilename = dirlist[filenum]
                    Ionoin = IonoContainer.readh5(str(datafilename))
                    if ('ti' in paramslower) or ('vi' in paramslower):
                        Ionoin = maketi(Ionoin)
                    pnames = Ionoin.Param_Names
                    pnameslowerin = sp.array([ip.lower() for ip in pnames.flatten()])

                prmloc = sp.argwhere(curparm_in == pnameslowerin)
                if prmloc.size != 0:
                    curprm = prmloc[0][0]
                # build up the parameter vector by finding the closest point
                # in space in the input for each of the range values
                curdata = sp.zeros(len(rng))
                for irngn, irng in enumerate(rng):
                    curcoord[0] = irng
                    tempin = Ionoin.getclosestsphere(curcoord)[0][time2intime[itime]]
                    Ntloc = tempin.shape[0]
                    tempin = sp.reshape(tempin, (Ntloc, len(pnameslowerin)))
                    curdata[irngn] = tempin[0, curprm]
                # actual plotting of the input data
                lines[0] = ax.plot(curdata, altlist, marker='o', c='b', linewidth=2)[0]
                labels[0] = 'Input Parameters'
            # Plot fitted data for the axis
            indxkep = np.argwhere(invidx == ibeam)[:, 0]
            curfit = Ionofit.Param_List[indxkep, time2fit[itime], p2fit[iparam]]
            rng_fit = dataloc[indxkep, 0]
            alt_fit = rng_fit*sp.sin(curbeam[1]*sp.pi/180.)
            errorexist = 'n' + paramslower[iparam] in pnameslower
            if errorexist and werrors:
                eparam = sp.argwhere('n' + paramslower[iparam] == pnameslower)[0][0]
                curerror = Ionofit.Param_List[indxkep, time2fit[itime], eparam]
                lines[1] = ax.errorbar(curfit, alt_fit, xerr=curerror,
                                       fmt='-.', c='g', linewidth=2)[0]
            else:
                lines[1] = ax.plot(curfit, alt_fit, marker='o', c='g', linewidth=2)[0]
            labels[1] = 'Fitted Parameters'
            # get and plot the input data
            numplots = len(time2file[itime])

            # set the limit for the parameter
            if curparm == 'vi':
                ax.set(xlim=[-1.25*sp.nanmax(sp.absolute(curfit)),
                             1.25*sp.nanmax(sp.absolute(curfit))])
            elif curparm_in != 'ne':
                ax.set(xlim=[0.75*sp.nanmin(curfit),
                             sp.minimum(1.25*sp.nanmax(curfit), 8000.)])
            elif (curparm_in == 'ne') and nelog:
                ax.set_xscale('log')

            ax.set_xlabel(params[iparam])
            ax.set_ylabel('Alt km')
            ax.set_title('{0} vs Altitude, Time: {1}s Az: {2}$^o$ El: {3}$^o$'.format(
                params[iparam], times[itime], *curbeam))
            imcount += 1
        # save figure
        figmplf.suptitle(suptitle, fontsize=20)
        if None in labels:
            labels.remove(None)
            lines.remove(None)
        plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.)
        fname = filetemplate + '_{0:0>3}.png'.format(i_fig)
        plt.savefig(fname)
        plt.close(figmplf)
def defaultPolicyStoppingCriterion(nIter, currentPolicyArrayList,
                                   greedyPolicyList, criterion=0.0001):
    diffList = [(greedyPolicyList[i] - currentPolicyArrayList[i])
                for i in range(len(greedyPolicyList))]
    pctDiffList = [(diffList[i] / currentPolicyArrayList[i])
                   for i in range(len(greedyPolicyList))]
    maxdiffList = [scipy.nanmax(abs(diff)) for diff in diffList]
    maxdiff = scipy.amax(maxdiffList)
    return ((maxdiff < criterion), maxdiff)
def _sort_chunk(self):
    """sort this chunk on the calculated discriminant functions

    method: "och"
        Examples for overlap samples

             tau=-2     tau=-1      tau=0      tau=1      tau=2
        f1:  |-----|    |-----|    |-----|    |-----|    |-----|
        f2:    |-----|   |-----|   |-----|   |-----|   |-----|
        res:     +++       ++++      +++++      ++++       +++
    method: "sic"
        TODO:
    """

    # init
    if self.nf == 0:
        return
    spk_ep = epochs_from_binvec(
        sp.nanmax(self._disc, axis=1) > self._lpr_n)
    if spk_ep.size == 0:
        return
    l, r = get_cut(self._tf)
    for i in xrange(spk_ep.shape[0]):
        # FIX: for now we just continue for empty epochs,
        # where do they come from anyways?!
        if spk_ep[i, 1] - spk_ep[i, 0] < 1:
            continue
        mc = self._disc[spk_ep[i, 0]:spk_ep[i, 1], :].argmax(0).argmax()
        s = self._disc[spk_ep[i, 0]:spk_ep[i, 1], mc].argmax() + spk_ep[i, 0]
        spk_ep[i] = [s - l, s + r]

    # check epochs
    spk_ep = merge_epochs(spk_ep)
    n_ep = spk_ep.shape[0]

    for i in xrange(n_ep):
        #
        # method: overlap channels
        #
        if self._ovlp_taus is not None:
            # get event time and channel
            ep_t, ep_c = matrix_argmax(
                self._disc[spk_ep[i, 0]:spk_ep[i, 1]])
            ep_t += spk_ep[i, 0]

            # lets fill in the results
            if ep_c < self.nf:
                # was single unit
                fid = self.get_idx_for(ep_c)
                self.rval[fid].append(ep_t + self._chunk_offset)
            else:
                # was overlap
                my_oc_idx = self._oc_idx[ep_c]
                fid0 = self.get_idx_for(my_oc_idx[0])
                self.rval[fid0].append(ep_t + self._chunk_offset)
                fid1 = self.get_idx_for(my_oc_idx[1])
                self.rval[fid1].append(
                    ep_t + my_oc_idx[2] + self._chunk_offset)
        #
        # method: subtractive interference cancellation
        #
        else:
            ep_fout = self._fout[spk_ep[i, 0]:spk_ep[i, 1], :]
            ep_fout_norm = sp_la.norm(ep_fout)
            ep_disc = self._disc[spk_ep[i, 0]:spk_ep[i, 1], :].copy()

            niter = 0
            while sp.nanmax(ep_disc) > self._lpr_n:
                # warn on spike overflow
                niter += 1
                if niter > self.nf:
                    warnings.warn(
                        'more spikes than filters found! '
                        'epoch: [%d:%d] %d' % (
                            spk_ep[i][0] + self._chunk_offset,
                            spk_ep[i][1] + self._chunk_offset,
                            niter))
                    if niter > 2 * self.nf:
                        break

                # find epoch details
                ep_t = sp.nanargmax(sp.nanmax(ep_disc, axis=1))
                ep_c = sp.nanargmax(ep_disc[ep_t])

                # build subtrahend
                sub = shifted_matrix_sub(
                    sp.zeros_like(ep_disc),
                    self._xcorrs[ep_c, :, :].T,
                    ep_t - self._tf + 1)

                # apply subtrahend
                if ep_fout_norm > sp_la.norm(ep_fout + sub):
                    ## DEBUG
                    if self.verbose.get_has_plot(1):
                        try:
                            from spikeplot import xvf_tensor, plt, COLOURS
                            x_range = sp.arange(
                                spk_ep[i, 0] + self._chunk_offset,
                                spk_ep[i, 1] + self._chunk_offset)
                            f = plt.figure()
                            f.suptitle('spike epoch [%d:%d] #%d' %
                                       (spk_ep[i, 0] + self._chunk_offset,
                                        spk_ep[i, 1] + self._chunk_offset,
                                        niter))
                            ax1 = f.add_subplot(211)
                            ax1.set_color_cycle(
                                ['k'] + COLOURS[:self.nf] * 2)
                            ax1.plot(x_range, sp.zeros_like(x_range), ls='--')
                            ax1.plot(x_range, ep_disc, label='pre_sub')
                            ax1.axvline(x_range[ep_t], c='k')
                            ax2 = f.add_subplot(212, sharex=ax1, sharey=ax1)
                            ax2.set_color_cycle(['k'] + COLOURS[:self.nf])
                            ax2.plot(x_range, sp.zeros_like(x_range), ls='--')
                            ax2.plot(x_range, sub)
                            ax2.axvline(x_range[ep_t], c='k')
                        except:
                            pass
                    ## BUGED

                    ep_disc += sub + self._lpr_s
                    if self._pr_s_b is not None:
                        bias, extend = self._pr_s_b
                        ep_disc[ep_t:min(ep_t + extend,
                                         ep_disc.shape[0]), ep_c] -= bias

                    ## DEBUG
                    if self.verbose.get_has_plot(1):
                        try:
                            ax1.plot(x_range, ep_disc, ls=':', lw=2,
                                     label='post_sub')
                            ax1.legend(loc=2)
                        except:
                            pass
                    ## BUGED

                    fid = self.get_idx_for(ep_c)
                    self.rval[fid].append(
                        spk_ep[i, 0] + ep_t + self._chunk_offset)
                else:
                    break
            del ep_fout, ep_disc, sub
def contourGD(geod, axstr, slicenum, vbounds=None, time=0, gkey=None,
              cmap=defmap, fig=None, ax=None, title='', cbar=True, m=None,
              levels=None):
    """
    """
    poscoords = ['cartesian', 'wgs84', 'enu', 'ecef']
    assert geod.coordnames.lower() in poscoords

    if geod.coordnames.lower() in ['cartesian', 'enu', 'ecef']:
        axdict = {'x': 0, 'y': 1, 'z': 2}
        veckeys = ['x', 'y', 'z']
    elif geod.coordnames.lower() == 'wgs84':
        axdict = {'lat': 0, 'long': 1, 'alt': 2}  # which row is this coordinate
        veckeys = ['long', 'lat', 'alt']  # which are the x, y and z axes for plotting

    if type(axstr) == str:
        axis = axstr
    else:
        axis = veckeys[axstr]
    veckeys.remove(axis.lower())
    veckeys.append(axis.lower())
    datacoords = geod.dataloc
    xyzvecs = {l: sp.unique(datacoords[:, axdict[l]]) for l in veckeys}
    # make matrices
    M1, M2 = sp.meshgrid(xyzvecs[veckeys[0]], xyzvecs[veckeys[1]])
    slicevec = sp.unique(datacoords[:, axdict[axis]])
    min_idx = sp.argmin(sp.absolute(slicevec - slicenum))
    slicenum = slicevec[min_idx]
    rec_coords = {axdict[veckeys[0]]: M1.flatten(),
                  axdict[veckeys[1]]: M2.flatten(),
                  axdict[axis]: slicenum * sp.ones(M2.size)}

    new_coords = sp.zeros((M1.size, 3))
    # make coordinates
    for ckey in rec_coords.keys():
        new_coords[:, ckey] = rec_coords[ckey]
    # determine the data name
    if gkey is None:
        gkey = geod.data.keys[0]

    # get the data location: first check if the data can be just reshaped,
    # then do a search
    sliceindx = slicenum == datacoords[:, axdict[axis]]
    datacoordred = datacoords[sliceindx]
    rstypes = ['C', 'F', 'A']
    nfounds = True
    M1dlfl = datacoordred[:, axdict[veckeys[0]]]
    M2dlfl = datacoordred[:, axdict[veckeys[1]]]
    for ir in rstypes:
        M1dl = sp.reshape(M1dlfl, M1.shape, order=ir)
        M2dl = sp.reshape(M2dlfl, M1.shape, order=ir)
        if sp.logical_and(sp.allclose(M1dl, M1), sp.allclose(M2dl, M2)):
            nfounds = False
            break
    if nfounds:
        dataout = geod.datareducelocation(new_coords, geod.coordnames, gkey)[:, time]
        dataout = sp.reshape(dataout, M1.shape)
    else:
        dataout = sp.reshape(geod.data[gkey][sliceindx, time], M1.shape, order=ir)

    title = insertinfo(title, gkey, geod.times[time, 0], geod.times[time, 1])

    if (ax is None) and (fig is None):
        fig = plt.figure(facecolor='white')
        ax = fig.gca()
    elif ax is None:
        ax = fig.gca()
    if vbounds is None:
        vbounds = [sp.nanmin(dataout), sp.nanmax(dataout)]
    if levels is None:
        levels = sp.linspace(vbounds[0], vbounds[1], 5)
    if m is None:
        ploth = ax.contour(M1, M2, dataout, levels=levels, vmin=vbounds[0],
                           vmax=vbounds[1], cmap=cmap)
        ax.axis([xyzvecs[veckeys[0]].min(), xyzvecs[veckeys[0]].max(),
                 xyzvecs[veckeys[1]].min(), xyzvecs[veckeys[1]].max()])
        if cbar:
            cbar2 = plt.colorbar(ploth, ax=ax, format='%.0e')
        else:
            cbar2 = None
        ax.set_title(title)
        ax.set_xlabel(veckeys[0])
        ax.set_ylabel(veckeys[1])
    else:
        N1, N2 = m(M1, M2)
        ploth = ax.contour(N1, N2, dataout, levels=levels, vmin=vbounds[0],
                           vmax=vbounds[1], cmap=cmap)
        if cbar:
            #cbar2 = m.colorbar(ploth, format='%.0e')
            cbar2 = m.colorbar(ploth)
        else:
            cbar2 = None
    return (ploth, cbar2)
tmp_psi_ = hdf5_handles[t]['psi'][:, tmp_idx]
tmp_iso1 = hdf5_handles[t]['iso1'][:, tmp_idx] * sf[t][:, sp.newaxis]
tmp_iso2 = hdf5_handles[t]['iso2'][:, tmp_idx] * sf[t][:, sp.newaxis]
for mr in mr_t:
    for i, p in enumerate(tissues):
        tmp_psi = tmp_psi_[t_idx[(t, p)], :]
        n_idx = sp.c_[tmp_iso1[t_idx[(t, p)], :].max(axis=0),
                      tmp_iso2[t_idx[(t, p)], :].max(axis=0)].min(axis=1) < mr
        tmp_psi[:, n_idx] = sp.nan
        idx_nn = ~sp.isnan(tmp_psi)
        d_psi = sp.nanmax(tmp_psi, axis=0) - sp.nanmin(tmp_psi, axis=0)
        d_psi[sp.isnan(d_psi)] = 0
        for dp in d_psi_t:
            if cc == 0:
                count[(t, p)][(mr, dp)] = tmp_idx[
                    (sp.sum(idx_nn, axis=0) >= nan_t) & (d_psi >= dp)]
            else:
                count[(t, p)][(mr, dp)] = sp.r_[
                    count[(t, p)][(mr, dp)],
                    tmp_idx[(sp.sum(idx_nn, axis=0) >= nan_t) & (d_psi >= dp)]]

### store count as pre-processed pickle
cPickle.dump((count, tissues, dsets, tids, is_tumor, t_idx),
def veldist_1d_slope(plotfilename,phi=_DEFAULTPHI,R=_DEFAULTR,
                     ngrid=201,saveDir='../bar/1dvar/'):
    """
    NAME:
       veldist_1d_slope
    PURPOSE:
       make a plot showing the influence of the shape of the rotation curve
    INPUT:
       plotfilename - filename for figure
       phi - Galactocentric azimuth
       R - Galactocentric radius
       ngrid - number of grid-points to calculate the los velocity
               distribution on
       saveDir - save pickles here
    OUTPUT:
       Figure in plotfilename
    HISTORY:
       2010-05-15 - Written - Bovy (NYU)
    """
    slopes= [-0.2,-0.1,0.,0.1,0.2]

    vloslinspace= (-.9,.9,ngrid)
    vloss= sc.linspace(*vloslinspace)

    vlosds= []
    basesavefilename= os.path.join(saveDir,'slope_')
    for slope in slopes:
        thissavefilename= basesavefilename+'%.1f.sav' % slope
        if os.path.exists(thissavefilename):
            print "Restoring los-velocity distribution at slope %.1f" % slope
            savefile= open(thissavefilename,'r')
            vlosd= pickle.load(savefile)
            savefile.close()
        else:
            print "Calculating los-velocity distribution at slope %.1f" % slope
            potparams= (0.9,0.01,25.*_degtorad,.8,None)
            vlosd= predictVlos(vloslinspace,
                               l=phi,
                               d=R,
                               distCoord='GCGC',
                               pot='bar',beta=slope,
                               potparams=potparams)
            vlosd= vlosd/(sc.nansum(vlosd)*(vloss[1]-vloss[0]))
            savefile= open(thissavefilename,'w')
            pickle.dump(vlosd,savefile)
            savefile.close()
        vlosds.append(vlosd)
    #Plot
    plot.bovy_print()
    plot.bovy_plot(vloss,vlosds[2],'k-',zorder=3,
                   xrange=[vloslinspace[0],vloslinspace[1]],
                   yrange=[0.,sc.nanmax(sc.array(vlosds).flatten())*1.1],
                   xlabel=r'$v_{\mathrm{los}} / v_0$')
    plot.bovy_plot(vloss,vlosds[0],ls='-',color='0.75',
                   overplot=True,zorder=2,lw=2.)
    plot.bovy_plot(vloss,vlosds[1],ls='-',color='0.60',
                   overplot=True,zorder=2,lw=2.)
    plot.bovy_plot(vloss,vlosds[3],ls='-',color='0.45',
                   overplot=True,zorder=2,lw=1.5)
    plot.bovy_plot(vloss,vlosds[4],ls='-',color='0.3',
                   overplot=True,zorder=2,lw=1.5)
    plot.bovy_text(r'$\mathrm{shape\ of\ the\ rotation\ curve}$',title=True)
    plot.bovy_text(0.5,.5,r'$\beta = -0.2$'+'\n'+
                   r'$\beta = -0.1$'+'\n'+
                   r'$\beta = \phantom{-}0.0$'+'\n'+
                   r'$\beta= \phantom{-}0.1$'+'\n'+
                   r'$\beta= \phantom{-}0.2$')
    plot.bovy_end_print(plotfilename)
def bovy_dens2d(X,**kwargs): """ NAME: bovy_dens2d PURPOSE: plot a 2d density with optional contours INPUT: first argument is the density matplotlib.pyplot.imshow keywords (see http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.imshow) xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed xrange yrange noaxes - don't plot any axes overplot - if True, overplot colorbar - if True, add colorbar shrink= colorbar argument: shrink the colorbar by the factor (optional) Contours: contours - if True, draw contours (10 by default) levels - contour-levels cntrmass - if True, the density is a probability and the levels are probability masses contained within the contour cntrcolors - colors for contours (single color or array) cntrlabel - label the contours cntrlw, cntrls - linewidths and linestyles for contour cntrlabelsize, cntrlabelcolors,cntrinline - contour arguments onedhists - if True, make one-d histograms on the sides onedhistcolor - histogram color retAxes= return all Axes instances OUTPUT: HISTORY: 2010-03-09 - Written - Bovy (NYU) """ if kwargs.has_key('overplot'): overplot= kwargs['overplot'] kwargs.pop('overplot') else: overplot= False if not overplot: pyplot.figure() if kwargs.has_key('xlabel'): xlabel= kwargs['xlabel'] kwargs.pop('xlabel') else: xlabel=None if kwargs.has_key('ylabel'): ylabel= kwargs['ylabel'] kwargs.pop('ylabel') else: ylabel=None if kwargs.has_key('zlabel'): zlabel= kwargs['zlabel'] kwargs.pop('zlabel') else: zlabel=None if kwargs.has_key('extent'): extent= kwargs['extent'] kwargs.pop('extent') else: if kwargs.has_key('xrange'): xlimits=list(kwargs['xrange']) kwargs.pop('xrange') else: xlimits=[0,X.shape[0]] if kwargs.has_key('yrange'): ylimits=list(kwargs['yrange']) kwargs.pop('yrange') else: ylimits=[0,X.shape[1]] extent= xlimits+ylimits if not kwargs.has_key('aspect'): kwargs['aspect']= (xlimits[1]-xlimits[0])/float(ylimits[1]-ylimits[0]) if kwargs.has_key('noaxes'): noaxes= kwargs['noaxes'] kwargs.pop('noaxes') else: noaxes= False if (kwargs.has_key('contours') and kwargs['contours']) or \ kwargs.has_key('levels') or \ (kwargs.has_key('cntrmass') and kwargs['cntrmass']): contours= True else: contours= False if kwargs.has_key('contours'): kwargs.pop('contours') if kwargs.has_key('levels'): levels= kwargs['levels'] kwargs.pop('levels') elif contours: if kwargs.has_key('cntrmass') and kwargs['cntrmass']: levels= sc.linspace(0.,1.,_DEFAULTNCNTR) elif True in sc.isnan(sc.array(X)): levels= sc.linspace(sc.nanmin(X),sc.nanmax(X),_DEFAULTNCNTR) else: levels= sc.linspace(sc.amin(X),sc.amax(X),_DEFAULTNCNTR) if kwargs.has_key('cntrmass') and kwargs['cntrmass']: cntrmass= True kwargs.pop('cntrmass') else: cntrmass= False if kwargs.has_key('cntrmass'): kwargs.pop('cntrmass') if kwargs.has_key('cntrcolors'): cntrcolors= kwargs['cntrcolors'] kwargs.pop('cntrcolors') elif contours: cntrcolors='k' if kwargs.has_key('cntrlabel') and kwargs['cntrlabel']: cntrlabel= True kwargs.pop('cntrlabel') else: cntrlabel= False if kwargs.has_key('cntrlabel'): kwargs.pop('cntrlabel') if kwargs.has_key('cntrlw'): cntrlw= kwargs['cntrlw'] kwargs.pop('cntrlw') elif contours: cntrlw= None if kwargs.has_key('cntrls'): cntrls= kwargs['cntrls'] kwargs.pop('cntrls') elif contours: cntrls= None if kwargs.has_key('cntrlabelsize'): cntrlabelsize= kwargs['cntrlabelsize'] kwargs.pop('cntrlabelsize') elif contours: cntrlabelsize= None if kwargs.has_key('cntrlabelcolors'): cntrlabelcolors= 
kwargs['cntrlabelcolors'] kwargs.pop('cntrlabelcolors') elif contours: cntrlabelcolors= None if kwargs.has_key('cntrinline'): cntrinline= kwargs['cntrinline'] kwargs.pop('cntrinline') elif contours: cntrinline= None if kwargs.has_key('retCumImage'): retCumImage= kwargs['retCumImage'] kwargs.pop('retCumImage') else: retCumImage= False if kwargs.has_key('colorbar'): cb= kwargs['colorbar'] kwargs.pop('colorbar') else: cb= False if kwargs.has_key('shrink'): shrink= kwargs['shrink'] kwargs.pop('shrink') else: shrink= None if kwargs.has_key('onedhists'): onedhists= kwargs['onedhists'] kwargs.pop('onedhists') else: onedhists= False if kwargs.has_key('onedhistcolor'): onedhistcolor= kwargs['onedhistcolor'] kwargs.pop('onedhistcolor') else: onedhistcolor= 'k' if kwargs.has_key('retAxes'): retAxes= kwargs['retAxes'] kwargs.pop('retAxes') else: retAxes= False if onedhists: if overplot: fig= pyplot.gcf() else: fig= pyplot.figure() nullfmt = NullFormatter() # no labels # definitions for the axes left, width = 0.1, 0.65 bottom, height = 0.1, 0.65 bottom_h = left_h = left+width rect_scatter = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.2] rect_histy = [left_h, bottom, 0.2, height] axScatter = pyplot.axes(rect_scatter) axHistx = pyplot.axes(rect_histx) axHisty = pyplot.axes(rect_histy) # no labels axHistx.xaxis.set_major_formatter(nullfmt) axHistx.yaxis.set_major_formatter(nullfmt) axHisty.xaxis.set_major_formatter(nullfmt) axHisty.yaxis.set_major_formatter(nullfmt) fig.sca(axScatter) ax=pyplot.gca() ax.set_autoscale_on(False) out= pyplot.imshow(X,extent=extent,**kwargs) pyplot.axis(extent) _add_axislabels(xlabel,ylabel) _add_ticks() #Add colorbar if cb: if shrink is None: if kwargs.has_key('aspect'): shrink= sc.amin([float(kwargs['aspect'])*0.87,1.]) else: shrink= 0.87 CB1= pyplot.colorbar(out,shrink=shrink) if not zlabel is None: if zlabel[0] != '$': thiszlabel=r'$'+zlabel+'$' else: thiszlabel=zlabel CB1.set_label(zlabel) if contours or retCumImage: if kwargs.has_key('aspect'): aspect= kwargs['aspect'] else: aspect= None if kwargs.has_key('origin'): origin= kwargs['origin'] else: origin= None if cntrmass: #Sum from the top down! X[sc.isnan(X)]= 0. sortindx= sc.argsort(X.flatten())[::-1] cumul= sc.cumsum(sc.sort(X.flatten())[::-1])/sc.sum(X.flatten()) cntrThis= sc.zeros(sc.prod(X.shape)) cntrThis[sortindx]= cumul cntrThis= sc.reshape(cntrThis,X.shape) else: cntrThis= X if contours: cont= pyplot.contour(cntrThis,levels,colors=cntrcolors, linewidths=cntrlw,extent=extent,aspect=aspect, linestyles=cntrls,origin=origin) if cntrlabel: pyplot.clabel(cont,fontsize=cntrlabelsize, colors=cntrlabelcolors, inline=cntrinline) if noaxes: ax.set_axis_off() #Add onedhists if not onedhists: if retCumImage: return cntrThis elif retAxes: return pyplot.gca() else: return out histx= sc.nansum(X.T,axis=1)*m.fabs(ylimits[1]-ylimits[0])/X.shape[1] #nansum bc nan is *no dens value* histy= sc.nansum(X.T,axis=0)*m.fabs(xlimits[1]-xlimits[0])/X.shape[0] histx[sc.isnan(histx)]= 0. histy[sc.isnan(histy)]= 0. 
dx= (extent[1]-extent[0])/float(len(histx)) axHistx.plot(sc.linspace(extent[0]+dx,extent[1]-dx,len(histx)),histx, drawstyle='steps-mid',color=onedhistcolor) dy= (extent[3]-extent[2])/float(len(histy)) axHisty.plot(histy,sc.linspace(extent[2]+dy,extent[3]-dy,len(histy)), drawstyle='steps-mid',color=onedhistcolor) axHistx.set_xlim( axScatter.get_xlim() ) axHisty.set_ylim( axScatter.get_ylim() ) axHistx.set_ylim( 0, 1.2*sc.amax(histx)) axHisty.set_xlim( 0, 1.2*sc.amax(histy)) if retCumImage: return cntrThis elif retAxes: return (axScatter,axHistx,axHisty) else: return out
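# The cntrmass branch above is the workhorse of these bovy_dens2d variants: it
# replaces each pixel of the density with the total probability mass of all
# pixels at least as dense, so contouring at level p draws the region that
# encloses a fraction p of the mass. A minimal self-contained sketch of that
# transform (plain numpy instead of the sc alias; the Gaussian test grid is
# made up for illustration):
import numpy as np

def cumulative_mass_image(density):
    """Return an array whose value at each cell is the mass enclosed by the
    iso-density contour through that cell (the cntrmass trick)."""
    flat = density.flatten()
    sortindx = np.argsort(flat)[::-1]                      # densest cells first
    cumul = np.cumsum(np.sort(flat)[::-1]) / np.sum(flat)
    out = np.zeros(flat.shape)
    out[sortindx] = cumul
    return out.reshape(density.shape)

# Example: for a 2D Gaussian, the 0.68 level encloses ~68% of the mass
x, y = np.meshgrid(np.linspace(-3, 3, 101), np.linspace(-3, 3, 101))
dens = np.exp(-0.5 * (x ** 2 + y ** 2))
cntr = cumulative_mass_image(dens)
print("mass inside the 0.68 contour: %.3f" % (dens[cntr <= 0.68].sum() / dens.sum()))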
#cmd+="\cp -p "+northxyz_grd_path+" "+northxyz_filtered_grd_path+"\n"; #subprocess.call(cmd,shell=True); out_f=netcdf.netcdf_file(eastxyz_filtered_grd_path,"w",True); out_f.createDimension("x",x.shape[0]); out_x=out_f.createVariable("x","f",("x",)); out_x[:]=x[:]; out_x._attributes["actual_range"]=scipy.array([x.min(), x.max()]); out_f.createDimension("y",y.shape[0]); out_y=out_f.createVariable("y","f",("y",)); out_y[:]=y[:]; out_y._attributes["actual_range"]=scipy.array([y.min(), y.max()]); data_out=scipy.arange(x.shape[0]*y.shape[0]); data_out.shape=(y.shape[0],x.shape[0]); out_z=out_f.createVariable("z",scipy.dtype("float32").char,("y","x")); out_z._attributes["actual_range"]=scipy.array([scipy.nanmin(eastvel), scipy.nanmax(eastvel)]); out_z[:]=eastvel[:]; out_z._attributes["_FillValue"]="nan"; out_f.flush(); out_f.sync(); out_f.close(); exit(); # Write grid files... cmd ="\nxyz2grd "+eastvel+" "+R+" -G"+eastxyz_filtered_grd_path+" -I120=\n"; cmd+="\nxyz2grd "+northvel+" "+R+" -G"+northxyz_filtered_grd_path+" -I120=\n";
def bovy_dens2d(X, **kwargs):
    """
    NAME:
       bovy_dens2d
    PURPOSE:
       plot a 2d density with optional contours
    INPUT:
       first argument is the density
       matplotlib.pyplot.imshow keywords (see
       http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.imshow)
       xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed
       ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed
       xrange
       yrange
       noaxes - don't plot any axes
       overplot - if True, overplot
       Contours:
          contours - if True, draw contours (10 by default)
          levels - contour-levels
          cntrmass - if True, the density is a probability and the levels are
                     probability masses contained within the contour
          cntrcolors - colors for contours (single color or array)
          cntrlabel - label the contours
          cntrlw, cntrls - linewidths and linestyles for contour
          cntrlabelsize, cntrlabelcolors, cntrinline - contour arguments
    OUTPUT:
    HISTORY:
       2010-03-09 - Written - Bovy (NYU)
    """
    overplot = kwargs.pop('overplot', False)
    if not overplot:
        pyplot.figure()
    ax = pyplot.gca()
    ax.set_autoscale_on(False)
    xlabel = kwargs.pop('xlabel', None)
    ylabel = kwargs.pop('ylabel', None)
    if 'extent' in kwargs:
        extent = kwargs.pop('extent')
    else:
        xlimits = list(kwargs.pop('xrange', [0, X.shape[0]]))
        ylimits = list(kwargs.pop('yrange', [0, X.shape[1]]))
        extent = xlimits + ylimits
    noaxes = kwargs.pop('noaxes', False)
    # 'levels' or a truthy 'cntrmass' also implies contouring; the original
    # mixed the Python 2-only kwargs.has_key() into this test, which breaks
    # under Python 3
    contours = (kwargs.pop('contours', False) or 'levels' in kwargs
                or bool(kwargs.get('cntrmass', False)))
    levels = kwargs.pop('levels', None)
    if levels is None and contours:
        if kwargs.get('cntrmass', False):
            levels = sc.linspace(0., 1., _DEFAULTNCNTR)
        elif True in sc.isnan(sc.array(X)):
            levels = sc.linspace(sc.nanmin(X), sc.nanmax(X), _DEFAULTNCNTR)
        else:
            levels = sc.linspace(sc.amin(X), sc.amax(X), _DEFAULTNCNTR)
    cntrmass = kwargs.pop('cntrmass', False)
    cntrcolors = kwargs.pop('cntrcolors', 'k')
    cntrlabel = kwargs.pop('cntrlabel', False)
    cntrlw = kwargs.pop('cntrlw', None)
    cntrls = kwargs.pop('cntrls', None)
    cntrlabelsize = kwargs.pop('cntrlabelsize', None)
    cntrlabelcolors = kwargs.pop('cntrlabelcolors', None)
    cntrinline = kwargs.pop('cntrinline', None)
    retCumImage = kwargs.pop('retCumImage', False)
    out = pyplot.imshow(X, extent=extent, **kwargs)
    pyplot.axis(extent)
    _add_axislabels(xlabel, ylabel)
    _add_ticks()
    # guard on retCumImage as well, so cntrThis is defined when it is returned
    if contours or retCumImage:
        aspect = kwargs.get('aspect', None)
        origin = kwargs.get('origin', None)
        if cntrmass:
            #Sum from the top down!
            sortindx = sc.argsort(X.flatten())[::-1]
            cumul = sc.cumsum(sc.sort(X.flatten())[::-1]) / sc.sum(X.flatten())
            cntrThis = sc.zeros(sc.prod(X.shape))
            cntrThis[sortindx] = cumul
            cntrThis = sc.reshape(cntrThis, X.shape)
        else:
            cntrThis = X
        if contours:
            cont = pyplot.contour(cntrThis, levels, colors=cntrcolors,
                                  linewidths=cntrlw, extent=extent,
                                  aspect=aspect, linestyles=cntrls,
                                  origin=origin)
            if cntrlabel:
                pyplot.clabel(cont, fontsize=cntrlabelsize,
                              colors=cntrlabelcolors, inline=cntrinline)
    if noaxes:
        ax.set_axis_off()
    if retCumImage:
        return cntrThis
    else:
        return out
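# A hedged usage sketch for the function above, assuming it is imported from
# galpy's bovy_plot module (where these helpers historically live; newer galpy
# releases rename it galpy.util.plot.dens2d). The histogram grid is synthetic:
import numpy as np
from galpy.util import bovy_plot

xs, ys = np.random.multivariate_normal([0., 0.], [[1., .7], [.7, 1.]], 10000).T
H, xedges, yedges = np.histogram2d(xs, ys, bins=51)
bovy_plot.bovy_dens2d(H.T, origin='lower', cmap='gist_yarg',
                      interpolation='nearest',
                      xrange=[xedges[0], xedges[-1]],
                      yrange=[yedges[0], yedges[-1]],
                      contours=True, cntrmass=True,
                      levels=[0.68, 0.95],   # mass enclosed, not density values
                      xlabel=r'x', ylabel=r'y')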
def veldist_1d_convolve(plotfilename, phi=_DEFAULTPHI, R=_DEFAULTR,
                        ngrid=201, saveDir='../bar/1dvar/'):
    """
    NAME:
       veldist_1d_convolve
    PURPOSE:
       make a plot showing the influence of the distance uncertainties
    INPUT:
       plotfilename - filename for figure
       phi - Galactocentric azimuth
       R - Galactocentric radius
       ngrid - number of grid-points to calculate the los velocity
               distribution on
       saveDir - save pickles here
    OUTPUT:
       Figure in plotfilename
    HISTORY:
       2010-05-15 - Written - Bovy (NYU)
    """
    convolves = [0., 0.2, 0.3]
    vloslinspace = (-.9, .9, ngrid)
    vloss = sc.linspace(*vloslinspace)
    vlosds = []
    basesavefilename = os.path.join(saveDir, 'convolve_')
    for distsig in convolves:
        thissavefilename = basesavefilename + '%.1f.sav' % distsig
        if os.path.exists(thissavefilename):
            print("Restoring los-velocity distribution at distance "
                  "uncertainties %.1f" % distsig)
            savefile = open(thissavefilename, 'rb')  # pickles are binary files
            vlosd = pickle.load(savefile)
            savefile.close()
        else:
            print("Calculating los-velocity distribution at distance "
                  "uncertainties %.1f" % distsig)
            potparams = (0.9, 0.01, 25. * _degtorad, .8, None)
            if distsig == 0.:
                vlosd = predictVlos(vloslinspace, l=phi, d=R,
                                    distCoord='GCGC', pot='bar', beta=0.,
                                    potparams=potparams)
            else:
                vlosd = predictVlosConvolve(vloslinspace, l=phi, d=R,
                                            distCoord='GCGC', pot='bar',
                                            beta=0., potparams=potparams,
                                            convolve=distsig)
            vlosd = vlosd / (sc.nansum(vlosd) * (vloss[1] - vloss[0]))
            savefile = open(thissavefilename, 'wb')
            pickle.dump(vlosd, savefile)
            savefile.close()
        vlosds.append(vlosd)
    #Plot
    plot.bovy_print()
    plot.bovy_plot(vloss, vlosds[0], 'k-', zorder=3,
                   xrange=[vloslinspace[0], vloslinspace[1]],
                   yrange=[0., sc.nanmax(sc.array(vlosds).flatten()) * 1.1],
                   xlabel=r'$v_{\mathrm{los}} / v_0$')
    plot.bovy_plot(vloss, vlosds[1], ls='-', color='0.75',
                   overplot=True, zorder=2, lw=2.)
    plot.bovy_plot(vloss, vlosds[2], ls='-', color='0.6',
                   overplot=True, zorder=2, lw=2.)
    #plot.bovy_plot(vloss, vlosds[3], ls='-', color='0.45',
    #               overplot=True, zorder=2, lw=2.)
    plot.bovy_text(r'$\mathrm{distance\ uncertainties}$', title=True)
    plot.bovy_text(0.5, .65, r'$\sigma_d = 0$' + '\n' + r'$\sigma_d = 20 \%$'
                   + '\n' + r'$\sigma_d = 30 \%$')
    plot.bovy_end_print(plotfilename)
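# veldist_1d_convolve wraps every expensive calculation in the same
# restore-or-compute pickle pattern. Factored out, the pattern is just this
# (the helper name 'cached' is made up for illustration):
import os
import pickle

def cached(path, compute, *args, **kwargs):
    """Load a pickled result if it exists, otherwise compute and cache it."""
    if os.path.exists(path):
        with open(path, 'rb') as f:    # binary mode is required for pickle
            return pickle.load(f)
    result = compute(*args, **kwargs)
    with open(path, 'wb') as f:
        pickle.dump(result, f)
    return result

# usage: vlosd = cached('convolve_0.2.sav', some_expensive_function, 0.2)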
ax = Axes3D(fig)
#ax.plot3D(p[:, 0], p[:, 1], p[:, 2])  # plots the whole 3D curve at once (stationary)

def func(k):
    # Frame function passed to animation.FuncAnimation, called once per frame.
    # The step size regulates the speed of the animation: increase the
    # multiplier to speed it up, decrease it to slow it down.
    step = 10 * k
    # extend the plotted trajectory up to the current frame
    ax.plot3D(p[:step, 0], p[:step, 1], p[:step, 2], color='g')

# Defining the axes: nanmin/nanmax set the limits while excluding NaN values
ax.set_xlim3d([sp.nanmin(p[:, 0]), sp.nanmax(p[:, 0])])
ax.set_xlabel('X')
ax.set_ylim3d([sp.nanmin(p[:, 1]), sp.nanmax(p[:, 1])])
ax.set_ylabel('Y')
ax.set_zlim3d([sp.nanmin(p[:, 2]), sp.nanmax(p[:, 2])])
ax.set_zlabel('Z')
# Alternative without NaN handling:
# ax.set_xlim3d([sp.amin(p[:, 0]), sp.amax(p[:, 0])])
# ax.set_xlabel('X')
# ax.set_ylim3d([sp.amin(p[:, 1]), sp.amax(p[:, 1])])
# ax.set_ylabel('Y')
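# The frame function above only makes sense wired into
# matplotlib.animation.FuncAnimation. A self-contained sketch with a made-up
# helix standing in for p, including the NaN-safe axis limits:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

t = np.linspace(0, 6 * np.pi, 600)
p = np.column_stack([np.cos(t), np.sin(t), t])
p[100:110] = np.nan                      # a gap; nanmin/nanmax stay finite

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_xlim3d([np.nanmin(p[:, 0]), np.nanmax(p[:, 0])])
ax.set_ylim3d([np.nanmin(p[:, 1]), np.nanmax(p[:, 1])])
ax.set_zlim3d([np.nanmin(p[:, 2]), np.nanmax(p[:, 2])])

def func(k):
    step = 10 * k                        # larger multiplier = faster animation
    return ax.plot3D(p[:step, 0], p[:step, 1], p[:step, 2], color='g')

anim = animation.FuncAnimation(fig, func, frames=len(p) // 10, interval=30)
plt.show()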
def bovy_dens2d(X,**kwargs): """ NAME: bovy_dens2d PURPOSE: plot a 2d density with optional contours INPUT: first argument is the density matplotlib.pyplot.imshow keywords (see http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.imshow) xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed xrange yrange noaxes - don't plot any axes overplot - if True, overplot colorbar - if True, add colorbar shrink= colorbar argument: shrink the colorbar by the factor (optional) conditional - normalize each column separately (for probability densities, i.e., cntrmass=True) Contours: justcontours - if True, only draw contours contours - if True, draw contours (10 by default) levels - contour-levels cntrmass - if True, the density is a probability and the levels are probability masses contained within the contour cntrcolors - colors for contours (single color or array) cntrlabel - label the contours cntrlw, cntrls - linewidths and linestyles for contour cntrlabelsize, cntrlabelcolors,cntrinline - contour arguments cntrSmooth - use ndimage.gaussian_filter to smooth before contouring onedhists - if True, make one-d histograms on the sides onedhistcolor - histogram color retAxes= return all Axes instances retCont= return the contour instance OUTPUT: plot to output device, Axes instances depending on input HISTORY: 2010-03-09 - Written - Bovy (NYU) """ overplot= kwargs.pop('overplot',False) if not overplot: pyplot.figure() xlabel= kwargs.pop('xlabel',None) ylabel= kwargs.pop('ylabel',None) zlabel= kwargs.pop('zlabel',None) if 'extent' in kwargs: extent= kwargs.pop('extent') else: xlimits= kwargs.pop('xrange',[0,X.shape[1]]) ylimits= kwargs.pop('yrange',[0,X.shape[0]]) extent= xlimits+ylimits if not 'aspect' in kwargs: kwargs['aspect']= (xlimits[1]-xlimits[0])/float(ylimits[1]-ylimits[0]) noaxes= kwargs.pop('noaxes',False) justcontours= kwargs.pop('justcontours',False) if ('contours' in kwargs and kwargs['contours']) or \ 'levels' in kwargs or justcontours or \ ('cntrmass' in kwargs and kwargs['cntrmass']): contours= True else: contours= False kwargs.pop('contours',None) if 'levels' in kwargs: levels= kwargs['levels'] kwargs.pop('levels') elif contours: if 'cntrmass' in kwargs and kwargs['cntrmass']: levels= sc.linspace(0.,1.,_DEFAULTNCNTR) elif True in sc.isnan(sc.array(X)): levels= sc.linspace(sc.nanmin(X),sc.nanmax(X),_DEFAULTNCNTR) else: levels= sc.linspace(sc.amin(X),sc.amax(X),_DEFAULTNCNTR) cntrmass= kwargs.pop('cntrmass',False) conditional= kwargs.pop('conditional',False) cntrcolors= kwargs.pop('cntrcolors','k') cntrlabel= kwargs.pop('cntrlabel',False) cntrlw= kwargs.pop('cntrlw',None) cntrls= kwargs.pop('cntrls',None) cntrSmooth= kwargs.pop('cntrSmooth',None) cntrlabelsize= kwargs.pop('cntrlabelsize',None) cntrlabelcolors= kwargs.pop('cntrlabelcolors',None) cntrinline= kwargs.pop('cntrinline',None) retCumImage= kwargs.pop('retCumImage',False) cb= kwargs.pop('colorbar',False) shrink= kwargs.pop('shrink',None) onedhists= kwargs.pop('onedhists',False) onedhistcolor= kwargs.pop('onedhistcolor','k') retAxes= kwargs.pop('retAxes',False) retCont= kwargs.pop('retCont',False) if onedhists: if overplot: fig= pyplot.gcf() else: fig= pyplot.figure() nullfmt = NullFormatter() # no labels # definitions for the axes left, width = 0.1, 0.65 bottom, height = 0.1, 0.65 bottom_h = left_h = left+width rect_scatter = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.2] rect_histy = [left_h, bottom, 0.2, 
height] axScatter = pyplot.axes(rect_scatter) axHistx = pyplot.axes(rect_histx) axHisty = pyplot.axes(rect_histy) # no labels axHistx.xaxis.set_major_formatter(nullfmt) axHistx.yaxis.set_major_formatter(nullfmt) axHisty.xaxis.set_major_formatter(nullfmt) axHisty.yaxis.set_major_formatter(nullfmt) fig.sca(axScatter) ax=pyplot.gca() ax.set_autoscale_on(False) if conditional: plotthis= X/sc.tile(sc.sum(X,axis=0),(X.shape[1],1)) else: plotthis= X if not justcontours: out= pyplot.imshow(plotthis,extent=extent,**kwargs) if not overplot: pyplot.axis(extent) _add_axislabels(xlabel,ylabel) _add_ticks() #Add colorbar if cb and not justcontours: if shrink is None: shrink= sc.amin([float(kwargs.pop('aspect',1.))*0.87,1.]) CB1= pyplot.colorbar(out,shrink=shrink) if not zlabel is None: if zlabel[0] != '$': thiszlabel=r'$'+zlabel+'$' else: thiszlabel=zlabel CB1.set_label(thiszlabel) if contours or retCumImage: aspect= kwargs.get('aspect',None) origin= kwargs.get('origin',None) if cntrmass: #Sum from the top down! plotthis[sc.isnan(plotthis)]= 0. sortindx= sc.argsort(plotthis.flatten())[::-1] cumul= sc.cumsum(sc.sort(plotthis.flatten())[::-1])/sc.sum(plotthis.flatten()) cntrThis= sc.zeros(sc.prod(plotthis.shape)) cntrThis[sortindx]= cumul cntrThis= sc.reshape(cntrThis,plotthis.shape) else: cntrThis= plotthis if contours: if not cntrSmooth is None: cntrThis= ndimage.gaussian_filter(cntrThis,cntrSmooth, mode='nearest') cont= pyplot.contour(cntrThis,levels,colors=cntrcolors, linewidths=cntrlw,extent=extent,aspect=aspect, linestyles=cntrls,origin=origin) if cntrlabel: pyplot.clabel(cont,fontsize=cntrlabelsize, colors=cntrlabelcolors, inline=cntrinline) if noaxes: ax.set_axis_off() #Add onedhists if not onedhists: if retCumImage: return cntrThis elif retAxes: return pyplot.gca() elif retCont: return cont elif justcontours: return cntrThis else: return out histx= sc.nansum(X.T,axis=1)*m.fabs(ylimits[1]-ylimits[0])/X.shape[1] #nansum bc nan is *no dens value* histy= sc.nansum(X.T,axis=0)*m.fabs(xlimits[1]-xlimits[0])/X.shape[0] histx[sc.isnan(histx)]= 0. histy[sc.isnan(histy)]= 0. dx= (extent[1]-extent[0])/float(len(histx)) axHistx.plot(sc.linspace(extent[0]+dx,extent[1]-dx,len(histx)),histx, drawstyle='steps-mid',color=onedhistcolor) dy= (extent[3]-extent[2])/float(len(histy)) axHisty.plot(histy,sc.linspace(extent[2]+dy,extent[3]-dy,len(histy)), drawstyle='steps-mid',color=onedhistcolor) axHistx.set_xlim( axScatter.get_xlim() ) axHisty.set_ylim( axScatter.get_ylim() ) axHistx.set_ylim( 0, 1.2*sc.amax(histx)) axHisty.set_xlim( 0, 1.2*sc.amax(histy)) if retCumImage: return cntrThis elif retAxes: return (axScatter,axHistx,axHisty) elif justcontours: return cntrThis else: return out
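# The conditional=True branch above divides each column of X by its column
# sum, turning a joint density p(x, y) into the conditional p(y | x) before
# plotting. Note that the sc.tile construction only matches shapes when X is
# square; plain broadcasting does the same job for any shape:
import numpy as np

X = np.abs(np.random.randn(40, 60))
cond = X / X.sum(axis=0)         # column sums broadcast across the rows
print(cond.sum(axis=0)[:3])      # every column now sums to 1.0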
def bovy_dens2d(X,**kwargs): """ NAME: bovy_dens2d PURPOSE: plot a 2d density with optional contours INPUT: first argument is the density matplotlib.pyplot.imshow keywords (see http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.imshow) xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed xrange yrange noaxes - don't plot any axes overplot - if True, overplot colorbar - if True, add colorbar shrink= colorbar argument: shrink the colorbar by the factor (optional) Contours: contours - if True, draw contours (10 by default) levels - contour-levels cntrmass - if True, the density is a probability and the levels are probability masses contained within the contour cntrcolors - colors for contours (single color or array) cntrlabel - label the contours cntrlw, cntrls - linewidths and linestyles for contour cntrlabelsize, cntrlabelcolors,cntrinline - contour arguments OUTPUT: HISTORY: 2010-03-09 - Written - Bovy (NYU) """ if kwargs.has_key('overplot'): overplot= kwargs['overplot'] kwargs.pop('overplot') else: overplot= False if not overplot: pyplot.figure() ax=pyplot.gca() ax.set_autoscale_on(False) if kwargs.has_key('xlabel'): xlabel= kwargs['xlabel'] kwargs.pop('xlabel') else: xlabel=None if kwargs.has_key('ylabel'): ylabel= kwargs['ylabel'] kwargs.pop('ylabel') else: ylabel=None if kwargs.has_key('zlabel'): zlabel= kwargs['zlabel'] kwargs.pop('zlabel') else: zlabel=None if kwargs.has_key('extent'): extent= kwargs['extent'] kwargs.pop('extent') else: if kwargs.has_key('xrange'): xlimits=list(kwargs['xrange']) kwargs.pop('xrange') else: xlimits=[0,X.shape[0]] if kwargs.has_key('yrange'): ylimits=list(kwargs['yrange']) kwargs.pop('yrange') else: ylimits=[0,X.shape[1]] extent= xlimits+ylimits if not kwargs.has_key('aspect'): kwargs['aspect']= (xlimits[1]-xlimits[0])/float(ylimits[1]-ylimits[0]) if kwargs.has_key('noaxes'): noaxes= kwargs['noaxes'] kwargs.pop('noaxes') else: noaxes= False if (kwargs.has_key('contours') and kwargs['contours']) or \ kwargs.has_key('levels') or \ (kwargs.has_key('cntrmass') and kwargs['cntrmass']): contours= True else: contours= False if kwargs.has_key('contours'): kwargs.pop('contours') if kwargs.has_key('levels'): levels= kwargs['levels'] kwargs.pop('levels') elif contours: if kwargs.has_key('cntrmass') and kwargs['cntrmass']: levels= sc.linspace(0.,1.,_DEFAULTNCNTR) elif True in sc.isnan(sc.array(X)): levels= sc.linspace(sc.nanmin(X),sc.nanmax(X),_DEFAULTNCNTR) else: levels= sc.linspace(sc.amin(X),sc.amax(X),_DEFAULTNCNTR) if kwargs.has_key('cntrmass') and kwargs['cntrmass']: cntrmass= True kwargs.pop('cntrmass') else: cntrmass= False if kwargs.has_key('cntrmass'): kwargs.pop('cntrmass') if kwargs.has_key('cntrcolors'): cntrcolors= kwargs['cntrcolors'] kwargs.pop('cntrcolors') elif contours: cntrcolors='k' if kwargs.has_key('cntrlabel') and kwargs['cntrlabel']: cntrlabel= True kwargs.pop('cntrlabel') else: cntrlabel= False if kwargs.has_key('cntrlabel'): kwargs.pop('cntrlabel') if kwargs.has_key('cntrlw'): cntrlw= kwargs['cntrlw'] kwargs.pop('cntrlw') elif contours: cntrlw= None if kwargs.has_key('cntrls'): cntrls= kwargs['cntrls'] kwargs.pop('cntrls') elif contours: cntrls= None if kwargs.has_key('cntrlabelsize'): cntrlabelsize= kwargs['cntrlabelsize'] kwargs.pop('cntrlabelsize') elif contours: cntrlabelsize= None if kwargs.has_key('cntrlabelcolors'): cntrlabelcolors= kwargs['cntrlabelcolors'] kwargs.pop('cntrlabelcolors') elif contours: cntrlabelcolors= None if 
kwargs.has_key('cntrinline'): cntrinline= kwargs['cntrinline'] kwargs.pop('cntrinline') elif contours: cntrinline= None if kwargs.has_key('retCumImage'): retCumImage= kwargs['retCumImage'] kwargs.pop('retCumImage') else: retCumImage= False if kwargs.has_key('colorbar'): cb= kwargs['colorbar'] kwargs.pop('colorbar') else: cb= False if kwargs.has_key('shrink'): shrink= kwargs['shrink'] kwargs.pop('shrink') else: shrink= None out= pyplot.imshow(X,extent=extent,**kwargs) pyplot.axis(extent) _add_axislabels(xlabel,ylabel) _add_ticks() #Add colorbar if cb: if shrink is None: if kwargs.has_key('aspect'): shrink= sc.amin([float(kwargs['aspect'])*0.87,1.]) else: shrink= 0.87 CB1= pyplot.colorbar(out,shrink=shrink) if not zlabel is None: if zlabel[0] != '$': thiszlabel=r'$'+zlabel+'$' else: thiszlabel=zlabel CB1.set_label(zlabel) if contours or retCumImage: if kwargs.has_key('aspect'): aspect= kwargs['aspect'] else: aspect= None if kwargs.has_key('origin'): origin= kwargs['origin'] else: origin= None if cntrmass: #Sum from the top down! X[sc.isnan(X)]= 0. sortindx= sc.argsort(X.flatten())[::-1] cumul= sc.cumsum(sc.sort(X.flatten())[::-1])/sc.sum(X.flatten()) cntrThis= sc.zeros(sc.prod(X.shape)) cntrThis[sortindx]= cumul cntrThis= sc.reshape(cntrThis,X.shape) else: cntrThis= X if contours: cont= pyplot.contour(cntrThis,levels,colors=cntrcolors, linewidths=cntrlw,extent=extent,aspect=aspect, linestyles=cntrls,origin=origin) if cntrlabel: pyplot.clabel(cont,fontsize=cntrlabelsize, colors=cntrlabelcolors, inline=cntrinline) if noaxes: ax.set_axis_off() if retCumImage: return cntrThis else: return out
def mcdata(data, other=None, x_offset=0, div=2, zero_line=True, events=None, epochs=None, plot_handle=None, colours=None, title=None, filename=None, show=True): """plot multichanneled data -> general plot parameter :type data: ndarray :param data: The base data to plot with observations(samples) on the rows and variables(channels) on the columns. This data will be plotted on in the n topmost axes. :type other: ndarray :param other: Other data that augments the base data. The other data will be plotted in one axe visibly divided from the base data. Default=None :type x_offset: int :param x_offset: A offset value for the x-axis(samples). This allows for the x-axis to show proper values for windows not starting at x=0. All values for events and epochs etc. will not be shown if they do not fall into the frame defined. Default=0 :type div: float :param div: Percentage of the figure height to use as divider for the others plot. Default=1 :type zero_line: bool :param zero_line: if True, mark the zero line for the data channels Default=True :type events: dict :param events: dict of events from [x_offset, x_offset+len(data)]. If the dict entries are lists/ndarrays, vertical markers will be placed at these samples. If the dict entries are tuples of length 2, like (ndarray,ndarray), the first is interpreted as the waveform, and the second as the events. Each unit will be coloured according to the 'colours' vector. Default={} :type epochs: dict :param epochs: dict of epochs from [x_offset, x_offset+len(data)]. Epochs with numeric keys will be interpreted as belonging to the unit with that key and will be coloured according to the '' vector. All other epochs will appear in grey colour. Epochs are passed as a 2dim vector, like [[start,stop]]. Default={} """ # checks if not isinstance(data, sp.ndarray): raise ValueError("data is no ndarray!") if data.ndim != 2: raise ValueError("data is not dim=2!") fig, ax = check_plotting_handle(plot_handle, create_ax=False) # init fig.clear() has_other = other is not None ns, nc = data.shape x_vals = sp.arange(ns) + x_offset if colours is None: col_lst = COLOURS elif colours == 'black': col_lst = ['k'] * nc else: col_lst = colours ax_spacer = div * 0.01 # prepare axes if has_other: ax_height = (0.8 - (nc + 1) * ax_spacer) / (nc + 1) else: ax_height = (0.8 - (nc - 1) * ax_spacer) / nc for c in xrange(nc): ax_size = ( 0.1, 0.9 - (c + 1) * ax_height - c * ax_spacer, 0.8, ax_height) ax = fig.add_axes(ax_size, sharex=ax, sharey=ax) ax.set_ylabel('CH %d' % c) if c != nc - 1: plt.setp(ax.get_xticklabels(), visible=False) #ax.set_xticklabels([tl.get_text() for tl in ax.get_xticklabels()], visible=False) #ax.set_xlim(x_vals[0], x_vals[-1]) #ax.set_ylim(data.min() * 1.1, data.max() * 1.1) if has_other: ax = fig.add_axes((0.1, 0.1, 0.8, ax_height), sharex=ax) ax.set_ylabel('OTHER') #ax.set_xlim(x_vals[0], x_vals[-1]) #ax.set_ylim(-other.max() * 1.1, other.max() * 1.1) # plot data for c, a in enumerate(fig.axes[:nc]): a.add_collection( mpl.collections.LineCollection( [sp.vstack((x_vals, data[:, c])).T], colors=[(0, 0, 0)])) # plot other if has_other: fig.axes[-1].add_collection(mpl.collections.LineCollection( [sp.vstack((x_vals, other[:, c])).T for c in xrange(other.shape[1])], colors=col_lst)) # plot events if events is not None: for u in sorted(events): try: col = col_lst[u % len(col_lst)] except: col = 'gray' if isinstance(events[u], tuple): if len(events[u]) != 2: raise ValueError('Event entry for unit %s is not a tuple ' 'of length 2' % u) u_wf, u_ev = events[u] if not 
u_wf.shape[1] == nc: raise ValueError('Waveform for unit %s has mismatching ' 'channel count' % u) cut = int(sp.floor(u_wf.shape[0] / 2.0)) for c, a in enumerate(fig.axes[:nc]): a.add_collection( mpl.collections.LineCollection( [sp.vstack((sp.arange(u_wf.shape[0]) - cut + u_ev[i], u_wf[:, c])).T for i in xrange(u_ev.size)], colors=[col])) if has_other: for e in u_ev: fig.axes[-1].axvline(e, c=col) elif isinstance(events[u], (list, sp.ndarray)): for a in fig.axes: for e in events[u]: a.axvline(e, c=col) else: raise ValueError('events for unit %s are messed up' % u) # plot epochs if epochs is not None: for u in sorted(epochs): try: col = col_lst[u % len(col_lst)] except: col = 'gray' for ep in epochs[u]: for a in fig.axes: a.axvspan(ep[0], ep[1], fc=col, alpha=0.2) # zero lines if zero_line: for a in fig.axes: a.add_collection( mpl.collections.LineCollection( [sp.vstack(([x_vals[0], x_vals[-1]], sp.zeros(2))).T], linestyles='dashed', colors=[(0, 0, 0)])) # scale axes fig.axes[0].set_xlim(x_vals[0], x_vals[-1]) fig.axes[0].set_ylim(sp.nanmin(data) * 1.05, sp.nanmax(data) * 1.05) if has_other: fig.axes[-1].set_ylim(sp.nanmin(other) * 1.1, sp.nanmax(other) * 1.1) # figure title if title is not None: fig.suptitle(title) # produce plot if filename is not None: save_figure(fig, filename, '') if show is True: plt.show() # return return fig
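# The plotting core of mcdata is one LineCollection per channel, which is far
# cheaper than one plt.plot call per trace. A stripped-down sketch with
# synthetic data (collections do not autoscale, hence the explicit NaN-safe
# limits, exactly as mcdata sets them at the end):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

ns, nc = 1000, 4
data = np.cumsum(np.random.randn(ns, nc), axis=0)
x_vals = np.arange(ns)

fig, axes = plt.subplots(nc, 1, sharex=True, sharey=True)
for c, ax in enumerate(axes):
    ax.add_collection(LineCollection([np.vstack((x_vals, data[:, c])).T],
                                     colors=[(0, 0, 0)]))
    ax.set_ylabel('CH %d' % c)
axes[0].set_xlim(x_vals[0], x_vals[-1])
axes[0].set_ylim(np.nanmin(data) * 1.05, np.nanmax(data) * 1.05)
plt.show()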
def initTau(self, groups, pa=1e-3, pb=1e-3, qa=1., qb=1., qE=None):
    """Method to initialise the precision of the noise

    PARAMETERS
    ----------
    groups: array-like
        sample group label for each of the N samples
    pa: float
        'a' parameter of the prior distribution
    pb: float
        'b' parameter of the prior distribution
    qa: float
        initialisation of the 'a' parameter of the variational distribution
    qb: float
        initialisation of the 'b' parameter of the variational distribution
    qE: float
        initial expectation of the variational distribution
    """
    # Sanity checks
    assert len(groups) == self.N, 'sample groups labels do not match number of samples'

    tau_list = [None] * self.M

    # convert groups into integers from 0 to n_groups
    tmp = np.unique(groups, return_inverse=True)
    groups_ix = tmp[1]
    n_group = len(np.unique(groups_ix))

    for m in range(self.M):
        # Poisson noise model for count data
        if self.lik[m] == "poisson":
            tmp = 0.25 + 0.17 * s.nanmax(self.data[m], axis=0)
            tmp = s.repeat(tmp[None, :], self.N, axis=0)
            tau_list[m] = Tau_Seeger(dim=(self.N, self.D[m]), value=tmp)
        # Bernoulli noise model for binary data
        elif self.lik[m] == "bernoulli":
            # tau_list[m] = Constant_Node(dim=(self.D[m],), value=s.ones(self.D[m])*0.25)
            # tau_list[m] = Tau_Jaakkola(dim=(self.D[m],), value=0.25)
            tau_list[m] = Tau_Jaakkola(dim=(self.N, self.D[m]), value=1.)
        # Zero-inflated noise model: holds the parameters to initialise both
        # the Gaussian and the Jaakkola tau
        elif self.lik[m] == "zero_inflated":
            tau_list[m] = Zero_Inflated_Tau_Jaakkola(dim=(n_group, self.D[m]),
                                                     value=1., pa=pa, pb=pb,
                                                     qa=qa, qb=qb,
                                                     groups=groups_ix, qE=qE)
        # Gaussian noise model for continuous data
        elif self.lik[m] == "gaussian":
            tau_list[m] = TauD_Node(dim=(n_group, self.D[m]), pa=pa, pb=pb,
                                    qa=qa, qb=qb, groups=groups_ix, qE=qE)

    self.nodes["Tau"] = Multiview_Mixed_Node(self.M, *tau_list)
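# The Poisson branch above seeds the noise precision from the data range,
# tau_d = 0.25 + 0.17 * max_n(y_nd), then broadcasts it to one value per
# sample. Standalone, with made-up count data:
import numpy as np

counts = np.random.poisson(5., size=(100, 20)).astype(float)   # N x D
tau = 0.25 + 0.17 * np.nanmax(counts, axis=0)                  # per feature
tau = np.repeat(tau[None, :], counts.shape[0], axis=0)         # N x D, as in initTau
print(tau.shape)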
def TVDI_function(inNDVI, inLST, pas=0.02, t=1, s1Min=0.3, s2Max=0.8,
                  ss1Min=0.2, ss2Max=0.8):
    """
    Calculates the TVDI. This function is a modified version of the IDL
    script published by Monica Garcia (Garcia, M., Fernandez, N.,
    Villagarcia, L., Domingo, F., Puigdefabregas, J. & I. Sandholt. 2014.
    Accuracy of the Temperature-Vegetation Dryness Index using MODIS under
    water-limited vs. energy-limited evapotranspiration conditions.
    Remote Sensing of Environment 149, 100-117.)

    Input:
        inNDVI: NDVI
        inLST: land surface temperature
        pas: width of the NDVI intervals
        s1Min: lower threshold of the interval used to fit the LSTmax parameters
        s2Max: upper threshold of the interval used to fit the LSTmax parameters
        ss1Min: lower threshold of the interval used to fit the LSTmin parameters
        ss2Max: upper threshold of the interval used to fit the LSTmin parameters
        t: t=0 to use the Garcia M. method, t=1 to calculate the TVDI without
           using the thresholds
    Output:
        TVDI
    """
    TVDI = sp.zeros(inLST.shape)
    if inNDVI.shape == inLST.shape:
        inNdvi = sp.reshape(inNDVI, (inNDVI.size))
        inLst = sp.reshape(inLST, (inLST.size))
        mini = sp.nanmin(inNdvi)  # minimum NDVI value
        maxi = sp.nanmax(inNdvi)  # maximum NDVI value
        arg = sp.argsort(inNdvi)  # sort and return the indices of the ordered values
        inV = inNdvi[arg]  # NDVI values, sorted
        inT = inLst[arg]   # temperature values, in NDVI order
        # slicing of the NDVI range into intervals
        percentileMax = 99.0
        percentileMin = 1.0
        nObsMin = 5  # minimum length an interval must have to be considered
        ni = int(round((maxi - mini) / pas) + 1)  # total number of intervals
        iValMax = 0
        iValMin = ni
        # storage vectors
        vx = sp.zeros((ni), dtype="float")
        vMaxi = sp.zeros((ni), dtype="float")
        vMini = sp.zeros((ni), dtype="float")
        vMaxi[0:] = None
        vMini[0:] = None
        vNpi = sp.zeros((ni), dtype="float")
        for k in range(ni):
            hi = k * pas + mini  # start value of the interval
            hs = k * pas + hi    # end value of the interval
            a = sp.where(inV <= hi)
            ii = a[0].max()
            b = sp.where(inV <= hs)
            iis = b[0].max()
            vNpi[k] = iis - ii
            inTp = inT[ii:iis + 1]  # temperature values falling in this interval
            vx[k] = (hs - hi) / 2 + hi  # NDVI value at the middle of the interval
            if vNpi[k] > nObsMin:  # check that the interval holds enough values
                inTp = inTp[sp.argsort(inTp)]  # sort the temperatures in this interval
                # temperature at the 99th percentile of the interval
                vMaxi[k] = inTp[int(vNpi[k] * percentileMax / 100)]
                # temperature at the 1st percentile of the interval
                vMini[k] = inTp[int(vNpi[k] * percentileMin / 100)]
                if k > iValMax:
                    iValMax = k
                if k < iValMin:
                    iValMin = k
        # computation of LSTmax and LSTmin
        if t == 0:
            # Dry edge: the thresholds bound the interval used for the linear
            # regression; iValMin and iValMax keep us inside the intervals
            # that satisfy nObsMin, i.e. away from the NaNs
            try:
                b = sp.where(vx < s1Min)  # lower threshold, adjust as needed
                ii = sp.nanmax([sp.nanmax(b[0]), iValMin])
                b = sp.where(vx < s2Max)  # upper threshold, adjust as needed
                iis = sp.nanmin([sp.nanmax(b[0]), iValMax])
                # Wet edge
                c = sp.where(vx < ss1Min)  # lower threshold, adjust as needed
                ii2 = sp.nanmax([sp.nanmax(c[0]), iValMin])
                c = sp.where(vx < ss2Max)  # upper threshold, adjust as needed
                iis2 = sp.nanmin([sp.nanmax(c[0]), iValMax])
            except Exception:
                print("problem with the lower and upper threshold values used")
        else:
            ii = iValMin
            iis = iValMax
            ii2 = iValMin
            iis2 = iValMax
        # linear regression: LSTmax = a * NDVI + b
        estimation1 = sp.stats.linregress(vx[ii:iis + 1], vMaxi[ii:iis + 1])
        lstmax_a = estimation1[0]  # slope
        lstmax_b = estimation1[1]  # intercept
        estimation1 = sp.stats.linregress(vx[ii2:iis2 + 1], vMini[ii2:iis2 + 1])
        lstmin = sp.nanmin(vMini[ii2:iis2 + 1])
        # TVDI computation (the small constant avoids division by zero)
        TVDI = (inLST - lstmin) / (lstmax_b + (lstmax_a * inNDVI) - lstmin + 0.00000001)
        # TVDI = (inLST - lstmin) / ((lstmax_b + (lstmax_a * inNDVI)) - lstmin + 0.00001)
    else:
        print("the two arrays do not have the same size")
    return TVDI
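# The TVDI core reduces to: bin LST by NDVI, take extreme percentiles per bin,
# regress the dry edge, and normalize. A compact sketch on synthetic data
# (the s1Min/s2Max thresholding is omitted for brevity):
import numpy as np
from scipy import stats

ndvi = np.random.uniform(0., 1., 5000)
lst = 320. - 25. * ndvi + np.random.randn(5000) * 3.   # hotter where sparse

edges = np.arange(0., 1.02, 0.02)
mids, tmax, tmin = [], [], []
for lo, hi in zip(edges[:-1], edges[1:]):
    sel = lst[(ndvi >= lo) & (ndvi < hi)]
    if sel.size > 5:                         # nObsMin: skip sparse bins
        mids.append((lo + hi) / 2.)
        tmax.append(np.percentile(sel, 99))  # dry-edge candidates
        tmin.append(np.percentile(sel, 1))   # wet-edge candidates

slope, intercept = stats.linregress(mids, tmax)[:2]   # LSTmax = a*NDVI + b
lstmin = np.min(tmin)                                 # flat wet edge
tvdi = (lst - lstmin) / (intercept + slope * ndvi - lstmin + 1e-8)
print(np.nanmin(tvdi), np.nanmax(tvdi))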
def plotbeamparametersv2(times, configfile, maindir, fitdir='Fitted', params=['Ne'], filetemplate='params', suptitle='Parameter Comparison', werrors=False, nelog=True): """ This function will plot the desired parameters for each beam along range. The values of the input and measured parameters will be plotted Inputs Times - A list of times that will be plotted. configfile - The INI file with the simulation parameters that will be useds. maindir - The directory the images will be saved in. params - List of Parameter names that will be ploted. These need to match in the ionocontainer names. filetemplate - The first part of a the file names. suptitle - The supertitle for the plots. werrors - A bools that determines if the errors will be plotted. """ sns.set_style("whitegrid") sns.set_context("notebook") # rc('text', usetex=True) maindir = Path(maindir) ffit = maindir / fitdir / 'fitteddata.h5' inputfiledir = maindir / 'Origparams' (sensdict, simparams) = readconfigfile(configfile) paramslower = [ip.lower() for ip in params] Nt = len(times) Np = len(params) #Read in fitted data Ionofit = IonoContainer.readh5(str(ffit)) dataloc = Ionofit.Sphere_Coords pnames = Ionofit.Param_Names pnameslower = sp.array([ip.lower() for ip in pnames.flatten()]) p2fit = [ sp.argwhere(ip == pnameslower)[0][0] if ip in pnameslower else None for ip in paramslower ] time2fit = [None] * Nt # Have to fix this because of time offsets if times[0] == 0: times += Ionofit.Time_Vector[0, 0] for itn, itime in enumerate(times): filear = sp.argwhere(Ionofit.Time_Vector[:, 0] >= itime) if len(filear) == 0: filenum = len(Ionofit.Time_Vector) - 1 else: filenum = sp.argmin(sp.absolute(Ionofit.Time_Vector[:, 0] - itime)) time2fit[itn] = filenum times_int = [Ionofit.Time_Vector[i] for i in time2fit] # determine the beams angles = dataloc[:, 1:] rng = sp.unique(dataloc[:, 0]) b_arr = np.ascontiguousarray(angles).view( np.dtype((np.void, angles.dtype.itemsize * angles.shape[1]))) _, idx, invidx = np.unique(b_arr, return_index=True, return_inverse=True) beamlist = angles[idx] Nb = beamlist.shape[0] # Determine which imput files are to be used. dirlist = sorted(inputfiledir.glob('*.h5')) dirliststr = [str(i) for i in dirlist] sortlist, outime, outfilelist, timebeg, timelist_s = IonoContainer.gettimes( dirliststr) timelist = timebeg.copy() time2file = [None] * Nt time2intime = [None] * Nt # go through times find files and then times in files for itn, itime in enumerate(times): filear = sp.argwhere(timelist >= itime) if len(filear) == 0: filenum = [len(timelist) - 1] else: filenum = filear[0] flist1 = [] timeinflist = [] for ifile in filenum: filetimes = timelist_s[ifile] log1 = (filetimes[:, 0] >= times_int[itn][0]) & (filetimes[:, 0] < times_int[itn][1]) log2 = (filetimes[:, 1] > times_int[itn][0]) & (filetimes[:, 1] <= times_int[itn][1]) log3 = (filetimes[:, 0] <= times_int[itn][0]) & (filetimes[:, 1] > times_int[itn][1]) log4 = (filetimes[:, 0] > times_int[itn][0]) & (filetimes[:, 1] < times_int[itn][1]) curtimes1 = sp.where(log1 | log2 | log3 | log4)[0].tolist() flist1 = flist1 + [ifile] * len(curtimes1) timeinflist = timeinflist + curtimes1 time2intime[itn] = timeinflist time2file[itn] = flist1 nfig = int(sp.ceil(Nt * Nb)) imcount = 0 curfilenum = -1 # Loop for the figures for i_fig in range(nfig): lines = [None] * 2 labels = [None] * 2 (figmplf, axmat) = plt.subplots(int(sp.ceil(Np / 2)), 2, figsize=(20, 15), facecolor='w') axvec = axmat.flatten() # loop that goes through each axis loops through each parameter, beam # then time. 
for ax in axvec: if imcount >= Nt * Nb * Np: break imcount_f = float(imcount) itime = int(sp.floor(imcount_f / Nb / Np)) iparam = int(imcount_f / Nb - Np * itime) ibeam = int(imcount_f - (itime * Np * Nb + iparam * Nb)) curbeam = beamlist[ibeam] altlist = sp.sin(curbeam[1] * sp.pi / 180.) * rng curparm = paramslower[iparam] # Use Ne from input to compare the ne derived from the power. if curparm == 'nepow': curparm_in = 'ne' else: curparm_in = curparm curcoord = sp.zeros(3) curcoord[1:] = curbeam for iplot, filenum in enumerate(time2file[itime]): if curfilenum != filenum: curfilenum = filenum datafilename = dirlist[filenum] Ionoin = IonoContainer.readh5(str(datafilename)) if ('ti' in paramslower) or ('vi' in paramslower): Ionoin = maketi(Ionoin) pnames = Ionoin.Param_Names pnameslowerin = sp.array( [ip.lower() for ip in pnames.flatten()]) prmloc = sp.argwhere(curparm_in == pnameslowerin) if prmloc.size != 0: curprm = prmloc[0][0] # build up parameter vector bs the range values by finding the closest point in space in the input curdata = sp.zeros(len(rng)) for irngn, irng in enumerate(rng): curcoord[0] = irng tempin = Ionoin.getclosestsphere(curcoord)[0][ time2intime[itime]] Ntloc = tempin.shape[0] tempin = sp.reshape(tempin, (Ntloc, len(pnameslowerin))) curdata[irngn] = tempin[0, curprm] #actual plotting of the input data lines[0] = ax.plot(curdata, altlist, marker='o', c='b', linewidth=2)[0] labels[0] = 'Input Parameters' # Plot fitted data for the axis indxkep = np.argwhere(invidx == ibeam)[:, 0] curfit = Ionofit.Param_List[indxkep, time2fit[itime], p2fit[iparam]] rng_fit = dataloc[indxkep, 0] alt_fit = rng_fit * sp.sin(curbeam[1] * sp.pi / 180.) errorexist = 'n' + paramslower[iparam] in pnameslower if errorexist and werrors: eparam = sp.argwhere('n' + paramslower[iparam] == pnameslower)[0][0] curerror = Ionofit.Param_List[indxkep, time2fit[itime], eparam] lines[1] = ax.errorbar(curfit, alt_fit, xerr=curerror, fmt='-.', c='g', linewidth=2)[0] else: lines[1] = ax.plot(curfit, alt_fit, marker='o', c='g', linewidth=2)[0] labels[1] = 'Fitted Parameters' # get and plot the input data numplots = len(time2file[itime]) # set the limit for the parameter if curparm == 'vi': ax.set(xlim=[ -1.25 * sp.nanmax(sp.absolute(curfit)), 1.25 * sp.nanmax(sp.absolute(curfit)) ]) elif curparm_in != 'ne': ax.set(xlim=[ 0.75 * sp.nanmin(curfit), sp.minimum(1.25 * sp.nanmax(curfit), 8000.) ]) elif (curparm_in == 'ne') and nelog: ax.set_xscale('log') ax.set_xlabel(params[iparam]) ax.set_ylabel('Alt km') ax.set_title( '{0} vs Altitude, Time: {1}s Az: {2}$^o$ El: {3}$^o$'.format( params[iparam], times[itime], *curbeam)) imcount += 1 # save figure figmplf.suptitle(suptitle, fontsize=20) if None in labels: labels.remove(None) lines.remove(None) plt.figlegend(lines, labels, loc='lower center', ncol=5, labelspacing=0.) fname = filetemplate + '_{0:0>3}.png'.format(i_fig) plt.savefig(fname) plt.close(figmplf)
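# plotbeamparametersv2 finds the unique beam directions by viewing each
# (az, el) row as one opaque record so np.unique can operate row-wise.
# Isolated, with toy angles:
import numpy as np

angles = np.array([[30., 60.], [45., 75.], [30., 60.], [45., 75.], [0., 90.]])
b_arr = np.ascontiguousarray(angles).view(
    np.dtype((np.void, angles.dtype.itemsize * angles.shape[1])))
_, idx, invidx = np.unique(b_arr, return_index=True, return_inverse=True)
beamlist = angles[idx]      # one row per distinct beam
print(beamlist)             # invidx maps each sample back to its beam index
# On NumPy >= 1.13 the same result is simply np.unique(angles, axis=0).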
def bovy_dens2d(X, **kwargs): """ NAME: bovy_dens2d PURPOSE: plot a 2d density with optional contours INPUT: first argument is the density matplotlib.pyplot.imshow keywords (see http://matplotlib.sourceforge.net/api/axes_api.html#matplotlib.axes.Axes.imshow) xlabel - (raw string!) x-axis label, LaTeX math mode, no $s needed ylabel - (raw string!) y-axis label, LaTeX math mode, no $s needed xrange yrange noaxes - don't plot any axes overplot - if True, overplot colorbar - if True, add colorbar shrink= colorbar argument: shrink the colorbar by the factor (optional) conditional - normalize each column separately (for probability densities, i.e., cntrmass=True) gcf=True does not start a new figure (does change the ranges and labels) Contours: justcontours - if True, only draw contours contours - if True, draw contours (10 by default) levels - contour-levels cntrmass - if True, the density is a probability and the levels are probability masses contained within the contour cntrcolors - colors for contours (single color or array) cntrlabel - label the contours cntrlw, cntrls - linewidths and linestyles for contour cntrlabelsize, cntrlabelcolors,cntrinline - contour arguments cntrSmooth - use ndimage.gaussian_filter to smooth before contouring onedhists - if True, make one-d histograms on the sides onedhistcolor - histogram color retAxes= return all Axes instances retCont= return the contour instance OUTPUT: plot to output device, Axes instances depending on input HISTORY: 2010-03-09 - Written - Bovy (NYU) """ overplot = kwargs.pop('overplot', False) gcf = kwargs.pop('gcf', False) if not overplot and not gcf: pyplot.figure() xlabel = kwargs.pop('xlabel', None) ylabel = kwargs.pop('ylabel', None) zlabel = kwargs.pop('zlabel', None) if 'extent' in kwargs: extent = kwargs.pop('extent') else: xlimits = kwargs.pop('xrange', [0, X.shape[1]]) ylimits = kwargs.pop('yrange', [0, X.shape[0]]) extent = xlimits + ylimits if not 'aspect' in kwargs: kwargs['aspect'] = (xlimits[1] - xlimits[0]) / float(ylimits[1] - ylimits[0]) noaxes = kwargs.pop('noaxes', False) justcontours = kwargs.pop('justcontours', False) if ('contours' in kwargs and kwargs['contours']) or \ 'levels' in kwargs or justcontours or \ ('cntrmass' in kwargs and kwargs['cntrmass']): contours = True else: contours = False kwargs.pop('contours', None) if 'levels' in kwargs: levels = kwargs['levels'] kwargs.pop('levels') elif contours: if 'cntrmass' in kwargs and kwargs['cntrmass']: levels = sc.linspace(0., 1., _DEFAULTNCNTR) elif True in sc.isnan(sc.array(X)): levels = sc.linspace(sc.nanmin(X), sc.nanmax(X), _DEFAULTNCNTR) else: levels = sc.linspace(sc.amin(X), sc.amax(X), _DEFAULTNCNTR) cntrmass = kwargs.pop('cntrmass', False) conditional = kwargs.pop('conditional', False) cntrcolors = kwargs.pop('cntrcolors', 'k') cntrlabel = kwargs.pop('cntrlabel', False) cntrlw = kwargs.pop('cntrlw', None) cntrls = kwargs.pop('cntrls', None) cntrSmooth = kwargs.pop('cntrSmooth', None) cntrlabelsize = kwargs.pop('cntrlabelsize', None) cntrlabelcolors = kwargs.pop('cntrlabelcolors', None) cntrinline = kwargs.pop('cntrinline', None) retCumImage = kwargs.pop('retCumImage', False) cb = kwargs.pop('colorbar', False) shrink = kwargs.pop('shrink', None) onedhists = kwargs.pop('onedhists', False) onedhistcolor = kwargs.pop('onedhistcolor', 'k') retAxes = kwargs.pop('retAxes', False) retCont = kwargs.pop('retCont', False) if onedhists: if overplot or gcf: fig = pyplot.gcf() else: fig = pyplot.figure() nullfmt = NullFormatter() # no labels # definitions for the axes 
left, width = 0.1, 0.65 bottom, height = 0.1, 0.65 bottom_h = left_h = left + width rect_scatter = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.2] rect_histy = [left_h, bottom, 0.2, height] axScatter = pyplot.axes(rect_scatter) axHistx = pyplot.axes(rect_histx) axHisty = pyplot.axes(rect_histy) # no labels axHistx.xaxis.set_major_formatter(nullfmt) axHistx.yaxis.set_major_formatter(nullfmt) axHisty.xaxis.set_major_formatter(nullfmt) axHisty.yaxis.set_major_formatter(nullfmt) fig.sca(axScatter) ax = pyplot.gca() ax.set_autoscale_on(False) if conditional: plotthis = X / sc.tile(sc.sum(X, axis=0), (X.shape[1], 1)) else: plotthis = X if not justcontours: out = pyplot.imshow(plotthis, extent=extent, **kwargs) if not overplot: pyplot.axis(extent) _add_axislabels(xlabel, ylabel) _add_ticks() #Add colorbar if cb and not justcontours: if shrink is None: shrink = sc.amin([float(kwargs.pop('aspect', 1.)) * 0.87, 1.]) CB1 = pyplot.colorbar(out, shrink=shrink) if not zlabel is None: if zlabel[0] != '$': thiszlabel = r'$' + zlabel + '$' else: thiszlabel = zlabel CB1.set_label(thiszlabel) if contours or retCumImage: aspect = kwargs.get('aspect', None) origin = kwargs.get('origin', None) if cntrmass: #Sum from the top down! plotthis[sc.isnan(plotthis)] = 0. sortindx = sc.argsort(plotthis.flatten())[::-1] cumul = sc.cumsum(sc.sort(plotthis.flatten())[::-1]) / sc.sum( plotthis.flatten()) cntrThis = sc.zeros(sc.prod(plotthis.shape)) cntrThis[sortindx] = cumul cntrThis = sc.reshape(cntrThis, plotthis.shape) else: cntrThis = plotthis if contours: if not cntrSmooth is None: cntrThis = ndimage.gaussian_filter(cntrThis, cntrSmooth, mode='nearest') cont = pyplot.contour(cntrThis, levels, colors=cntrcolors, linewidths=cntrlw, extent=extent, aspect=aspect, linestyles=cntrls, origin=origin) if cntrlabel: pyplot.clabel(cont, fontsize=cntrlabelsize, colors=cntrlabelcolors, inline=cntrinline) if noaxes: ax.set_axis_off() #Add onedhists if not onedhists: if retCumImage: return cntrThis elif retAxes: return pyplot.gca() elif retCont: return cont elif justcontours: return cntrThis else: return out histx = sc.nansum(X.T, axis=1) * m.fabs(ylimits[1] - ylimits[0]) / X.shape[ 1] #nansum bc nan is *no dens value* histy = sc.nansum(X.T, axis=0) * m.fabs(xlimits[1] - xlimits[0]) / X.shape[0] histx[sc.isnan(histx)] = 0. histy[sc.isnan(histy)] = 0. dx = (extent[1] - extent[0]) / float(len(histx)) axHistx.plot(sc.linspace(extent[0] + dx, extent[1] - dx, len(histx)), histx, drawstyle='steps-mid', color=onedhistcolor) dy = (extent[3] - extent[2]) / float(len(histy)) axHisty.plot(histy, sc.linspace(extent[2] + dy, extent[3] - dy, len(histy)), drawstyle='steps-mid', color=onedhistcolor) axHistx.set_xlim(axScatter.get_xlim()) axHisty.set_ylim(axScatter.get_ylim()) axHistx.set_ylim(0, 1.2 * sc.amax(histx)) axHisty.set_xlim(0, 1.2 * sc.amax(histy)) if retCumImage: return cntrThis elif retAxes: return (axScatter, axHistx, axHisty) elif justcontours: return cntrThis else: return out
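# The gcf=True flag added in this revision lets the function draw into an
# already-current figure or subplot without opening a new one, unlike
# overplot=True, which also skips resetting ranges and labels. A hedged call
# sketch, again assuming the galpy import:
import numpy as np
import matplotlib.pyplot as plt
from galpy.util import bovy_plot

X = np.abs(np.random.randn(51, 51))
fig, axes = plt.subplots(1, 2)
plt.sca(axes[0])
bovy_plot.bovy_dens2d(X, gcf=True, origin='lower')
plt.sca(axes[1])
bovy_plot.bovy_dens2d(X, gcf=True, contours=True, cntrmass=True,
                      justcontours=True)   # contours only, no image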