def get_PDZerrDistribution(hdf5file, bpzfile, columns):
    """
    It returns the error distribution based on PDZs.
    ---
    hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.hdf5'
    bpzfile = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.bpz'
    columns = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.columns'
    """
    ids, zb, zs, mo = U.get_data(bpzfile, (0, 1, 11, 12))
    # ids,zb,zs,mo = U.get_data(bpzfile,(0,1,9,10))
    good = N.greater(abs(mo), 17.) * N.less(abs(mo), 25.)
    # good = N.greater(abs(mo),22.)*N.less(abs(mo),23.)
    ids, zb, zs, mo = U.multicompress(good, (ids, zb, zs, mo))
    ng = len(ids)

    # Reading in the PDZs...
    p = h5py.File(hdf5file, mode='r')
    pdzo = p.get('FullProbability')
    pdz = pdzo[good, :, :]
    zz = p.get('redshift')[:]
    dz = zz[2] - zz[1]
    basez2 = N.arange(-0.1, 0.1, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)
    delta_z_pdzs = N.zeros(nz - 1)

    # Computing the z error distribution based on peak values.
    delta_z_peaks = (zb - zs) / (1. + zs)
    a1, a2 = N.histogram(delta_z_peaks, basez2)

    # Stacking every (zb-centred) PDF on the common delta-z grid.
    for ii in range(ng):
        pdz_mot = U.sum(pdz[ii, :, :], axis=1)
        delta_z_pdzs += U.match_resol(zz - zb[ii], pdz_mot, basez2b)

    plt.figure(12, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')
    plt.clf()
    plt.subplot(211)
    plt.plot(basez2b, a1 / float(sum(a1)), 'b-', lw=12, alpha=0.6)
    plt.plot(basez2b, delta_z_pdzs / float(sum(delta_z_pdzs)), 'r-', lw=5, alpha=0.9)
    plt.grid()
    plt.xlim(-0.1, 0.1)
    plt.ylabel('P(z)', size=20, labelpad=+1)
    plt.legend(['peaks', 'pdfs'], loc='upper left', fontsize=20)
    plt.subplot(212)
    resi = 2
    plt.plot(basez2b[::resi],
             abs((a1[::resi] / float(sum(a1))) -
                 (delta_z_pdzs[::resi] / float(sum(delta_z_pdzs)))), 'k-')
    plt.grid()
    plt.xlim(-0.1, 0.1)
    plt.xlabel(r'$\delta_{z}$', size=30)
    # Delta-z grid, peak-based histogram and stacked-PDF distribution
    # (cf. get_PDZerrDistribution_byMagnitudes below).
    return basez2b, a1, delta_z_pdzs
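# A minimal usage sketch (not part of the original module): comparing the
# peak-based and PDF-based error distributions returned above. The paths
# are the same placeholders quoted in the docstring.
#
#   root = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/'
#   basez2b, dz_peaks, dz_pdfs = get_PDZerrDistribution(
#       root + 'alhambra.spz.hdf5', root + 'alhambra.spz.bpz',
#       root + 'alhambra.spz.columns')
#   # Once normalized, both curves estimate P(delta_z) with
#   # delta_z = (zb - zs) / (1 + zs).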
def compute_Wittman_CIs(hdf5file, zs):
    """
    It calculates the (HPD) CI intervals and F(C) for
    galaxies with zs redshift values.
    """
    verbose = 0
    p = h5py.File(hdf5file, mode='r')
    pdz = p.get('FullProbability')
    zz = p.get('redshift')[:]
    ngal = N.shape(pdz)[0]
    print 'N.shape(pdz)', N.shape(pdz)
    if ngal != len(zs):
        print 'Dimension mismatch!'
        sys.exit()

    ci_values = N.zeros(ngal)
    for ii in range(ngal):
        # Marginalize the P(z,T) cube over templates.
        pdz_gal = U.sum(pdz[ii, :, :], axis=1)
        # dz = 0.001
        # x = N.arange(-3.*sigma_g, 3.*sigma_g+dz/100., dz)
        # sigma_g = 0.02
        # gaus = N.exp(-(x/sigma_g)**2)
        # pepe = N.convolve(pdz_gal, gaus, 1)
        pdz_gal /= pdz_gal.sum()  # Normalize the distribution.
        if verbose:
            print 'PDF-size', N.shape(pdz_gal)
            print 'z-length', len(zz)
        # Locate the grid point closest to the spectroscopic redshift.
        dz = abs(zz - zs[ii])
        pos_z = N.where(dz == min(dz))[0][0]
        pdz_th_value = pdz_gal[pos_z]
        if verbose:
            print 'pdz_th_value', pdz_th_value
        # HPD region: every z whose probability is >= P(zs); its integral
        # is the confidence level at which zs is recovered.
        good_z_ranges = N.greater_equal(pdz_gal, pdz_th_value)
        pdz_r = N.compress(good_z_ranges, pdz_gal)
        ci_values[ii] = pdz_r.sum()
        #print pdz_r.sum()
        #if ii < 10:
        #    plt.clf()
        #    plt.plot(zz, pdz_gal, 'k-')
        #    plt.plot(zz[good_z_ranges], pdz_gal[good_z_ranges], '-r.')
        #    plt.plot(zz[pos_z], pdz_gal[pos_z], 'go', ms=10)
        #pausa = raw_input('paused')
    return ci_values
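# A minimal sketch (assumed helper, not in the original module) of the
# calibration test the ci_values above enable: for well-calibrated PDFs,
# the cumulative fraction of galaxies whose HPD confidence level is <= c
# should follow the diagonal F(c) = c (a Wittman-style F(C) plot).
def plot_Wittman_FC(ci_values):
    ci_sorted = N.sort(ci_values)
    # Empirical cumulative fraction F(c) evaluated at each observed c.
    fc = (N.arange(len(ci_sorted)) + 1.) / float(len(ci_sorted))
    plt.figure()
    plt.plot(ci_sorted, fc, 'k-', lw=3)
    plt.plot([0., 1.], [0., 1.], 'r--', lw=2)  # perfect calibration
    plt.xlabel('c', size=20)
    plt.ylabel('F(c)', size=20)
    plt.grid()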
def comparing_populations_HDF5(hdf5file):
    p = h5py.File(hdf5file, mode='r')
    pdz = p.get('FullProbability')
    # pdz = p.get('Likelihood')
    zz = p.get('redshift')[:]
    tt = p.get('type')[:]
    nz = len(zz)
    ng = N.shape(pdz)[0]
    probs = U.zeros((nz, 2), float)
    for ii in range(ng):
        # Split each P(z,T) cube into the first 35 (early-type) templates
        # and the rest (late-type).
        pepe1 = U.sum(pdz[ii, :, 0:35], axis=1)
        pepe2 = U.sum(pdz[ii, :, 35:], axis=1)
        pepe3 = (U.sum(pepe1) + U.sum(pepe2)) * 1.
        # Normalized PDZs
        pdz1 = pepe1 / pepe3
        pdz2 = pepe2 / pepe3
        probs[:, 0] += pdz1
        probs[:, 1] += pdz2
    plt.plot(zz[::30], probs[::30, 0] * 275., 'r-',
             zz[::30], probs[::30, 1] * 275., 'b-', lw=5)
    plt.xlim(0., 1.5)
                pepe[ss, :, :] / (pepe[ss, :, :].max()), 0)
    except:
        print 'Impossible to read: ', os.path.basename(hdf5_files[ii])

#for ss in range(nm):
#    full_table[ss,:,:] = final_mat[ss,:,:]
fp_file.close()

#if plots:
plt.figure(22, figsize=(13, 6), dpi=70, facecolor='w', edgecolor='k')
plt.clf()
for ss in range(4):
    plt.subplot(1, 4, ss + 1)
    # First panel stacks the first three slices; the rest show one each.
    if ss < 1:
        yo = U.sum(final_mat[0:3, 50:-100, :], axis=0)
    else:
        yo = final_mat[3 + ss, 50:-100, :]
    # Zero-out values below minval before taking the logarithm.
    yo2 = N.where(yo < minval, 0., yo)
    print yo2.min()
    print yo2.max()
    print ''
    plt.contour(zz[50:-100], tt, N.log10(yo2).T, 800,
                linewidths=2, vmin=-1.0, vmax=1.)
    if ss < 1:
        plt.title(r'R$\leq$%s' % (m[ss + 3]), size=20)
def get_PDZerrDistribution_byMagnitudes(hdf5file, bpzfile, columns):
    """
    It returns the error distribution based on PDZs, binned by magnitude.
    ---
    import alhambrahdf5 as AH
    #hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.hdf5'
    #bpzfile = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.bpz'
    #columns = '/Users/albertomolino/doctorado/photo/catalogos/reduction_v5/GOLD/alhambragold.columns'
    hdf5file = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.hdf5'
    bpzfile = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.bpz'
    columns = '/Users/albertomolino/doctorado/photo/catalogos/specz/spzPDZs/alhambra.spz.columns'
    basez2b,delta_z_peaks,delta_z_pdzs = AH.get_PDZerrDistribution_byMagnitudes(hdf5file,bpzfile,columns)
    """
    basem = N.arange(18, 26, 2)
    # basem = N.arange(18,25,2)
    nm = len(basem)
    ids, zb, zs, mo = U.get_data(bpzfile, (0, 1, 11, 12))
    # ids,zb,zs,mo = U.get_data(bpzfile,(0,1,9,10))

    # Reading in the PDZs...
    p = h5py.File(hdf5file, mode='r')
    pdzo = p.get('FullProbability')
    zz = p.get('redshift')[:]
    dz = zz[2] - zz[1]
    basez2 = N.arange(-0.1, 0.1, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)

    # Defining the final outputs.
    delta_z_pdzs = N.zeros((nm - 1, nz - 1), float)
    delta_z_peaks = N.zeros((nm - 1, nz - 1), float)

    for ii in range(nm - 1):
        # Galaxies within this magnitude bin.
        good = N.greater_equal(mo, basem[ii]) * N.less_equal(mo, basem[ii + 1])
        idr, zbr, zsr, mor = U.multicompress(good, (ids, zb, zs, mo))
        ng = len(idr)
        pdz = pdzo[good, :, :]
        # Computing the z error distribution based on peak values.
        temporal_delta_z_peaks = (zbr - zsr) / (1. + zsr)
        a1, a2 = N.histogram(temporal_delta_z_peaks, basez2)
        delta_z_peaks[ii, :] = a1[:]
        for jj in range(ng):
            pdz_mot = U.sum(pdz[jj, :, :], axis=1)
            delta_z_pdzs[ii, :] += U.match_resol(zz - zbr[jj], pdz_mot, basez2b)

    # plt.figure(12, figsize=(8.5,10.), dpi=80, facecolor='w', edgecolor='k')
    # plt.clf()
    # plt.subplot(211)
    # plt.plot(basez2b,a1/float(sum(a1)),'b-',lw=12,alpha=0.6)
    # plt.plot(basez2b,delta_z_pdzs/float(sum(delta_z_pdzs)),'r-',lw=5,alpha=0.9)
    # plt.grid()
    # plt.xlim(-0.1,0.1)
    # plt.ylabel('P(z)',size=20,labelpad=+1)
    # plt.legend(['peaks','pdfs'],loc='upper left',fontsize=20)
    # plt.subplot(212)
    # resi = 2
    # plt.plot(basez2b[::resi],abs((a1[::resi]/float(sum(a1)))-(delta_z_pdzs[::resi]/float(sum(delta_z_pdzs)))),'k-')
    # plt.grid()
    # plt.xlim(-0.1,0.1)
    # plt.xlabel(r'$\delta_{z}$',size=30)
    return basez2b, delta_z_peaks, delta_z_pdzs
def alhambra_get2Dmatrix_HDF5_likelihood(inputfile, cond, finalname=None, normed=1):
    """
    Given a probs class, it plots z versus T as a density plot.

    import alhambrahdf5
    from alhambrahdf5 import *
    inputfile = '/Volumes/CLASH/ALHAMBRA/f02p02_colorproext_1_ISO_phz_eB10.hdf5'
    bpz = '/Volumes/CLASH/ALHAMBRA/f02p02_colorproext_1_ISO_phz_eB11.Prior1peak.bpz'
    ids,mo = U.get_data(bpz,(0,11))
    good = U.greater_equal(mo,18.) * U.less_equal(mo,23.)
    finalname = '/Users/amb/Desktop/testmat/f02p02c01.18m25.norm.mat'
    mat = alhambra_get2Dmatrix_HDF5_likelihood(inputfile,good,finalname,1)
    -----------
    """
    p = h5py.File(inputfile, mode='r')
    pdz = p.get('/Probs_z_T/Full_Probability')
    z = p.get('/Probs_z_T/redshift')
    tt = p.get('/Probs_z_T/type')
    ll = p.get('/Probs_z_T/Likelihood')
    # Example: pdz.shape (9651, 7000, 81)
    no = pdz.shape[0]
    nz = pdz.shape[1]
    nt = pdz.shape[2]
    kk = 0
    for ii in range(no):
        print '%i out of %i' % (ii + 1, no)
        if cond[ii] == True:
            if kk < 1:
                # First selected galaxy: initialize the stack.
                if normed == 1:
                    pepe = U.sum(ll[ii, :, :], axis=1)
                    xx = pepe / U.sum(pepe)
                elif normed == 2:
                    for ss in range(81):
                        a = ll[ii, :, ss]
                        if a.sum() > 1.0e-30:
                            b = a / sum(a)
                else:
                    xx = ll[ii, :, :]
            else:
                # Remaining galaxies: accumulate.
                if normed == 1:
                    pepe = U.sum(ll[ii, :, :], axis=1)
                    xx += pepe / U.sum(pepe)
                elif normed == 2:
                    for ss in range(81):
                        a = ll[ii, :, ss]
                        if a.sum() > 1.0e-30:
                            b += a / sum(a)
                else:
                    xx += ll[ii, :, :]
            kk += 1
    if normed == 2:
        xx = b / b.sum()
    if finalname is None:
        outname = inputfile + '.mat'
    else:
        outname = finalname
    # U.put_2Darray(outname,xx)
    return xx
idpos = 0
hdf5file = '/Users/albertomolino/Desktop/CLASH/SN_Colfax/colfaxHost.hdf5'
p = h5py.File(hdf5file, mode='r')
pdf = p.get('Likelihood')
z = p.get('redshift')
zz = z[:]
t = p.get('type')
tt = t[:]
deltazz = zz[1] - zz[0]
deltatt = tt[1] - tt[0]
# deltazz2 = deltazz/2.
# deltatt2 = deltatt/2.
basez = U.arange(zz.min(), zz.max() + deltazz, deltazz)
baset = U.arange(tt.min(), tt.max() + deltatt, deltatt)
matris = pdf[idpos, :, :]
# Marginal distributions: over redshift (temps) and over type (reds).
temps = U.sum(pdf[idpos, :, :], axis=0)
reds = U.sum(pdf[idpos, :, :], axis=1)

plt.figure(15, figsize=(12., 9.5), dpi=80, facecolor='w', edgecolor='k')
plt.clf()
plt.ion()
plt.show()
nullfmt = plt.NullFormatter()  # no labels

# Layout for the central 2D panel plus the two marginal histograms.
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]

plt.figure(15, figsize=(8.5, 7.5), dpi=80, facecolor='w', edgecolor='k')
axScatter = plt.axes(rect_scatter)
def test_use(self):
    self.assertLess(useful.sum(1, 3), 5)
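# A minimal sketch (assumed scaffolding, not from the original file) of how
# this method could be wired into a runnable unittest module; 'TestUseful'
# is a hypothetical class name and 'useful' is assumed importable:
#
#   import unittest
#   import useful
#
#   class TestUseful(unittest.TestCase):
#       def test_use(self):
#           self.assertLess(useful.sum(1, 3), 5)
#
#   if __name__ == '__main__':
#       unittest.main()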
            std_value[ii, jj] = float(pepa[2])
            out_value[ii, jj] = float(pepa[3])
            num_sourc[ii, jj] = float(pepa[4])
        except:
            med_value[ii, jj] = 0.0
            std_value[ii, jj] = 0.0
            out_value[ii, jj] = 0.0
            num_sourc[ii, jj] = 0.0

print 'Model   med    std    out    num'
for jj in range(n_models):
    good = N.greater(std_value[:, jj], 0.0001)
    linea = '%i, %.3f, %.3f, ' % (jj + 1,
                                  U.mean_robust(med_value[good, jj]),
                                  U.mean_robust(std_value[good, jj]))
    linea += '%.3f %i ' % (U.mean_robust(out_value[good, jj]),
                           U.sum(num_sourc[good, jj]))
    print linea

base_sz = N.arange(0.0005, 0.095, 0.015)
base_sz_2 = N.arange(-0.095, 0.095, 0.015)
plt.clf()
for ss in range(n_models):
    plt.subplot(3, 4, ss + 1)
    good = N.greater(std_value[:, ss], 0.0001)
    a1, a2, a3 = plt.hist(std_value[good, ss], base_sz, alpha=0.5, normed=1)
    b1, b2, b3 = plt.hist(med_value[good, ss], base_sz_2,
                          facecolor='red', alpha=0.5, normed=1)
    plt.legend([
def global_PDZ(hdf5file, m_max):
    """
    It returns the global P(z).
    """
    p = h5py.File(hdf5file, mode='r')
    # pdz = p.get('FullProbability')
    pdz = p.get('Likelihood')
    z = p.get('redshift')[:]
    mmm = p.get('m_0')[:]
    good_sample = N.less_equal(abs(mmm), m_max)
    pdz = pdz[good_sample, :, :]
    ngal = N.shape(pdz)[0]
    globalpdz_red = N.zeros(len(z))
    globalpdz_blue = N.zeros(len(z))
    globalpdz3_all = N.zeros(len(z))
    for ii in range(ngal):
        # Red templates occupy the first 75 columns; blue the rest.
        try:
            pepe1 = U.sum(pdz[ii, :, 0:75], axis=1)
        except:
            pepe1 = z * 0.
        try:
            pepe2 = U.sum(pdz[ii, :, 75:], axis=1)
        except:
            pepe2 = z * 0.
        try:
            pepe66 = U.sum(pdz[ii, :, :], axis=1)
        except:
            pepe66 = z * 0.
        try:
            pepe3 = (U.sum(pepe1) + U.sum(pepe2)) * 1.
        except:
            pepe3 = z * 0.
        if ii == 0:
            try:
                globalpdz_red = (pepe1 / pepe3)
            except:
                globalpdz_red = z * 0.
            try:
                globalpdz_blue = (pepe2 / pepe3)
            except:
                globalpdz_blue = z * 0.
            # try: globalpdz3_all = pepe66
            try:
                globalpdz3_all = (pepe66 / pepe66.sum())
            except:
                globalpdz3_all = z * 0.
        else:
            try:
                globalpdz_red += (pepe1 / pepe3)
            except:
                globalpdz_red += z * 0.
            try:
                globalpdz_blue += (pepe2 / pepe3)
            except:
                globalpdz_blue += z * 0.
            # try: globalpdz3_all += pepe66
            try:
                globalpdz3_all += (pepe66 / pepe66.sum())
            except:
                globalpdz3_all += z * 0.
    return (z, globalpdz_red / (ngal * 1.), globalpdz_blue / (ngal * 1.),
            globalpdz3_all / (ngal * 1.))
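# A minimal usage sketch (illustrative; 'catalog.hdf5' is a placeholder):
# stacking the likelihoods of all galaxies brighter than m_0 = 21 and
# plotting the red/blue/total global P(z) returned above.
#
#   z, pz_red, pz_blue, pz_all = global_PDZ('catalog.hdf5', 21.)
#   plt.plot(z, pz_red, 'r-', z, pz_blue, 'b-', z, pz_all, 'k-', lw=3)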
def getPDF_by_mag_and_weights(hdf5file, m_max, weights):
    """
    It returns the global P(z), weighting each galaxy's PDF.
    ---
    m_max: maximum magnitude to be considered.
    weights: vector with P(det|gal).
    """
    p = h5py.File(hdf5file, mode='r')
    pdz = p.get('FullProbability')
    # pdz = p.get('Likelihood')
    z = p.get('redshift')[:]
    mmm = p.get('m_0')[:]
    good_sample = N.less_equal(mmm, m_max)
    pdz = pdz[good_sample, :, :]
    ngal = N.shape(pdz)[0]
    weights_redu = weights[good_sample]
    globalpdz_red = N.zeros(len(z))
    globalpdz_blue = N.zeros(len(z))
    globalpdz3_all = N.zeros(len(z))
    for ii in range(ngal):
        # Red templates occupy the first 75 columns; blue the rest.
        try:
            pepe1 = U.sum(pdz[ii, :, 0:75], axis=1)
        except:
            pepe1 = z * 0.
        try:
            pepe2 = U.sum(pdz[ii, :, 75:], axis=1)
        except:
            pepe2 = z * 0.
        try:
            pepe66 = U.sum(pdz[ii, :, :], axis=1)
        except:
            pepe66 = z * 0.
        try:
            pepe3 = (U.sum(pepe1) + U.sum(pepe2)) * 1.
        except:
            pepe3 = z * 0.
        if ii == 0:
            try:
                globalpdz_red = (pepe1 / pepe3) * weights_redu[ii]
            except:
                globalpdz_red = z * 0.
            try:
                globalpdz_blue = (pepe2 / pepe3) * weights_redu[ii]
            except:
                globalpdz_blue = z * 0.
            # try: globalpdz3_all = pepe66
            try:
                globalpdz3_all = (pepe66 / pepe66.sum()) * weights_redu[ii]
            except:
                globalpdz3_all = z * 0.
        else:
            try:
                globalpdz_red += (pepe1 / pepe3) * weights_redu[ii]
            except:
                globalpdz_red += z * 0.
            try:
                globalpdz_blue += (pepe2 / pepe3) * weights_redu[ii]
            except:
                globalpdz_blue += z * 0.
            # try: globalpdz3_all += pepe66
            try:
                globalpdz3_all += (pepe66 / pepe66.sum()) * weights_redu[ii]
            except:
                globalpdz3_all += z * 0.
    return z, globalpdz_red, globalpdz_blue, globalpdz3_all, ngal
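# A minimal usage sketch (illustrative; names are placeholders): weighting
# each galaxy's PDF by its probability of being a galaxy, P(det|gal), as
# described in the docstring, then normalizing the stacked P(z) afterwards
# since this function does not divide by ngal.
#
#   z, pz_red, pz_blue, pz_all, ngal = getPDF_by_mag_and_weights(
#       'catalog.hdf5', 21., weights)
#   pz_all /= pz_all.sum()  # normalize the weighted stack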
def getPDF_by_mag_templates_and_weights(hdf5file, m_max, weights):
    """
    It returns the global P(z) for each template individually.
    ---
    m_max: maximum magnitude to be considered.
    weights: vector with P(det|gal).
    """
    p = h5py.File(hdf5file, mode='r')
    # pdz = p.get('FullProbability')
    pdz = p.get('Likelihood')
    z = p.get('redshift')[:]
    mmm = p.get('m_0')[:]
    #good_m_sample = N.less_equal(mmm,m_max)
    #pdz = pdz[good_m_sample,:,:]
    ngal = N.shape(pdz)[0]
    #weights_redu = weights[good_m_sample]
    weights_redu = weights * 1.
    tts = p.get('type')[:]
    tbs = N.unique(tts.astype(int))  # individual integer types.
    ntb = len(tbs)
    global_pdfs = N.zeros((len(z), ntb), 'float')
    # ngal = 20  # debug: restrict the loop to the first 20 galaxies.
    for ii in range(ngal):
        # I need to separate the PDF per each type.
        print 'Analyzing galaxy %i' % (ii + 1)
        pdz_ii = pdz[ii, :, :]  # Read this galaxy's P(z,T) cube once.
        # Global PDF_ii (marginalized over all templates).
        global_pdf_galaxy_ii = U.sum(pdz_ii, axis=1)
        for uu in range(ntb):
            # Select the template columns belonging to type tbs[uu].
            if uu < 1:
                good_T_sample = N.less_equal(tts, tbs[uu + 1] - 0.5)
            elif uu == ntb - 1:
                good_T_sample = N.greater_equal(tts, tbs[uu] - 0.5)
            else:
                good_T_sample = N.greater_equal(tts, tbs[uu] - 0.5)
                good_T_sample *= N.less_equal(tts, tbs[uu] + 0.5)
            # Now PDF_ii for each template (uu: good_T_sample).
            if ii < 1:
                global_pdfs[:, uu] = U.sum(pdz_ii[:, good_T_sample], axis=1)
                if global_pdfs[:, uu].sum() > 0.:
                    global_pdfs[:, uu] /= (1. * global_pdf_galaxy_ii)  # Normalize.
                    global_pdfs[:, uu] *= weights_redu[ii]  # P(det|gal).
                else:
                    global_pdfs[:, uu] = N.zeros(len(z))
            else:
                global_pdfs[:, uu] += U.sum(pdz_ii[:, good_T_sample], axis=1)
                if global_pdfs[:, uu].sum() > 0.:
                    global_pdfs[:, uu] /= (1. * global_pdf_galaxy_ii)  # Normalize.
                    global_pdfs[:, uu] *= weights_redu[ii]  # P(det|gal).
                else:
                    global_pdfs[:, uu] += N.zeros(len(z))
    return z, global_pdfs
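# A minimal usage sketch (illustrative; names are placeholders): one global
# P(z) curve per (integer) spectral type, as returned above.
#
#   z, global_pdfs = getPDF_by_mag_templates_and_weights(
#       'catalog.hdf5', 21., weights)
#   for uu in range(global_pdfs.shape[1]):
#       plt.plot(z, global_pdfs[:, uu], lw=2)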
def get_PDZerrDistribution_byTemplates(hdf5file, bpzfile, m_max):
    """
    It returns the error distribution based on PDZs.
    ---
    import splus_s82_hdf5_tools as to
    root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/'
    root += 'S82/Dec2017/splus_cats_NGSL/'
    hdf5list = root+'hdf5.list'
    bpzlist = root+'bpz/master.STRIPE82_Photometry.m21.bpz.list'
    hdf5_files = U.get_str(hdf5list,0)
    n_hdf5 = len(hdf5_files)
    bpz_files = U.get_str(bpzlist,0)
    n_bpz = len(bpz_files)
    for ii in range(n_bpz):
        name = os.path.basename(hdf5_files[ii])
        print name
        try:
            z,dp,df = to.get_PDZerrDistribution_byTemplates(hdf5_files[ii],bpz_files[ii],19)
        except:
            print 'Impossible to run on ',name
    """
    plots = 1  # Starting plots if necessary.

    if plots:
        plt.figure(12, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')

    try:
        ids, zb, zs, mo, tb, odd = U.get_data(bpzfile, (0, 1, 11, 12, 4, 5))
    except:
        ids, zb, zs, mo, tb, odd = U.get_data(bpzfile, (0, 1, 9, 10, 4, 5))
    good = N.less_equal(mo, m_max)
    ids, zb, zs, mo, tb, odd = U.multicompress(good, (ids, zb, zs, mo, tb, odd))
    ng = len(ids)

    # Reading in the PDZs...
    p = h5py.File(hdf5file, mode='r')
    #pdzo = p.get('FullProbability')
    pdz = p.get('Likelihood')
    pdz = pdz[good, :, :]
    zz = p.get('redshift')[:]
    dz = (zz[2] - zz[1]) * 100.
    basez2 = N.arange(-0.2, 0.2, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)
    res = 1

    # Computing the z error distribution based on peak values.
    delta_z_peaks = (zb - zs) / (1. + zs)
    a1, a2 = N.histogram(delta_z_peaks, basez2)

    delta_z_pdzs = N.zeros(nz - 1)
    for ii in range(ng):
        pdz_mot = U.sum(pdz[ii, :, :], axis=1)
        pdz_mot_peak = pdz_mot / float(max(pdz_mot))
        # To get rid of long tails in PDFs with low probabilities.
        pdz_mot_peak = N.where(pdz_mot_peak < 1.0e-4, 0., pdz_mot_peak)
        pdz_mot_norm = pdz_mot_peak / float(sum(pdz_mot_peak))
        pdz_mot_norm = N.where(pdz_mot_norm < 0., 0., pdz_mot_norm)
        #pdz_mot_norm = pdz_mot/float(sum(pdz_mot))
        # Resample the PDF onto the common delta-z grid, centred on zs.
        pdz_mot_norm_resample = U.match_resol(zz - zs[ii], pdz_mot_norm, basez2b)
        pdz_mot_norm_resample = N.where(pdz_mot_norm_resample < 0., 0.,
                                        pdz_mot_norm_resample)
        delta_z_pdzs += pdz_mot_norm_resample[:]
        """
        if plots:
            plt.clf()
            plt.subplot(121)
            peak_zb_pos = N.argmax(pdz_mot_norm[::res])
            print zz[peak_zb_pos]
            plt.plot(zz[::res]-zs[ii],pdz_mot_norm[::res],'-',lw=5,alpha=0.6)
            #plt.plot(zz[::res]-zz[peak_zb_pos],pdz_mot_norm[::res],'-',lw=5,alpha=0.6)
            plt.grid()
            plt.xlim(-0.2,0.2)
            #plt.ylim(0.001,0.1)
            plt.xlabel(r'$\delta_{z}$',size=30)
            plt.ylabel('P(z)',size=20,labelpad=+1)
            plt.legend(['R=%.2f''\n''T=%.1f''\n''O=%.1f'%(mo[ii],tb[ii],odd[ii])],loc='upper right')
            plt.title('zb = %.2f, zs = %.2f, dz/1+z = %.2f'%(zb[ii],zs[ii],delta_z_peaks[ii]),size=20)
            plt.subplot(122)
            plt.plot(basez2b,delta_z_pdzs,'k-',lw=5)
            plt.grid()
            plt.xlim(-0.2,0.2)
            #plt.ylim(0.001,0.1)
            plt.xlabel(r'$\delta_{z}$',size=30)
            plt.ylabel('P(z)',size=20,labelpad=+1)
            pausa = raw_input('press a button to continue')
        """

    # Normalizing both distributions so they can be compared.
    norm_dz_peaks = a1 / float(sum(a1))
    norm_dz_pdfs = delta_z_pdzs / float(sum(delta_z_pdzs))

    if plots:
        plt.figure(11, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        #plt.subplot(212)
        plt.plot(basez2b, norm_dz_peaks, 'b-', lw=8, alpha=0.6)
        plt.plot(basez2b, norm_dz_pdfs, 'r-', lw=5, alpha=0.9)
        plt.grid()
        plt.xlim(-0.2, 0.2)
        plt.ylabel('P(z)', size=20, labelpad=+1)
        plt.legend(['peaks', 'pdfs'], loc='upper left', fontsize=20)
        plt.xlabel(r'$\delta_{z}$', size=30)
        plot_filename = hdf5file[:-4] + 'deltaz.mmax%.2fAB.png' % (m_max)
        plt.savefig(plot_filename, dpi=80)

    # Saving data into a file.
    output_filename = hdf5file[:-4] + 'deltaz.mmax%.2fAB.mat' % (m_max)
    U.put_data(output_filename, (basez2b, norm_dz_peaks, norm_dz_pdfs),
               'z dz_peak dz_PDFs')
    return basez2b, norm_dz_peaks, norm_dz_pdfs
filters = ['u', 'J0378', 'J0395', 'J0410', 'J0430', 'g',
           'J0515', 'r', 'J0660', 'i', 'J0861', 'z']
base_filtros = N.arange(12) + 1
final_zpo = N.zeros((n_cats, 12), float)
final_zpe = N.zeros((n_cats, 12), float)
for ii in range(n_cats):
    # Zero-point offsets and errors from each calibrated .columns file.
    cali_columns = cats_names[ii][:-4] + '.spz.z05.%s_cali.columns' % (aperture)
    final_zpe[ii, :], final_zpo[ii, :] = U.get_data(cali_columns, (3, 4), 12)

# Mean zero-point error and offset across catalogues.
pepe = U.sum(final_zpe, axis=0) / (1. * n_cats)
pepo = U.sum(final_zpo, axis=0) / (1. * n_cats)

plt.figure(1, figsize=(14, 8), dpi=80, facecolor='w', edgecolor='k')
plt.clf()
# Offsets shown relative to the r band (filter index 7).
plt.errorbar(base_filtros, pepo - pepo[7], pepe, fmt="-rs",
             alpha=0.5, ms=10, lw=7)
for ii in range(n_cats):
    plt.plot(base_filtros, final_zpo[ii, :] - final_zpo[ii, 7], 'k.',