def mpi_run(self, snr_min, snr_max, central_data, neighbour_data, nreal, fatal_errors):
    # Selection function for this bin
    sel = (central_data.res["snr"] > snr_min) & (central_data.res["snr"] < snr_max)
    print "Will do %d realisations for this bin:" % nreal

    De1 = []
    De2 = []
    sn = []
    g1 = []
    g2 = []

    for j in xrange(nreal):
        print "%d/%d" % (j + 1, nreal)
        if not fatal_errors:
            try:
                snr, e1, e2 = self.do_main_calculation(central_data, neighbour_data, sel)
            except Exception:
                print "Something went wrong - proceeding to next realisation (if you see this persistently it may be indicative of a problem)"
                continue
        else:
            snr, e1, e2 = self.do_main_calculation(central_data, neighbour_data, sel)

        print "1pt bias = (%f,%f)" % (e1 - self.g[0], e2 - self.g[1])

        # Keep only finite, physical measurements
        if np.isfinite(snr) and (-1.0 < e1 < 1.0) and (-1.0 < e2 < 1.0):
            sn.append(snr)
            De1.append(e1)
            De2.append(e2)
            g1.append(self.g[0])
            g2.append(self.g[1])

    # Package the accepted realisations and fit for the bias in this bin
    data = np.zeros(len(g1), dtype=[("e1", float), ("e2", float), ("true_g1", float), ("true_g2", float)])
    data["e1"], data["e2"] = np.array(De1), np.array(De2)
    data["true_g1"], data["true_g2"] = np.array(g1), np.array(g2)

    bias = di.get_bias(data, nbins=5, names=["m11", "m22", "c11", "c22"], binning="equal_number", silent=True)

    return np.mean(sn), bias["m11"][0], bias["m11"][1], bias["m22"][0], bias["m22"][1]
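# mpi_run() returns one row of summary statistics per SNR bin, so a natural
# driver hands one bin to each MPI rank. The sketch below is an illustration
# only: mpi4py, the `calc` object exposing mpi_run(), and the use of
# calc.binning as the SNR edges are assumptions, not part of this file.
def _example_mpi_driver(calc, central_data, neighbour_data, nreal=1000):
    from mpi4py import MPI  # assumed dependency, not imported elsewhere here

    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()

    edges = calc.binning  # e.g. np.logspace(1, 2.8, 12), as in analyse1 below
    rows = []
    for i, (lo, hi) in enumerate(zip(edges[:-1], edges[1:])):
        if i % size != rank:
            continue  # round-robin assignment of SNR bins to ranks
        rows.append(calc.mpi_run(lo, hi, central_data, neighbour_data, nreal, fatal_errors=False))

    # Gather the per-bin (snr, m11, err_m11, m22, err_m22) rows on the root process
    return comm.gather(rows, root=0)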
def compute(self, split_half=0, fit="bord", apply_calibration=False, table_name=None, ellipticity_name="e", sbins=10, rbins=5, binning="equal_number", rlim=(1, 3), slim=(10, 1000)):
    print 'measuring bias'

    # Optionally use only one half of a split catalogue
    if split_half > 0:
        data = getattr(self, "res%d" % split_half)
        print "using half %d of the catalogue (%d objects)" % (split_half, data.size)
        if hasattr(self, "truth"):
            tr = getattr(self, "truth%d" % split_half)
    else:
        data = self.res
        print "using the full catalogue (%d objects)" % (data.size)
        if hasattr(self, "truth"):
            tr = self.truth

    # Restrict to bulge- or disc-only fits if requested
    if fit.lower() != "bord":
        print "Using %s only galaxies" % fit
        val = int(fit.lower() == "bulge")
        sel_fit = data["is_bulge"] == val
    else:
        sel_fit = np.ones_like(data).astype(bool)

    data = data[sel_fit]
    tr = tr[sel_fit]

    # Apply the SNR and size cuts
    sel_lim = (data["snr"] > slim[0]) & (data["snr"] < slim[1]) & (data["mean_rgpp_rp"] > rlim[0]) & (data["mean_rgpp_rp"] < rlim[1])
    data = data[sel_lim]
    tr = tr[sel_lim]

    # Bin edges are defined in log10(SNR) and log10(rgpp/rp), to match the
    # selections and the 10** conversions further down
    if isinstance(binning, str):
        if binning.lower() == "uniform":
            snr_edges = np.linspace(np.log10(slim[0]), np.log10(slim[1]), sbins + 1)
            rgp_edges = np.linspace(np.log10(rlim[0]), np.log10(rlim[1]), rbins + 1)
        elif binning.lower() == "equal_number":
            snr_edges = di.find_bin_edges(np.log10(data["snr"]), sbins)
            rgp_edges = di.find_bin_edges(np.log10(data["mean_rgpp_rp"]), rbins)

    snr_centres = (snr_edges[1:] + snr_edges[:-1]) / 2.0
    rgp_centres = (rgp_edges[1:] + rgp_edges[:-1]) / 2.0

    list_bias = []
    bias_grid = []

    # Global biases, before any binning
    b = di.get_bias(tr, data, nbins=5, apply_calibration=apply_calibration, ellipticity_name=ellipticity_name, binning="equal_number", names=["m", "c", "m11", "m22", "c11", "c22"], silent=True)
    print "Global biases:"
    print "m11 : ", b["m11"]
    print "m22 : ", b["m22"]
    print "m   : ", b["m"]
    print "c11 : ", b["c11"]
    print "c22 : ", b["c22"]
    print "c   : ", b["c"]

    print "Will do dynamic binning in SNR"

    for i in xrange(len(rgp_edges) - 1):
        # Recompute the SNR bin edges within this size bin
        snr_samp = data["snr"][(np.log10(data['mean_rgpp_rp']) > rgp_edges[i]) & (np.log10(data['mean_rgpp_rp']) < rgp_edges[i + 1])]
        snr_edges = di.find_bin_edges(np.log10(snr_samp), sbins)

        for j in xrange(len(snr_edges) - 1):
            empty = False
            print "bin %d %d snr = [%2.3f-%2.3f] rgpp/rp = [%2.3f-%2.3f]" % (j, i, 10**snr_edges[j], 10**snr_edges[j + 1], 10**rgp_edges[i], 10**rgp_edges[i + 1])

            # Select in bins of snr and size
            select = (np.log10(data['snr']) > snr_edges[j]) & (np.log10(data['snr']) < snr_edges[j + 1]) & (np.log10(data['mean_rgpp_rp']) > rgp_edges[i]) & (np.log10(data['mean_rgpp_rp']) < rgp_edges[i + 1])
            ngal = np.nonzero(select.astype(int))[0].size

            # Warn if there are too few simulated galaxies in a given bin
            if ngal < 60:
                print "Warning: fewer than 60 galaxies in bin %d, %d (ngal=%d)" % (i, j, ngal)
            if ngal == 0:
                print "Warning: no galaxies in bin %d, %d" % (i, j)
                empty = True

            vrgp_mid = rgp_centres[i]
            vsnr_mid = snr_centres[j]
            vrgp_min = rgp_edges[i]
            vsnr_min = snr_edges[j]
            vrgp_max = rgp_edges[i + 1]
            vsnr_max = snr_edges[j + 1]

            # An empty bin contributes a row of zeros, padded so the columns
            # stay aligned with the labels defined below
            if ngal == 0:
                list_bias.append([j, i, ngal, 10**vrgp_min, 10**vrgp_max, 10**vsnr_min, 10**vsnr_max] + [0.0] * 18)
                continue

            filename_str = 'snr%2.2f.rgpp%2.2f' % (10**vsnr_mid, 10**vrgp_mid)

            # Multiplicative/additive bias and PSF leakage in this bin
            b = di.get_bias(tr[select], data[select], apply_calibration=apply_calibration, nbins=5, ellipticity_name=ellipticity_name, binning="equal_number", names=["m", "c", "m11", "m22", "c11", "c22"], silent=True)
            a = di.get_alpha(data[select], data[select], nbins=5, xlim=(-0.015, 0.02), binning="equal_number", names=["alpha", "alpha11", "alpha22"], silent=True, use_weights=False)

            list_bias.append([j, i, ngal, 10**vrgp_min, 10**vrgp_max, 10**vsnr_min, 10**vsnr_max,
                              b["m"][0], b["m"][1], b["c"][0], b["c"][1],
                              b["m11"][0], b["m11"][1], b["m22"][0], b["m22"][1],
                              b["c11"][0], b["c11"][1], b["c22"][0], b["c22"][1],
                              a["alpha"][0], a["alpha"][1], a["alpha11"][0], a["alpha11"][1], a["alpha22"][0], a["alpha22"][1]])

    lab = ["j", "i", "ngal", "rgp_lower", "rgp_upper", "snr_lower", "snr_upper",
           "m", "err_m", "c", "err_c", "m1", "err_m1", "m2", "err_m2",
           "c1", "err_c1", "c2", "err_c2",
           "alpha", "err_alpha", "alpha11", "err_alpha11", "alpha22", "err_alpha22"]
    dt = {'names': lab, 'formats': ['i4'] * 3 + ['f8'] * 22}

    arr_bias = np.core.records.fromarrays(np.array(list_bias).transpose(), dtype=dt)

    if table_name is None:
        filename_table_bias = 'bias_table-%s-selection_section%d%s.fits' % (fit, split_half, "_calibrated" * apply_calibration)
    else:
        filename_table_bias = table_name

    import pyfits
    pyfits.writeto(filename_table_bias, arr_bias, clobber=True)
    print 'saved %s' % filename_table_bias
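# The bias table written by compute() can be read straight back as a numpy
# record array keyed by the column names in `lab`. A minimal sketch of doing
# so, assuming pyfits is importable; the function name, example filename and
# the selection of the first size bin are illustrative, not pipeline code.
def _example_read_bias_table(filename="bias_table-bord-selection_section0.fits"):
    import pyfits
    table = pyfits.getdata(filename)  # one row per (snr, rgpp/rp) bin
    # e.g. pull out the multiplicative bias and its error in the first size bin
    in_first_size_bin = table["i"] == 0
    return table["snr_lower"][in_first_size_bin], table["m"][in_first_size_bin], table["err_m"][in_first_size_bin]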
def analyse1(self, central_data, neighbour_data, nreal=1000):  #, central_ellipticity, dneigh=20, central_flux=1912.0, psf_size=3.7, neighbour_flux=1912.0, neighbour_size=3.2, nrealisations=1000):
    # Level 1 - loop over SNR
    #-----------------------------------------------------------------------

    # Initialise the random number generator
    np.random.seed(self.random_seed)
    self.object_mask = np.loadtxt("mask_template.txt")

    m = {}
    m[1] = []
    m[2] = []
    c = {}
    c[1] = []
    c[2] = []
    snr = []

    # Set up a dummy WCS
    # We load it here to avoid too much unnecessary io
    # In fact it should provide a skeleton only, since we overwrite the Jacobian with an identity matrix
    wcs_path = "/share/des/disc2/y1/OPS/coadd/20141118000051_DES0014-4414/coadd/DES0014-4414_r.fits.fz"
    orig_col = 1000
    orig_row = 1000
    image_pos = galsim.PositionD(orig_col, orig_row)
    self.wcs = galsim.FitsWCS(wcs_path)

    self.opt = p3s.Options("/home/samuroff/shear_pipeline/end-to-end/end-to-end_code/config_files/im3shape/params_disc.ini")

    self.binning = np.logspace(1, 2.8, 12)
    upper = self.binning[1:]
    lower = self.binning[:-1]

    # Cycle over the central flux, as a proxy for SNR
    for i, limits in enumerate(zip(lower, upper)):
        snr_min = limits[0]
        snr_max = limits[1]
        print "Level 1 iteration: %d SNR = %3.3f - %3.3f" % (i + 1, snr_min, snr_max)

        # Selection function for this bin
        sel = (central_data.res["snr"] > snr_min) & (central_data.res["snr"] < snr_max)
        print "Will do %d realisations for this bin:" % nreal

        De1 = []
        De2 = []
        sn = []
        g1 = []
        g2 = []

        for j in xrange(nreal):
            print "%d/%d" % (j + 1, nreal)
            self.generate_central_realisation(central_data, sel)
            self.generate_neighbour_realisation(neighbour_data, central_data, sel)

            # Draw this neighbour realisation repeatedly on a ring of angular positions
            snr, e1, e2 = self.do_position_loop()

            sn.append(snr)
            De1.append(e1)
            De2.append(e2)
            g1.append(self.g[0])
            g2.append(self.g[1])

        # Fit for the bias in this SNR bin
        data = np.zeros(len(g1), dtype=[("e1", float), ("e2", float), ("true_g1", float), ("true_g2", float)])
        data["e1"], data["e2"] = np.array(De1), np.array(De2)
        data["true_g1"], data["true_g2"] = np.array(g1), np.array(g2)

        bias = di.get_bias(data, nbins=5, names=["m11", "m22", "c11", "c22"], binning="equal_number", silent=True)
        print bias

        m[1].append(bias["m11"][0])
        m[2].append(bias["m22"][0])
        c[1].append(bias["c11"][0])
        c[2].append(bias["c22"][0])

        # Debugging breakpoint left in place after each SNR bin
        import pdb; pdb.set_trace()

    print "Done all loops"
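# analyse1() accumulates per-bin m and c values but never writes them out;
# the pdb.set_trace() at the end of each bin suggests they were inspected by
# hand. A possible follow-up sketch (entirely an assumption -- the function
# name, output filename and column layout are not part of this file) would
# package the lists with the SNR bin centres and save them to a text table:
def _example_save_level1_results(binning, m, c, filename="nbc_level1.txt"):
    import numpy as np
    snr_mid = 0.5 * (binning[1:] + binning[:-1])  # linear bin centres
    nbin = len(m[1])  # allow for runs that stopped before all bins finished
    out = np.vstack([snr_mid[:nbin], m[1], m[2], c[1], c[2]]).T
    np.savetxt(filename, out, header="snr m11 m22 c11 c22")
    return out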