def test_fits():
    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    get_from_wiki('Aardvark.fit')
    file_name = os.path.join('data','Aardvark.fit')
    config = treecorr.read_config('Aardvark.yaml')
    config['verbose'] = 1

    # Just test a few random particular values
    cat1 = treecorr.Catalog(file_name, config)
    np.testing.assert_equal(len(cat1.ra), 390935)
    np.testing.assert_equal(cat1.nobj, 390935)
    np.testing.assert_almost_equal(cat1.ra[0], 56.4195 * (pi/180.))
    np.testing.assert_almost_equal(cat1.ra[390934], 78.4782 * (pi/180.))
    np.testing.assert_almost_equal(cat1.dec[290333], 83.1579 * (pi/180.))
    np.testing.assert_almost_equal(cat1.g1[46392], 0.0005066675)
    np.testing.assert_almost_equal(cat1.g2[46392], -0.0001006742)
    np.testing.assert_almost_equal(cat1.k[46392], -0.0008628797)

    # The catalog doesn't have x, y, or w, but test that functionality as well.
    del config['ra_col']
    del config['dec_col']
    config['x_col'] = 'RA'
    config['y_col'] = 'DEC'
    config['w_col'] = 'MU'
    config['flag_col'] = 'INDEX'
    config['ignore_flag'] = 64
    cat2 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat2.x[390934], 78.4782, decimal=4)
    np.testing.assert_almost_equal(cat2.y[290333], 83.1579, decimal=4)
    np.testing.assert_almost_equal(cat2.w[46392], 0.)         # index = 1200379
    np.testing.assert_almost_equal(cat2.w[46393], 0.9995946)  # index = 1200386

    # Test using a limited set of rows
    config['first_row'] = 101
    config['last_row'] = 50000
    cat3 = treecorr.Catalog(file_name, config)
    np.testing.assert_equal(len(cat3.x), 49900)
    np.testing.assert_equal(cat3.ntot, 49900)
    np.testing.assert_equal(cat3.nobj, sum(cat3.w != 0))
    np.testing.assert_equal(cat3.sumw, sum(cat3.w))
    np.testing.assert_equal(cat3.sumw, sum(cat2.w[100:50000]))
    np.testing.assert_almost_equal(cat3.g1[46292], 0.0005066675)
    np.testing.assert_almost_equal(cat3.g2[46292], -0.0001006742)
    np.testing.assert_almost_equal(cat3.k[46292], -0.0008628797)

    cat4 = treecorr.read_catalogs(config, key='file_name', is_rand=True)[0]
    np.testing.assert_equal(len(cat4.x), 49900)
    np.testing.assert_equal(cat4.ntot, 49900)
    np.testing.assert_equal(cat4.nobj, sum(cat4.w != 0))
    np.testing.assert_equal(cat4.sumw, sum(cat4.w))
    np.testing.assert_equal(cat4.sumw, sum(cat2.w[100:50000]))
    assert cat4.g1 is None
    assert cat4.g2 is None
    assert cat4.k is None
def test_list():
    # Test different ways to read in a list of catalog names.
    # This is based on the bug report for Issue #10.
    nobj = 5000
    rng = np.random.RandomState(8675309)
    x_list = []
    y_list = []
    file_names = []
    ncats = 3

    for k in range(ncats):
        x = rng.random_sample(nobj)
        y = rng.random_sample(nobj)
        file_name = os.path.join('data','test_list%d.dat'%k)
        with open(file_name, 'w') as fid:
            # These are intentionally in a different order from the order we parse them.
            fid.write('# ra,dec,x,y,k,g1,g2,w,flag\n')
            for i in range(nobj):
                fid.write(('%.8f %.8f\n')%(x[i],y[i]))
        x_list.append(x)
        y_list.append(y)
        file_names.append(file_name)

    # Start with file_name being a list:
    config = {
        'x_col' : 1,
        'y_col' : 2,
        'file_name' : file_names
    }

    cats = treecorr.read_catalogs(config, key='file_name')
    np.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        np.testing.assert_almost_equal(cats[k].x, x_list[k])
        np.testing.assert_almost_equal(cats[k].y, y_list[k])

    # Next check that the list can be just a string with spaces between names:
    config['file_name'] = " ".join(file_names)

    # Also check that it is ok to include file_list to read_catalogs.
    cats = treecorr.read_catalogs(config, 'file_name', 'file_list')
    np.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        np.testing.assert_almost_equal(cats[k].x, x_list[k])
        np.testing.assert_almost_equal(cats[k].y, y_list[k])

    # Next check that having the names in a file_list file works:
    list_name = os.path.join('data','test_list.txt')
    with open(list_name, 'w') as fid:
        for name in file_names:
            fid.write(name + '\n')
    del config['file_name']
    config['file_list'] = list_name

    cats = treecorr.read_catalogs(config, 'file_name', 'file_list')
    np.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        np.testing.assert_almost_equal(cats[k].x, x_list[k])
        np.testing.assert_almost_equal(cats[k].y, y_list[k])

    # Also, should be allowed to omit file_name arg:
    cats = treecorr.read_catalogs(config, list_key='file_list')
    np.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        np.testing.assert_almost_equal(cats[k].x, x_list[k])
        np.testing.assert_almost_equal(cats[k].y, y_list[k])
def test_fits():
    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    get_from_wiki('Aardvark.fit')
    file_name = os.path.join('data','Aardvark.fit')
    config = treecorr.read_config('Aardvark.yaml')
    config['verbose'] = 1
    config['kk_file_name'] = 'kk.fits'
    config['gg_file_name'] = 'gg.fits'

    # Just test a few random particular values
    cat1 = treecorr.Catalog(file_name, config)
    np.testing.assert_equal(len(cat1.ra), 390935)
    np.testing.assert_equal(cat1.nobj, 390935)
    np.testing.assert_almost_equal(cat1.ra[0], 56.4195 * (pi/180.))
    np.testing.assert_almost_equal(cat1.ra[390934], 78.4782 * (pi/180.))
    np.testing.assert_almost_equal(cat1.dec[290333], 83.1579 * (pi/180.))
    np.testing.assert_almost_equal(cat1.g1[46392], 0.0005066675)
    np.testing.assert_almost_equal(cat1.g2[46392], -0.0001006742)
    np.testing.assert_almost_equal(cat1.k[46392], -0.0008628797)

    assert_raises(ValueError, treecorr.Catalog, file_name, config, ra_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, dec_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, r_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, w_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, wpos_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, flag_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g1_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g2_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, k_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, ra_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, dec_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, x_col='x')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, y_col='y')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, z_col='z')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, ra_col='0', dec_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g1_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g2_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, k_col='0')
    assert_raises(TypeError, treecorr.Catalog, file_name, config, x_units='arcmin')
    assert_raises(TypeError, treecorr.Catalog, file_name, config, y_units='arcmin')
    del config['ra_units']
    assert_raises(TypeError, treecorr.Catalog, file_name, config)
    del config['dec_units']
    assert_raises(TypeError, treecorr.Catalog, file_name, config, ra_units='deg')

    # The catalog doesn't have x, y, or w, but test that functionality as well.
    del config['ra_col']
    del config['dec_col']
    config['x_col'] = 'RA'
    config['y_col'] = 'DEC'
    config['w_col'] = 'MU'
    config['flag_col'] = 'INDEX'
    config['ignore_flag'] = 64
    cat2 = treecorr.Catalog(file_name, config)
    np.testing.assert_almost_equal(cat2.x[390934], 78.4782, decimal=4)
    np.testing.assert_almost_equal(cat2.y[290333], 83.1579, decimal=4)
    np.testing.assert_almost_equal(cat2.w[46392], 0.)         # index = 1200379
    np.testing.assert_almost_equal(cat2.w[46393], 0.9995946)  # index = 1200386

    assert_raises(ValueError, treecorr.Catalog, file_name, config, x_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, y_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, z_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, ra_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, dec_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, r_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, w_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, wpos_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, flag_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g1_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g2_col='invalid')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, k_col='invalid')

    # Test using a limited set of rows
    config['first_row'] = 101
    config['last_row'] = 50000
    cat3 = treecorr.Catalog(file_name, config)
    np.testing.assert_equal(len(cat3.x), 49900)
    np.testing.assert_equal(cat3.ntot, 49900)
    np.testing.assert_equal(cat3.nobj, sum(cat3.w != 0))
    np.testing.assert_equal(cat3.sumw, sum(cat3.w))
    np.testing.assert_equal(cat3.sumw, sum(cat2.w[100:50000]))
    np.testing.assert_almost_equal(cat3.g1[46292], 0.0005066675)
    np.testing.assert_almost_equal(cat3.g2[46292], -0.0001006742)
    np.testing.assert_almost_equal(cat3.k[46292], -0.0008628797)

    cat4 = treecorr.read_catalogs(config, key='file_name', is_rand=True)[0]
    np.testing.assert_equal(len(cat4.x), 49900)
    np.testing.assert_equal(cat4.ntot, 49900)
    np.testing.assert_equal(cat4.nobj, sum(cat4.w != 0))
    np.testing.assert_equal(cat4.sumw, sum(cat4.w))
    np.testing.assert_equal(cat4.sumw, sum(cat2.w[100:50000]))
    assert cat4.g1 is None
    assert cat4.g2 is None
    assert cat4.k is None

    do_pickle(cat1)
    do_pickle(cat2)
    do_pickle(cat3)
    do_pickle(cat4)

    assert_raises(ValueError, treecorr.Catalog, file_name, config, first_row=-10)
    assert_raises(ValueError, treecorr.Catalog, file_name, config, first_row=0)
    assert_raises(ValueError, treecorr.Catalog, file_name, config, first_row=60000)
    assert_raises(ValueError, treecorr.Catalog, file_name, config, first_row=50001)

    assert_raises(TypeError, treecorr.read_catalogs, config)
    assert_raises(TypeError, treecorr.read_catalogs, config, key='file_name', list_key='file_name')

    # If gg output not given, it is still invalid to only have one or the other of g1,g2.
    del config['gg_file_name']
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g1_col='0')
    assert_raises(ValueError, treecorr.Catalog, file_name, config, g2_col='0')
def corr3(config, logger=None):
    """Run the full three-point correlation function code based on the parameters in the
    given config dict.

    The function print_corr3_params() will output information about the valid parameters
    that are expected to be in the config dict.

    Optionally a logger parameter may be given, in which case it is used for logging.
    If not given, the logging will be based on the verbose and log_file parameters.

    :param config:  The configuration dict which defines what to do.
    :param logger:  If desired, a logger object for logging. (default: None, in which case
                    one will be built according to the config dict's verbose level.)
    """
    # Setup logger based on config verbose value
    if logger is None:
        logger = treecorr.config.setup_logger(
                treecorr.config.get(config,'verbose',int,1),
                config.get('log_file',None))

    # Check that config doesn't have any extra parameters.
    # (Such values are probably typos.)
    # Also convert the given parameters to the correct type, etc.
    config = treecorr.config.check_config(config, corr3_valid_params, corr3_aliases, logger)

    import pprint
    logger.debug('Using configuration dict:\n%s',pprint.pformat(config))

    if ( 'output_dots' not in config
            and config.get('log_file',None) is None
            and config['verbose'] >= 2 ):
        config['output_dots'] = True

    # Set the number of threads
    num_threads = config.get('num_threads',None)
    logger.debug('From config dict, num_threads = %s',num_threads)
    treecorr.set_omp_threads(num_threads, logger)

    # Read in the input files.  Each of these is a list.
    cat1 = treecorr.read_catalogs(config, 'file_name', 'file_list', 0, logger)
    if len(cat1) == 0:
        raise AttributeError("Either file_name or file_list is required")
    cat2 = treecorr.read_catalogs(config, 'file_name2', 'rand_file_list2', 1, logger)
    cat3 = treecorr.read_catalogs(config, 'file_name3', 'rand_file_list3', 1, logger)
    rand1 = treecorr.read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger)
    rand2 = treecorr.read_catalogs(config, 'rand_file_name2', 'rand_file_list2', 1, logger)
    rand3 = treecorr.read_catalogs(config, 'rand_file_name3', 'rand_file_list3', 1, logger)
    if len(cat2) == 0 and len(rand2) > 0:
        raise AttributeError("rand_file_name2 is invalid without file_name2")
    if len(cat3) == 0 and len(rand3) > 0:
        raise AttributeError("rand_file_name3 is invalid without file_name3")
    logger.info("Done reading input catalogs")

    # Do GGG correlation function if necessary
    if 'ggg_file_name' in config: #or 'm3_file_name' in config:
        logger.info("Start GGG calculations...")
        ggg = treecorr.GGGCorrelation(config,logger)
        ggg.process(cat1,cat2,cat3)
        logger.info("Done GGG calculations.")
        if 'ggg_file_name' in config:
            ggg.write(config['ggg_file_name'])
        if 'm3_file_name' in config:
            ggg.writeMapSq(config['m3_file_name'])

    # Do NNN correlation function if necessary
    if 'nnn_file_name' in config:
        if len(rand1) == 0:
            raise AttributeError("rand_file_name is required for NNN correlation")
        if len(cat2) > 0 and len(rand2) == 0:
            raise AttributeError("rand_file_name2 is required for NNN cross-correlation")
        if len(cat3) > 0 and len(rand3) == 0:
            raise AttributeError("rand_file_name3 is required for NNN cross-correlation")
        if (len(cat2) > 0) != (len(cat3) > 0):
            raise NotImplementedError(
                "Cannot yet handle 3-point correlations with only two catalogs. "+
                "Need both cat2 and cat3.")

        logger.info("Start DDD calculations...")
        ddd = treecorr.NNNCorrelation(config,logger)
        ddd.process(cat1,cat2,cat3)
        logger.info("Done DDD calculations.")

        if len(cat2) == 0:
            rrr = treecorr.NNNCorrelation(config,logger)
            rrr.process(rand1)
            logger.info("Done RRR calculations.")

            if config['nnn_statistic'] == 'compensated':
                drr = treecorr.NNNCorrelation(config,logger)
                drr.process(cat1,rand2,rand3)
                logger.info("Done DRR calculations.")
                ddr = treecorr.NNNCorrelation(config,logger)
                ddr.process(cat1,cat2,rand3)
                logger.info("Done DDR calculations.")
                ddd.write(config['nnn_file_name'],rrr,drr,ddr)
            else:
                ddd.write(config['nnn_file_name'],rrr)
        else:
            rrr = treecorr.NNNCorrelation(config,logger)
            rrr.process(rand1,rand2,rand3)
            logger.info("Done RRR calculations.")

            if config['nnn_statistic'] == 'compensated':
                drr = treecorr.NNNCorrelation(config,logger)
                drr.process(cat1,rand2,rand3)
                logger.info("Done DRR calculations.")
                ddr = treecorr.NNNCorrelation(config,logger)
                ddr.process(cat1,cat2,rand3)
                logger.info("Done DDR calculations.")
                rdr = treecorr.NNNCorrelation(config,logger)
                rdr.process(rand1,cat2,rand3)
                logger.info("Done RDR calculations.")
                rrd = treecorr.NNNCorrelation(config,logger)
                rrd.process(rand1,rand2,cat3)
                logger.info("Done RRD calculations.")
                drd = treecorr.NNNCorrelation(config,logger)
                drd.process(cat1,rand2,cat3)
                logger.info("Done DRD calculations.")
                rdd = treecorr.NNNCorrelation(config,logger)
                rdd.process(rand1,cat2,cat3)
                logger.info("Done RDD calculations.")
                ddd.write(config['nnn_file_name'],rrr,drr,ddr,rdr,rrd,drd,rdd)
            else:
                ddd.write(config['nnn_file_name'],rrr)

    # Do KKK correlation function if necessary
    if 'kkk_file_name' in config:
        logger.info("Start KKK calculations...")
        kkk = treecorr.KKKCorrelation(config,logger)
        kkk.process(cat1,cat2,cat3)
        logger.info("Done KKK calculations.")
        kkk.write(config['kkk_file_name'])

    # Do NNG correlation function if necessary
    if False: #if 'nng_file_name' in config or 'nnm_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for nng correlation")
        logger.info("Start NNG calculations...")
        nng = treecorr.NNGCorrelation(config,logger)
        nng.process(cat1,cat2,cat3)
        logger.info("Done NNG calculation.")

        # The default nng_statistic is compensated _iff_ rand files are given.
        rrg = None
        if len(rand1) == 0:
            if config.get('nng_statistic',None) == 'compensated':
                raise AttributeError("rand_files is required for nng_statistic = compensated")
        elif config.get('nng_statistic','compensated') == 'compensated':
            rrg = treecorr.NNGCorrelation(config,logger)
            rrg.process(rand1,rand1,cat2)
            logger.info("Done RRG calculation.")

        if 'nng_file_name' in config:
            nng.write(config['nng_file_name'], rrg)
        if 'nnm_file_name' in config:
            nng.writeNNMap(config['nnm_file_name'], rrg)

    # Do NNK correlation function if necessary
    if False: #if 'nnk_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for nnk correlation")
        logger.info("Start NNK calculations...")
        nnk = treecorr.NNKCorrelation(config,logger)
        nnk.process(cat1,cat2,cat3)
        logger.info("Done NNK calculation.")

        rrk = None
        if len(rand1) == 0:
            if config.get('nnk_statistic',None) == 'compensated':
                raise AttributeError("rand_files is required for nnk_statistic = compensated")
        elif config.get('nnk_statistic','compensated') == 'compensated':
            rrk = treecorr.NNKCorrelation(config,logger)
            rrk.process(rand1,rand1,cat2)
            logger.info("Done RRK calculation.")

        nnk.write(config['nnk_file_name'], rrk)

    # Do KKG correlation function if necessary
    if False: #if 'kkg_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for kkg correlation")
        logger.info("Start KKG calculations...")
        kkg = treecorr.KKGCorrelation(config,logger)
        kkg.process(cat1,cat2,cat3)
        logger.info("Done KKG calculation.")
        kkg.write(config['kkg_file_name'])
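# The block below is a small usage sketch added for illustration; it is not part of the
# module above.  It shows one way to drive corr3() from Python with a config dict.  The
# file names 'data.dat' and 'rand.dat' (plain ASCII with x and y in the first two columns),
# the output name, and the binning choices are all illustrative assumptions, not values
# taken from this code.
def _example_corr3_usage():
    # Build the same kind of config dict that a corr3 parameter file would produce,
    # then hand it to corr3() defined above.
    config = {
        'file_name': 'data.dat',        # hypothetical data catalog: x y columns
        'rand_file_name': 'rand.dat',   # hypothetical random catalog with the same layout
        'x_col': 1,
        'y_col': 2,
        'min_sep': 1.,
        'max_sep': 50.,
        'nbins': 10,
        'nnn_statistic': 'simple',      # plain DDD/RRR estimator; no DRR/DDR terms needed
        'nnn_file_name': 'nnn_out.dat', # requests the NNN correlation output
        'verbose': 1,
    }
    corr3(config)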
def corr2(config, logger=None):
    """Run the full two-point correlation function code based on the parameters in the
    given config dict.

    The function print_corr2_params() will output information about the valid parameters
    that are expected to be in the config dict.

    Optionally a logger parameter may be given, in which case it is used for logging.
    If not given, the logging will be based on the verbose and log_file parameters.

    :param config:  The configuration dict which defines what to do.
    :param logger:  If desired, a logger object for logging. (default: None, in which case
                    one will be built according to the config dict's verbose level.)
    """
    # Setup logger based on config verbose value
    if logger is None:
        logger = treecorr.config.setup_logger(
                treecorr.config.get(config,'verbose',int,1),
                config.get('log_file',None))

    # Check that config doesn't have any extra parameters.
    # (Such values are probably typos.)
    # Also convert the given parameters to the correct type, etc.
    config = treecorr.config.check_config(config, corr2_valid_params, corr2_aliases, logger)

    import pprint
    logger.debug('Using configuration dict:\n%s',pprint.pformat(config))

    if ( 'output_dots' not in config
            and config.get('log_file',None) is None
            and config['verbose'] >= 2 ):
        config['output_dots'] = True

    # Set the number of threads
    num_threads = config.get('num_threads',None)
    logger.debug('From config dict, num_threads = %s',num_threads)
    treecorr.set_omp_threads(num_threads, logger)

    # Read in the input files.  Each of these is a list.
    cat1 = treecorr.read_catalogs(config, 'file_name', 'file_list', 0, logger)
    if len(cat1) == 0:
        raise AttributeError("Either file_name or file_list is required")
    cat2 = treecorr.read_catalogs(config, 'file_name2', 'file_list2', 1, logger)
    rand1 = treecorr.read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger)
    rand2 = treecorr.read_catalogs(config, 'rand_file_name2', 'rand_file_list2', 1, logger)
    if len(cat2) == 0 and len(rand2) > 0:
        raise AttributeError("rand_file_name2 is invalid without file_name2")
    logger.info("Done reading input catalogs")

    # Do GG correlation function if necessary
    if 'gg_file_name' in config or 'm2_file_name' in config:
        logger.warning("Performing GG calculations...")
        gg = treecorr.GGCorrelation(config,logger)
        gg.process(cat1,cat2)
        logger.info("Done GG calculations.")
        if 'gg_file_name' in config:
            gg.write(config['gg_file_name'])
            logger.warning("Wrote GG correlation to %s",config['gg_file_name'])
        if 'm2_file_name' in config:
            gg.writeMapSq(config['m2_file_name'], m2_uform=config['m2_uform'])
            logger.warning("Wrote Mapsq values to %s",config['m2_file_name'])

    # Do NG correlation function if necessary
    if 'ng_file_name' in config or 'nm_file_name' in config or 'norm_file_name' in config:
        if len(cat2) == 0:
            raise AttributeError("file_name2 is required for ng correlation")
        logger.warning("Performing NG calculations...")
        ng = treecorr.NGCorrelation(config,logger)
        ng.process(cat1,cat2)
        logger.info("Done NG calculation.")

        # The default ng_statistic is compensated _iff_ rand files are given.
        rg = None
        if len(rand1) == 0:
            if config.get('ng_statistic',None) == 'compensated':
                raise AttributeError("rand_files is required for ng_statistic = compensated")
        elif config.get('ng_statistic','compensated') == 'compensated':
            rg = treecorr.NGCorrelation(config,logger)
            rg.process(rand1,cat2)
            logger.info("Done RG calculation.")

        if 'ng_file_name' in config:
            ng.write(config['ng_file_name'], rg)
            logger.warning("Wrote NG correlation to %s",config['ng_file_name'])
        if 'nm_file_name' in config:
            ng.writeNMap(config['nm_file_name'], rg, m2_uform=config['m2_uform'])
            logger.warning("Wrote NMap values to %s",config['nm_file_name'])

        if 'norm_file_name' in config:
            gg = treecorr.GGCorrelation(config,logger)
            gg.process(cat2)
            logger.info("Done GG calculation for norm")
            dd = treecorr.NNCorrelation(config,logger)
            dd.process(cat1)
            logger.info("Done DD calculation for norm")
            rr = treecorr.NNCorrelation(config,logger)
            rr.process(rand1)
            logger.info("Done RR calculation for norm")
            dr = None
            if config['nn_statistic'] == 'compensated':
                dr = treecorr.NNCorrelation(config,logger)
                dr.process(cat1,rand1)
                logger.info("Done DR calculation for norm")
            ng.writeNorm(config['norm_file_name'],gg,dd,rr,dr,rg,m2_uform=config['m2_uform'])
            logger.warning("Wrote Norm values to %s",config['norm_file_name'])

    # Do NN correlation function if necessary
    if 'nn_file_name' in config:
        if len(rand1) == 0:
            raise AttributeError("rand_file_name is required for NN correlation")
        if len(cat2) > 0 and len(rand2) == 0:
            raise AttributeError("rand_file_name2 is required for NN cross-correlation")
        logger.warning("Performing DD calculations...")
        dd = treecorr.NNCorrelation(config,logger)
        dd.process(cat1,cat2)
        logger.info("Done DD calculations.")

        dr = None
        rd = None
        if len(cat2) == 0:
            logger.warning("Performing RR calculations...")
            rr = treecorr.NNCorrelation(config,logger)
            rr.process(rand1)
            logger.info("Done RR calculations.")

            if config['nn_statistic'] == 'compensated':
                logger.warning("Performing DR calculations...")
                dr = treecorr.NNCorrelation(config,logger)
                dr.process(cat1,rand1)
                logger.info("Done DR calculations.")
        else:
            logger.warning("Performing RR calculations...")
            rr = treecorr.NNCorrelation(config,logger)
            rr.process(rand1,rand2)
            logger.info("Done RR calculations.")

            if config['nn_statistic'] == 'compensated':
                logger.warning("Performing DR calculations...")
                dr = treecorr.NNCorrelation(config,logger)
                dr.process(cat1,rand2)
                logger.info("Done DR calculations.")
                rd = treecorr.NNCorrelation(config,logger)
                rd.process(rand1,cat2)
                logger.info("Done RD calculations.")
        dd.write(config['nn_file_name'],rr,dr,rd)
        logger.warning("Wrote NN correlation to %s",config['nn_file_name'])

    # Do KK correlation function if necessary
    if 'kk_file_name' in config:
        logger.warning("Performing KK calculations...")
        kk = treecorr.KKCorrelation(config,logger)
        kk.process(cat1,cat2)
        logger.info("Done KK calculations.")
        kk.write(config['kk_file_name'])
        logger.warning("Wrote KK correlation to %s",config['kk_file_name'])

    # Do NK correlation function if necessary
    if 'nk_file_name' in config:
        if len(cat2) == 0:
            raise AttributeError("file_name2 is required for nk correlation")
        logger.warning("Performing NK calculations...")
        nk = treecorr.NKCorrelation(config,logger)
        nk.process(cat1,cat2)
        logger.info("Done NK calculation.")

        rk = None
        if len(rand1) == 0:
            if config.get('nk_statistic',None) == 'compensated':
                raise AttributeError("rand_files is required for nk_statistic = compensated")
        elif config.get('nk_statistic','compensated') == 'compensated':
            rk = treecorr.NKCorrelation(config,logger)
            rk.process(rand1,cat2)
            logger.info("Done RK calculation.")

        nk.write(config['nk_file_name'], rk)
        logger.warning("Wrote NK correlation to %s",config['nk_file_name'])

    # Do KG correlation function if necessary
    if 'kg_file_name' in config:
        if len(cat2) == 0:
            raise AttributeError("file_name2 is required for kg correlation")
        logger.warning("Performing KG calculations...")
        kg = treecorr.KGCorrelation(config,logger)
        kg.process(cat1,cat2)
        logger.info("Done KG calculation.")
        kg.write(config['kg_file_name'])
        logger.warning("Wrote KG correlation to %s",config['kg_file_name'])
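# The block below is a small usage sketch added for illustration; it is not part of the
# module above.  It shows one way to drive corr2() from Python with a config dict,
# assuming a hypothetical ASCII catalog 'cat.dat' with x, y, g1, g2 in columns 1-4.
# The file names, column numbers, and binning choices are assumptions, not values taken
# from this code.
def _example_corr2_usage():
    # Build the same kind of config dict that a corr2 parameter file would produce,
    # then hand it to corr2() defined above.
    config = {
        'file_name': 'cat.dat',        # hypothetical input catalog
        'x_col': 1,
        'y_col': 2,
        'g1_col': 3,
        'g2_col': 4,
        'min_sep': 1.,
        'max_sep': 100.,
        'nbins': 20,
        'gg_file_name': 'gg_out.dat',  # requests the GG (shear-shear) correlation output
        'verbose': 1,
    }
    corr2(config)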
def test_list():
    # Test different ways to read in a list of catalog names.
    # This is based on the bug report for Issue #10.
    nobj = 5000
    numpy.random.seed(8675309)
    x_list = []
    y_list = []
    file_names = []
    ncats = 3

    for k in range(ncats):
        x = numpy.random.random_sample(nobj)
        y = numpy.random.random_sample(nobj)
        file_name = os.path.join('data','test_list%d.dat'%k)
        with open(file_name, 'w') as fid:
            # These are intentionally in a different order from the order we parse them.
            fid.write('# ra,dec,x,y,k,g1,g2,w,flag\n')
            for i in range(nobj):
                fid.write(('%.8f %.8f\n')%(x[i],y[i]))
        x_list.append(x)
        y_list.append(y)
        file_names.append(file_name)

    # Start with file_name being a list:
    config = {
        'x_col' : 1,
        'y_col' : 2,
        'file_name' : file_names
    }

    cats = treecorr.read_catalogs(config, key='file_name')
    numpy.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        numpy.testing.assert_almost_equal(cats[k].x, x_list[k])
        numpy.testing.assert_almost_equal(cats[k].y, y_list[k])

    # Next check that the list can be just a string with spaces between names:
    config['file_name'] = " ".join(file_names)

    # Also check that it is ok to include file_list to read_catalogs.
    cats = treecorr.read_catalogs(config, 'file_name', 'file_list')
    numpy.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        numpy.testing.assert_almost_equal(cats[k].x, x_list[k])
        numpy.testing.assert_almost_equal(cats[k].y, y_list[k])

    # Next check that having the names in a file_list file works:
    list_name = os.path.join('data','test_list.txt')
    with open(list_name, 'w') as fid:
        for name in file_names:
            fid.write(name + '\n')
    del config['file_name']
    config['file_list'] = list_name

    cats = treecorr.read_catalogs(config, 'file_name', 'file_list')
    numpy.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        numpy.testing.assert_almost_equal(cats[k].x, x_list[k])
        numpy.testing.assert_almost_equal(cats[k].y, y_list[k])

    # Also, should be allowed to omit file_name arg:
    cats = treecorr.read_catalogs(config, list_key='file_list')
    numpy.testing.assert_equal(len(cats), ncats)
    for k in range(ncats):
        numpy.testing.assert_almost_equal(cats[k].x, x_list[k])
        numpy.testing.assert_almost_equal(cats[k].y, y_list[k])
def corr3(config, logger=None):
    """Run the full three-point correlation function code based on the parameters in the
    given config dict.

    The function print_corr3_params() will output information about the valid parameters
    that are expected to be in the config dict.

    Optionally a logger parameter may be given, in which case it is used for logging.
    If not given, the logging will be based on the verbose and log_file parameters.

    :param config:  The configuration dict which defines what to do.
    :param logger:  If desired, a logger object for logging. (default: None, in which case
                    one will be built according to the config dict's verbose level.)
    """
    # Setup logger based on config verbose value
    if logger is None:
        logger = treecorr.config.setup_logger(
            treecorr.config.get(config, 'verbose', int, 1),
            config.get('log_file', None))

    # Check that config doesn't have any extra parameters.
    # (Such values are probably typos.)
    # Also convert the given parameters to the correct type, etc.
    config = treecorr.config.check_config(config, corr3_valid_params, corr3_aliases, logger)

    import pprint
    logger.debug('Using configuration dict:\n%s', pprint.pformat(config))

    if ('output_dots' not in config
            and config.get('log_file', None) is None
            and config['verbose'] >= 2):
        config['output_dots'] = True

    # Set the number of threads
    num_threads = config.get('num_threads', None)
    logger.debug('From config dict, num_threads = %s', num_threads)
    treecorr.set_omp_threads(num_threads, logger)

    # Read in the input files.  Each of these is a list.
    cat1 = treecorr.read_catalogs(config, 'file_name', 'file_list', 0, logger)
    if len(cat1) == 0:
        raise AttributeError("Either file_name or file_list is required")
    cat2 = treecorr.read_catalogs(config, 'file_name2', 'rand_file_list2', 1, logger)
    cat3 = treecorr.read_catalogs(config, 'file_name3', 'rand_file_list3', 1, logger)
    rand1 = treecorr.read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger)
    rand2 = treecorr.read_catalogs(config, 'rand_file_name2', 'rand_file_list2', 1, logger)
    rand3 = treecorr.read_catalogs(config, 'rand_file_name3', 'rand_file_list3', 1, logger)
    if len(cat2) == 0 and len(rand2) > 0:
        raise AttributeError("rand_file_name2 is invalid without file_name2")
    if len(cat3) == 0 and len(rand3) > 0:
        raise AttributeError("rand_file_name3 is invalid without file_name3")
    logger.info("Done reading input catalogs")

    # Do GGG correlation function if necessary
    if 'ggg_file_name' in config: #or 'm3_file_name' in config:
        logger.info("Start GGG calculations...")
        ggg = treecorr.GGGCorrelation(config, logger)
        ggg.process(cat1, cat2, cat3)
        logger.info("Done GGG calculations.")
        if 'ggg_file_name' in config:
            ggg.write(config['ggg_file_name'])
        if 'm3_file_name' in config:
            ggg.writeMapSq(config['m3_file_name'])

    # Do NNN correlation function if necessary
    if 'nnn_file_name' in config:
        if len(rand1) == 0:
            raise AttributeError("rand_file_name is required for NNN correlation")
        if len(cat2) > 0 and len(rand2) == 0:
            raise AttributeError("rand_file_name2 is required for NNN cross-correlation")
        if len(cat3) > 0 and len(rand3) == 0:
            raise AttributeError("rand_file_name3 is required for NNN cross-correlation")
        if (len(cat2) > 0) != (len(cat3) > 0):
            raise NotImplementedError(
                "Cannot yet handle 3-point correlations with only two catalogs. " +
                "Need both cat2 and cat3.")

        logger.info("Start DDD calculations...")
        ddd = treecorr.NNNCorrelation(config, logger)
        ddd.process(cat1, cat2, cat3)
        logger.info("Done DDD calculations.")

        if len(cat2) == 0:
            rrr = treecorr.NNNCorrelation(config, logger)
            rrr.process(rand1)
            logger.info("Done RRR calculations.")
            # For the next step, just make cat2 = cat3 = cat1 and rand2 = rand3 = rand1.
            cat2 = cat3 = cat1
            rand2 = rand3 = rand1
        else:
            rrr = treecorr.NNNCorrelation(config, logger)
            rrr.process(rand1, rand2, rand3)
            logger.info("Done RRR calculations.")

        if config['nnn_statistic'] == 'compensated':
            drr = treecorr.NNNCorrelation(config, logger)
            drr.process(cat1, rand2, rand3)
            logger.info("Done DRR calculations.")
            ddr = treecorr.NNNCorrelation(config, logger)
            ddr.process(cat1, cat2, rand3)
            logger.info("Done DDR calculations.")
            rdr = treecorr.NNNCorrelation(config, logger)
            rdr.process(rand1, cat2, rand3)
            logger.info("Done RDR calculations.")
            rrd = treecorr.NNNCorrelation(config, logger)
            rrd.process(rand1, rand2, cat3)
            logger.info("Done RRD calculations.")
            drd = treecorr.NNNCorrelation(config, logger)
            drd.process(cat1, rand2, cat3)
            logger.info("Done DRD calculations.")
            rdd = treecorr.NNNCorrelation(config, logger)
            rdd.process(rand1, cat2, cat3)
            logger.info("Done RDD calculations.")
            ddd.write(config['nnn_file_name'], rrr, drr, rdr, rrd, ddr, drd, rdd)
        else:
            ddd.write(config['nnn_file_name'], rrr)

    # Do KKK correlation function if necessary
    if 'kkk_file_name' in config:
        logger.info("Start KKK calculations...")
        kkk = treecorr.KKKCorrelation(config, logger)
        kkk.process(cat1, cat2, cat3)
        logger.info("Done KKK calculations.")
        kkk.write(config['kkk_file_name'])

    # Do NNG correlation function if necessary
    if False: #if 'nng_file_name' in config or 'nnm_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for nng correlation")
        logger.info("Start NNG calculations...")
        nng = treecorr.NNGCorrelation(config, logger)
        nng.process(cat1, cat2, cat3)
        logger.info("Done NNG calculation.")

        # The default nng_statistic is compensated _iff_ rand files are given.
        rrg = None
        if len(rand1) == 0:
            if config.get('nng_statistic', None) == 'compensated':
                raise AttributeError("rand_files is required for nng_statistic = compensated")
        elif config.get('nng_statistic', 'compensated') == 'compensated':
            rrg = treecorr.NNGCorrelation(config, logger)
            rrg.process(rand1, rand1, cat2)
            logger.info("Done RRG calculation.")

        if 'nng_file_name' in config:
            nng.write(config['nng_file_name'], rrg)
        if 'nnm_file_name' in config:
            nng.writeNNMap(config['nnm_file_name'], rrg)

    # Do NNK correlation function if necessary
    if False: #if 'nnk_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for nnk correlation")
        logger.info("Start NNK calculations...")
        nnk = treecorr.NNKCorrelation(config, logger)
        nnk.process(cat1, cat2, cat3)
        logger.info("Done NNK calculation.")

        rrk = None
        if len(rand1) == 0:
            if config.get('nnk_statistic', None) == 'compensated':
                raise AttributeError("rand_files is required for nnk_statistic = compensated")
        elif config.get('nnk_statistic', 'compensated') == 'compensated':
            rrk = treecorr.NNKCorrelation(config, logger)
            rrk.process(rand1, rand1, cat2)
            logger.info("Done RRK calculation.")

        nnk.write(config['nnk_file_name'], rrk)

    # Do KKG correlation function if necessary
    if False: #if 'kkg_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for kkg correlation")
        logger.info("Start KKG calculations...")
        kkg = treecorr.KKGCorrelation(config, logger)
        kkg.process(cat1, cat2, cat3)
        logger.info("Done KKG calculation.")
        kkg.write(config['kkg_file_name'])
def corr3(config, logger=None):
    """Run the full three-point correlation function code based on the parameters in the
    given config dict.

    The function `print_corr3_params` will output information about the valid parameters
    that are expected to be in the config dict.

    Optionally a logger parameter may be given, in which case it is used for logging.
    If not given, the logging will be based on the verbose and log_file parameters.

    :param config:  The configuration dict which defines what to do.
    :param logger:  If desired, a logger object for logging. (default: None, in which case
                    one will be built according to the config dict's verbose level.)
    """
    # Setup logger based on config verbose value
    if logger is None:
        logger = treecorr.config.setup_logger(
            treecorr.config.get(config, 'verbose', int, 1),
            config.get('log_file', None))

    # Check that config doesn't have any extra parameters.
    # (Such values are probably typos.)
    # Also convert the given parameters to the correct type, etc.
    config = treecorr.config.check_config(config, corr3_valid_params, corr3_aliases, logger)

    import pprint
    logger.debug('Using configuration dict:\n%s', pprint.pformat(config))

    if ('output_dots' not in config
            and config.get('log_file', None) is None
            and config['verbose'] >= 2):
        config['output_dots'] = True

    # Set the number of threads
    num_threads = config.get('num_threads', None)
    logger.debug('From config dict, num_threads = %s', num_threads)
    treecorr.set_omp_threads(num_threads, logger)

    # Read in the input files.  Each of these is a list.
    cat1 = treecorr.read_catalogs(config, 'file_name', 'file_list', 0, logger)
    cat2 = treecorr.read_catalogs(config, 'file_name2', 'rand_file_list2', 1, logger)
    cat3 = treecorr.read_catalogs(config, 'file_name3', 'rand_file_list3', 1, logger)
    rand1 = treecorr.read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger)
    rand2 = treecorr.read_catalogs(config, 'rand_file_name2', 'rand_file_list2', 1, logger)
    rand3 = treecorr.read_catalogs(config, 'rand_file_name3', 'rand_file_list3', 1, logger)
    if len(cat1) == 0:
        raise TypeError("Either file_name or file_list is required")
    if len(cat2) == 0: cat2 = None
    if len(cat3) == 0: cat3 = None
    if len(rand1) == 0: rand1 = None
    if len(rand2) == 0: rand2 = None
    if len(rand3) == 0: rand3 = None
    if cat2 is None and rand2 is not None:
        raise TypeError("rand_file_name2 is invalid without file_name2")
    if cat3 is None and rand3 is not None:
        raise TypeError("rand_file_name3 is invalid without file_name3")
    if (cat2 is None) != (cat3 is None):
        raise NotImplementedError(
            "Cannot yet handle 3-point correlations with only two catalogs. " +
            "Need both cat2 and cat3.")
    logger.info("Done reading input catalogs")

    # Do GGG correlation function if necessary
    if 'ggg_file_name' in config or 'm3_file_name' in config:
        logger.warning("Performing GGG calculations...")
        ggg = treecorr.GGGCorrelation(config, logger)
        ggg.process(cat1, cat2, cat3)
        logger.info("Done GGG calculations.")
        if 'ggg_file_name' in config:
            ggg.write(config['ggg_file_name'])
            logger.warning("Wrote GGG correlation to %s", config['ggg_file_name'])
        if 'm3_file_name' in config:
            ggg.writeMap3(config['m3_file_name'])
            logger.warning("Wrote Map3 values to %s", config['m3_file_name'])

    # Do NNN correlation function if necessary
    if 'nnn_file_name' in config:
        logger.warning("Performing DDD calculations...")
        ddd = treecorr.NNNCorrelation(config, logger)
        ddd.process(cat1, cat2, cat3)
        logger.info("Done DDD calculations.")

        drr = None
        rdr = None
        rrd = None
        ddr = None
        drd = None
        rdd = None
        if rand1 is None:
            if rand2 is not None or rand3 is not None:
                raise TypeError("rand_file_name is required if rand2 or rand3 is given")
            logger.warning("No random catalogs given.  Only doing ntri calculation.")
            rrr = None
        elif cat2 is None:
            logger.warning("Performing RRR calculations...")
            rrr = treecorr.NNNCorrelation(config, logger)
            rrr.process(rand1)
            logger.info("Done RRR calculations.")
            # For the next step, just make cat2 = cat3 = cat1 and rand2 = rand3 = rand1.
            cat2 = cat3 = cat1
            rand2 = rand3 = rand1
        else:
            if rand2 is None:
                raise TypeError("rand_file_name2 is required when file_name2 is given")
            if cat3 is not None and rand3 is None:
                raise TypeError("rand_file_name3 is required when file_name3 is given")
            logger.warning("Performing RRR calculations...")
            rrr = treecorr.NNNCorrelation(config, logger)
            rrr.process(rand1, rand2, rand3)
            logger.info("Done RRR calculations.")

        if rrr is not None and config['nnn_statistic'] == 'compensated':
            logger.warning("Performing DRR calculations...")
            drr = treecorr.NNNCorrelation(config, logger)
            drr.process(cat1, rand2, rand3)
            logger.info("Done DRR calculations.")
            logger.warning("Performing DDR calculations...")
            ddr = treecorr.NNNCorrelation(config, logger)
            ddr.process(cat1, cat2, rand3)
            logger.info("Done DDR calculations.")
            logger.warning("Performing RDR calculations...")
            rdr = treecorr.NNNCorrelation(config, logger)
            rdr.process(rand1, cat2, rand3)
            logger.info("Done RDR calculations.")
            logger.warning("Performing RRD calculations...")
            rrd = treecorr.NNNCorrelation(config, logger)
            rrd.process(rand1, rand2, cat3)
            logger.info("Done RRD calculations.")
            logger.warning("Performing DRD calculations...")
            drd = treecorr.NNNCorrelation(config, logger)
            drd.process(cat1, rand2, cat3)
            logger.info("Done DRD calculations.")
            logger.warning("Performing RDD calculations...")
            rdd = treecorr.NNNCorrelation(config, logger)
            rdd.process(rand1, cat2, cat3)
            logger.info("Done RDD calculations.")

        ddd.write(config['nnn_file_name'], rrr, drr, rdr, rrd, ddr, drd, rdd)
        logger.warning("Wrote NNN correlation to %s", config['nnn_file_name'])

    # Do KKK correlation function if necessary
    if 'kkk_file_name' in config:
        logger.warning("Performing KKK calculations...")
        kkk = treecorr.KKKCorrelation(config, logger)
        kkk.process(cat1, cat2, cat3)
        logger.info("Done KKK calculations.")
        kkk.write(config['kkk_file_name'])
        logger.warning("Wrote KKK correlation to %s", config['kkk_file_name'])
def corr3(config, logger=None):
    """Run the full three-point correlation function code based on the parameters in the
    given config dict.

    The function `print_corr3_params` will output information about the valid parameters
    that are expected to be in the config dict.

    Optionally a logger parameter may be given, in which case it is used for logging.
    If not given, the logging will be based on the verbose and log_file parameters.

    :param config:  The configuration dict which defines what to do.
    :param logger:  If desired, a logger object for logging. (default: None, in which case
                    one will be built according to the config dict's verbose level.)
    """
    # Setup logger based on config verbose value
    if logger is None:
        logger = treecorr.config.setup_logger(
                treecorr.config.get(config,'verbose',int,1),
                config.get('log_file',None))

    # Check that config doesn't have any extra parameters.
    # (Such values are probably typos.)
    # Also convert the given parameters to the correct type, etc.
    config = treecorr.config.check_config(config, corr3_valid_params, corr3_aliases, logger)

    import pprint
    logger.debug('Using configuration dict:\n%s',pprint.pformat(config))

    if ( 'output_dots' not in config
            and config.get('log_file',None) is None
            and config['verbose'] >= 2 ):
        config['output_dots'] = True

    # Set the number of threads
    num_threads = config.get('num_threads',None)
    logger.debug('From config dict, num_threads = %s',num_threads)
    treecorr.set_omp_threads(num_threads, logger)

    # Read in the input files.  Each of these is a list.
    cat1 = treecorr.read_catalogs(config, 'file_name', 'file_list', 0, logger)
    cat2 = treecorr.read_catalogs(config, 'file_name2', 'rand_file_list2', 1, logger)
    cat3 = treecorr.read_catalogs(config, 'file_name3', 'rand_file_list3', 1, logger)
    rand1 = treecorr.read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger)
    rand2 = treecorr.read_catalogs(config, 'rand_file_name2', 'rand_file_list2', 1, logger)
    rand3 = treecorr.read_catalogs(config, 'rand_file_name3', 'rand_file_list3', 1, logger)
    if len(cat1) == 0:
        raise TypeError("Either file_name or file_list is required")
    if len(cat2) == 0: cat2 = None
    if len(cat3) == 0: cat3 = None
    if len(rand1) == 0: rand1 = None
    if len(rand2) == 0: rand2 = None
    if len(rand3) == 0: rand3 = None
    if cat2 is None and rand2 is not None:
        raise TypeError("rand_file_name2 is invalid without file_name2")
    if cat3 is None and rand3 is not None:
        raise TypeError("rand_file_name3 is invalid without file_name3")
    if (cat2 is None) != (cat3 is None):
        raise NotImplementedError(
            "Cannot yet handle 3-point correlations with only two catalogs. "+
            "Need both cat2 and cat3.")
    logger.info("Done reading input catalogs")

    # Do GGG correlation function if necessary
    if 'ggg_file_name' in config or 'm3_file_name' in config:
        logger.warning("Performing GGG calculations...")
        ggg = treecorr.GGGCorrelation(config,logger)
        ggg.process(cat1,cat2,cat3)
        logger.info("Done GGG calculations.")
        if 'ggg_file_name' in config:
            ggg.write(config['ggg_file_name'])
            logger.warning("Wrote GGG correlation to %s",config['ggg_file_name'])
        if 'm3_file_name' in config:
            ggg.writeMap3(config['m3_file_name'])
            logger.warning("Wrote Map3 values to %s",config['m3_file_name'])

    # Do NNN correlation function if necessary
    if 'nnn_file_name' in config:
        logger.warning("Performing DDD calculations...")
        ddd = treecorr.NNNCorrelation(config,logger)
        ddd.process(cat1,cat2,cat3)
        logger.info("Done DDD calculations.")

        drr = None
        rdr = None
        rrd = None
        ddr = None
        drd = None
        rdd = None
        if rand1 is None:
            if rand2 is not None or rand3 is not None:
                raise TypeError("rand_file_name is required if rand2 or rand3 is given")
            logger.warning("No random catalogs given.  Only doing ntri calculation.")
            rrr = None
        elif cat2 is None:
            logger.warning("Performing RRR calculations...")
            rrr = treecorr.NNNCorrelation(config,logger)
            rrr.process(rand1)
            logger.info("Done RRR calculations.")
            # For the next step, just make cat2 = cat3 = cat1 and rand2 = rand3 = rand1.
            cat2 = cat3 = cat1
            rand2 = rand3 = rand1
        else:
            if rand2 is None:
                raise TypeError("rand_file_name2 is required when file_name2 is given")
            if cat3 is not None and rand3 is None:
                raise TypeError("rand_file_name3 is required when file_name3 is given")
            logger.warning("Performing RRR calculations...")
            rrr = treecorr.NNNCorrelation(config,logger)
            rrr.process(rand1,rand2,rand3)
            logger.info("Done RRR calculations.")

        if rrr is not None and config['nnn_statistic'] == 'compensated':
            logger.warning("Performing DRR calculations...")
            drr = treecorr.NNNCorrelation(config,logger)
            drr.process(cat1,rand2,rand3)
            logger.info("Done DRR calculations.")
            logger.warning("Performing DDR calculations...")
            ddr = treecorr.NNNCorrelation(config,logger)
            ddr.process(cat1,cat2,rand3)
            logger.info("Done DDR calculations.")
            logger.warning("Performing RDR calculations...")
            rdr = treecorr.NNNCorrelation(config,logger)
            rdr.process(rand1,cat2,rand3)
            logger.info("Done RDR calculations.")
            logger.warning("Performing RRD calculations...")
            rrd = treecorr.NNNCorrelation(config,logger)
            rrd.process(rand1,rand2,cat3)
            logger.info("Done RRD calculations.")
            logger.warning("Performing DRD calculations...")
            drd = treecorr.NNNCorrelation(config,logger)
            drd.process(cat1,rand2,cat3)
            logger.info("Done DRD calculations.")
            logger.warning("Performing RDD calculations...")
            rdd = treecorr.NNNCorrelation(config,logger)
            rdd.process(rand1,cat2,cat3)
            logger.info("Done RDD calculations.")

        ddd.write(config['nnn_file_name'],rrr,drr,rdr,rrd,ddr,drd,rdd)
        logger.warning("Wrote NNN correlation to %s",config['nnn_file_name'])

    # Do KKK correlation function if necessary
    if 'kkk_file_name' in config:
        logger.warning("Performing KKK calculations...")
        kkk = treecorr.KKKCorrelation(config,logger)
        kkk.process(cat1,cat2,cat3)
        logger.info("Done KKK calculations.")
        kkk.write(config['kkk_file_name'])
        logger.warning("Wrote KKK correlation to %s",config['kkk_file_name'])