def test_basic_catalog():
    """Test basic operations on Catalog.

    Reads the same catalog in both ASCII and FITS form and checks that the
    basic accessors (ncols, nobjects, get/getInt/getFloat) agree.
    """
    import time
    t1 = time.time()

    # First the ASCII version.  Columns are addressed by integer index, and
    # get() returns the raw string token.
    cat = galsim.Catalog(dir='config_input', file_name='catalog.txt')
    np.testing.assert_equal(cat.ncols, 12)
    np.testing.assert_equal(cat.nobjects, 3)
    np.testing.assert_equal(cat.isFits(), False)
    np.testing.assert_equal(cat.get(1, 11), '15')
    np.testing.assert_equal(cat.getInt(1, 11), 15)
    np.testing.assert_almost_equal(cat.getFloat(2, 1), 8000)
    do_pickle(cat)

    # Next the FITS version.  Columns are addressed by name, and get()
    # returns typed values directly.
    cat = galsim.Catalog(dir='config_input', file_name='catalog.fits')
    np.testing.assert_equal(cat.ncols, 12)
    np.testing.assert_equal(cat.nobjects, 3)
    np.testing.assert_equal(cat.isFits(), True)
    np.testing.assert_equal(cat.get(1, 'angle2'), 15)
    np.testing.assert_equal(cat.getInt(1, 'angle2'), 15)
    np.testing.assert_almost_equal(cat.getFloat(2, 'float2'), 8000)
    do_pickle(cat)

    t2 = time.time()
    # Bug fix: the Python-2-only print statement is a SyntaxError on Python 3;
    # use the print() function, consistent with the other tests in this file.
    print('time for %s = %.2f' % (funcname(), t2 - t1))
def test_ascii_catalog():
    """Test basic operations on an ASCII Catalog."""
    cat = galsim.Catalog(dir='config_input', file_name='catalog.txt')
    np.testing.assert_equal(cat.ncols, 12)
    np.testing.assert_equal(cat.nobjects, 3)
    np.testing.assert_equal(cat.isFits(), False)
    np.testing.assert_equal(cat.get(1, 11), '15')
    np.testing.assert_equal(cat.getInt(1, 11), 15)
    np.testing.assert_almost_equal(cat.getFloat(2, 1), 8000)
    do_pickle(cat)

    # An equivalent explicit construction should compare equal.
    same = galsim.Catalog('catalog.txt', 'config_input', comments='#', file_type='ASCII')
    assert same == cat
    assert len(same) == same.nobjects == same.getNObjects() == cat.nobjects
    assert same.ncols == cat.ncols

    # catalog2.txt holds the same data, but uses '%' as its comment marker.
    alt = galsim.Catalog('catalog2.txt', 'config_input', comments='%')
    assert alt.nobjects == cat.nobjects
    np.testing.assert_array_equal(alt.data, cat.data)
    assert alt != cat  # Different file name, so not considered equal.
    do_pickle(alt)

    # catalog3.txt has no comment lines at all.
    bare = galsim.Catalog('catalog3.txt', 'config_input', comments='')
    assert len(bare) == bare.nobjects == cat.nobjects
    np.testing.assert_array_equal(bare.data, cat.data)
    assert bare != cat
    do_pickle(bare)

    # Construction errors
    assert_raises(galsim.GalSimValueError, galsim.Catalog, 'catalog.txt', file_type='invalid')
    assert_raises(ValueError, galsim.Catalog, 'catalog3.txt', 'config_input', comments="#%")
    assert_raises((IOError, OSError), galsim.Catalog, 'catalog.txt')  # Wrong dir
    assert_raises((IOError, OSError), galsim.Catalog, 'invalid.txt', 'config_input')

    # Indexing errors: bad rows, bad columns, non-integer row.
    for row, col in [(-1, 11), (3, 11), (1, -1), (1, 50), ('val', 11), (3, 'val')]:
        assert_raises(IndexError, cat.get, row, col)
def test_fits_catalog():
    """Test basic operations on a FITS Catalog."""
    cat = galsim.Catalog(dir='config_input', file_name='catalog.fits')
    np.testing.assert_equal(cat.ncols, 12)
    np.testing.assert_equal(cat.nobjects, 3)
    np.testing.assert_equal(cat.isFits(), True)
    np.testing.assert_equal(cat.get(1, 'angle2'), 15)
    np.testing.assert_equal(cat.getInt(1, 'angle2'), 15)
    np.testing.assert_almost_equal(cat.getFloat(2, 'float2'), 8000)
    do_pickle(cat)

    # An equivalent explicit construction should compare equal.
    same = galsim.Catalog('catalog.fits', 'config_input', hdu=1, file_type='FITS')
    assert same == cat
    assert len(same) == same.nobjects == same.getNObjects() == cat.nobjects
    assert same.ncols == cat.ncols

    # Construction errors
    assert_raises(galsim.GalSimValueError, galsim.Catalog, 'catalog.fits', file_type='invalid')
    assert_raises((IOError, OSError), galsim.Catalog, 'catalog.fits')  # Wrong dir
    assert_raises((IOError, OSError), galsim.Catalog, 'invalid.fits', 'config_input')

    # Indexing errors: rows are integers, columns are names.
    assert_raises(IndexError, cat.get, -1, 'angle2')
    assert_raises(IndexError, cat.get, 3, 'angle2')
    assert_raises(KeyError, cat.get, 1, 'invalid')
    assert_raises(KeyError, cat.get, 1, 3)
    assert_raises(IndexError, cat.get, 'val', 'angle2')

    # Non-default hdu, addressed by number...
    by_num = galsim.Catalog('catalog2.fits', 'config_input', hdu=2)
    assert len(by_num) == by_num.nobjects == cat.nobjects
    np.testing.assert_array_equal(by_num.data, cat.data)
    assert by_num != cat
    do_pickle(by_num)

    # ...and by name.
    by_name = galsim.Catalog('catalog2.fits', 'config_input', hdu='data')
    assert by_name.nobjects == cat.nobjects
    np.testing.assert_array_equal(by_name.data, cat.data)
    assert by_name != cat
    assert by_name != by_num  # Even though these are the same, it doesn't know 'data' is hdu 2.
    do_pickle(by_name)
def test_extra_truth():
    """Test the extra truth field """
    nobjects = 6
    # Config that draws a single row of Gaussian stamps and also writes a
    # truth catalog into hdu 1 of the output FITS file.
    config = {
        'image' : {
            'type' : 'Tiled',
            'nx_tiles' : nobjects,
            'ny_tiles' : 1,
            'stamp_xsize' : 32,
            'stamp_ysize' : 32,
            'random_seed' : 1234,
        },
        'gal' : {
            'type' : 'Gaussian',
            'sigma' : { 'type': 'Random', 'min': 1, 'max': 2 },
            'flux' : '$100 * obj_num',
        },
        'output' : {
            'type' : 'Fits',
            'file_name' : 'output/test_truth.fits',
            'truth' : {
                'hdu' : 1,
                'columns' : {
                    'object_id' : 'obj_num',
                    'flux' : 'gal.flux',
                    # Check several different ways to do calculations
                    'sigma' : '@gal.sigma',  # The @ is not required, but allowed.
                    'hlr' : '$(@gal.sigma) * np.sqrt(2.*math.log(2))',
                    'fwhm' : '$(@gal).fwhm',
                    'pos' : 'image_pos'
                }
            }
        }
    }
    galsim.config.Process(config)

    # Recompute the expected sigma values by replaying the per-object seeds:
    # with random_seed=1234, object k uses seed 1234 + k + 1.
    sigma_list = []
    for k in range(nobjects):
        ud = galsim.UniformDeviate(1234 + k + 1)
        # 'Random' with min=1, max=2 maps a uniform deviate to [1, 2).
        sigma = ud() + 1.
        flux = k * 100
        gal = galsim.Gaussian(sigma=sigma, flux=flux)
        sigma_list.append(sigma)
    sigma = np.array(sigma_list)

    # Read back the truth catalog and check each column.
    file_name = 'output/test_truth.fits'
    cat = galsim.Catalog(file_name, hdu=1)
    obj_num = np.array(range(nobjects))
    np.testing.assert_almost_equal(cat.data['object_id'], obj_num)
    np.testing.assert_almost_equal(cat.data['flux'], 100. * obj_num)
    np.testing.assert_almost_equal(cat.data['sigma'], sigma)
    np.testing.assert_almost_equal(cat.data['hlr'], sigma * galsim.Gaussian._hlr_factor)
    np.testing.assert_almost_equal(cat.data['fwhm'], sigma * galsim.Gaussian._fwhm_factor)
    # Position columns are split into pos.x / pos.y; tiles are 32 px wide,
    # so the k-th stamp center is at x = 32*k + 16.5.
    np.testing.assert_almost_equal(cat.data['pos.x'], obj_num * 32 + 16.5)
    np.testing.assert_almost_equal(cat.data['pos.y'], 16.5)
def import_params():
    """Return the test input catalog read from input/test_input.asc.

    TODO: package input_maker.py and import to avoid this save and load step.
    """
    return galsim.Catalog(os.path.join('input', 'test_input.asc'))
def test_single_row():
    """Test that we can read catalogs with just one row (#394)
    """
    # Write a minimal one-row ASCII catalog, then read it back.
    path = "output/test394.txt"
    with open(path, 'w') as out:
        out.write("3 4 5\n")
    single = galsim.Catalog(path, file_type='ascii')
    # The data must come back as a 2-d (1, 3) array of strings, not a 1-d array.
    expected = np.array([["3","4","5"]])
    np.testing.assert_array_equal(
        single.data, expected,
        err_msg="galsim.Catalog.__init__ failed to read 1-row file")
def test_single_row():
    """Test that we can read catalogs with just one row (#394)
    """
    import time
    t1 = time.time()
    # Write a minimal one-row ASCII catalog.
    filename = "output/test394.txt"
    with open(filename, 'w') as f:
        f.write("3 4 5\n")
    cat = galsim.Catalog(filename, file_type='ascii')
    # ASCII data comes back as strings with shape (1, 3), not a 1-d array.
    np.testing.assert_array_equal(
        cat.data, np.array([["3","4","5"]]),
        err_msg="galsim.Catalog.__init__ failed to read 1-row file")
    t2 = time.time()
    # Bug fix: the Python-2-only print statement is a SyntaxError on Python 3;
    # use the print() function.
    print('time for %s = %.2f'%(funcname(),t2-t1))
def test_int_value():
    """Test various ways to generate an int value
    """
    import time
    t1 = time.time()
    config = {
        # Input files used by the 'Catalog' and 'Dict' value types below.
        'input' : {
            'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' },
            'dict' : [
                { 'dir' : 'config_input', 'file_name' : 'dict.p' },
                { 'dir' : 'config_input', 'file_name' : 'dict.yaml' },
                { 'dir' : 'config_input', 'file_name' : 'dict.json' } ]
        },

        'val1' : 9,
        'val2' : float(8.7),  # Reading as int will drop the fraction.
        'val3' : -400.8,      # Not floor - negatives will round up.
        'str1' : '8',
        'str2' : '-2',
        'cat1' : { 'type' : 'Catalog' , 'col' : 2 },
        'cat2' : { 'type' : 'Catalog' , 'col' : 3 },
        'ran1' : { 'type' : 'Random', 'min' : 0, 'max' : 3 },
        'ran2' : { 'type' : 'Random', 'min' : -5, 'max' : 10 },
        'seq1' : { 'type' : 'Sequence' },
        'seq2' : { 'type' : 'Sequence', 'step' : 3 },
        'seq3' : { 'type' : 'Sequence', 'first' : 1, 'step' : 5 },
        'seq4' : { 'type' : 'Sequence', 'first' : 10, 'step' : -2 },
        'seq5' : { 'type' : 'Sequence', 'first' : 1, 'last' : 2, 'repeat' : 2 },
        'list1' : { 'type' : 'List', 'items' : [ 73, 8, 3 ] },
        'list2' : { 'type' : 'List',
                    'items' : [ 6, 8, 1, 7, 3, 5, 1, 0, 6, 3, 8, 2 ],
                    'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } },
        'dict1' : { 'type' : 'Dict', 'key' : 'i' },
        'dict2' : { 'type' : 'Dict', 'num' : 1, 'key' : 'i' },
        'dict3' : { 'type' : 'Dict', 'num' : 2, 'key' : 'i' }
    }

    galsim.config.ProcessInput(config)

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, int)[0]
    np.testing.assert_equal(val1, 9)
    val2 = galsim.config.ParseValue(config,'val2',config, int)[0]
    np.testing.assert_equal(val2, 8)
    val3 = galsim.config.ParseValue(config,'val3',config, int)[0]
    np.testing.assert_equal(val3, -400)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, int)[0]
    np.testing.assert_equal(str1, 8)
    str2 = galsim.config.ParseValue(config,'str2',config, int)[0]
    np.testing.assert_equal(str2, -2)

    # Test values read from a Catalog.
    # (Also construct the catalog directly to check the file is readable.)
    input_cat = galsim.Catalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    for k in range(5):
        config['seq_index'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, int)[0])
        cat2.append(galsim.config.ParseValue(config,'cat2',config, int)[0])
    # The sequence wraps around after the rows in the catalog run out.
    np.testing.assert_array_equal(cat1, [ 9, 0, -4, 9, 0 ])
    np.testing.assert_array_equal(cat2, [ -3, 8, 17, -3, 8 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234)  # A second copy starting with the same seed.
    for k in range(6):
        ran1 = galsim.config.ParseValue(config,'ran1',config, int)[0]
        np.testing.assert_equal(ran1, int(math.floor(rng() * 4)))
        ran2 = galsim.config.ParseValue(config,'ran2',config, int)[0]
        np.testing.assert_equal(ran2, int(math.floor(rng() * 16))-5)

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    seq3 = []
    seq4 = []
    seq5 = []
    for k in range(6):
        config['seq_index'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, int)[0])
        seq2.append(galsim.config.ParseValue(config,'seq2',config, int)[0])
        seq3.append(galsim.config.ParseValue(config,'seq3',config, int)[0])
        seq4.append(galsim.config.ParseValue(config,'seq4',config, int)[0])
        seq5.append(galsim.config.ParseValue(config,'seq5',config, int)[0])
    np.testing.assert_array_equal(seq1, [ 0, 1, 2, 3, 4, 5 ])
    np.testing.assert_array_equal(seq2, [ 0, 3, 6, 9, 12, 15 ])
    np.testing.assert_array_equal(seq3, [ 1, 6, 11, 16, 21, 26 ])
    np.testing.assert_array_equal(seq4, [ 10, 8, 6, 4, 2, 0 ])
    np.testing.assert_array_equal(seq5, [ 1, 1, 2, 2, 1, 1 ])

    # Test values taken from a List
    list1 = []
    list2 = []
    for k in range(5):
        config['seq_index'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, int)[0])
        list2.append(galsim.config.ParseValue(config,'list2',config, int)[0])
    np.testing.assert_array_equal(list1, [ 73, 8, 3, 73, 8 ])
    np.testing.assert_array_equal(list2, [ 8, 0, 3, 8, 8 ])

    # Test values read from a Dict.
    # (Again construct directly to check the files are readable.)
    pickle_dict = galsim.Dict(dir='config_input', file_name='dict.p')
    yaml_dict = galsim.Dict(dir='config_input', file_name='dict.yaml')
    json_dict = galsim.Dict(dir='config_input', file_name='dict.json')
    # Bug fix: previously this local was named 'dict', shadowing the builtin.
    dict_vals = []
    dict_vals.append(galsim.config.ParseValue(config,'dict1',config, int)[0])
    dict_vals.append(galsim.config.ParseValue(config,'dict2',config, int)[0])
    dict_vals.append(galsim.config.ParseValue(config,'dict3',config, int)[0])
    np.testing.assert_array_equal(dict_vals, [ 17, 1, -23 ])

    t2 = time.time()
    # Bug fix: Python-2-only print statement -> print() function.
    print('time for %s = %.2f'%(funcname(),t2-t1))
def test_output_catalog():
    """Test basic operations on Catalog.

    Builds an OutputCatalog with one column of every supported type, writes it
    as both ASCII and FITS, and reads each back with galsim.Catalog.
    """
    import time
    t1 = time.time()

    names = [ 'float1', 'float2', 'int1', 'int2', 'bool1', 'bool2',
              'str1', 'str2', 'str3', 'str4',
              'angle', 'posi', 'posd', 'shear' ]
    types = [ float, float, int, int, bool, bool, str, str, str, str,
              galsim.Angle, galsim.PositionI, galsim.PositionD, galsim.Shear ]
    out_cat = galsim.OutputCatalog(names, types)

    # Rows may be given as either lists or tuples; values are coerced to the
    # declared column types (e.g. 1 -> True, 17.0 -> 17).
    out_cat.addRow([ 1.234, 4.131, 9, -3, 1, True,
                     "He's", '"ceased', 'to', 'be"',
                     1.2 * galsim.degrees,
                     galsim.PositionI(5, 6), galsim.PositionD(0.3, -0.4),
                     galsim.Shear(g1=0.2, g2=0.1) ])
    out_cat.addRow((2.345, -900, 0.0, 8, False, 0,
                    "bleedin'", '"bereft', 'of', 'life"',
                    11 * galsim.arcsec,
                    galsim.PositionI(-35, 106), galsim.PositionD(23.5, 55.1),
                    galsim.Shear(e1=-0.1, e2=0.15)))
    out_cat.addRow([ 3.4560001, 8.e3, -4, 17.0, 1, 0,
                     'demised!', '"kicked', 'the', 'bucket"',
                     0.4 * galsim.radians,
                     galsim.PositionI(88, 99), galsim.PositionD(-0.99, -0.88),
                     galsim.Shear() ])

    # First the ASCII version.
    out_cat.write(dir='output', file_name='catalog.dat')
    cat = galsim.Catalog(dir='output', file_name='catalog.dat')
    # Angle, PositionI/D, and Shear columns are expanded into scalar
    # components, so the 14 declared columns become 17 on disk.
    np.testing.assert_equal(cat.ncols, 17)
    np.testing.assert_equal(cat.nobjects, 3)
    np.testing.assert_equal(cat.isFits(), False)
    np.testing.assert_almost_equal(cat.getFloat(1, 0), 2.345)
    np.testing.assert_almost_equal(cat.getFloat(2, 1), 8000.)
    np.testing.assert_equal(cat.getInt(0, 2), 9)
    np.testing.assert_equal(cat.getInt(2, 3), 17)
    np.testing.assert_equal(cat.getInt(2, 4), 1)
    np.testing.assert_equal(cat.getInt(0, 5), 1)
    np.testing.assert_equal(cat.get(2, 6), 'demised!')
    np.testing.assert_equal(cat.get(1, 7), '"bereft')
    np.testing.assert_equal(cat.get(0, 8), 'to')
    np.testing.assert_equal(cat.get(2, 9), 'bucket"')
    # Angles are written in radians.
    np.testing.assert_almost_equal(cat.getFloat(0, 10), 1.2 * galsim.degrees / galsim.radians)
    np.testing.assert_almost_equal(cat.getInt(1, 11), -35)
    np.testing.assert_almost_equal(cat.getInt(1, 12), 106)
    np.testing.assert_almost_equal(cat.getFloat(2, 13), -0.99)
    np.testing.assert_almost_equal(cat.getFloat(2, 14), -0.88)
    np.testing.assert_almost_equal(cat.getFloat(0, 15), 0.2)
    np.testing.assert_almost_equal(cat.getFloat(0, 16), 0.1)

    # Next the FITS version, where the expanded columns get dotted names.
    out_cat.write(dir='output', file_name='catalog.fits')
    cat = galsim.Catalog(dir='output', file_name='catalog.fits')
    np.testing.assert_equal(cat.ncols, 17)
    np.testing.assert_equal(cat.nobjects, 3)
    np.testing.assert_equal(cat.isFits(), True)
    np.testing.assert_almost_equal(cat.getFloat(1, 'float1'), 2.345)
    np.testing.assert_almost_equal(cat.getFloat(2, 'float2'), 8000.)
    np.testing.assert_equal(cat.getInt(0, 'int1'), 9)
    np.testing.assert_equal(cat.getInt(2, 'int2'), 17)
    np.testing.assert_equal(cat.getInt(2, 'bool1'), 1)
    np.testing.assert_equal(cat.getInt(0, 'bool2'), 1)
    np.testing.assert_equal(cat.get(2, 'str1'), 'demised!')
    np.testing.assert_equal(cat.get(1, 'str2'), '"bereft')
    np.testing.assert_equal(cat.get(0, 'str3'), 'to')
    np.testing.assert_equal(cat.get(2, 'str4'), 'bucket"')
    np.testing.assert_almost_equal(cat.getFloat(0, 'angle.rad'),
                                   1.2 * galsim.degrees / galsim.radians)
    np.testing.assert_equal(cat.getInt(1, 'posi.x'), -35)
    np.testing.assert_equal(cat.getInt(1, 'posi.y'), 106)
    np.testing.assert_almost_equal(cat.getFloat(2, 'posd.x'), -0.99)
    np.testing.assert_almost_equal(cat.getFloat(2, 'posd.y'), -0.88)
    np.testing.assert_almost_equal(cat.getFloat(0, 'shear.g1'), 0.2)
    np.testing.assert_almost_equal(cat.getFloat(0, 'shear.g2'), 0.1)

    # Check pickling
    do_pickle(out_cat)
    out_cat2 = galsim.OutputCatalog(names, types)  # No data.
    do_pickle(out_cat2)

    t2 = time.time()
    # Bug fix: Python-2-only print statement -> print() function.
    print('time for %s = %.2f' % (funcname(), t2 - t1))
def test_psf():
    """Test the two kinds of PSF files we have in DES.

    Checks that both the PSFEx model and the shapelet (fitpsf) model reproduce
    the size and shape measured from a real star in the corresponding image.
    """
    data_dir = 'des_data'
    psfex_file = "DECam_00154912_12_psfcat.psf"
    fitpsf_file = "DECam_00154912_12_fitpsf.fits"
    wcs_file = "DECam_00154912_12_header.fits"
    wcs = galsim.FitsWCS(wcs_file, dir=data_dir)

    # We don't require that the files in example_data_dir have been downloaded.  If they
    # haven't, then we just directly set the comparison values that we want here.
    example_data_dir = '../examples/des/des_data'
    cat_file = "DECam_00154912_12_cat.fits"
    image_file = "DECam_00154912_12.fits.fz"

    try:
        cat = galsim.Catalog(cat_file, hdu=2, dir=example_data_dir)
        # Per-object measurements from the catalog (column names follow the
        # usual SExtractor conventions -- presumably SExtractor output).
        size = numpy.array( [cat.getFloat(i, 'FLUX_RADIUS') for i in range(cat.nobjects)])
        mag = numpy.array( [cat.getFloat(i, 'MAG_AUTO') for i in range(cat.nobjects)])
        flags = numpy.array( [cat.getInt(i, 'FLAGS') for i in range(cat.nobjects)])
        index = numpy.array(range(cat.nobjects))
        xvals = numpy.array( [cat.getFloat(i, 'X_IMAGE') for i in range(cat.nobjects)])
        yvals = numpy.array( [cat.getFloat(i, 'Y_IMAGE') for i in range(cat.nobjects)])

        # Pick bright small objects as probable stars
        mask = (flags == 0) & (mag < 14) & (mag > 13) & (size > 2) & (size < 2.5)
        idx = numpy.argsort(size[mask])

        # This choice of a star is fairly isolated from neighbors, isn't too near an edge or a tape
        # bump, and doesn't have any noticeable image artifacts in its vicinity.
        x = xvals[mask][idx][27]
        y = yvals[mask][idx][27]
        print('Using x,y = ', x, y)
        image_pos = galsim.PositionD(x, y)
        print('size, mag = ', size[mask][idx][27], mag[mask][idx][27])

        # Measure the reference size/shape from a background-subtracted
        # 32x32 stamp around the chosen star.
        data = galsim.fits.read(image_file, dir=example_data_dir)
        b = galsim.BoundsI(int(x) - 15, int(x) + 16, int(y) - 15, int(y) + 16)
        data_stamp = data[b]
        header = galsim.fits.FitsHeader(image_file, dir=example_data_dir)
        sky_level = header['SKYBRITE']
        data_stamp -= sky_level
        raw_meas = data_stamp.FindAdaptiveMom()
        print('raw_meas = ', raw_meas)
        ref_size = raw_meas.moments_sigma
        ref_shape = raw_meas.observed_shape
        print('ref size: ', ref_size)
        print('ref shape: ', ref_shape)
    except IOError:
        # The example data files are not available: fall back to precomputed
        # reference values for the same star.
        x, y = 1195.64074707, 1276.63427734
        image_pos = galsim.PositionD(x, y)
        b = galsim.BoundsI(int(x) - 15, int(x) + 16, int(y) - 15, int(y) + 16)
        ref_size = 1.80668628216
        ref_shape = galsim.Shear(g1=0.022104322221, g2=-0.130925191715)

    # First the PSFEx model using the wcs_file to get the model is sky coordinates.
    psfex = galsim.des.DES_PSFEx(psfex_file, wcs_file, dir=data_dir)
    psf = psfex.getPSF(image_pos)

    # The getLocalWCS function should return a local WCS
    assert psfex.getLocalWCS(image_pos).isLocal()

    # Draw the postage stamp image
    # Note: the PSF already includes the pixel response, so draw with method 'no_pixel'.
    stamp = psf.drawImage(wcs=wcs.local(image_pos), bounds=b, method='no_pixel')
    print('wcs = ', wcs.local(image_pos))
    meas = stamp.FindAdaptiveMom()
    print('meas = ', meas)
    print('pixel scale = ', stamp.wcs.minLinearScale(image_pos=image_pos))
    print('cf sizes: ', ref_size, meas.moments_sigma)
    print('cf shapes: ', ref_shape, meas.observed_shape)

    # The agreement for a single star is not great of course, not even 2 decimals.
    # Divide by 2 to get agreement at 2 dp.
    numpy.testing.assert_almost_equal(meas.moments_sigma / 2, ref_size / 2, decimal=2,
                                      err_msg="PSFEx size doesn't match")
    numpy.testing.assert_almost_equal(meas.observed_shape.g1 / 2, ref_shape.g1 / 2, decimal=2,
                                      err_msg="PSFEx shape.g1 doesn't match")
    numpy.testing.assert_almost_equal(meas.observed_shape.g2 / 2, ref_shape.g2 / 2, decimal=2,
                                      err_msg="PSFEx shape.g2 doesn't match")

    # Repeat without the wcs_file argument, so the model is in chip coordinates.
    # Also check the functionality where the file is already open.
    with pyfits.open(os.path.join(data_dir, psfex_file)) as hdu_list:
        psfex = galsim.des.DES_PSFEx(hdu_list[1])
    psf = psfex.getPSF(image_pos)

    # In this case, the getLocalWCS function won't return anything useful.
    assert psfex.getLocalWCS(image_pos) is None

    # Draw the postage stamp image.  This time in image coords, so pixel_scale = 1.0.
    stamp = psf.drawImage(bounds=b, scale=1.0, method='no_pixel')
    meas = stamp.FindAdaptiveMom()
    numpy.testing.assert_almost_equal(
        meas.moments_sigma / 2, ref_size / 2, decimal=2,
        err_msg="no-wcs PSFEx size doesn't match")
    numpy.testing.assert_almost_equal(
        meas.observed_shape.g1 / 2, ref_shape.g1 / 2, decimal=2,
        err_msg="no-wcs PSFEx shape.g1 doesn't match")
    numpy.testing.assert_almost_equal(
        meas.observed_shape.g2 / 2, ref_shape.g2 / 2, decimal=2,
        err_msg="no-wcs PSFEx shape.g2 doesn't match")

    # Now the shapelet PSF model.  This model is already in sky coordinates, so no wcs_file needed.
    fitpsf = galsim.des.DES_Shapelet(os.path.join(data_dir, fitpsf_file))
    psf = fitpsf.getPSF(image_pos)

    # Draw the postage stamp image
    # Again, the PSF already includes the pixel response.
    stamp = psf.drawImage(wcs=wcs.local(image_pos), bounds=b, method='no_pixel')
    meas = stamp.FindAdaptiveMom()
    numpy.testing.assert_almost_equal(
        meas.moments_sigma / 2, ref_size / 2, decimal=2,
        err_msg="Shapelet PSF size doesn't match")
    numpy.testing.assert_almost_equal(
        meas.observed_shape.g1 / 2, ref_shape.g1 / 2, decimal=2,
        err_msg="Shapelet PSF shape.g1 doesn't match")
    numpy.testing.assert_almost_equal(
        meas.observed_shape.g2 / 2, ref_shape.g2 / 2, decimal=2,
        err_msg="Shapelet PSF shape.g2 doesn't match")
def main(argv):
    """
    Make a fits image cube using parameters from an input catalog
      - The number of images in the cube matches the number of rows in the catalog.
      - Each image size is computed automatically by GalSim based on the Nyquist size.
      - Only galaxies.  No stars.
      - PSF is Moffat
      - Each galaxy is bulge plus disk: deVaucouleurs + Exponential.
      - A fraction of the disk flux is placed into point sources, which can model
        knots of star formation.
      - The catalog's columns are:
         0 PSF beta (Moffat exponent)
         1 PSF FWHM
         2 PSF e1
         3 PSF e2
         4 PSF trunc
         5 Disc half-light-radius
         6 Disc e1
         7 Disc e2
         8 Bulge half-light-radius
         9 Bulge e1
        10 Bulge e2
        11 Galaxy dx (the two components have same center)
        12 Galaxy dy
      - Applied shear is the same for each galaxy
      - Noise is Poisson using a nominal sky value of 1.e6
    """
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    logger = logging.getLogger("demo4")

    # Define some parameters we'll use below and make directories if needed.
    cat_file_name = os.path.join('input', 'galsim_default_input.asc')
    if not os.path.isdir('output'):
        os.mkdir('output')
    multi_file_name = os.path.join('output', 'multi.fits')

    random_seed = 8241573
    sky_level = 1.e6      # ADU / arcsec^2
    pixel_scale = 1.0     # arcsec / pixel  (size units in input catalog are pixels)
    gal_flux = 1.e6       # arbitrary choice, makes nice (not too) noisy images
    gal_g1 = -0.009       #
    gal_g2 = 0.011        #

    # the fraction of flux in each component
    # 40% is in the bulge, 60% in a disk.  70% of that disk light is placed
    # into point sources distributed as a random walk
    bulge_frac = 0.4
    disk_frac = 0.6
    knot_frac = 0.42
    smooth_disk_frac = 0.18

    # number of knots of star formation.  To simulate a nice irregular (all the
    # flux is in knots) we find ~100 is a minimum number needed, but we will
    # just use 10 here to make the demo run fast.
    n_knots = 10

    xsize = 64  # pixels
    ysize = 64  # pixels

    logger.info('Starting demo script 4 using:')
    logger.info(' - parameters taken from catalog %r', cat_file_name)
    logger.info(' - Moffat PSF (parameters from catalog)')
    logger.info(' - pixel scale = %.2f', pixel_scale)
    logger.info(' - Bulge + Disc galaxies (parameters from catalog)')
    # Bug fix: this message previously hard-coded "100" point sources while
    # n_knots is 10; report the actual number used.
    logger.info(' - %d Point sources, distributed as random walk', n_knots)
    logger.info(' - Applied gravitational shear = (%.3f,%.3f)', gal_g1, gal_g2)
    logger.info(' - Poisson noise (sky level = %.1e).', sky_level)

    # Read in the input catalog
    cat = galsim.Catalog(cat_file_name)

    # save a list of the galaxy images in the "images" list variable:
    images = []
    for k in range(cat.nobjects):
        # Initialize the (pseudo-)random number generator that we will be using below.
        # Use a different random seed for each object to get different noise realizations.
        # Using sequential random seeds here is safer than it sounds.  We use Mersenne Twister
        # random number generators that are designed to be used with this kind of seeding.
        # However, to be extra safe, we actually initialize one random number generator with this
        # seed, generate and throw away two random values with that, and then use the next value
        # to seed a completely different Mersenne Twister RNG.  The result is that successive
        # RNGs created this way produce very independent random number streams.
        rng = galsim.BaseDeviate(random_seed + k + 1)

        # Take the Moffat beta from the first column (called 0) of the input catalog:
        # Note: cat.get(k,col) returns a string.  To get the value as a float, use either
        #       cat.getFloat(k,col) or float(cat.get(k,col))
        beta = cat.getFloat(k, 0)
        # A Moffat's size may be either scale_radius, fwhm, or half_light_radius.
        # Here we use fwhm, taking from the catalog as well.
        fwhm = cat.getFloat(k, 1)
        # A Moffat profile may be truncated if desired.
        # The units for this are expected to be arcsec (or specifically -- whatever units
        # you are using for all the size values as defined by the pixel_scale).
        trunc = cat.getFloat(k, 4)
        # Note: You may omit the flux, since the default is flux=1.
        psf = galsim.Moffat(beta=beta, fwhm=fwhm, trunc=trunc)

        # Take the (e1, e2) shape parameters from the catalog as well.
        psf = psf.shear(e1=cat.getFloat(k, 2), e2=cat.getFloat(k, 3))

        # Galaxy is a bulge + disk(+knots) with parameters taken from the catalog:
        # put some fraction of the disk light into knots of star formation
        disk_hlr = cat.getFloat(k, 5)
        disk_e1 = cat.getFloat(k, 6)
        disk_e2 = cat.getFloat(k, 7)
        bulge_hlr = cat.getFloat(k, 8)
        bulge_e1 = cat.getFloat(k, 9)
        bulge_e2 = cat.getFloat(k, 10)

        smooth_disk = galsim.Exponential(flux=smooth_disk_frac, half_light_radius=disk_hlr)
        knots = galsim.RandomKnots(n_knots, half_light_radius=disk_hlr, flux=knot_frac, rng=rng)
        disk = galsim.Add([smooth_disk, knots])
        disk = disk.shear(e1=disk_e1, e2=disk_e2)

        # the rest of the light goes into the bulge
        bulge = galsim.DeVaucouleurs(flux=bulge_frac, half_light_radius=bulge_hlr)
        bulge = bulge.shear(e1=bulge_e1, e2=bulge_e2)

        # The flux of an Add object is the sum of the component fluxes.
        # Note that in demo3.py, a similar addition was performed by the binary operator "+".
        gal = galsim.Add([disk, bulge])

        # This flux may be overridden by withFlux.  The relative fluxes of the components
        # remains the same, but the total flux is set to gal_flux.
        gal = gal.withFlux(gal_flux)
        gal = gal.shear(g1=gal_g1, g2=gal_g2)

        # The center of the object is normally placed at the center of the postage stamp image.
        # You can change that with shift:
        gal = gal.shift(dx=cat.getFloat(k, 11), dy=cat.getFloat(k, 12))

        final = galsim.Convolve([psf, gal])

        # Draw the profile
        image = galsim.ImageF(xsize, ysize)
        final.drawImage(image, scale=pixel_scale)

        # Add Poisson noise to the image:
        image.addNoise(galsim.PoissonNoise(rng, sky_level * pixel_scale**2))

        logger.info('Drew image for object at row %d in the input catalog' % k)

        # Add the image to our list of images
        images.append(image)

    # Now write the images to a multi-extension fits file.  Each image will be in its own HDU.
    galsim.fits.writeMulti(images, multi_file_name)
    logger.info('Images written to multi-extension fits file %r', multi_file_name)
def main(argv):
    """Render PSFEx and shapelet PSF models for a set of DES chips.

    For each chip it draws every unflagged catalog object with both PSF models
    and writes the two resulting images (plus sky background and CCD noise).
    Bug fix applied: all Python-2-only print statements converted to print()
    function calls; behavior otherwise unchanged.
    """
    # For the file names, I pick a particular exposure.  The directory structure corresponds
    # to where the files are stored on folio at UPenn.
    root = 'DECam_00154912'

    # Directories in the Galsim repo
    img_dir = 'des_data'
    wl_dir = 'des_data'

    # Directories on Mike's laptop
    #img_dir = '/Users/Mike/Astro/des/SV/DECam_00154912_wl'
    #wl_dir = '/Users/Mike/Astro/des/SV/DECam_00154912_wl'

    # Directories on folio
    #img_dir = '/data3/DECAM/SV/DECam_154912'
    #wl_dir = '/data3/DECAM/wl/DECam_00154912_wl'

    # Set which chips to run on
    first_chip = 1
    last_chip = 62
    #first_chip = 12
    #last_chip = 12

    out_dir = 'output'

    # The random seed, so the results are deterministic
    random_seed = 1339201

    # Catalog column names and image header keys we read below.
    x_col = 'X_IMAGE'
    y_col = 'Y_IMAGE'
    flux_col = 'FLUX_AUTO'
    flag_col = 'FLAGS'
    xsize_key = 'NAXIS1'
    ysize_key = 'NAXIS2'
    pixel_scale_key = 'PIXSCAL1'
    sky_level_key = 'SKYBRITE'
    sky_sigma_key = 'SKYSIGMA'

    # Make output directory if not already present.
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    for chipnum in range(first_chip, last_chip + 1):
        print('Start chip ', chipnum)

        # Setup the file names
        image_file = '%s_%02d.fits.fz' % (root, chipnum)
        cat_file = '%s_%02d_cat.fits' % (root, chipnum)
        psfex_file = '%s_%02d_psfcat.psf' % (root, chipnum)
        fitpsf_file = '%s_%02d_fitpsf.fits' % (root, chipnum)
        psfex_image_file = '%s_%02d_psfex_image.fits' % (root, chipnum)
        fitpsf_image_file = '%s_%02d_fitpsf_image.fits' % (root, chipnum)

        # Get some parameters about the image from the data image header information
        image_header = galsim.FitsHeader(image_file, dir=img_dir)
        xsize = image_header[xsize_key]
        ysize = image_header[ysize_key]
        pixel_scale = image_header[pixel_scale_key]
        sky_sigma = image_header[sky_sigma_key]  # This is sqrt(variance) / pixel
        sky_level = image_header[sky_level_key]  # This is in ADU / pixel
        gain = sky_level / sky_sigma**2  # an approximation, since gain is missing.

        # Setup the images:
        psfex_image = galsim.ImageF(xsize, ysize)
        psfex_image.scale = pixel_scale
        fitpsf_image = galsim.ImageF(xsize, ysize)
        fitpsf_image.scale = pixel_scale

        # Read the other input files
        cat = galsim.Catalog(cat_file, hdu=2, dir=img_dir)
        psfex = galsim.des.DES_PSFEx(psfex_file, dir=wl_dir)
        fitpsf = galsim.des.DES_Shapelet(fitpsf_file, dir=wl_dir)

        nobj = cat.nobjects
        print('Catalog has ', nobj, ' objects')

        for k in range(nobj):
            # The usual random number generator using a different seed for each galaxy.
            # I'm not actually using the rng for object creation (everything comes from the
            # input files), but the rng that matches the config version is here just in case.
            rng = galsim.BaseDeviate(random_seed + k)

            # Skip objects with a non-zero flag
            flag = cat.getInt(k, flag_col)
            if flag:
                continue

            # Get the position from the galaxy catalog
            x = cat.getFloat(k, x_col)
            y = cat.getFloat(k, y_col)
            ix = int(math.floor(x + 0.5))
            iy = int(math.floor(y + 0.5))
            dx = x - ix
            dy = y - iy
            image_pos = galsim.PositionD(x, y)
            print(' pos = ', image_pos)

            # Also get the flux of the galaxy from the catalog
            flux = cat.getFloat(k, flux_col)

            # Define the pixel
            pix = galsim.Pixel(pixel_scale)

            # First do the PSFEx image:
            if True:
                # Define the PSF profile
                psf = psfex.getPSF(image_pos, pixel_scale)
                psf.setFlux(flux)

                # Make the final image, convolving with pix
                final = galsim.Convolve([pix, psf])

                # Apply partial-pixel shift
                final.applyShift(dx * pixel_scale, dy * pixel_scale)

                # Draw the postage stamp image
                stamp = final.draw(dx=pixel_scale)

                # Recenter the stamp at the desired position:
                stamp.setCenter(ix, iy)

                # Find overlapping bounds
                bounds = stamp.bounds & psfex_image.bounds
                psfex_image[bounds] += stamp[bounds]

            # Next do the ShapeletPSF image:
            # If the position is not within the interpolation bounds, fitpsf will
            # raise an exception telling us to skip this object.  Easier to check here.
            if fitpsf.bounds.includes(image_pos):
                # Define the PSF profile
                psf = fitpsf.getPSF(image_pos)
                psf.setFlux(flux)

                # Galsim doesn't have WCS functionality yet.
                # But for the shapelet PSF, it is important, since it really describes the
                # PSF in sky coordinates, not pixel coordinates.  But to first order,
                # the DES WCS is 90 degrees rotated from the sky, so for now, just apply
                # a 90 degree rotation to get the images to look approximately correct.
                # Eventually, we'll want to have a DES_WCS that can read the full WCS from
                # the fits header and account for all of the field distortion correctly.
                psf.applyRotation(-90 * galsim.degrees)

                # Make the final image, convolving with pix
                final = galsim.Convolve([pix, psf])

                # Apply partial-pixel shift
                final.applyShift(dx * pixel_scale, dy * pixel_scale)

                # Draw the postage stamp image
                stamp = final.draw(dx=pixel_scale)

                # Recenter the stamp at the desired position:
                stamp.setCenter(ix, iy)

                # Find overlapping bounds
                bounds = stamp.bounds & fitpsf_image.bounds
                fitpsf_image[bounds] += stamp[bounds]
            else:
                pass
                #print('...not in fitpsf.bounds')

        # Add background level
        psfex_image += sky_level
        fitpsf_image += sky_level

        # Add noise
        rng = galsim.BaseDeviate(random_seed + nobj)
        noise = galsim.CCDNoise(rng, gain=gain)
        psfex_image.addNoise(noise)
        # Reset the random seed to match the action of the yaml version
        # Note: the difference between seed and reset matters here.
        # reset would sever the connection between this rng instance and the one stored in noise.
        # seed changes the seed while keeping the connection between them.
        rng.seed(random_seed + nobj)
        fitpsf_image.addNoise(noise)

        # Now write the images to disk.
        psfex_image.write(psfex_image_file, dir=out_dir)
        fitpsf_image.write(fitpsf_image_file, dir=out_dir)
        print('Wrote images to %s and %s' % (
            os.path.join(out_dir, psfex_image_file),
            os.path.join(out_dir, fitpsf_image_file)))
def simImage(sourceDir,imFile,catFile,psfFile,outFile):
    """
    Create a simulated image using PSFEx modelled PSFs and noise properties of the source image

    Input:
       sourceDir: input directory for the data files
       imFile: input image file name
       catFile: catalogue file (output from SExtractor)
       psfFile: psf model (output from PSFEx)
       outFile: name of output file for image

    Output: writes to fits file.

    The catFile must contain the fields X_IMAGE, Y_IMAGE, FLUX_APER (or the code
    must be changed to the equivalent for positions of sources and integrated flux).
    """

    # Load necessary stuff from files.
    # Note that the MCS image files have two HDUs, one with
    # the WCS information, one with the image information.
    galHdr1 = galsim.FitsHeader(imFile, dir=sourceDir, hdu=0)
    galHdr2 = galsim.FitsHeader(imFile, dir=sourceDir, hdu=1)
    cat = galsim.Catalog(catFile, hdu=2, dir=sourceDir, file_type="FITS")
    psfex=des.DES_PSFEx(psfFile,imFile,dir=sourceDir)
    image=galsim.fits.read(imFile,sourceDir,hdu=1)

    # Set up the image.  Match the (currently trivial) WCS with the image, and
    # create a blank image.
    wcs = galsim.FitsWCS(header=galHdr1)
    xSize=galHdr2['NAXIS1']
    ySize=galHdr2['NAXIS2']
    simImage = galsim.Image(xSize, ySize, wcs=wcs)

    # Some definitions for extracting catalogue columns.
    xCol="X_IMAGE"
    yCol="Y_IMAGE"
    fluxCol="FLUX_APER"

    # Get noise statistics.  Read in the catalogue positions to estimate the centre
    # of the image in whatever rotation it has.  This is so we get the noise statistics
    # from the mask region, excluding the rest of the image.
    xVals=cat.data[xCol]
    yVals=cat.data[yCol]

    xMean=int(xVals.mean())
    yMean=int(yVals.mean())

    # Half-width (in pixels) of the square sub-region used for the sky estimate.
    radius=1800

    # NOTE(review): numpy arrays index as [row, col] == [y, x]; here xMean selects
    # rows and yMean columns — confirm this is the intended axis convention.
    subIm=image.array[int(xMean-radius):int(xMean+radius),int(yMean-radius):int(yMean+radius)]

    # 5-sigma clip to reject sources, leaving (approximately) pure sky pixels.
    im,a,b=sigmaclip(subIm.ravel(),5,5)
    skyLevel=im.mean()
    skySigma=im.std()
    gain = skyLevel / skySigma**2  # this definition from the galsim tutorials

    nobj = cat.nobjects
    print('Catalog has ',nobj,' objects. Sky level is ',int(skyLevel),' Sky sigma is ',int(skySigma))

    # Now cycle over the catalogue.
    for k in range(nobj):

        # Get position and flux.  The *5 flux scaling is an ad-hoc brightening;
        # NOTE(review): confirm the factor of 5 is intentional.
        x = cat.getFloat(k,xCol)
        y = cat.getFloat(k,yCol)
        flux = cat.getFloat(k,fluxCol)*5

        # Some position calculation for the galsim routines.
        # + 0.5 to account for even-size postage stamps.
        x=x+0.5
        y=y+0.5

        ix = int(math.floor(x+0.5))
        iy = int(math.floor(y+0.5))
        dx = x-ix
        dy = y-iy

        imagePos = galsim.PositionD(x,y)
        offset = galsim.PositionD(dx,dy)

        # Calculate PSF for given position and flux.
        psf=psfex.getPSF(imagePos).withFlux(flux)

        # Make image.  method='no_pixel' because the PSFEx model already
        # includes the pixel response.
        stamp = psf.drawImage(wcs=wcs.local(imagePos), offset=offset, method='no_pixel')
        stamp.setCenter(ix,iy)

        # And place on image, taking into consideration edges.
        bounds = stamp.bounds & simImage.bounds
        simImage[bounds] += stamp[bounds]

    # Now that we've done all the spots, add noise.

    # Background sky level.
    simImage += skyLevel

    # CCD noise.
    random_seed = 1339201
    rng=galsim.BaseDeviate(random_seed)
    noise = galsim.CCDNoise(rng, gain=gain)

    # Poissonian noise.
    # NOTE(review): CCDNoise already applies Poisson noise at the given gain;
    # adding PoissonNoise with sky_level on top applies extra sky noise — confirm
    # both are intended.
    simImage.addNoise(noise)
    noise = galsim.PoissonNoise(rng, sky_level=skyLevel)
    simImage.addNoise(noise)

    # And dump to a file.  Will overwrite existing file.
    simImage.write(outFile,clobber=True)
def test_float_value():
    """Test various ways to generate a float value

    Each key in ``config`` below exercises one value-generation type
    (direct value, string conversion, Catalog, Random, RandomGaussian,
    RandomDistribution, Sequence, List, Dict).  The random-value checks
    work by running a second, identically-seeded deviate alongside the
    config machinery, so the order of ParseValue calls matters.
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' },
                    'dict' : [ { 'dir' : 'config_input', 'file_name' : 'dict.p' },
                               { 'dir' : 'config_input', 'file_name' : 'dict.yaml' },
                               { 'dir' : 'config_input', 'file_name' : 'dict.json' } ] },

        'val1' : 9.9,
        'val2' : int(400),
        'str1' : '8.73',
        'str2' : '2.33e-9',
        'str3' : '6.e-9',
        'cat1' : { 'type' : 'Catalog' , 'col' : 0 },
        'cat2' : { 'type' : 'Catalog' , 'col' : 1 },
        'ran1' : { 'type' : 'Random', 'min' : 0.5, 'max' : 3 },
        'ran2' : { 'type' : 'Random', 'min' : -5, 'max' : 0 },
        'gauss1' : { 'type' : 'RandomGaussian', 'sigma' : 1 },
        'gauss2' : { 'type' : 'RandomGaussian', 'sigma' : 3, 'mean' : 4 },
        'gauss3' : { 'type' : 'RandomGaussian', 'sigma' : 1.5, 'min' : -2, 'max' : 2 },
        'gauss4' : { 'type' : 'RandomGaussian', 'sigma' : 0.5, 'min' : 0, 'max' : 0.8 },
        'gauss5' : { 'type' : 'RandomGaussian', 'sigma' : 0.3, 'mean' : 0.5, 'min' : 0, 'max' : 0.5 },
        'dist1' : { 'type' : 'RandomDistribution', 'function' : 'config_input/distribution.txt',
                    'interpolant' : 'linear' },
        'dist2' : { 'type' : 'RandomDistribution', 'function' : 'config_input/distribution2.txt',
                    'interpolant' : 'linear' },
        'dist3' : { 'type' : 'RandomDistribution', 'function' : 'x*x',
                    'x_min' : 0., 'x_max' : 2.0 },
        'seq1' : { 'type' : 'Sequence' },
        'seq2' : { 'type' : 'Sequence', 'step' : 0.1 },
        'seq3' : { 'type' : 'Sequence', 'first' : 1.5, 'step' : 0.5 },
        'seq4' : { 'type' : 'Sequence', 'first' : 10, 'step' : -2 },
        'seq5' : { 'type' : 'Sequence', 'first' : 1, 'last' : 2.1, 'repeat' : 2 },
        'list1' : { 'type' : 'List', 'items' : [ 73, 8.9, 3.14 ] },
        'list2' : { 'type' : 'List',
                    'items' : [ 0.6, 1.8, 2.1, 3.7, 4.3, 5.5, 6.1, 7.0, 8.6, 9.3, 10.8, 11.2 ],
                    'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } },
        'dict1' : { 'type' : 'Dict', 'key' : 'f' },
        'dict2' : { 'type' : 'Dict', 'num' : 1, 'key' : 'f' },
        'dict3' : { 'type' : 'Dict', 'num' : 2, 'key' : 'f' },
        'dict4' : { 'type' : 'Dict', 'num' : 1, 'key' : 'noise.models.1.gain' }
    }

    galsim.config.ProcessInput(config)

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, float)[0]
    np.testing.assert_almost_equal(val1, 9.9)
    val2 = galsim.config.ParseValue(config,'val2',config, float)[0]
    np.testing.assert_almost_equal(val2, 400)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, float)[0]
    np.testing.assert_almost_equal(str1, 8.73)
    str2 = galsim.config.ParseValue(config,'str2',config, float)[0]
    np.testing.assert_almost_equal(str2, 2.33e-9)
    str3 = galsim.config.ParseValue(config,'str3',config, float)[0]
    np.testing.assert_almost_equal(str3, 6.0e-9)

    # Test values read from a Catalog
    # (input_cat is read directly as a sanity check that the file loads;
    # the config machinery reads the same file through the 'input' entry.)
    input_cat = galsim.Catalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    # seq_index drives which catalog row is used; 5 > 3 rows, so values wrap.
    for k in range(5):
        config['seq_index'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, float)[0])
        cat2.append(galsim.config.ParseValue(config,'cat2',config, float)[0])

    np.testing.assert_array_almost_equal(cat1, [ 1.234, 2.345, 3.456, 1.234, 2.345 ])
    np.testing.assert_array_almost_equal(cat2, [ 4.131, -900, 8000, 4.131, -900 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234) # A second copy starting with the same seed.
    for k in range(6):
        ran1 = galsim.config.ParseValue(config,'ran1',config, float)[0]
        # min + u * (max - min)
        np.testing.assert_almost_equal(ran1, rng() * 2.5 + 0.5)

        ran2 = galsim.config.ParseValue(config,'ran2',config, float)[0]
        np.testing.assert_almost_equal(ran2, rng() * 5 - 5)

    # Test values generated from a Gaussian deviate
    gd = galsim.GaussianDeviate(rng)
    for k in range(6):
        gauss1 = galsim.config.ParseValue(config,'gauss1',config, float)[0]
        gd.setMean(0)
        gd.setSigma(1)
        np.testing.assert_almost_equal(gauss1, gd())

        gauss2 = galsim.config.ParseValue(config,'gauss2',config, float)[0]
        gd.setMean(4)
        gd.setSigma(3)
        np.testing.assert_almost_equal(gauss2, gd())

        # gauss3 rejection-samples until the value lies in [min, max];
        # the mirror deviate must replicate the same rejection loop.
        gauss3 = galsim.config.ParseValue(config,'gauss3',config, float)[0]
        gd.setMean(0)
        gd.setSigma(1.5)
        gd_val = gd()
        while math.fabs(gd_val) > 2:
            gd_val = gd()
        np.testing.assert_almost_equal(gauss3, gd_val)

        # gauss4: min == 0 with mean 0, so the implementation folds with abs().
        gauss4 = galsim.config.ParseValue(config,'gauss4',config, float)[0]
        gd.setMean(0)
        gd.setSigma(0.5)
        gd_val = math.fabs(gd())
        while gd_val > 0.8:
            gd_val = math.fabs(gd())
        np.testing.assert_almost_equal(gauss4, gd_val)

        # gauss5: max == mean, so values above the mean are reflected about it.
        gauss5 = galsim.config.ParseValue(config,'gauss5',config, float)[0]
        gd.setMean(0.5)
        gd.setSigma(0.3)
        gd_val = gd()
        if gd_val > 0.5:
            gd_val = 1-gd_val
        while gd_val < 0:
            gd_val = gd()
            if gd_val > 0.5:
                gd_val = 1-gd_val
        np.testing.assert_almost_equal(gauss5, gd_val)

    # Test values generated from a distribution in a file
    dd=galsim.DistDeviate(rng,function='config_input/distribution.txt',interpolant='linear')
    for k in range(6):
        dist1 = galsim.config.ParseValue(config,'dist1',config, float)[0]
        np.testing.assert_almost_equal(dist1, dd())
    dd=galsim.DistDeviate(rng,function='config_input/distribution2.txt',interpolant='linear')
    for k in range(6):
        dist2 = galsim.config.ParseValue(config,'dist2',config, float)[0]
        np.testing.assert_almost_equal(dist2, dd())
    dd=galsim.DistDeviate(rng,function=lambda x: x*x,x_min=0.,x_max=2.)
    for k in range(6):
        dist3 = galsim.config.ParseValue(config,'dist3',config, float)[0]
        np.testing.assert_almost_equal(dist3, dd())

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    seq3 = []
    seq4 = []
    seq5 = []
    for k in range(6):
        config['seq_index'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, float)[0])
        seq2.append(galsim.config.ParseValue(config,'seq2',config, float)[0])
        seq3.append(galsim.config.ParseValue(config,'seq3',config, float)[0])
        seq4.append(galsim.config.ParseValue(config,'seq4',config, float)[0])
        seq5.append(galsim.config.ParseValue(config,'seq5',config, float)[0])

    np.testing.assert_array_almost_equal(seq1, [ 0, 1, 2, 3, 4, 5 ])
    np.testing.assert_array_almost_equal(seq2, [ 0, 0.1, 0.2, 0.3, 0.4, 0.5 ])
    np.testing.assert_array_almost_equal(seq3, [ 1.5, 2, 2.5, 3, 3.5, 4 ])
    np.testing.assert_array_almost_equal(seq4, [ 10, 8, 6, 4, 2, 0 ])
    np.testing.assert_array_almost_equal(seq5, [ 1, 1, 2, 2, 1, 1 ])

    # Test values taken from a List
    list1 = []
    list2 = []
    for k in range(5):
        config['seq_index'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, float)[0])
        list2.append(galsim.config.ParseValue(config,'list2',config, float)[0])

    np.testing.assert_array_almost_equal(list1, [ 73, 8.9, 3.14, 73, 8.9 ])
    # list2 uses a descending Sequence (10, 7, 4, 1, -2 -> wraps) as its index.
    np.testing.assert_array_almost_equal(list2, [ 10.8, 7.0, 4.3, 1.8, 10.8 ])

    # Test values read from a Dict
    dict = []
    dict.append(galsim.config.ParseValue(config,'dict1',config, float)[0])
    dict.append(galsim.config.ParseValue(config,'dict2',config, float)[0])
    dict.append(galsim.config.ParseValue(config,'dict3',config, float)[0])
    dict.append(galsim.config.ParseValue(config,'dict4',config, float)[0])
    np.testing.assert_array_almost_equal(dict, [ 23.17, 0.1, -17.23, 1.9 ])

    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_str_value(): """Test various ways to generate a str value """ import time t1 = time.time() config = { 'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' }, 'dict' : [ { 'dir' : 'config_input', 'file_name' : 'dict.p' }, { 'dir' : 'config_input', 'file_name' : 'dict.json' }, { 'dir' : 'config_input', 'file_name' : 'dict.yaml' } ] }, 'val1' : -93, 'val2' : True, 'val3' : 123.8, 'str1' : "Norwegian", 'str2' : u"Blue", 'cat1' : { 'type' : 'Catalog' , 'col' : 6 }, 'cat2' : { 'type' : 'Catalog' , 'col' : 7 }, 'list1' : { 'type' : 'List', 'items' : [ 'Beautiful', 'plumage!', 'Ay?' ] }, 'file1' : { 'type' : 'NumberedFile', 'root' : 'file', 'num' : 5, 'ext' : '.fits.fz', 'digits' : 3 }, 'file2' : { 'type' : 'NumberedFile', 'root' : 'file', 'num' : 5 }, 'fs1' : { 'type' : 'FormattedStr', 'format' : 'realgal_type%02d_dilation%d.fits', 'items' : [ { 'type' : 'Sequence' , 'repeat' : 3 }, { 'type' : 'Sequence' , 'nitems' : 3 } ] }, 'fs2' : { 'type' : 'FormattedStr', 'format' : '%%%d %i %x %o%i %lf=%g=%e %hi%u %r%s %%', 'items' : [4, 5, 12, 9, 9, math.pi, math.pi, math.pi, 11, -11, 'Goodbye cruel world.', ', said Pink.'] }, 'dict1' : { 'type' : 'Dict', 'key' : 's' }, 'dict2' : { 'type' : 'Dict', 'num' : 1, 'key' : 's' }, 'dict3' : { 'type' : 'Dict', 'num' : 2, 'key' : 's' } } test_yaml = True try: galsim.config.ProcessInput(config) except: # We don't require PyYAML as a dependency, so if this fails, just remove the YAML dict. 
del config['input']['dict'][2] galsim.config.ProcessInput(config) test_yaml = False # Test direct values val1 = galsim.config.ParseValue(config,'val1',config, str)[0] np.testing.assert_equal(val1, '-93') val2 = galsim.config.ParseValue(config,'val2',config, str)[0] np.testing.assert_equal(val2, 'True') val3 = galsim.config.ParseValue(config,'val3',config, str)[0] np.testing.assert_equal(val3, '123.8') # Test conversions from strings str1 = galsim.config.ParseValue(config,'str1',config, str)[0] np.testing.assert_equal(str1, 'Norwegian') str2 = galsim.config.ParseValue(config,'str2',config, str)[0] np.testing.assert_equal(str2, 'Blue') # Test values read from a Catalog input_cat = galsim.Catalog(dir='config_input', file_name='catalog.txt') cat1 = [] cat2 = [] config['index_key'] = 'obj_num' for k in range(3): config['obj_num'] = k cat1.append(galsim.config.ParseValue(config,'cat1',config, str)[0]) cat2.append(galsim.config.ParseValue(config,'cat2',config, str)[0]) np.testing.assert_array_equal(cat1, ["He's", "bleedin'", "demised!"]) # Note: white space in the input catalog always separates columns. ' and " don't work. 
np.testing.assert_array_equal(cat2, ['"ceased', '"bereft', '"kicked']) # Test values taken from a List list1 = [] config['index_key'] = 'image_num' for k in range(5): config['image_num'] = k list1.append(galsim.config.ParseValue(config,'list1',config, str)[0]) np.testing.assert_array_equal(list1, ['Beautiful', 'plumage!', 'Ay?', 'Beautiful', 'plumage!']) # Test values built using NumberedFile file1 = galsim.config.ParseValue(config,'file1',config, str)[0] np.testing.assert_equal(file1, 'file005.fits.fz') file2 = galsim.config.ParseValue(config,'file2',config, str)[0] np.testing.assert_equal(file2, 'file5') # Test value built from FormattedStr config['index_key'] = 'obj_num' for k in range(9): config['obj_num'] = k type = k / 3 dil = k % 3 fs1 = galsim.config.ParseValue(config,'fs1',config, str)[0] np.testing.assert_equal(fs1, 'realgal_type%02d_dilation%d.fits'%(type,dil)) fs2 = galsim.config.ParseValue(config,'fs2',config, str)[0] np.testing.assert_equal(fs2, "%4 5 c 119 3.141593=3.14159=3.141593e+00 11-11 'Goodbye cruel world.', said Pink. %") # Test values read from a Dict dict = [] dict.append(galsim.config.ParseValue(config,'dict1',config, str)[0]) dict.append(galsim.config.ParseValue(config,'dict2',config, str)[0]) if test_yaml: dict.append(galsim.config.ParseValue(config,'dict3',config, str)[0]) else: dict.append('Brian') np.testing.assert_array_equal(dict, [ 'Life', 'of', 'Brian' ]) t2 = time.time() print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_bool_value():
    """Test various ways to generate a bool value

    Covers direct values, string conversions ('true'/'0'/'yes'/'No'),
    Catalog columns, Random, RandomBinomial, Sequence (alternating
    booleans), List, and Dict.  The random checks mirror the config's
    deviate with an identically-seeded copy, so call order matters.
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' },
                    'dict' : [ { 'dir' : 'config_input', 'file_name' : 'dict.p' },
                               { 'dir' : 'config_input', 'file_name' : 'dict.json' },
                               { 'dir' : 'config_input', 'file_name' : 'dict.yaml' } ] },

        'val1' : True,
        'val2' : 1,
        'val3' : 0.0,
        'str1' : 'true',
        'str2' : '0',
        'str3' : 'yes',
        'str4' : 'No',
        'cat1' : { 'type' : 'Catalog' , 'col' : 4 },
        'cat2' : { 'type' : 'Catalog' , 'col' : 5 },
        'ran1' : { 'type' : 'Random' },
        'dev1' : { 'type' : 'RandomBinomial', 'N' : 1 },
        'dev2' : { 'type' : 'RandomBinomial', 'N' : 1, 'p' : 0.5 },
        'dev3' : { 'type' : 'RandomBinomial', 'p' : 0.2 },
        'seq1' : { 'type' : 'Sequence' },
        'seq2' : { 'type' : 'Sequence', 'first' : True, 'repeat' : 2 },
        'list1' : { 'type' : 'List', 'items' : [ 'yes', 'no', 'no' ] },
        'list2' : { 'type' : 'List',
                    'items' : [ 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0 ],
                    'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } },
        'dict1' : { 'type' : 'Dict', 'key' : 'b' },
        'dict2' : { 'type' : 'Dict', 'num' : 1, 'key' : 'b' },
        'dict3' : { 'type' : 'Dict', 'num' : 2, 'key' : 'b' }
    }

    test_yaml = True
    try:
        galsim.config.ProcessInput(config)
    except:
        # We don't require PyYAML as a dependency, so if this fails, just remove the YAML dict.
        del config['input']['dict'][2]
        galsim.config.ProcessInput(config)
        test_yaml = False

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, bool)[0]
    np.testing.assert_equal(val1, True)
    val2 = galsim.config.ParseValue(config,'val2',config, bool)[0]
    np.testing.assert_equal(val2, True)
    val3 = galsim.config.ParseValue(config,'val3',config, bool)[0]
    np.testing.assert_equal(val3, False)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, bool)[0]
    np.testing.assert_equal(str1, True)
    str2 = galsim.config.ParseValue(config,'str2',config, bool)[0]
    np.testing.assert_equal(str2, False)
    str3 = galsim.config.ParseValue(config,'str3',config, bool)[0]
    np.testing.assert_equal(str3, True)
    str4 = galsim.config.ParseValue(config,'str4',config, bool)[0]
    np.testing.assert_equal(str4, False)

    # Test values read from a Catalog
    # (input_cat is read directly as a sanity check that the file loads.)
    input_cat = galsim.Catalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    config['index_key'] = 'obj_num'
    # 5 > 3 rows in the catalog, so indices wrap around.
    for k in range(5):
        config['obj_num'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, bool)[0])
        cat2.append(galsim.config.ParseValue(config,'cat2',config, bool)[0])

    np.testing.assert_array_equal(cat1, [ 1, 0, 1, 1, 0 ])
    np.testing.assert_array_equal(cat2, [ 1, 0, 0, 1, 0 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234) # A second copy starting with the same seed.
    for k in range(6):
        config['obj_num'] = k
        ran1 = galsim.config.ParseValue(config,'ran1',config, bool)[0]
        np.testing.assert_equal(ran1, rng() < 0.5)

    # Test values generated from binomial deviate
    for k in range(6):
        config['obj_num'] = k
        dev = galsim.BinomialDeviate(rng, N=1)
        dev1 = galsim.config.ParseValue(config,'dev1',config, bool)[0]
        np.testing.assert_almost_equal(dev1, dev())

        dev = galsim.BinomialDeviate(rng, N=1, p=0.5)
        dev2 = galsim.config.ParseValue(config,'dev2',config, bool)[0]
        np.testing.assert_almost_equal(dev2, dev())

        dev = galsim.BinomialDeviate(rng, N=1, p=0.2)
        dev3 = galsim.config.ParseValue(config,'dev3',config, bool)[0]
        np.testing.assert_almost_equal(dev3, dev())

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    config['index_key'] = 'obj_num'
    for k in range(6):
        config['obj_num'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, bool)[0])
        seq2.append(galsim.config.ParseValue(config,'seq2',config, bool)[0])

    # Boolean sequences alternate; seq2 starts at True and repeats each value twice.
    np.testing.assert_array_equal(seq1, [ 0, 1, 0, 1, 0, 1 ])
    np.testing.assert_array_equal(seq2, [ 1, 1, 0, 0, 1, 1 ])

    # Test values taken from a List
    list1 = []
    list2 = []
    config['index_key'] = 'file_num'
    for k in range(5):
        config['file_num'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, bool)[0])
        list2.append(galsim.config.ParseValue(config,'list2',config, bool)[0])

    np.testing.assert_array_equal(list1, [ 1, 0, 0, 1, 0 ])
    # list2 uses a descending Sequence (10, 7, 4, 1, -2 -> wraps) as its index.
    np.testing.assert_array_equal(list2, [ 0, 1, 1, 1, 0 ])

    # Test values read from a Dict
    dict = []
    dict.append(galsim.config.ParseValue(config,'dict1',config, bool)[0])
    dict.append(galsim.config.ParseValue(config,'dict2',config, bool)[0])
    if test_yaml:
        dict.append(galsim.config.ParseValue(config,'dict3',config, bool)[0])
    else:
        dict.append(False)
    np.testing.assert_array_equal(dict, [ True, False, False ])

    t2 = time.time()
    print 'time for %s = %.2f'%(funcname(),t2-t1)
def test_int_value(): """Test various ways to generate an int value """ import time t1 = time.time() config = { 'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' }, 'dict' : [ { 'dir' : 'config_input', 'file_name' : 'dict.p' }, { 'dir' : 'config_input', 'file_name' : 'dict.json' }, { 'dir' : 'config_input', 'file_name' : 'dict.yaml' } ] }, 'val1' : 9, 'val2' : float(8.7), # Reading as int will drop the fraction. 'val3' : -400.8, # Not floor - negatives will round up. 'str1' : '8', 'str2' : '-2', 'cat1' : { 'type' : 'Catalog' , 'col' : 2 }, 'cat2' : { 'type' : 'Catalog' , 'col' : 3 }, 'ran1' : { 'type' : 'Random', 'min' : 0, 'max' : 3 }, 'ran2' : { 'type' : 'Random', 'min' : -5, 'max' : 10 }, 'dev1' : { 'type' : 'RandomPoisson', 'mean' : 137 }, 'dev2' : { 'type' : 'RandomBinomial', 'N' : 17 }, 'dev3' : { 'type' : 'RandomBinomial', 'N' : 17, 'p' : 0.2 }, 'seq1' : { 'type' : 'Sequence' }, 'seq2' : { 'type' : 'Sequence', 'step' : 3 }, 'seq3' : { 'type' : 'Sequence', 'first' : 1, 'step' : 5 }, 'seq4' : { 'type' : 'Sequence', 'first' : 10, 'step' : -2 }, 'seq5' : { 'type' : 'Sequence', 'first' : 1, 'last' : 2, 'repeat' : 2 }, 'seq_file' : { 'type' : 'Sequence', 'index_key' : 'file_num' }, 'seq_image' : { 'type' : 'Sequence', 'index_key' : 'image_num' }, 'seq_obj' : { 'type' : 'Sequence', 'index_key' : 'obj_num' }, 'seq_obj2' : { 'type' : 'Sequence', 'index_key' : 'obj_num_in_file' }, 'list1' : { 'type' : 'List', 'items' : [ 73, 8, 3 ] }, 'list2' : { 'type' : 'List', 'items' : [ 6, 8, 1, 7, 3, 5, 1, 0, 6, 3, 8, 2 ], 'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } }, 'dict1' : { 'type' : 'Dict', 'key' : 'i' }, 'dict2' : { 'type' : 'Dict', 'num' : 1, 'key' : 'i' }, 'dict3' : { 'type' : 'Dict', 'num' : 2, 'key' : 'i' }, 'sum1' : { 'type' : 'Sum', 'items' : [ 72.3, '2', { 'type' : 'Dict', 'key' : 'i' } ] } } test_yaml = True try: galsim.config.ProcessInput(config) except: # We don't require PyYAML as a dependency, so if this fails, 
just remove the YAML dict. del config['input']['dict'][2] galsim.config.ProcessInput(config) test_yaml = False # Test direct values val1 = galsim.config.ParseValue(config,'val1',config, int)[0] np.testing.assert_equal(val1, 9) val2 = galsim.config.ParseValue(config,'val2',config, int)[0] np.testing.assert_equal(val2, 8) val3 = galsim.config.ParseValue(config,'val3',config, int)[0] np.testing.assert_equal(val3, -400) # Test conversions from strings str1 = galsim.config.ParseValue(config,'str1',config, int)[0] np.testing.assert_equal(str1, 8) str2 = galsim.config.ParseValue(config,'str2',config, int)[0] np.testing.assert_equal(str2, -2) # Test values read from a Catalog input_cat = galsim.Catalog(dir='config_input', file_name='catalog.txt') cat1 = [] cat2 = [] config['index_key'] = 'image_num' for k in range(5): config['image_num'] = k cat1.append(galsim.config.ParseValue(config,'cat1',config, int)[0]) cat2.append(galsim.config.ParseValue(config,'cat2',config, int)[0]) np.testing.assert_array_equal(cat1, [ 9, 0, -4, 9, 0 ]) np.testing.assert_array_equal(cat2, [ -3, 8, 17, -3, 8 ]) # Test values generated from a uniform deviate rng = galsim.UniformDeviate(1234) config['rng'] = galsim.UniformDeviate(1234) # A second copy starting with the same seed. 
for k in range(6): config['obj_num'] = k ran1 = galsim.config.ParseValue(config,'ran1',config, int)[0] np.testing.assert_equal(ran1, int(math.floor(rng() * 4))) ran2 = galsim.config.ParseValue(config,'ran2',config, int)[0] np.testing.assert_equal(ran2, int(math.floor(rng() * 16))-5) # Test values generated from various other deviates for k in range(6): config['obj_num'] = k dev = galsim.PoissonDeviate(rng, mean=137) dev1 = galsim.config.ParseValue(config,'dev1',config, int)[0] np.testing.assert_almost_equal(dev1, dev()) dev = galsim.BinomialDeviate(rng, N=17) dev2 = galsim.config.ParseValue(config,'dev2',config, int)[0] np.testing.assert_almost_equal(dev2, dev()) dev = galsim.BinomialDeviate(rng, N=17, p=0.2) dev3 = galsim.config.ParseValue(config,'dev3',config, int)[0] np.testing.assert_almost_equal(dev3, dev()) # Test values generated from a Sequence seq1 = [] seq2 = [] seq3 = [] seq4 = [] seq5 = [] config['index_key'] = 'obj_num' for k in range(6): config['obj_num'] = k seq1.append(galsim.config.ParseValue(config,'seq1',config, int)[0]) seq2.append(galsim.config.ParseValue(config,'seq2',config, int)[0]) seq3.append(galsim.config.ParseValue(config,'seq3',config, int)[0]) seq4.append(galsim.config.ParseValue(config,'seq4',config, int)[0]) seq5.append(galsim.config.ParseValue(config,'seq5',config, int)[0]) np.testing.assert_array_equal(seq1, [ 0, 1, 2, 3, 4, 5 ]) np.testing.assert_array_equal(seq2, [ 0, 3, 6, 9, 12, 15 ]) np.testing.assert_array_equal(seq3, [ 1, 6, 11, 16, 21, 26 ]) np.testing.assert_array_equal(seq4, [ 10, 8, 6, 4, 2, 0 ]) np.testing.assert_array_equal(seq5, [ 1, 1, 2, 2, 1, 1 ]) # This is more like how the indexing actually happens in a regular config run: seq_file = [] seq_image = [] seq_obj = [] seq_obj2 = [] config['file_num'] = 0 config['image_num'] = 0 config['obj_num'] = 0 for file_num in range(3): config['start_obj_num'] = config['obj_num'] for image_num in range(2): for obj_num in range(5): 
seq_file.append(galsim.config.ParseValue(config,'seq_file',config, int)[0]) seq_image.append(galsim.config.ParseValue(config,'seq_image',config, int)[0]) seq_obj.append(galsim.config.ParseValue(config,'seq_obj',config, int)[0]) seq_obj2.append(galsim.config.ParseValue(config,'seq_obj2',config, int)[0]) config['obj_num'] += 1 config['image_num'] += 1 config['file_num'] += 1 np.testing.assert_array_equal(seq_file, [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 ]) np.testing.assert_array_equal(seq_image, [ 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5 ]) np.testing.assert_array_equal(seq_obj, [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29 ]) np.testing.assert_array_equal(seq_obj2, [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ]) # Test values taken from a List list1 = [] list2 = [] config['index_key'] = 'obj_num' for k in range(5): config['obj_num'] = k list1.append(galsim.config.ParseValue(config,'list1',config, int)[0]) list2.append(galsim.config.ParseValue(config,'list2',config, int)[0]) np.testing.assert_array_equal(list1, [ 73, 8, 3, 73, 8 ]) np.testing.assert_array_equal(list2, [ 8, 0, 3, 8, 8 ]) # Test values read from a Dict dict = [] dict.append(galsim.config.ParseValue(config,'dict1',config, int)[0]) dict.append(galsim.config.ParseValue(config,'dict2',config, int)[0]) if test_yaml: dict.append(galsim.config.ParseValue(config,'dict3',config, int)[0]) else: dict.append(1) np.testing.assert_array_equal(dict, [ 17, -23, 1 ]) sum1 = galsim.config.ParseValue(config,'sum1', config, int)[0] np.testing.assert_almost_equal(sum1, 72 + 2 + 17) t2 = time.time() print 'time for %s = %.2f'%(funcname(),t2-t1)
def main(argv):
    """
    Make a fits image cube using parameters from an input catalog
      - The number of images in the cube matches the number of rows in the catalog.
      - Each image size is computed automatically by GalSim based on the Nyquist size.
      - Only galaxies.  No stars.
      - PSF is Moffat
      - Each galaxy is bulge plus disk: deVaucouleurs + Exponential.
      - The catalog's columns are:
         0 PSF beta (Moffat exponent)
         1 PSF FWHM
         2 PSF e1
         3 PSF e2
         4 PSF trunc
         5 Disc half-light-radius
         6 Disc e1
         7 Disc e2
         8 Bulge half-light-radius
         9 Bulge e1
        10 Bulge e2
        11 Galaxy dx (the two components have same center)
        12 Galaxy dy
      - Applied shear is the same for each galaxy
      - Noise is Poisson using a nominal sky value of 1.e6
    """
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    logger = logging.getLogger("demo4")

    # Define some parameters we'll use below and make directories if needed.
    cat_file_name = os.path.join('..', 'examples', 'input', 'galsim_default_input.asc')
    if not os.path.isdir('output'):
        os.mkdir('output')
    multi_file_name = os.path.join('output', 'multi.fits')

    random_seed = 8241573
    sky_level = 1.e6                # ADU / arcsec^2
    pixel_scale = 1.0               # arcsec / pixel  (size units in input catalog are pixels)
    gal_flux = 1.e6                 # arbitrary choice, makes nice (not too) noisy images
    gal_g1 = -0.009                 #
    gal_g2 = 0.011                  #
    xsize = 64                      # pixels
    ysize = 64                      # pixels

    logger.info('Starting demo script 4 using:')
    logger.info('    - parameters taken from catalog %r', cat_file_name)
    logger.info('    - Moffat PSF (parameters from catalog)')
    logger.info('    - pixel scale = %.2f', pixel_scale)
    logger.info('    - Bulge + Disc galaxies (parameters from catalog)')
    logger.info('    - Applied gravitational shear = (%.3f,%.3f)', gal_g1, gal_g2)
    logger.info('    - Poisson noise (sky level = %.1e).', sky_level)

    # Read in the input catalog
    cat = galsim.Catalog(cat_file_name)

    # save a list of the galaxy images in the "images" list variable:
    images = []
    # NOTE(review): this demo uses the old in-place mutating API
    # (applyShear, applyShift, setFlux, draw); statement order is semantic,
    # and these calls were removed in later GalSim versions — confirm the
    # GalSim version this is targeting.
    for k in range(cat.nobjects):
        # Initialize the (pseudo-)random number generator that we will be using below.
        # Use a different random seed for each object to get different noise realizations.
        rng = galsim.BaseDeviate(random_seed + k)

        # Take the Moffat beta from the first column (called 0) of the input catalog:
        # Note: cat.get(k,col) returns a string.  To get the value as a float, use either
        #       cat.getFloat(k,col) or float(cat.get(k,col))
        beta = cat.getFloat(k, 0)
        # A Moffat's size may be either scale_radius, fwhm, or half_light_radius.
        # Here we use fwhm, taking from the catalog as well.
        fwhm = cat.getFloat(k, 1)
        # A Moffat profile may be truncated if desired
        # The units for this are expected to be arcsec (or specifically -- whatever units
        # you are using for all the size values as defined by the pixel_scale).
        trunc = cat.getFloat(k, 4)
        # Note: You may omit the flux, since the default is flux=1.
        psf = galsim.Moffat(beta=beta, fwhm=fwhm, trunc=trunc)

        # Take the (e1, e2) shape parameters from the catalog as well.
        psf.applyShear(e1=cat.getFloat(k, 2), e2=cat.getFloat(k, 3))

        pix = galsim.Pixel(pixel_scale)

        # Galaxy is a bulge + disk with parameters taken from the catalog:
        disk = galsim.Exponential(flux=0.6, half_light_radius=cat.getFloat(k, 5))
        disk.applyShear(e1=cat.getFloat(k, 6), e2=cat.getFloat(k, 7))

        bulge = galsim.DeVaucouleurs(flux=0.4, half_light_radius=cat.getFloat(k, 8))
        bulge.applyShear(e1=cat.getFloat(k, 9), e2=cat.getFloat(k, 10))

        # The flux of an Add object is the sum of the component fluxes.
        # Note that in demo3.py, a similar addition was performed by the binary operator "+".
        gal = galsim.Add([disk, bulge])

        # This flux may be overridden by setFlux.  The relative fluxes of the components
        # remains the same, but the total flux is set to gal_flux.
        gal.setFlux(gal_flux)
        gal.applyShear(g1=gal_g1, g2=gal_g2)

        # The center of the object is normally placed at the center of the postage stamp image.
        # You can change that with applyShift:
        gal.applyShift(dx=cat.getFloat(k, 11), dy=cat.getFloat(k, 12))

        final = galsim.Convolve([psf, pix, gal])

        # Draw the profile
        image = galsim.ImageF(xsize, ysize)
        final.draw(image, dx=pixel_scale)

        # Add Poisson noise to the image:
        image.addNoise(galsim.PoissonNoise(rng, sky_level * pixel_scale**2))

        logger.info('Drew image for object at row %d in the input catalog' % k)

        # Add the image to our list of images
        images.append(image)

    # Now write the images to a multi-extension fits file.  Each image will be in its own HDU.
    galsim.fits.writeMulti(images, multi_file_name)
    logger.info('Images written to multi-extension fits file %r', multi_file_name)
def test_output_catalog():
    """Test basic operations on OutputCatalog.

    Builds an OutputCatalog with one column of every supported type (floats,
    ints, bools, strings, Angle, PositionI/D, Shear), then round-trips it
    through both ASCII and FITS files and checks every value read back.
    """
    # One column of each supported output type.
    names = [ 'float1', 'float2', 'int1', 'int2', 'bool1', 'bool2',
              'str1', 'str2', 'str3', 'str4',
              'angle', 'posi', 'posd', 'shear' ]
    # Types may be given as Python types, numpy dtype strings, or galsim classes.
    types = [ float, 'f8', int, 'i4', bool, 'bool', str, 'str', 'S', 'S0',
              galsim.Angle, galsim.PositionI, galsim.PositionD, galsim.Shear ]
    out_cat = galsim.OutputCatalog(names, types)

    # Note: some values are deliberately given as a "wrong" but convertible type
    # (e.g. 1 for a bool, 0.0 for an int) to exercise the type coercion.
    row1 = (1.234, 4.131, 9, -3, 1, True, "He's", '"ceased', 'to', 'be"',
            1.2 * galsim.degrees, galsim.PositionI(5, 6), galsim.PositionD(0.3, -0.4),
            galsim.Shear(g1=0.2, g2=0.1))
    row2 = (2.345, -900, 0.0, 8, False, 0, "bleedin'", '"bereft', 'of', 'life"',
            11 * galsim.arcsec, galsim.PositionI(-35, 106), galsim.PositionD(23.5, 55.1),
            galsim.Shear(e1=-0.1, e2=0.15))
    row3 = (3.4560001, 8.e3, -4, 17.0, 1, 0, 'demised!', '"kicked', 'the', 'bucket"',
            0.4 * galsim.radians, galsim.PositionI(88, 99), galsim.PositionD(-0.99, -0.88),
            galsim.Shear())
    out_cat.addRow(row1)
    out_cat.addRow(row2)
    out_cat.addRow(row3)

    # Check the basic accessors and their property equivalents.
    assert out_cat.names == out_cat.getNames() == names
    assert out_cat.types == out_cat.getTypes() == types
    assert len(out_cat) == out_cat.getNObjects() == out_cat.nobjects == 3
    assert out_cat.getNCols() == out_cat.ncols == len(names)

    # Can also set the types after the fact.
    # MJ: I think this used to be used by the "truth" catalog extra output.
    #     But it doesn't seem to be used there anymore.  Probably not by anything then.
    #     I'm not sure how useful it is, I guess it doesn't hurt to leave it in.
    out_cat2 = galsim.OutputCatalog(names)
    assert out_cat2.types == [float] * len(names)   # Default type is float.
    out_cat2.setTypes(types)
    assert out_cat2.types == out_cat2.getTypes() == types

    # Another feature that doesn't seem to be used anymore is you can add the rows
    # out of order and just give a key to use for sorting at the end.
    out_cat2.addRow(row3, 3)
    out_cat2.addRow(row1, 1)
    out_cat2.addRow(row2, 2)

    # Check ASCII round trip
    # The compound types expand into multiple columns on output, so 14 names
    # become 17 columns: angle -> 1, posi/posd/shear -> 2 each.
    out_cat.write(dir='output', file_name='catalog.dat')
    cat = galsim.Catalog(dir='output', file_name='catalog.dat')
    np.testing.assert_equal(cat.ncols, 17)
    np.testing.assert_equal(cat.nobjects, 3)
    np.testing.assert_equal(cat.isFits(), False)
    np.testing.assert_almost_equal(cat.getFloat(1, 0), 2.345)
    np.testing.assert_almost_equal(cat.getFloat(2, 1), 8000.)
    np.testing.assert_equal(cat.getInt(0, 2), 9)
    np.testing.assert_equal(cat.getInt(2, 3), 17)
    np.testing.assert_equal(cat.getInt(2, 4), 1)
    np.testing.assert_equal(cat.getInt(0, 5), 1)
    np.testing.assert_equal(cat.get(2, 6), 'demised!')
    np.testing.assert_equal(cat.get(1, 7), '"bereft')
    np.testing.assert_equal(cat.get(0, 8), 'to')
    np.testing.assert_equal(cat.get(2, 9), 'bucket"')
    # Angles are written in radians.
    np.testing.assert_almost_equal(cat.getFloat(0, 10), 1.2 * galsim.degrees / galsim.radians)
    np.testing.assert_almost_equal(cat.getInt(1, 11), -35)
    np.testing.assert_almost_equal(cat.getInt(1, 12), 106)
    np.testing.assert_almost_equal(cat.getFloat(2, 13), -0.99)
    np.testing.assert_almost_equal(cat.getFloat(2, 14), -0.88)
    np.testing.assert_almost_equal(cat.getFloat(0, 15), 0.2)
    np.testing.assert_almost_equal(cat.getFloat(0, 16), 0.1)

    # Check FITS round trip
    # FITS files keep the column names; compound types get dotted sub-names.
    out_cat.write(dir='output', file_name='catalog.fits')
    cat = galsim.Catalog(dir='output', file_name='catalog.fits')
    np.testing.assert_equal(cat.ncols, 17)
    np.testing.assert_equal(cat.nobjects, 3)
    np.testing.assert_equal(cat.isFits(), True)
    np.testing.assert_almost_equal(cat.getFloat(1, 'float1'), 2.345)
    np.testing.assert_almost_equal(cat.getFloat(2, 'float2'), 8000.)
    np.testing.assert_equal(cat.getInt(0, 'int1'), 9)
    np.testing.assert_equal(cat.getInt(2, 'int2'), 17)
    np.testing.assert_equal(cat.getInt(2, 'bool1'), 1)
    np.testing.assert_equal(cat.getInt(0, 'bool2'), 1)
    np.testing.assert_equal(cat.get(2, 'str1'), 'demised!')
    np.testing.assert_equal(cat.get(1, 'str2'), '"bereft')
    np.testing.assert_equal(cat.get(0, 'str3'), 'to')
    np.testing.assert_equal(cat.get(2, 'str4'), 'bucket"')
    np.testing.assert_almost_equal(cat.getFloat(0, 'angle.rad'),
                                   1.2 * galsim.degrees / galsim.radians)
    np.testing.assert_equal(cat.getInt(1, 'posi.x'), -35)
    np.testing.assert_equal(cat.getInt(1, 'posi.y'), 106)
    np.testing.assert_almost_equal(cat.getFloat(2, 'posd.x'), -0.99)
    np.testing.assert_almost_equal(cat.getFloat(2, 'posd.y'), -0.88)
    np.testing.assert_almost_equal(cat.getFloat(0, 'shear.g1'), 0.2)
    np.testing.assert_almost_equal(cat.getFloat(0, 'shear.g2'), 0.1)

    # The one that was made out of order should write the same file.
    out_cat2.write(dir='output', file_name='catalog2.fits')
    cat2 = galsim.Catalog(dir='output', file_name='catalog2.fits')
    np.testing.assert_array_equal(cat2.data, cat.data)
    assert cat2 != cat  # Because file_name is different.

    # Check that it properly overwrites an existing output file.
    # This row is identical to row1, so after writing, row 3 should equal row 0.
    out_cat.addRow([ 1.234, 4.131, 9, -3, 1, True, "He's", '"ceased', 'to', 'be"',
                     1.2 * galsim.degrees, galsim.PositionI(5, 6), galsim.PositionD(0.3, -0.4),
                     galsim.Shear(g1=0.2, g2=0.1) ])
    assert out_cat.rows[3] == out_cat.rows[0]
    out_cat.write(dir='output', file_name='catalog.fits')  # Same name as above.
    cat2 = galsim.Catalog(dir='output', file_name='catalog.fits')
    np.testing.assert_equal(cat2.ncols, 17)
    np.testing.assert_equal(cat2.nobjects, 4)
    for key in names[:10]:
        assert cat2.data[key][3] == cat2.data[key][0]

    # Check pickling
    do_pickle(out_cat)
    out_cat2 = galsim.OutputCatalog(names, types)  # No data.
    do_pickle(out_cat2)

    # Check errors
    with assert_raises(galsim.GalSimValueError):
        out_cat.addRow((1, 2, 3))  # Wrong length
    with assert_raises(galsim.GalSimValueError):
        out_cat.write(dir='output', file_name='catalog.txt', file_type='invalid')
def test_basic_dict():
    """Test basic operations on Dict.

    Reads the same logical dictionary from pickle, JSON, and YAML files and
    checks lookups, defaults, chained (dotted) keys, error handling, and
    pickling of the Dict object itself.
    """
    import yaml

    # Pickle
    d = galsim.Dict(dir='config_input', file_name='dict.p')
    np.testing.assert_equal(len(d), 4)
    np.testing.assert_equal(d.file_type, 'PICKLE')
    np.testing.assert_equal(d['i'], 17)
    np.testing.assert_equal(d.get('s'), 'Life')
    np.testing.assert_equal(d.get('s2', 'Grail'), 'Grail')  # Not in dict.  Use default.
    np.testing.assert_almost_equal(d.get('f', 999.), 23.17)  # In dict.  Ignore default.
    # An explicit file_type should give the same result as inferring from the extension.
    d2 = galsim.Dict(dir='config_input', file_name='dict.p', file_type='pickle')
    assert d == d2
    do_pickle(d)

    # JSON
    d = galsim.Dict(dir='config_input', file_name='dict.json')
    np.testing.assert_equal(len(d), 4)
    np.testing.assert_equal(d.file_type, 'JSON')
    np.testing.assert_equal(d['i'], -23)
    np.testing.assert_equal(d.get('s'), 'of')
    np.testing.assert_equal(d.get('s2', 'Grail'), 'Grail')  # Not in dict.  Use default.
    np.testing.assert_almost_equal(d.get('f', 999.), -17.23)  # In dict.  Ignore default.
    d2 = galsim.Dict(dir='config_input', file_name='dict.json', file_type='json')
    assert d == d2
    do_pickle(d)

    # YAML
    d = galsim.Dict(dir='config_input', file_name='dict.yaml')
    np.testing.assert_equal(len(d), 5)
    np.testing.assert_equal(d.file_type, 'YAML')
    np.testing.assert_equal(d['i'], 1)
    np.testing.assert_equal(d.get('s'), 'Brian')
    np.testing.assert_equal(d.get('s2', 'Grail'), 'Grail')  # Not in dict.  Use default.
    np.testing.assert_almost_equal(d.get('f', 999.), 0.1)  # In dict.  Ignore default.
    d2 = galsim.Dict(dir='config_input', file_name='dict.yaml', file_type='yaml')
    assert d == d2
    do_pickle(d)

    # We also have longer chained keys in dict.yaml.  A dotted key drills down
    # through nested dicts and lists.
    np.testing.assert_equal(d.get('noise.models.0.variance'), 0.12)
    np.testing.assert_equal(d.get('noise.models.1.gain'), 1.9)
    with assert_raises(KeyError):
        d.get('invalid')
    with assert_raises(KeyError):
        d.get('noise.models.invalid')
    with assert_raises(KeyError):
        d.get('noise.models.1.invalid')
    with assert_raises(IndexError):
        d.get('noise.models.2.invalid')   # Only 2 models, so index 2 is out of range.
    with assert_raises(TypeError):
        d.get('noise.models.1.gain.invalid')  # gain is a float; can't index into it.

    # It's really hard to get to this error.  I think this is the only (contrived) way.
    d3 = galsim.Dict('dict.yaml', 'config_input', key_split=None)
    with assert_raises(KeyError):
        d3.get('')
    do_pickle(d3)

    # Check construction errors.
    with assert_raises(galsim.GalSimValueError):
        galsim.Dict(dir='config_input', file_name='dict.yaml', file_type='invalid')
    with assert_raises(galsim.GalSimValueError):
        galsim.Dict(dir='config_input', file_name='dict.txt')  # Can't infer file_type.
    with assert_raises((IOError, OSError)):
        # Bug fix: this used to construct a galsim.Catalog, which is the wrong
        # class for this test (clearly a copy-paste from test_ascii_catalog).
        # The point here is that a nonexistent file raises IOError/OSError.
        galsim.Dict('invalid.yaml', 'config_input')

    # Check some dict equivalences.
    assert 'noise' in d
    assert len(d) == 5
    assert sorted(d.keys()) == ['b', 'f', 'i', 'noise', 's']
    assert all(d[k] == v for k, v in d.items())
    assert all(d[k] == v for k, v in zip(d.keys(), d.values()))
    # NOTE(review): iteritems/iterkeys/itervalues are Python-2-era names.
    # Presumably galsim.Dict provides them itself; confirm they still exist on py3.
    assert all(d[k] == v for k, v in d.iteritems())
    assert all(d[k] == v for k, v in zip(d.iterkeys(), d.itervalues()))
    assert all(k in d for k in d)
def main(argv):
    """Draw PSF images for one DES exposure using both PSFEx and Shapelet models.

    For each requested chip, reads the image header, object catalog, PSFEx file,
    and shapelet fit file, then renders every unflagged object's PSF (scaled to
    the object's flux) into two full-chip images -- one per PSF model -- adds a
    sky background and CCD noise, and writes both images to the output directory.

    Command line: optional ``first=N`` and ``last=N`` arguments select the chip range.
    """
    root = 'DECam_00154912'
    data_dir = 'des_data'
    if not os.path.exists(data_dir):
        print('You will need to download some DES data to use this script.')
        print('Run the following commands from the directory GalSim/examples/des:')
        print()
        print(' wget http://www.sas.upenn.edu/~mjarvis/des_data.tar.gz')
        print(' tar xfz des_data.tar.gz')
        print()
        print('Then try running this script again. It should work now.')
        sys.exit()

    # Set which chips to run on
    first_chip = 1
    last_chip = 62
    #first_chip = 12
    #last_chip = 12

    # quick and dirty command line parsing.
    for var in argv:
        if var.startswith('first='): first_chip = int(var[6:])
        if var.startswith('last='): last_chip = int(var[5:])
    print('Processing chips %d .. %d'%(first_chip, last_chip))

    out_dir = 'output'
    # The random seed, so the results are deterministic
    random_seed = 1339201

    # Catalog column names and image header keys used below.
    x_col = 'X_IMAGE'
    y_col = 'Y_IMAGE'
    flux_col = 'FLUX_AUTO'
    flag_col = 'FLAGS'
    xsize_key = 'NAXIS1'
    ysize_key = 'NAXIS2'
    sky_level_key = 'SKYBRITE'
    sky_sigma_key = 'SKYSIGMA'

    # Make output directory if not already present.
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    for chipnum in range(first_chip,last_chip+1):
        print('Start chip ',chipnum)

        # Setup the file names
        image_file = '%s_%02d.fits.fz'%(root,chipnum)
        cat_file = '%s_%02d_cat.fits'%(root,chipnum)
        psfex_file = '%s_%02d_psfcat.psf'%(root,chipnum)
        fitpsf_file = '%s_%02d_fitpsf.fits'%(root,chipnum)
        psfex_image_file = '%s_%02d_psfex_image.fits'%(root,chipnum)
        fitpsf_image_file = '%s_%02d_fitpsf_image.fits'%(root,chipnum)

        # Get some parameters about the image from the data image header information
        image_header = galsim.FitsHeader(image_file, dir=data_dir)
        xsize = image_header[xsize_key]
        ysize = image_header[ysize_key]
        sky_sigma = image_header[sky_sigma_key]  # This is sqrt(variance) / pixel
        sky_level = image_header[sky_level_key]  # This is in ADU / pixel
        gain = sky_level / sky_sigma**2  # an approximation, since gain is missing.

        # Read the WCS
        wcs = galsim.FitsWCS(header=image_header)

        # Setup the images: one full-chip canvas per PSF model.
        psfex_image = galsim.Image(xsize, ysize, wcs=wcs)
        fitpsf_image = galsim.Image(xsize, ysize, wcs=wcs)

        # Read the other input files
        cat = galsim.Catalog(cat_file, hdu=2, dir=data_dir)
        psfex = galsim.des.DES_PSFEx(psfex_file, image_file, dir=data_dir)
        fitpsf = galsim.des.DES_Shapelet(fitpsf_file, dir=data_dir)

        nobj = cat.nobjects
        print('Catalog has ',nobj,' objects')

        for k in range(nobj):
            # Progress indicator: one dot per object.
            sys.stdout.write('.')
            sys.stdout.flush()

            # Skip objects with a non-zero flag
            flag = cat.getInt(k,flag_col)
            if flag: continue

            # Get the position from the galaxy catalog
            x = cat.getFloat(k,x_col)
            y = cat.getFloat(k,y_col)
            image_pos = galsim.PositionD(x,y)
            #print '    pos = ',image_pos
            x += 0.5   # + 0.5 to account for even-size postage stamps
            y += 0.5
            ix = int(math.floor(x+0.5))  # round to nearest pixel
            iy = int(math.floor(y+0.5))
            # (dx,dy) is the sub-pixel offset of the true position from that pixel.
            dx = x-ix
            dy = y-iy
            offset = galsim.PositionD(dx,dy)

            # Also get the flux of the galaxy from the catalog
            flux = cat.getFloat(k,flux_col)
            #print '    flux = ',flux
            #print '    wcs = ',wcs.local(image_pos)

            # First do the PSFEx image:
            if True:
                # Define the PSF profile
                psf = psfex.getPSF(image_pos).withFlux(flux)
                #print '    psfex psf = ',psf

                # Draw the postage stamp image
                # Note: Use no_pixel method, since the PSFEx estimate of the PSF already
                # includes the pixel response.
                stamp = psf.drawImage(wcs=wcs.local(image_pos), offset=offset, method='no_pixel')

                # Recenter the stamp at the desired position:
                stamp.setCenter(ix,iy)

                # Find overlapping bounds
                bounds = stamp.bounds & psfex_image.bounds
                psfex_image[bounds] += stamp[bounds]

            # Next do the ShapeletPSF image:
            # If the position is not within the interpolation bounds, fitpsf will
            # raise an exception telling us to skip this object.  Easier to check here.
            if fitpsf.bounds.includes(image_pos):
                # Define the PSF profile
                psf = fitpsf.getPSF(image_pos).withFlux(flux)
                #print '    fitpsf psf = ',psf

                # Draw the postage stamp image
                # Again, the PSF already includes the pixel response.
                stamp = psf.drawImage(wcs=wcs.local(image_pos), offset=offset, method='no_pixel')

                # Recenter the stamp at the desired position:
                stamp.setCenter(ix,iy)

                # Find overlapping bounds
                bounds = stamp.bounds & fitpsf_image.bounds
                fitpsf_image[bounds] += stamp[bounds]
            else:
                pass
                #print '...not in fitpsf.bounds'
        print()

        # Add background level
        psfex_image += sky_level
        fitpsf_image += sky_level

        # Add noise
        rng = galsim.BaseDeviate(random_seed)
        noise = galsim.CCDNoise(rng, gain=gain)
        psfex_image.addNoise(noise)
        # Reset the random seed to match the action of the yaml version
        # Note: the difference between seed and reset matters here.
        # reset would sever the connection between this rng instance and the one stored in noise.
        # seed changes the seed while keeping the connection between them.
        rng.seed(random_seed)
        fitpsf_image.addNoise(noise)

        # Now write the images to disk.
        psfex_image.write(psfex_image_file, dir=out_dir)
        fitpsf_image.write(fitpsf_image_file, dir=out_dir)
        print('Wrote images to %s and %s'%(
            os.path.join(out_dir,psfex_image_file),
            os.path.join(out_dir,fitpsf_image_file)))

        # Increment the random seed by the number of objects in the file
        random_seed += nobj
def test_angle_value():
    """Test various ways to generate an Angle value.

    Covers direct Angle values, string parsing of all supported unit names,
    Catalog input, Random, Sequence, List, and Sum generators.
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' } },

        'val1' : 1.9 * galsim.radians,
        'val2' : -41 * galsim.degrees,
        'str1' : '0.73 radians',
        'str2' : '240 degrees',
        'str3' : '1.2 rad',
        'str4' : '45 deg',
        'str5' : '6 hrs',
        'str6' : '21 hour',
        'str7' : '-240 arcmin',
        'str8' : '1800 arcsec',
        'cat1' : { 'type' : 'Radians' , 'theta' : { 'type' : 'Catalog' , 'col' : 10 } },
        'cat2' : { 'type' : 'Degrees' , 'theta' : { 'type' : 'Catalog' , 'col' : 11 } },
        'ran1' : { 'type' : 'Random' },
        'seq1' : { 'type' : 'Rad', 'theta' : { 'type' : 'Sequence' } },
        'seq2' : { 'type' : 'Deg', 'theta' : { 'type' : 'Sequence', 'first' : 45, 'step' : 80 } },
        'list1' : { 'type' : 'List',
                    'items' : [ 73 * galsim.arcmin, 8.9 * galsim.arcmin, 3.14 * galsim.arcmin ] },
        'sum1' : { 'type' : 'Sum', 'items' : [ 72 * galsim.degrees, '2.33 degrees' ] }
    }

    galsim.config.ProcessInput(config)

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(val1.rad(), 1.9)

    val2 = galsim.config.ParseValue(config,'val2',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(val2.rad(), -41 * math.pi/180)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str1.rad(), 0.73)

    str2 = galsim.config.ParseValue(config,'str2',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str2 / galsim.degrees, 240)

    str3 = galsim.config.ParseValue(config,'str3',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str3.rad(), 1.2)

    str4 = galsim.config.ParseValue(config,'str4',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str4.rad(), math.pi/4)

    # 6 hrs = 1/4 of a full turn.
    str5 = galsim.config.ParseValue(config,'str5',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str5.rad(), math.pi/2)

    # 21 hours = 7/8 of a full turn.
    str6 = galsim.config.ParseValue(config,'str6',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str6.rad(), 7*math.pi/4)

    str7 = galsim.config.ParseValue(config,'str7',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str7 / galsim.degrees, -4)

    str8 = galsim.config.ParseValue(config,'str8',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str8 / galsim.degrees, 0.5)

    # Test values read from a Catalog
    # (input_cat itself isn't used below; constructing it checks that the file loads.)
    input_cat = galsim.Catalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    config['index_key'] = 'file_num'
    for k in range(5):
        config['file_num'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, galsim.Angle)[0].rad())
        cat2.append(galsim.config.ParseValue(config,'cat2',config, galsim.Angle)[0]/galsim.degrees)
    # The catalog only has 3 rows, so the index wraps around.
    np.testing.assert_array_almost_equal(cat1, [ 1.2, 0.1, -0.9, 1.2, 0.1 ])
    np.testing.assert_array_almost_equal(cat2, [ 23, 15, 82, 23, 15 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234)  # A second copy starting with the same seed.
    for k in range(6):
        config['obj_num'] = k
        ran1 = galsim.config.ParseValue(config,'ran1',config, galsim.Angle)[0]
        theta = rng() * 2 * math.pi
        np.testing.assert_almost_equal(ran1.rad(), theta)

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    config['index_key'] = 'obj_num'
    for k in range(6):
        config['obj_num'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, galsim.Angle)[0].rad())
        seq2.append(galsim.config.ParseValue(config,'seq2',config, galsim.Angle)[0]/galsim.degrees)
    np.testing.assert_array_almost_equal(seq1, [ 0, 1, 2, 3, 4, 5 ])
    np.testing.assert_array_almost_equal(seq2, [ 45, 125, 205, 285, 365, 445 ])

    # Test values taken from a List
    list1 = []
    config['index_key'] = 'obj_num'
    for k in range(5):
        config['obj_num'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, galsim.Angle)[0]/galsim.arcmin)
    np.testing.assert_array_almost_equal(list1, [ 73, 8.9, 3.14, 73, 8.9 ])

    # Test a Sum of angles (mixed Angle object and string forms).
    sum1 = galsim.config.ParseValue(config,'sum1', config, galsim.Angle)[0]
    np.testing.assert_almost_equal(sum1 / galsim.degrees, 72 + 2.33)

    t2 = time.time()
    # Bug fix: this was a Python 2 print statement, which is a syntax error on
    # Python 3 and inconsistent with the print() calls used elsewhere in this file.
    print('time for %s = %.2f'%(funcname(),t2-t1))
def test_float_value():
    """Test various ways to generate a float value.

    Covers direct values, string parsing, Catalog input, all the random
    deviate types, Sequence with each index key, List, Dict, and Sum.
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' },
                    'dict' : [ { 'dir' : 'config_input', 'file_name' : 'dict.p' },
                               { 'dir' : 'config_input', 'file_name' : 'dict.json' },
                               { 'dir' : 'config_input', 'file_name' : 'dict.yaml' } ] },

        'val1' : 9.9,
        'val2' : int(400),
        'str1' : '8.73',
        'str2' : '2.33e-9',
        'str3' : '6.e-9',
        'cat1' : { 'type' : 'Catalog' , 'col' : 0 },
        'cat2' : { 'type' : 'Catalog' , 'col' : 1 },
        'ran1' : { 'type' : 'Random', 'min' : 0.5, 'max' : 3 },
        'ran2' : { 'type' : 'Random', 'min' : -5, 'max' : 0 },
        'gauss1' : { 'type' : 'RandomGaussian', 'sigma' : 1 },
        'gauss2' : { 'type' : 'RandomGaussian', 'sigma' : 3, 'mean' : 4 },
        'gauss3' : { 'type' : 'RandomGaussian', 'sigma' : 1.5, 'min' : -2, 'max' : 2 },
        'gauss4' : { 'type' : 'RandomGaussian', 'sigma' : 0.5, 'min' : 0, 'max' : 0.8 },
        'gauss5' : { 'type' : 'RandomGaussian', 'sigma' : 0.3, 'mean' : 0.5,
                     'min' : 0, 'max' : 0.5 },
        'dist1' : { 'type' : 'RandomDistribution', 'function' : 'config_input/distribution.txt',
                    'interpolant' : 'linear' },
        'dist2' : { 'type' : 'RandomDistribution', 'function' : 'config_input/distribution2.txt',
                    'interpolant' : 'linear' },
        'dist3' : { 'type' : 'RandomDistribution', 'function' : 'x*x',
                    'x_min' : 0., 'x_max' : 2.0 },
        'dev1' : { 'type' : 'RandomPoisson', 'mean' : 137 },
        'dev2' : { 'type' : 'RandomBinomial', 'N' : 17 },
        'dev3' : { 'type' : 'RandomBinomial', 'N' : 17, 'p' : 0.2 },
        'dev4' : { 'type' : 'RandomWeibull', 'a' : 1.7, 'b' : 4.3 },
        'dev5' : { 'type' : 'RandomGamma', 'k' : 1, 'theta' : 4 },
        'dev6' : { 'type' : 'RandomGamma', 'k' : 1.9, 'theta' : 4.1 },
        'dev7' : { 'type' : 'RandomChi2', 'n' : 17},
        'seq1' : { 'type' : 'Sequence' },
        'seq2' : { 'type' : 'Sequence', 'step' : 0.1 },
        'seq3' : { 'type' : 'Sequence', 'first' : 1.5, 'step' : 0.5 },
        'seq4' : { 'type' : 'Sequence', 'first' : 10, 'step' : -2 },
        'seq5' : { 'type' : 'Sequence', 'first' : 1, 'last' : 2.1, 'repeat' : 2 },
        'list1' : { 'type' : 'List', 'items' : [ 73, 8.9, 3.14 ] },
        'list2' : { 'type' : 'List',
                    'items' : [ 0.6, 1.8, 2.1, 3.7, 4.3, 5.5, 6.1, 7.0, 8.6, 9.3, 10.8, 11.2 ],
                    'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } },
        'dict1' : { 'type' : 'Dict', 'key' : 'f' },
        'dict2' : { 'type' : 'Dict', 'num' : 1, 'key' : 'f' },
        'dict3' : { 'type' : 'Dict', 'num' : 2, 'key' : 'f' },
        'dict4' : { 'type' : 'Dict', 'num' : 2, 'key' : 'noise.models.1.gain' },
        'sum1' : { 'type' : 'Sum', 'items' : [ 72, '2.33', { 'type' : 'Dict', 'key' : 'f' } ] }
    }

    test_yaml = True
    try:
        galsim.config.ProcessInput(config)
    except Exception:
        # Bug fix: narrowed from a bare "except:", which would also swallow
        # KeyboardInterrupt/SystemExit.
        # We don't require PyYAML as a dependency, so if this fails, just remove the YAML dict.
        del config['input']['dict'][2]
        galsim.config.ProcessInput(config)
        test_yaml = False

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, float)[0]
    np.testing.assert_almost_equal(val1, 9.9)

    val2 = galsim.config.ParseValue(config,'val2',config, float)[0]
    np.testing.assert_almost_equal(val2, 400)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, float)[0]
    np.testing.assert_almost_equal(str1, 8.73)

    str2 = galsim.config.ParseValue(config,'str2',config, float)[0]
    np.testing.assert_almost_equal(str2, 2.33e-9)

    str3 = galsim.config.ParseValue(config,'str3',config, float)[0]
    np.testing.assert_almost_equal(str3, 6.0e-9)

    # Test values read from a Catalog
    # (input_cat itself isn't used below; constructing it checks that the file loads.)
    input_cat = galsim.Catalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    config['index_key'] = 'file_num'
    for k in range(5):
        config['file_num'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, float)[0])
        cat2.append(galsim.config.ParseValue(config,'cat2',config, float)[0])
    # The catalog only has 3 rows, so the index wraps around.
    np.testing.assert_array_almost_equal(cat1, [ 1.234, 2.345, 3.456, 1.234, 2.345 ])
    np.testing.assert_array_almost_equal(cat2, [ 4.131, -900, 8000, 4.131, -900 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234)  # A second copy starting with the same seed.
    for k in range(6):
        config['obj_num'] = k   # The Random type doesn't use obj_num, but this keeps it
                                # from thinking current_val is still current.
        ran1 = galsim.config.ParseValue(config,'ran1',config, float)[0]
        np.testing.assert_almost_equal(ran1, rng() * 2.5 + 0.5)

        ran2 = galsim.config.ParseValue(config,'ran2',config, float)[0]
        np.testing.assert_almost_equal(ran2, rng() * 5 - 5)

    # Test values generated from a Gaussian deviate
    gd = galsim.GaussianDeviate(rng)
    for k in range(6):
        config['obj_num'] = k
        gauss1 = galsim.config.ParseValue(config,'gauss1',config, float)[0]
        gd.setMean(0)
        gd.setSigma(1)
        np.testing.assert_almost_equal(gauss1, gd())

        gauss2 = galsim.config.ParseValue(config,'gauss2',config, float)[0]
        gd.setMean(4)
        gd.setSigma(3)
        np.testing.assert_almost_equal(gauss2, gd())

        # With min/max, values outside the range are re-drawn.
        gauss3 = galsim.config.ParseValue(config,'gauss3',config, float)[0]
        gd.setMean(0)
        gd.setSigma(1.5)
        gd_val = gd()
        while math.fabs(gd_val) > 2:
            gd_val = gd()
        np.testing.assert_almost_equal(gauss3, gd_val)

        # With min=0 (and no mean), the implementation folds with abs().
        gauss4 = galsim.config.ParseValue(config,'gauss4',config, float)[0]
        gd.setMean(0)
        gd.setSigma(0.5)
        gd_val = math.fabs(gd())
        while gd_val > 0.8:
            gd_val = math.fabs(gd())
        np.testing.assert_almost_equal(gauss4, gd_val)

        # With a symmetric range around the mean, values above the mean are reflected.
        gauss5 = galsim.config.ParseValue(config,'gauss5',config, float)[0]
        gd.setMean(0.5)
        gd.setSigma(0.3)
        gd_val = gd()
        if gd_val > 0.5:
            gd_val = 1-gd_val
        while gd_val < 0:
            gd_val = gd()
            if gd_val > 0.5:
                gd_val = 1-gd_val
        np.testing.assert_almost_equal(gauss5, gd_val)

    # Test values generated from a distribution in a file
    dd=galsim.DistDeviate(rng,function='config_input/distribution.txt',interpolant='linear')
    for k in range(6):
        config['obj_num'] = k
        dist1 = galsim.config.ParseValue(config,'dist1',config, float)[0]
        np.testing.assert_almost_equal(dist1, dd())

    dd=galsim.DistDeviate(rng,function='config_input/distribution2.txt',interpolant='linear')
    for k in range(6):
        config['obj_num'] = k
        dist2 = galsim.config.ParseValue(config,'dist2',config, float)[0]
        np.testing.assert_almost_equal(dist2, dd())

    # ... and from a distribution given as a formula.
    dd=galsim.DistDeviate(rng,function=lambda x: x*x,x_min=0.,x_max=2.)
    for k in range(6):
        config['obj_num'] = k
        dist3 = galsim.config.ParseValue(config,'dist3',config, float)[0]
        np.testing.assert_almost_equal(dist3, dd())

    # Test values generated from various other deviates
    for k in range(6):
        config['obj_num'] = k
        dev = galsim.PoissonDeviate(rng, mean=137)
        dev1 = galsim.config.ParseValue(config,'dev1',config, float)[0]
        np.testing.assert_almost_equal(dev1, dev())

        dev = galsim.BinomialDeviate(rng, N=17)
        dev2 = galsim.config.ParseValue(config,'dev2',config, float)[0]
        np.testing.assert_almost_equal(dev2, dev())

        dev = galsim.BinomialDeviate(rng, N=17, p=0.2)
        dev3 = galsim.config.ParseValue(config,'dev3',config, float)[0]
        np.testing.assert_almost_equal(dev3, dev())

        dev = galsim.WeibullDeviate(rng, a=1.7, b=4.3)
        dev4 = galsim.config.ParseValue(config,'dev4',config, float)[0]
        np.testing.assert_almost_equal(dev4, dev())

        dev = galsim.GammaDeviate(rng, k=1, theta=4)
        dev5 = galsim.config.ParseValue(config,'dev5',config, float)[0]
        np.testing.assert_almost_equal(dev5, dev())

        dev = galsim.GammaDeviate(rng, k=1.9, theta=4.1)
        dev6 = galsim.config.ParseValue(config,'dev6',config, float)[0]
        np.testing.assert_almost_equal(dev6, dev())

        dev = galsim.Chi2Deviate(rng, n=17)
        dev7 = galsim.config.ParseValue(config,'dev7',config, float)[0]
        np.testing.assert_almost_equal(dev7, dev())

    # Test values generated from a Sequence, one per index_key.
    seq1 = []
    seq2 = []
    seq3 = []
    seq4 = []
    seq5 = []
    config['index_key'] = 'file_num'
    for k in range(6):
        config['file_num'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, float)[0])
    config['index_key'] = 'image_num'
    for k in range(6):
        config['image_num'] = k
        seq2.append(galsim.config.ParseValue(config,'seq2',config, float)[0])
    config['index_key'] = 'obj_num'
    for k in range(6):
        config['obj_num'] = k
        seq3.append(galsim.config.ParseValue(config,'seq3',config, float)[0])
    config['index_key'] = 'obj_num_in_file'
    config['start_obj_num'] = 10
    for k in range(6):
        config['obj_num'] = k+10
        seq4.append(galsim.config.ParseValue(config,'seq4',config, float)[0])
        seq5.append(galsim.config.ParseValue(config,'seq5',config, float)[0])

    np.testing.assert_array_almost_equal(seq1, [ 0, 1, 2, 3, 4, 5 ])
    np.testing.assert_array_almost_equal(seq2, [ 0, 0.1, 0.2, 0.3, 0.4, 0.5 ])
    np.testing.assert_array_almost_equal(seq3, [ 1.5, 2, 2.5, 3, 3.5, 4 ])
    np.testing.assert_array_almost_equal(seq4, [ 10, 8, 6, 4, 2, 0 ])
    np.testing.assert_array_almost_equal(seq5, [ 1, 1, 2, 2, 1, 1 ])

    # Test values taken from a List
    list1 = []
    list2 = []
    config['index_key'] = 'obj_num'
    for k in range(5):
        config['obj_num'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, float)[0])
        list2.append(galsim.config.ParseValue(config,'list2',config, float)[0])
    np.testing.assert_array_almost_equal(list1, [ 73, 8.9, 3.14, 73, 8.9 ])
    np.testing.assert_array_almost_equal(list2, [ 10.8, 7.0, 4.3, 1.8, 10.8 ])

    # Test values read from a Dict
    # (renamed from "dict", which shadowed the builtin)
    dict_vals = []
    dict_vals.append(galsim.config.ParseValue(config,'dict1',config, float)[0])
    dict_vals.append(galsim.config.ParseValue(config,'dict2',config, float)[0])
    if test_yaml:
        dict_vals.append(galsim.config.ParseValue(config,'dict3',config, float)[0])
        dict_vals.append(galsim.config.ParseValue(config,'dict4',config, float)[0])
    else:
        # Without YAML, substitute the values the YAML dict would have given.
        dict_vals.append(0.1)
        dict_vals.append(1.9)
    np.testing.assert_array_almost_equal(dict_vals, [ 23.17, -17.23, 0.1, 1.9 ])

    sum1 = galsim.config.ParseValue(config,'sum1',config, float)[0]
    np.testing.assert_almost_equal(sum1, 72 + 2.33 + 23.17)

    t2 = time.time()
    # Bug fix: this was a Python 2 print statement, which is a syntax error on
    # Python 3 and inconsistent with the print() calls used elsewhere in this file.
    print('time for %s = %.2f'%(funcname(),t2-t1))
def test_bool_value():
    """Test various ways to generate a bool value.

    Covers direct values (bool/int/float), string parsing, Catalog input,
    Random, Sequence, List, and Dict generators.
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' },
                    'dict' : [ { 'dir' : 'config_input', 'file_name' : 'dict.p' },
                               { 'dir' : 'config_input', 'file_name' : 'dict.yaml' },
                               { 'dir' : 'config_input', 'file_name' : 'dict.json' } ] },

        'val1' : True,
        'val2' : 1,
        'val3' : 0.0,
        'str1' : 'true',
        'str2' : '0',
        'str3' : 'yes',
        'str4' : 'No',
        'cat1' : { 'type' : 'Catalog' , 'col' : 4 },
        'cat2' : { 'type' : 'Catalog' , 'col' : 5 },
        'ran1' : { 'type' : 'Random' },
        'seq1' : { 'type' : 'Sequence' },
        'seq2' : { 'type' : 'Sequence', 'first' : True, 'repeat' : 2 },
        'list1' : { 'type' : 'List', 'items' : [ 'yes', 'no', 'no' ] },
        'list2' : { 'type' : 'List',
                    'items' : [ 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0 ],
                    'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } },
        'dict1' : { 'type' : 'Dict', 'key' : 'b' },
        'dict2' : { 'type' : 'Dict', 'num' : 1, 'key' : 'b' },
        'dict3' : { 'type' : 'Dict', 'num' : 2, 'key' : 'b' }
    }

    galsim.config.ProcessInput(config)

    # Test direct values: truthiness of bool, int, and float inputs.
    val1 = galsim.config.ParseValue(config,'val1',config, bool)[0]
    np.testing.assert_equal(val1, True)

    val2 = galsim.config.ParseValue(config,'val2',config, bool)[0]
    np.testing.assert_equal(val2, True)

    val3 = galsim.config.ParseValue(config,'val3',config, bool)[0]
    np.testing.assert_equal(val3, False)

    # Test conversions from strings (case-insensitive true/false/yes/no/1/0).
    str1 = galsim.config.ParseValue(config,'str1',config, bool)[0]
    np.testing.assert_equal(str1, True)

    str2 = galsim.config.ParseValue(config,'str2',config, bool)[0]
    np.testing.assert_equal(str2, False)

    str3 = galsim.config.ParseValue(config,'str3',config, bool)[0]
    np.testing.assert_equal(str3, True)

    str4 = galsim.config.ParseValue(config,'str4',config, bool)[0]
    np.testing.assert_equal(str4, False)

    # Test values read from a Catalog
    # (input_cat itself isn't used below; constructing it checks that the file loads.)
    input_cat = galsim.Catalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    for k in range(5):
        # NOTE(review): this test uses 'seq_index' while the sibling tests use
        # 'index_key'/'obj_num'; looks like an older config convention -- confirm.
        config['seq_index'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, bool)[0])
        cat2.append(galsim.config.ParseValue(config,'cat2',config, bool)[0])
    # The catalog only has 3 rows, so the index wraps around.
    np.testing.assert_array_equal(cat1, [ 1, 0, 1, 1, 0 ])
    np.testing.assert_array_equal(cat2, [ 1, 0, 0, 1, 0 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234)  # A second copy starting with the same seed.
    for k in range(6):
        ran1 = galsim.config.ParseValue(config,'ran1',config, bool)[0]
        np.testing.assert_equal(ran1, rng() < 0.5)

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    for k in range(6):
        config['seq_index'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, bool)[0])
        seq2.append(galsim.config.ParseValue(config,'seq2',config, bool)[0])
    # Bool sequences alternate (wrapping at 2); seq2 starts at True and repeats each value twice.
    np.testing.assert_array_equal(seq1, [ 0, 1, 0, 1, 0, 1 ])
    np.testing.assert_array_equal(seq2, [ 1, 1, 0, 0, 1, 1 ])

    # Test values taken from a List
    list1 = []
    list2 = []
    for k in range(5):
        config['seq_index'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, bool)[0])
        list2.append(galsim.config.ParseValue(config,'list2',config, bool)[0])
    np.testing.assert_array_equal(list1, [ 1, 0, 0, 1, 0 ])
    np.testing.assert_array_equal(list2, [ 0, 1, 1, 1, 0 ])

    # Test values read from a Dict
    # (these Dict objects aren't used directly; constructing them checks the files load)
    pickle_dict = galsim.Dict(dir='config_input', file_name='dict.p')
    yaml_dict = galsim.Dict(dir='config_input', file_name='dict.yaml')
    json_dict = galsim.Dict(dir='config_input', file_name='dict.json')
    # (renamed from "dict", which shadowed the builtin)
    dict_vals = []
    dict_vals.append(galsim.config.ParseValue(config,'dict1',config, bool)[0])
    dict_vals.append(galsim.config.ParseValue(config,'dict2',config, bool)[0])
    dict_vals.append(galsim.config.ParseValue(config,'dict3',config, bool)[0])
    np.testing.assert_array_equal(dict_vals, [ True, False, False ])

    t2 = time.time()
    # Bug fix: this was a Python 2 print statement, which is a syntax error on
    # Python 3 and inconsistent with the print() calls used elsewhere in this file.
    print('time for %s = %.2f'%(funcname(),t2-t1))