def buildWCS(self, config, base, logger):
    index, index_key = galsim.config.GetIndex(config, base)
    if index == 0:
        return galsim.TanWCS(
            galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)),
            galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees))
    elif index == 1:
        return galsim.TanWCS(
            galsim.AffineTransform(0.25, -0.02, 0.01, 0.24, galsim.PositionD(1024, 1024)),
            galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees))
    else:
        raise ValueError("Custom WCS only supports building 2 WCS's")
def buildWCS(self, config, base, logger):
    """Build the TanWCS based on the specifications in the config dict.

    @param config       The configuration dict for the wcs type.
    @param base         The base configuration dict.
    @param logger       If provided, a logger for logging debug statements.

    @returns the constructed WCS object.
    """
    req = { "dudx" : float, "dudy" : float, "dvdx" : float, "dvdy" : float,
            "ra" : galsim.Angle, "dec" : galsim.Angle }
    opt = { "units" : str, "origin" : galsim.PositionD }
    params, safe = galsim.config.GetAllParams(config, base, req=req, opt=opt)

    dudx = params['dudx']
    dudy = params['dudy']
    dvdx = params['dvdx']
    dvdy = params['dvdy']
    ra = params['ra']
    dec = params['dec']
    units = params.get('units', 'arcsec')
    origin = params.get('origin', None)

    affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin)
    world_origin = galsim.CelestialCoord(ra, dec)
    units = galsim.angle.get_angle_unit(units)

    return galsim.TanWCS(affine=affine, world_origin=world_origin, units=units)
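# For context, a config entry handled by a builder like this might look roughly as
# follows.  The 'type' name and the string forms of the angles are assumptions about
# how the builder is registered and how the config layer parses Angle values; they are
# not taken from the source above.
wcs_config = {
    'type': 'CustomTan',            # hypothetical registration name for this builder
    'dudx': 0.26, 'dudy': 0.05,     # pixel -> (u, v) Jacobian, arcsec per pixel
    'dvdx': -0.08, 'dvdy': -0.24,
    'ra': '6.0 hours',              # parsed into a galsim.Angle by the config layer
    'dec': '-25.0 degrees',
}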
def get_nominal_wcs(self, chipnum):
    """Given a chip, return a galsim wcs object with reasonable values.
    Useful for generating fake stars.

    :param chipnum:     Chip we are looking at

    :returns wcs:       Galsim wcs object

    .. note::
        This is a EuclideanWCS, NOT a CelestialWCS!  All I have done here is create a
        galsim wcs object whose center (u, v) = (0, 0) corresponds to the focal plane
        (focal_x, focal_y) = (0, 0) and which has a reasonable jacobian transformation
        about the center.  For simplicity, set dudx, dvdy = 0, and dudy, dvdx = -0.26.
        This is pretty close to what the y1 test images look like...
    """
    import galsim

    # get center and convert to pixels.
    xpix, ypix = self.getPixel_chipnum([chipnum], [0], [0])

    # now that we know dudx etc., convert xpix and ypix to u, v in arcsec
    arcsec_over_pixel = 0.26
    # also a minus sign because the axes also flip
    uarcsec = ypix * -arcsec_over_pixel
    varcsec = xpix * -arcsec_over_pixel
    world_origin = galsim.PositionD(uarcsec, varcsec)

    wcs = galsim.AffineTransform(0, -arcsec_over_pixel, -arcsec_over_pixel, 0,
                                 world_origin=-world_origin)
    return wcs
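# A standalone check of the axis-swapped Jacobian used above (a sketch, independent of
# the class; the 0.26 arcsec/pixel value is the one quoted in the note):
import galsim

arcsec_over_pixel = 0.26
wcs = galsim.AffineTransform(0, -arcsec_over_pixel, -arcsec_over_pixel, 0,
                             world_origin=galsim.PositionD(0, 0))
# +1 pixel in x moves -0.26 arcsec in v; +1 pixel in y moves -0.26 arcsec in u.
print(wcs.toWorld(galsim.PositionD(1, 0)))  # (u, v) = (0, -0.26)
print(wcs.toWorld(galsim.PositionD(0, 1)))  # (u, v) = (-0.26, 0)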
def TanWCSBuilder(dudx, dudy, dvdx, dvdy, ra, dec, units='arcsec',
                  origin=galsim.PositionD(0, 0)):
    # The TanWCS uses a custom builder because the normal function takes an AffineTransform,
    # which we need to construct.  It also takes a CelestialCoord for its world_origin
    # parameter, so we make that out of ra and dec parameters.
    affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin)
    world_origin = galsim.CelestialCoord(ra, dec)
    units = galsim.angle.get_angle_unit(units)
    return galsim.TanWCS(affine, world_origin, units)
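# A quick usage sketch for the builder above; the numerical values are illustrative only.
import galsim

wcs = TanWCSBuilder(0.26, 0.0, 0.0, 0.26,
                    ra=90 * galsim.degrees, dec=-25 * galsim.degrees,
                    units='arcsec', origin=galsim.PositionD(1024, 1024))
# The (ra, dec) tangent point should map back to the image origin.
print(wcs.toImage(galsim.CelestialCoord(90 * galsim.degrees, -25 * galsim.degrees)))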
def test_pickle():
    """Test that reading a file written with python 2 pickling is readable with python 2 or 3.
    """
    if __name__ == '__main__':
        logger = piff.config.setup_logger(verbose=2)
    else:
        logger = piff.config.setup_logger(log_file='output/test_pickle.log')

    # First, this is the output file written by the above test_single function on python 2.
    # Should be trivially readable by python 2, but make sure it is also readable by python 3.
    psf = piff.read('input/test_single_py27.piff', logger=logger)

    wcs1 = galsim.TanWCS(
        galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)),
        galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees))
    wcs2 = galsim.TanWCS(
        galsim.AffineTransform(0.25, -0.02, 0.01, 0.24, galsim.PositionD(1024, 1024)),
        galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees))

    data1 = fitsio.read('input/test_single_cat1.fits')
    data2 = fitsio.read('input/test_single_cat2.fits')

    field_center = galsim.CelestialCoord(0 * galsim.degrees, -25 * galsim.degrees)

    for chipnum, data, wcs in [(1, data1, wcs1), (2, data2, wcs2)]:
        for k in range(len(data)):
            x = data['x'][k]
            y = data['y'][k]
            e1 = data['e1'][k]
            e2 = data['e2'][k]
            s = data['s'][k]
            #print('k,x,y = ',k,x,y)
            #print('  true s,e1,e2 = ',s,e1,e2)
            image_pos = galsim.PositionD(x, y)
            star = piff.Star.makeTarget(x=x, y=y, wcs=wcs, stamp_size=48,
                                        pointing=field_center, chipnum=chipnum)
            star = psf.drawStar(star)
            #print('  fitted s,e1,e2 = ',star.fit.params)
            np.testing.assert_almost_equal(star.fit.params, [s, e1, e2], decimal=6)
def setup():
    """Build an input image and catalog used by a few tests below.
    """
    wcs = galsim.TanWCS(
        galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)),
        #galsim.AffineTransform(0.26, 0., 0., 0.26, galsim.PositionD(1024,1024)),
        galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees))

    # Make the image (copied from test_single_image in test_simple.py)
    image = galsim.Image(2048, 2048, wcs=wcs)

    # Where to put the stars.
    x_list = [123.12, 345.98, 567.25, 1094.94, 924.15,
              1532.74, 1743.11, 888.39, 1033.29, 1409.31]
    y_list = [345.43, 567.45, 1094.32, 924.29, 1532.92,
              1743.83, 888.83, 1033.19, 1409.20, 123.11]

    # Draw a Gaussian PSF at each location on the image.
    sigma = 1.3
    g1 = 0.23
    g2 = -0.17
    du = 0.09  # in arcsec
    dv = -0.07
    flux = 123.45
    psf = galsim.Gaussian(sigma=sigma).shear(g1=g1, g2=g2).shift(du, dv) * flux
    for x, y in zip(x_list, y_list):
        bounds = galsim.BoundsI(int(x - 31), int(x + 32), int(y - 31), int(y + 32))
        offset = galsim.PositionD(x - int(x) - 0.5, y - int(y) - 0.5)
        psf.drawImage(image=image[bounds], method='no_pixel', offset=offset)
    image.addNoise(galsim.GaussianNoise(rng=galsim.BaseDeviate(1234), sigma=1e-6))

    # Write out the image to a file
    image_file = os.path.join('output', 'test_stats_image.fits')
    image.write(image_file)

    # Write out the catalog to a file
    dtype = [('x', 'f8'), ('y', 'f8')]
    data = np.empty(len(x_list), dtype=dtype)
    data['x'] = x_list
    data['y'] = y_list
    cat_file = os.path.join('output', 'test_stats_cat.fits')
    fitsio.write(cat_file, data, clobber=True)
def mk_wcs(self, center, theta=0,
           sky_center=galsim.CelestialCoord(ra=19.3 * galsim.degrees,
                                            dec=-33.1 * galsim.degrees)):
    '''Creates a WCS for an image

    Parameters
    ----------
    theta: float
        rotation angle for the image
    center: galsim.PositionD
        position of the reference pixel used as the center of the affine
        transform for the wcs
    sky_center: galsim.CelestialCoord
        Reference coordinates of the center of the image in celestial coordinates

    Returns
    -------
    wcs: WCS
    '''
    # Affine transformation
    dudx = np.cos(theta) * self.pix
    if theta == 0:
        dudy = 0
        dvdx = 0
    else:
        dudy = -np.sin(theta) * self.pix
        dvdx = np.sin(theta) * self.pix
    dvdy = np.cos(theta) * self.pix

    affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=center)

    # Creating WCS
    w = WCS.WCS(naxis=2)
    galsim_wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec)
    w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
    w.wcs.crpix = galsim_wcs.crpix
    w.wcs.pc = galsim_wcs.cd
    w.wcs.crval = [galsim_wcs.center._ra._rad, galsim_wcs.center._dec._rad]
    w.array_shape = self.shape
    return w
def make_sim_wcs(dim):
    dims = [dim] * 2
    cen = (np.array(dims) - 1) / 2
    image_origin = galsim.PositionD(x=cen[1], y=cen[0])

    mat = np.array(
        [[SCALE, 0.0],
         [0.0, SCALE]],
    )

    return galsim.TanWCS(
        affine=galsim.AffineTransform(
            mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1],
            origin=image_origin,
            world_origin=galsim.PositionD(0, 0),
        ),
        world_origin=WORLD_ORIGIN,
        units=galsim.arcsec,
    )
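# Usage sketch for make_sim_wcs.  It references module-level SCALE and WORLD_ORIGIN
# constants that are not shown in this snippet; the values below are assumptions made
# only so the example runs.
import galsim

SCALE = 0.2  # arcsec/pixel (assumed)
WORLD_ORIGIN = galsim.CelestialCoord(0 * galsim.degrees, 0 * galsim.degrees)  # assumed

wcs = make_sim_wcs(53)
print(wcs.toWorld(galsim.PositionD(26, 26)))  # ~WORLD_ORIGIN at the image center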
def test_invert_affine_transform_wcs():
    wcs = galsim.AffineTransform(
        dudx=0.5,
        dudy=-3.0,
        dvdx=0.7,
        dvdy=-10,
        origin=galsim.PositionD(x=9.3, y=-11),
        world_origin=galsim.PositionD(x=-900, y=100),
    )

    rng = np.random.RandomState(seed=10)
    u = rng.uniform(low=-100, high=100, size=10)
    v = rng.uniform(low=-100, high=100, size=10)

    x, y = invert_affine_transform_wcs(u, v, wcs)

    for i in range(10):
        world_pos = galsim.PositionD(x=u[i], y=v[i])
        pos = wcs.toImage(world_pos)
        assert np.allclose(pos.x, x[i])
        assert np.allclose(pos.y, y[i])
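# invert_affine_transform_wcs is not defined in this snippet.  A possible implementation
# consistent with the test above is sketched here (an assumption, not the actual code
# under test): invert the 2x2 Jacobian of the AffineTransform to map (u, v) back to (x, y).
import numpy as np

def invert_affine_transform_wcs(u, v, wcs):
    jac = np.array([[wcs.dudx, wcs.dudy],
                    [wcs.dvdx, wcs.dvdy]])
    jac_inv = np.linalg.inv(jac)
    du = np.asarray(u) - wcs.world_origin.x
    dv = np.asarray(v) - wcs.world_origin.y
    dxdy = np.dot(jac_inv, np.vstack([du, dv]))
    x = dxdy[0, :] + wcs.origin.x
    y = dxdy[1, :] + wcs.origin.y
    return x, y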
def make_wcs(*, scale, image_origin, world_origin, theta=None):
    """
    make and return a wcs object

    Parameters
    ----------
    scale: float
        Pixel scale
    image_origin: galsim.PositionD
        Image origin position
    world_origin: galsim.CelestialCoord
        Origin on the sky
    theta: float, optional
        Rotation angle in radians

    Returns
    -------
    A galsim wcs object, currently a TanWCS
    """
    mat = np.array(
        [[scale, 0.0],
         [0.0, scale]],
    )
    if theta is not None:
        costheta = np.cos(theta)
        sintheta = np.sin(theta)
        rot = np.array(
            [[costheta, -sintheta],
             [sintheta, costheta]],
        )
        mat = np.dot(mat, rot)

    return galsim.TanWCS(
        affine=galsim.AffineTransform(
            mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1],
            origin=image_origin,
        ),
        world_origin=world_origin,
        units=galsim.arcsec,
    )
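# Example call of make_wcs (the parameter values are illustrative):
import numpy as np
import galsim

wcs = make_wcs(
    scale=0.2,
    image_origin=galsim.PositionD(1024.5, 1024.5),
    world_origin=galsim.CelestialCoord(ra=19.3 * galsim.hours, dec=-33.1 * galsim.degrees),
    theta=np.pi / 4,
)
print(wcs.toWorld(galsim.PositionD(1024.5, 1024.5)))  # should be ~world_origin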
def make_stack_psf_wcs(*, dims, offset, jac, world_origin):
    """
    convert the galsim jacobian wcs to stack wcs for a tan projection

    Parameters
    ----------
    dims: (ny, nx)
        dims of the psf
    offset: seq or array
        xoffset, yoffset
    jac: galsim jacobian
        From the wcs
    world_origin: galsim.CelestialCoord
        origin of the wcs; get it from coadd_wcs.center
    """
    import galsim

    cy, cx = (np.array(dims) - 1) / 2
    cy += offset.y
    cx += offset.x
    origin = galsim.PositionD(x=cx, y=cy)

    tan_wcs = galsim.TanWCS(
        affine=galsim.AffineTransform(
            jac.dudx, jac.dudy, jac.dvdx, jac.dvdy,
            origin=origin,
            world_origin=galsim.PositionD(0, 0),
        ),
        world_origin=world_origin,
        units=galsim.arcsec,
    )

    return make_stack_wcs(tan_wcs)
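# The `jac` argument above is a local GalSim Jacobian.  One way to obtain it from an
# existing WCS is sketched here; the pixel scale and position are placeholders.
import galsim

se_wcs = galsim.PixelScale(0.263)   # placeholder WCS
jac = se_wcs.jacobian()             # JacobianWCS with dudx, dudy, dvdx, dvdy
# For a non-uniform WCS, evaluate it at an image position instead:
# jac = se_wcs.local(image_pos=galsim.PositionD(1024, 1024))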
def test_meds(): """ Create two objects, each with three exposures. Save them to a MEDS file. Load the MEDS file. Compare the created objects with the one read by MEDS. """ # initialise empty MultiExposureObject list objlist = [] # we will be using 2 objects for testing, each with 3 cutouts n_obj_test = 2 n_cut_test = 3 # set the image size box_size = 32 # first obj img11 = galsim.Image(box_size, box_size, init_value=111) img12 = galsim.Image(box_size, box_size, init_value=112) img13 = galsim.Image(box_size, box_size, init_value=113) seg11 = galsim.Image(box_size, box_size, init_value=121) seg12 = galsim.Image(box_size, box_size, init_value=122) seg13 = galsim.Image(box_size, box_size, init_value=123) wth11 = galsim.Image(box_size, box_size, init_value=131) wth12 = galsim.Image(box_size, box_size, init_value=132) wth13 = galsim.Image(box_size, box_size, init_value=133) psf11 = galsim.Image(box_size, box_size, init_value=141) psf12 = galsim.Image(box_size, box_size, init_value=142) psf13 = galsim.Image(box_size, box_size, init_value=143) dudx = 11.1 dudy = 11.2 dvdx = 11.3 dvdy = 11.4 x0 = 11.5 y0 = 11.6 wcs11 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) dudx = 12.1 dudy = 12.2 dvdx = 12.3 dvdy = 12.4 x0 = 12.5 y0 = 12.6 wcs12 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) dudx = 13.1 dudy = 13.2 dvdx = 13.3 dvdy = 13.4 x0 = 13.5 y0 = 13.6 wcs13 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) # create lists images = [img11, img12, img13] weight = [wth11, wth12, wth13] seg = [seg11, seg12, seg13] psf = [psf11, psf12, psf13] wcs = [wcs11, wcs12, wcs13] # create object obj1 = galsim.des.MultiExposureObject(images=images, weight=weight, seg=seg, psf=psf, wcs=wcs, id=1) # second obj img21 = galsim.Image(box_size, box_size, init_value=211) img22 = galsim.Image(box_size, box_size, init_value=212) img23 = galsim.Image(box_size, box_size, init_value=213) seg21 = galsim.Image(box_size, box_size, init_value=221) seg22 = galsim.Image(box_size, box_size, init_value=222) seg23 = galsim.Image(box_size, box_size, init_value=223) wth21 = galsim.Image(box_size, box_size, init_value=231) wth22 = galsim.Image(box_size, box_size, init_value=332) wth23 = galsim.Image(box_size, box_size, init_value=333) psf21 = galsim.Image(box_size, box_size, init_value=241) psf22 = galsim.Image(box_size, box_size, init_value=342) psf23 = galsim.Image(box_size, box_size, init_value=343) dudx = 21.1 dudy = 21.2 dvdx = 21.3 dvdy = 21.4 x0 = 21.5 y0 = 21.6 wcs21 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) dudx = 22.1 dudy = 22.2 dvdx = 22.3 dvdy = 22.4 x0 = 22.5 y0 = 22.6 wcs22 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) dudx = 23.1 dudy = 23.2 dvdx = 23.3 dvdy = 23.4 x0 = 23.5 y0 = 23.6 wcs23 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) # create lists images = [img21, img22, img23] weight = [wth21, wth22, wth23] seg = [seg21, seg22, seg23] psf = [psf21, psf22, psf23] wcs = [wcs21, wcs22, wcs23] # create object # This time put the wcs in the image and get it there. 
img21.wcs = wcs21 img22.wcs = wcs22 img23.wcs = wcs23 obj2 = galsim.des.MultiExposureObject(images=images, weight=weight, seg=seg, psf=psf, id=2) # create an object list objlist = [obj1, obj2] # save objects to MEDS file filename_meds = 'output/test_meds.fits' galsim.des.WriteMEDS(objlist, filename_meds, clobber=True) # Note that while there are no tests prior to this, the above still checks for # syntax errors in the meds creation software, so it's still worth running as part # of the normal unit tests. # But for the rest of the tests, we'll use the meds module to make sure our code # stays in sync with any changes there. try: import meds except ImportError: print('Failed to import meds. Unable to do tests of meds file.') return try: # Meds will import this, so check for this too. import fitsio except ImportError: print('Failed to import fitsio. Unable to do tests of meds file.') return # Run meds module's validate function try: meds.util.validate_meds(filename_meds) except AttributeError: print( 'Seems to be the wrong meds package. Unable to do tests of meds file.' ) return m = meds.MEDS(filename_meds) # Check the image_info extension: ref_info = meds.util.get_image_info_dtype(1) info = m.get_image_info() for name, dt in ref_info: dt = numpy.dtype(dt) print(name, dt, info.dtype[name], dt.char, info.dtype[name].char) assert name in info.dtype.names, "column %s not present in image_info extension" % name assert dt.char == info.dtype[ name].char, "column %s is the wrong type" % name # Check the basic structure of the object_data extension cat = m.get_cat() ref_data = meds.util.get_meds_output_dtype(1) for tup in ref_data: # Some of these tuples have 3 items, not 2. The last two are the full dtype tuple. name = tup[0] if len(tup) == 2: dt = tup[1] else: dt = tup[1:] dt = numpy.dtype(dt) print(name, dt, cat.dtype[name], dt.char, cat.dtype[name].char) assert name in cat.dtype.names, "column %s not present in object_data extension" % name assert dt.char == cat.dtype[ name].char, "column %s is the wrong type" % name # Check that we have the right number of objects. n_obj = len(cat) print('number of objects is %d' % n_obj) numpy.testing.assert_equal(n_obj, n_obj_test, err_msg="MEDS file has wrong number of objects") # loop over objects and exposures - test get_cutout for iobj in range(n_obj): # check ID is correct numpy.testing.assert_equal( cat['id'][iobj], iobj + 1, err_msg="MEDS file has wrong id for object %d" % iobj) # get number of cutouts and check if it's right n_cut = cat['ncutout'][iobj] numpy.testing.assert_equal( n_cut, n_cut_test, err_msg="MEDS file has wrong ncutout for object %d" % iobj) # loop over cutouts for icut in range(n_cut): # get the images etc to compare with originals img = m.get_cutout(iobj, icut, type='image') wth = m.get_cutout(iobj, icut, type='weight') seg = m.get_cutout(iobj, icut, type='seg') psf = m.get_psf(iobj, icut) wcs_meds = m.get_jacobian(iobj, icut) # Note: col == x, row == y. 
wcs_array_meds = numpy.array([ wcs_meds['dudcol'], wcs_meds['dudrow'], wcs_meds['dvdcol'], wcs_meds['dvdrow'], wcs_meds['col0'], wcs_meds['row0'] ]) # compare numpy.testing.assert_array_equal( img, objlist[iobj].images[icut].array, err_msg="MEDS cutout has wrong img for object %d" % iobj) numpy.testing.assert_array_equal( wth, objlist[iobj].weight[icut].array, err_msg="MEDS cutout has wrong wth for object %d" % iobj) numpy.testing.assert_array_equal( seg, objlist[iobj].seg[icut].array, err_msg="MEDS cutout has wrong seg for object %d" % iobj) numpy.testing.assert_array_equal( psf, objlist[iobj].psf[icut].array, err_msg="MEDS cutout has wrong psf for object %d" % iobj) wcs_orig = objlist[iobj].wcs[icut] wcs_array_orig = numpy.array([ wcs_orig.dudx, wcs_orig.dudy, wcs_orig.dvdx, wcs_orig.dvdy, wcs_orig.origin.x, wcs_orig.origin.y ]) numpy.testing.assert_array_equal( wcs_array_meds, wcs_array_orig, err_msg="MEDS cutout has wrong wcs for object %d" % iobj) # get the mosaic to compare with originals img = m.get_mosaic(iobj, type='image') wth = m.get_mosaic(iobj, type='weight') seg = m.get_mosaic(iobj, type='seg') # There is currently no get_mosaic option for the psfs. #psf = m.get_mosaic( iobj, type='psf') psf = numpy.concatenate( [m.get_psf(iobj, icut) for icut in range(n_cut)], axis=0) # get the concatenated images - create the true mosaic true_mosaic_img = numpy.concatenate( [x.array for x in objlist[iobj].images], axis=0) true_mosaic_wth = numpy.concatenate( [x.array for x in objlist[iobj].weight], axis=0) true_mosaic_seg = numpy.concatenate( [x.array for x in objlist[iobj].seg], axis=0) true_mosaic_psf = numpy.concatenate( [x.array for x in objlist[iobj].psf], axis=0) # compare numpy.testing.assert_array_equal( true_mosaic_img, img, err_msg="MEDS mosaic has wrong img for object %d" % iobj) numpy.testing.assert_array_equal( true_mosaic_wth, wth, err_msg="MEDS mosaic has wrong wth for object %d" % iobj) numpy.testing.assert_array_equal( true_mosaic_seg, seg, err_msg="MEDS mosaic has wrong seg for object %d" % iobj) numpy.testing.assert_array_equal( true_mosaic_psf, psf, err_msg="MEDS mosaic has wrong psf for object %d" % iobj)
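# As a side note, the jacobian dict returned by m.get_jacobian can be turned back into a
# GalSim WCS.  A sketch under the col == x, row == y convention used in the comparison
# above (not part of the original test):
import galsim

def jacobian_dict_to_affine(jac):
    return galsim.AffineTransform(
        jac['dudcol'], jac['dudrow'],
        jac['dvdcol'], jac['dvdrow'],
        origin=galsim.PositionD(jac['col0'], jac['row0']))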
def test_meds(): """ Create two objects, each with two exposures. Save them to a MEDS file. Load the MEDS file. Compare the created objects with the one read by MEDS. """ # initialise empty MultiExposureObject list objlist = [] # we will be using 2 objects for testing, each with 2 cutouts n_obj_test = 2 n_cut_test = 2 # set the image size box_size = 32 # first obj img11 = galsim.Image(box_size, box_size, init_value=111) img12 = galsim.Image(box_size, box_size, init_value=112) seg11 = galsim.Image(box_size, box_size, init_value=121) seg12 = galsim.Image(box_size, box_size, init_value=122) wth11 = galsim.Image(box_size, box_size, init_value=131) wth12 = galsim.Image(box_size, box_size, init_value=132) dudx = 11.1; dudy = 11.2; dvdx = 11.3; dvdy = 11.4; x0 = 11.5; y0 = 11.6; wcs11 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) dudx = 12.1; dudy = 12.2; dvdx = 12.3; dvdy = 12.4; x0 = 12.5; y0 = 12.6; wcs12 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) # create lists images = [img11, img12] weights = [wth11, wth12] segs = [seg11, seg12] wcs = [wcs11, wcs12] # create object obj1 = galsim.des.MultiExposureObject(images=images, weights=weights, segs=segs, wcs=wcs, id=1) # second obj img21 = galsim.Image(box_size, box_size, init_value=211) img22 = galsim.Image(box_size, box_size, init_value=212) seg21 = galsim.Image(box_size, box_size, init_value=221) seg22 = galsim.Image(box_size, box_size, init_value=222) wth21 = galsim.Image(box_size, box_size, init_value=231) wth22 = galsim.Image(box_size, box_size, init_value=332) dudx = 21.1; dudy = 21.2; dvdx = 21.3; dvdy = 21.4; x0 = 21.5; y0 = 21.6; wcs21 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) dudx = 22.1; dudy = 22.2; dvdx = 22.3; dvdy = 22.4; x0 = 22.5; y0 = 22.6; wcs22 = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, galsim.PositionD(x0, y0)) # create lists images = [img21, img22] weights = [wth21, wth22] segs = [seg21, seg22] wcs = [wcs22, wcs22] # create object obj2 = galsim.des.MultiExposureObject(images=images, weights=weights, segs=segs, wcs=wcs, id=2) # create an object list objlist = [obj1, obj2] # save objects to MEDS file filename_meds = 'test_meds.fits' galsim.des.write_meds(filename_meds, objlist, clobber=True) print 'wrote MEDS file %s ' % filename_meds # test functions in des_meds.py print 'reading %s' % filename_meds import meds m = meds.MEDS(filename_meds) # get the catalog cat = m.get_cat() # get number of objects n_obj = len(cat) # check if the number of objects is correct numpy.testing.assert_equal(n_obj,n_obj_test) print 'number of objects is %d' % n_obj print 'testing if loaded images are the same as original images' # loop over objects and exposures - test get_cutout for iobj in range(n_obj): # check ID is correct numpy.testing.assert_equal(cat['id'][iobj], iobj+1) # get number of cutouts and check if it's right n_cut = cat['ncutout'][iobj] numpy.testing.assert_equal(n_cut,n_cut_test) # loop over cutouts for icut in range(n_cut): # get the images etc to compare with originals img = m.get_cutout( iobj, icut, type='image') wth = m.get_cutout( iobj, icut, type='weight') seg = m.get_cutout( iobj, icut, type='seg') wcs_meds = m.get_jacobian(iobj, icut) wcs_array_meds= numpy.array( [ wcs_meds['dudrow'], wcs_meds['dudcol'], wcs_meds['dvdrow'], wcs_meds['dvdcol'], wcs_meds['row0'], wcs_meds['col0'] ] ) # compare numpy.testing.assert_array_equal(img, objlist[iobj].images[icut].array) numpy.testing.assert_array_equal(wth, 
objlist[iobj].weights[icut].array) numpy.testing.assert_array_equal(seg, objlist[iobj].segs[icut].array) wcs_orig = objlist[iobj].wcs[icut] wcs_array_orig = numpy.array( [ wcs_orig.dudx, wcs_orig.dudy, wcs_orig.dvdx, wcs_orig.dvdy, wcs_orig.origin.x, wcs_orig.origin.y ]) numpy.testing.assert_array_equal(wcs_array_meds, wcs_array_orig) print 'test passed get_cutout obj=%d icut=%d' % (iobj, icut) # loop over objects - test get_mosaic for iobj in range(n_obj): # get the mosaic to compare with originals img = m.get_mosaic( iobj, type='image') wth = m.get_mosaic( iobj, type='weight') seg = m.get_mosaic( iobj, type='seg') # get the concatenated images - create the true mosaic true_mosaic_img = numpy.concatenate([x.array for x in objlist[iobj].images], axis=0) true_mosaic_wth = numpy.concatenate([x.array for x in objlist[iobj].weights], axis=0) true_mosaic_seg = numpy.concatenate([x.array for x in objlist[iobj].segs], axis=0) # compare numpy.testing.assert_array_equal(true_mosaic_img, img) numpy.testing.assert_array_equal(true_mosaic_wth, wth) numpy.testing.assert_array_equal(true_mosaic_seg, seg) print 'test passed get_mosaic for obj=%d' % (iobj) print 'all asserts succeeded'
def gen_affine_wcs(*, rng, position_angle_range, dither_scale, scale,
                   scale_frac_std, shear_std, world_origin, origin):
    """Generate a random AffineTransform WCS.

    Parameters
    ----------
    rng : np.random.RandomState
        An RNG to use to generate the random WCS.
    position_angle_range : 2-tuple of floats
        The range of position angles to select from for rotating the image
        WCS coordinates.
    dither_scale : float
        The scale for the dither in units of the mean pixel scale.
    scale : float
        The mean pixel scale of the image.
    scale_frac_std : float
        The fractional standard deviation of the generated image pixel scale.
    shear_std : float
        The standard deviation of the Gaussian shear put into the WCS.
    world_origin : galsim.PositionD
        The location of the origin of the image coordinate system in the
        world coordinate system.
    origin : galsim.PositionD
        The location of the origin of the world coordinate system in the
        image coordinate system.  Note that the image origin is dithered if
        requested to keep the world origin fixed.

    Returns
    -------
    wcs : galsim.AffineTransform
        The randomly generated AffineTransform WCS object.
    """
    # an SE wcs is generated from
    #   1) a pixel scale
    #   2) a shear
    #   3) a rotation angle
    #   4) a dither in the u,v plane of the location of the image origin
    g1 = rng.normal() * shear_std
    g2 = rng.normal() * shear_std
    scale = (1.0 + rng.normal() * scale_frac_std) * scale
    theta = rng.uniform(low=position_angle_range[0],
                        high=position_angle_range[1]) / 180.0 * np.pi
    dither_range = (-dither_scale / 2, dither_scale / 2)
    dither_u = rng.uniform(low=dither_range[0], high=dither_range[1]) * scale
    dither_v = rng.uniform(low=dither_range[0], high=dither_range[1]) * scale

    costheta = np.cos(theta)
    sintheta = np.sin(theta)
    jac_matrix = scale * np.dot(
        galsim.Shear(g1=g1, g2=g2).getMatrix(),
        np.array([[costheta, -sintheta], [sintheta, costheta]]))
    dudx = jac_matrix[0, 0]
    dudy = jac_matrix[0, 1]
    dvdx = jac_matrix[1, 0]
    dvdy = jac_matrix[1, 1]

    dxdy = np.dot(np.linalg.inv(jac_matrix), np.array([dither_u, dither_v]))

    wcs = galsim.AffineTransform(
        dudx, dudy, dvdx, dvdy,
        origin=origin + galsim.PositionD(x=dxdy[0], y=dxdy[1]),
        world_origin=world_origin,
    )

    LOGGER.debug("generated wcs: %s", wcs)

    return wcs
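# Illustrative call of gen_affine_wcs.  The parameter values are assumptions, not taken
# from the source, and the defining module's own imports (np, galsim, LOGGER) are assumed
# to be in scope.
import numpy as np
import galsim

rng = np.random.RandomState(seed=42)
wcs = gen_affine_wcs(
    rng=rng,
    position_angle_range=(0.0, 360.0),
    dither_scale=0.5,
    scale=0.263,
    scale_frac_std=0.01,
    shear_std=0.01,
    world_origin=galsim.PositionD(0, 0),
    origin=galsim.PositionD(1024.5, 1024.5),
)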
def main(argv): """ Make images using model PSFs and galaxy cluster shear: - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles (like in demo10) and parametric fits to those profiles. We chose parametric fits since these are required for chromatic galaxies (ones with filter response included) - The real galaxy images include some initial correlated noise from the original HST observation, which would need to be whitened. But we are using parametric galaxies, so this isn't a concern. """ global logger logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout) logger = logging.getLogger("mock_superbit_data") M = MPIHelper() # Define some parameters we'll use below. sbparams = SuperBITParameters(argv=argv) # Set up the NFWHalo: nfw = galsim.NFWHalo(mass=sbparams.mass, conc=sbparams.nfw_conc, redshift=sbparams.nfw_z_halo, omega_m=sbparams.omega_m, omega_lam=sbparams.omega_lam) logger.info('Set up NFW halo for lensing') # Read in galaxy catalog, as well as catalog containing # information from COSMOS fits like redshifts, hlr, etc. cosmos_cat = galsim.COSMOSCatalog(sbparams.cat_file_name, dir=sbparams.cosmosdir) fitcat = Table.read( os.path.join(os.path.join(sbparams.cosmosdir, sbparams.fit_file_name))) logger.info('Read in %d galaxies from catalog and associated fit info', cosmos_cat.nobjects) cluster_cat = galsim.COSMOSCatalog(sbparams.cluster_cat_name) print('Read in %d cluster galaxies from catalog' % cosmos_cat.nobjects) ### Now create PSF. First, define Zernicke polynomial component ### note: aberrations were definined for lam = 550, and close to the ### center of the camera. The PSF degrades at the edge of the FOV lam_over_diam = sbparams.lam * 1.e-9 / sbparams.tel_diam # radians lam_over_diam *= 206265. aberrations = numpy.zeros(38) # Set the initial size. aberrations[0] = 0. 
# First entry must be zero aberrations[1] = -0.00305127 aberrations[4] = -0.02474205 # Noll index 4 = Defocus aberrations[11] = -0.01544329 # Noll index 11 = Spherical aberrations[22] = 0.00199235 aberrations[26] = 0.00000017 aberrations[37] = 0.00000004 logger.info('Calculated lambda over diam = %f arcsec', lam_over_diam) # will store the Zernicke component of the PSF optics = galsim.OpticalPSF(lam=sbparams.lam, diam=sbparams.tel_diam, obscuration=sbparams.obscuration, nstruts=sbparams.nstruts, strut_angle=sbparams.strut_angle, strut_thick=sbparams.strut_thick, aberrations=aberrations) logger.info('Made telescope PSF profile') # load SuperBIT bandpass bandpass = galsim.Bandpass(sbparams.bp_file, wave_type='nm', blue_limit=310, red_limit=1100) ### ### LOOP OVER PSFs TO MAKE GROUPS OF IMAGES ### WITHIN EACH PSF, ITERATE n TIMES TO MAKE n SEPARATE IMAGES ### #all_psfs=glob.glob(sbparams.psf_path+"/*121*.psf") logger.info('Beginning loop over jitter/optical psfs') for im in np.arange(1): for i in numpy.arange(1, sbparams.nexp + 1): # get MPI processes in sync at start of each image M.barrier() logger.info('Beginning loop %d' % i) #rng = galsim.BaseDeviate(sbparams.noise_seed+i) try: timescale = str(sbparams.exp_time) outname = ''.join( ['superbit_gaussPSF_', str(i).zfill(3), '.fits']) truth_file_name = ''.join([ sbparams.outdir, '/truth_gaussPSF_', str(i).zfill(3), '.dat' ]) file_name = os.path.join(sbparams.outdir, outname) except galsim.errors.GalSimError: print("naming failed, check path") pdb.set_trace() # Setting up a truth catalog names = [ 'gal_num', 'x_image', 'y_image', 'ra', 'dec', 'g1_meas', 'g2_meas', 'nfw_mu', 'redshift', 'flux', 'truth_fwhm', 'truth_mom' ] types = [ int, float, float, float, float, float, float, float, float, float, float, float ] truth_catalog = galsim.OutputCatalog(names, types) # Set up the image: full_image = galsim.ImageF(sbparams.image_xsize, sbparams.image_ysize) sky_level = sbparams.exp_time * sbparams.sky_bkg # fill with sky_level moved until after MPI results summed full_image.fill(sky_level) full_image.setOrigin(0, 0) # We keep track of how much noise is already in the image from the RealGalaxies. noise_image = galsim.ImageF(sbparams.image_xsize, sbparams.image_ysize) noise_image.setOrigin(0, 0) # If you wanted to make a non-trivial WCS system, could set theta to a non-zero number theta = 0.0 * galsim.degrees dudx = numpy.cos(theta) * sbparams.pixel_scale dudy = -numpy.sin(theta) * sbparams.pixel_scale dvdx = numpy.sin(theta) * sbparams.pixel_scale dvdy = numpy.cos(theta) * sbparams.pixel_scale image_center = full_image.true_center affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center) sky_center = galsim.CelestialCoord(ra=sbparams.center_ra, dec=sbparams.center_dec) wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) full_image.wcs = wcs # Now let's read in the PSFEx PSF model. We read the image directly into an # InterpolatedImage GSObject, so we can manipulate it as needed psf_wcs = wcs #psf = galsim.des.DES_PSFEx(psf_filen,wcs=psf_wcs) logger.info('Constructed PSF object from PSFEx file') ##### ## Loop over galaxy objects: ##### # get local range to iterate over in this process local_start, local_end = M.mpi_local_range(sbparams.nobj) for k in range(local_start, local_end): time1 = time.time() # The usual random number generator using a different seed for each galaxy. 
ud = galsim.UniformDeviate(sbparams.galobj_seed + k + 1) try: # make single galaxy object stamp, truth = make_a_galaxy(ud=ud, wcs=wcs, affine=affine, fitcat=fitcat, cosmos_cat=cosmos_cat, optics=optics, nfw=nfw, bandpass=bandpass, sbparams=sbparams) # Find the overlapping bounds: bounds = stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. # noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. full_image[bounds] += stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info( 'Galaxy %d positioned relative to center t=%f s', k, tot_time) this_flux = numpy.sum(stamp.array) row = [ k, truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu, truth.z, this_flux, truth.fwhm, truth.mom_size ] truth_catalog.addRow(row) except galsim.errors.GalSimError: logger.info('Galaxy %d has failed, skipping...', k) ##### ### Inject cluster galaxy objects: ##### center_coords = galsim.CelestialCoord(sbparams.center_ra, sbparams.center_dec) centerpix = wcs.toImage(center_coords) # get local range to iterate over in this process local_start, local_end = M.mpi_local_range(sbparams.nclustergal) for k in range(local_start, local_end): time1 = time.time() # The usual random number generator using a different seed for each galaxy. ud = galsim.UniformDeviate(sbparams.cluster_seed + k + 1) try: # make single galaxy object cluster_stamp, truth = make_cluster_galaxy( ud=ud, wcs=wcs, affine=affine, centerpix=centerpix, cluster_cat=cluster_cat, optics=optics, bandpass=bandpass, sbparams=sbparams) # Find the overlapping bounds: bounds = cluster_stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. #noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. full_image[bounds] += cluster_stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info( 'Cluster galaxy %d positioned relative to center t=%f s', k, tot_time) this_flux = numpy.sum(stamp.array) row = [ k, truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu, truth.z, this_flux, truth.fwhm, truth.mom_size ] truth_catalog.addRow(row) except galsim.errors.GalSimError: logger.info('Cluster galaxy %d has failed, skipping...', k) ##### ### Now repeat process for stars! ##### # get local range to iterate over in this process local_start, local_end = M.mpi_local_range(sbparams.nstars) for k in range(local_start, local_end): time1 = time.time() ud = galsim.UniformDeviate(sbparams.stars_seed + k + 1) star_stamp, truth = make_a_star(ud=ud, wcs=wcs, affine=affine, optics=optics, sbparams=sbparams) bounds = star_stamp.bounds & full_image.bounds # Add the stamp to the full image. 
try: full_image[bounds] += star_stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info( 'Star %d: positioned relative to center, t=%f s', k, tot_time) this_flux = numpy.sum(star_stamp.array) row = [ k, truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu, truth.z, this_flux, truth.fwhm, truth.mom_size ] truth_catalog.addRow(row) except galsim.errors.GalSimError: logger.info('Star %d has failed, skipping...', k) # Gather results from MPI processes, reduce to single result on root # Using same names on left and right sides is hiding lots of MPI magic full_image = M.gather(full_image) truth_catalog = M.gather(truth_catalog) #noise_image = M.gather(noise_image) if M.is_mpi_root(): full_image = reduce(combine_images, full_image) truth_catalog = reduce(combine_catalogs, truth_catalog) #noise_image = reduce(combine_images, noise_image) else: # do the adding of noise and writing to disk entirely on root # root and the rest meet again at barrier at start of loop continue # The first thing to do is to make the Gaussian noise uniform across the whole image. # If real-type COSMOS galaxies are used, the noise across the image won't be uniform. Since this code is # using parametric-type galaxies, the following section is commented out. # max_current_variance = numpy.max(noise_image.array) # noise_image = max_current_variance - noise_image # The first thing to do is to make the Gaussian noise uniform across the whole image. # Add dark current logger.info('Adding Dark current') dark_noise = sbparams.dark_current * sbparams.exp_time # np.random.normal( # sbparams.dark_current, sbparams.dark_current_std, # size=(sbparams.image_ysize, sbparams.image_xsize)) * sbparams.exp_time # dark_noise = np.clip(dark_noise, a_min=0, a_max=2**16) full_image += dark_noise # Add ccd noise; removed rng in noise logger.info('Adding CCD noise') noise = galsim.CCDNoise(sky_level=0, gain=1 / sbparams.gain, read_noise=sbparams.read_noise) full_image.addNoise(noise) logger.debug('Added noise to final output image') if not os.path.exists(os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) full_image.write(file_name) # Write truth catalog to file. truth_catalog.write(truth_file_name) logger.info('Wrote image to %r', file_name) logger.info(' ') logger.info('completed run %d', im) i = i + 1 logger.info(' ') logger.info(' ') logger.info('completed all images') logger.info(' ')
def test_draw(): """Test the various options of the PSF.draw command. """ if __name__ == '__main__': logger = piff.config.setup_logger(verbose=2) else: logger = piff.config.setup_logger(log_file='output/test_draw.log') # Use an existing Piff solution to match as closely as possible how users would actually # use this function. psf = piff.read('input/test_single_py27.piff', logger=logger) # Data that was used to make that file. wcs = galsim.TanWCS( galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)), galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees)) data = fitsio.read('input/test_single_cat1.fits') field_center = galsim.CelestialCoord(0 * galsim.degrees, -25 * galsim.degrees) chipnum = 1 for k in range(len(data)): x = data['x'][k] y = data['y'][k] e1 = data['e1'][k] e2 = data['e2'][k] s = data['s'][k] print('k,x,y = ', k, x, y) #print(' true s,e1,e2 = ',s,e1,e2) # First, the same test with this file that is in test_wcs.py:test_pickle() image_pos = galsim.PositionD(x, y) star = piff.Star.makeTarget(x=x, y=y, wcs=wcs, stamp_size=48, pointing=field_center, chipnum=chipnum) star = psf.drawStar(star) #print(' fitted s,e1,e2 = ',star.fit.params) np.testing.assert_almost_equal(star.fit.params, [s, e1, e2], decimal=6) # Now use the regular PSF.draw() command. This version is equivalent to the above. # (It's not equal all the way to machine precision, but pretty close.) im1 = psf.draw(x, y, chipnum, stamp_size=48) np.testing.assert_allclose(im1.array, star.data.image.array, rtol=1.e-14, atol=1.e-14) # The wcs in the image is the wcs of the original image assert im1.wcs == psf.wcs[1] # The image is 48 x 48 assert im1.array.shape == (48, 48) # The bounds are centered close to x,y. Within 0.5 pixel. np.testing.assert_allclose(im1.bounds.true_center.x, x, atol=0.5) np.testing.assert_allclose(im1.bounds.true_center.y, y, atol=0.5) # This version draws the star centered at (x,y). Check the hsm centroid. hsm = im1.FindAdaptiveMom() #print('hsm = ',hsm) np.testing.assert_allclose(hsm.moments_centroid.x, x, atol=0.01) np.testing.assert_allclose(hsm.moments_centroid.y, y, atol=0.01) # The total flux should be close to 1. np.testing.assert_allclose(im1.array.sum(), 1.0, rtol=1.e-3) # We can center the star at an arbitrary location on the image. # The default is equivalent to center=(x,y). So check that this is equivalent. # Also, 48 is the default stamp size, so that can be omitted here. im2 = psf.draw(x, y, chipnum, center=(x, y)) assert im2.bounds == im1.bounds np.testing.assert_allclose(im2.array, im1.array, rtol=1.e-14, atol=1.e-14) # Moving by an integer number of pixels should be very close to the same image # over a different slice of the array. im3 = psf.draw(x, y, chipnum, center=(x + 1, y + 3)) assert im3.bounds == im1.bounds # (Remember -- numpy indexing is y,x!) # Also, the FFTs will be different in detail, so only match to 1.e-6. #print('im1 argmax = ',np.unravel_index(np.argmax(im1.array),im1.array.shape)) #print('im3 argmax = ',np.unravel_index(np.argmax(im3.array),im3.array.shape)) np.testing.assert_allclose(im3.array[3:, 1:], im1.array[:-3, :-1], rtol=1.e-6, atol=1.e-6) hsm = im3.FindAdaptiveMom() np.testing.assert_allclose(hsm.moments_centroid.x, x + 1, atol=0.01) np.testing.assert_allclose(hsm.moments_centroid.y, y + 3, atol=0.01) # Can center at other locations, and the hsm centroids should come out centered pretty # close to that location. # (Of course the array will be different here, so can't test that.) 
im4 = psf.draw(x, y, chipnum, center=(x + 1.3, y - 0.8)) assert im4.bounds == im1.bounds hsm = im4.FindAdaptiveMom() np.testing.assert_allclose(hsm.moments_centroid.x, x + 1.3, atol=0.01) np.testing.assert_allclose(hsm.moments_centroid.y, y - 0.8, atol=0.01) # Also allowed is center=True to place in the center of the image. im5 = psf.draw(x, y, chipnum, center=True) assert im5.bounds == im1.bounds assert im5.array.shape == (48, 48) np.testing.assert_allclose(im5.bounds.true_center.x, x, atol=0.5) np.testing.assert_allclose(im5.bounds.true_center.y, y, atol=0.5) np.testing.assert_allclose(im5.array.sum(), 1., rtol=1.e-3) hsm = im5.FindAdaptiveMom() center = im5.true_center np.testing.assert_allclose(hsm.moments_centroid.x, center.x, atol=0.01) np.testing.assert_allclose(hsm.moments_centroid.y, center.y, atol=0.01) # Some invalid ways to try to do this. (Must be either True or a tuple.) np.testing.assert_raises(ValueError, psf.draw, x, y, chipnum, center='image') np.testing.assert_raises(ValueError, psf.draw, x, y, chipnum, center=im5.true_center) # If providing your own image with bounds far away from the star (say centered at 0), # then center=True works fine to draw in the center of that image. im6 = im5.copy() im6.setCenter(0, 0) psf.draw(x, y, chipnum, center=True, image=im6) assert im6.bounds.center == galsim.PositionI(0, 0) np.testing.assert_allclose(im6.array.sum(), 1., rtol=1.e-3) hsm = im6.FindAdaptiveMom() center = im6.true_center np.testing.assert_allclose(hsm.moments_centroid.x, center.x, atol=0.01) np.testing.assert_allclose(hsm.moments_centroid.y, center.y, atol=0.01) np.testing.assert_allclose(im6.array, im5.array, rtol=1.e-14, atol=1.e-14) # Check non-even stamp size. Also, not unit flux while we're at it. im7 = psf.draw(x, y, chipnum, center=(x + 1.3, y - 0.8), stamp_size=43, flux=23.7) assert im7.array.shape == (43, 43) np.testing.assert_allclose(im7.bounds.true_center.x, x, atol=0.5) np.testing.assert_allclose(im7.bounds.true_center.y, y, atol=0.5) np.testing.assert_allclose(im7.array.sum(), 23.7, rtol=1.e-3) hsm = im7.FindAdaptiveMom() np.testing.assert_allclose(hsm.moments_centroid.x, x + 1.3, atol=0.01) np.testing.assert_allclose(hsm.moments_centroid.y, y - 0.8, atol=0.01) # Can't do mixed even/odd shape with stamp_size, but it will respect a provided image. im8 = galsim.Image(43, 44) im8.setCenter( x, y ) # It will respect the given bounds, so put it near the right place. psf.draw(x, y, chipnum, center=(x + 1.3, y - 0.8), image=im8, flux=23.7) assert im8.array.shape == (44, 43) np.testing.assert_allclose(im8.array.sum(), 23.7, rtol=1.e-3) hsm = im8.FindAdaptiveMom() np.testing.assert_allclose(hsm.moments_centroid.x, x + 1.3, atol=0.01) np.testing.assert_allclose(hsm.moments_centroid.y, y - 0.8, atol=0.01) # The offset parameter can add an additional to whatever center is used. # Here center=None, so this is equivalent to im4 above. im9 = psf.draw(x, y, chipnum, offset=(1.3, -0.8)) assert im9.bounds == im1.bounds hsm = im9.FindAdaptiveMom() np.testing.assert_allclose(im9.array, im4.array, rtol=1.e-14, atol=1.e-14) # With both, they are effectively added together. Not sure if there would be a likely # use for this, but it's allowed. (The above with default center is used in unit # tests a number of times, so that version at least is useful if only for us. # I'm hard pressed to imaging end users wanting to specify things this way though.) 
im10 = psf.draw(x, y, chipnum, center=(x + 0.8, y - 0.3), offset=(0.5, -0.5)) assert im10.bounds == im1.bounds np.testing.assert_allclose(im10.array, im4.array, rtol=1.e-14, atol=1.e-14)
def main(argv): """ Make images using constant PSF and variable shear: - The main image is 2048 x 2048 pixels. - Pixel scale is 0.2 arcsec/pixel, hence the image is about 0.11 degrees on a side. - Applied shear is from a cosmological power spectrum read in from file. - The PSF is a real one from SDSS, and corresponds to a convolution of atmospheric PSF, optical PSF, and pixel response, which has been sampled at pixel centers. We used a PSF from SDSS in order to have a PSF profile that could correspond to what you see with a real telescope. However, in order that the galaxy resolution not be too poor, we tell GalSim that the pixel scale for that PSF image is 0.2" rather than 0.396". We are simultaneously lying about the intrinsic size of the PSF and about the pixel scale when we do this. - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles (like in demo10) and parametric fits to those profiles. We choose 30% of the galaxies to use the images, and the other 60% to use the parametric fits - The real galaxy images include some initial correlated noise from the original HST observation. However, we whiten the noise of the final image so the final image has stationary Gaussian noise, rather than correlated noise. """ logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout) logger = logging.getLogger("demo11") # Define some parameters we'll use below. # Normally these would be read in from some parameter file. pixel_scale = 0.2 # arcsec/pixel image_size = 2048 # size of image in pixels image_size_arcsec = image_size * pixel_scale # size of big image in each dimension (arcsec) noise_variance = 5.e4 # ADU^2 (Just use simple Gaussian noise here.) nobj = 288 # number of galaxies in entire field # (This corresponds to 8 galaxies / arcmin^2) grid_spacing = 90.0 # The spacing between the samples for the power spectrum # realization (arcsec) tel_diam = 4 # Let's figure out the flux for a 4 m class telescope exp_time = 300 # exposing for 300 seconds. center_ra = 19.3 * galsim.hours # The RA, Dec of the center of the image on the sky center_dec = -33.1 * galsim.degrees # The catalog returns objects that are appropriate for HST in 1 second exposures. So for our # telescope we scale up by the relative area and exposure time. Note that what is important is # the *effective* area after taking into account obscuration. For HST, the telescope diameter # is 2.4 but there is obscuration (a linear factor of 0.33). Here, we assume that the telescope # we're simulating effectively has no obscuration factor. We're also ignoring the pi/4 factor # since it appears in the numerator and denominator, so we use area = diam^2. hst_eff_area = 2.4**2 * (1. - 0.33**2) flux_scaling = (tel_diam**2 / hst_eff_area) * exp_time # random_seed is used for both the power spectrum realization and the random properties # of the galaxies. random_seed = 24783923 file_name = os.path.join('output', 'tabulated_power_spectrum.fits.fz') logger.info('Starting demo script 11') # Read in galaxy catalog # The COSMOSCatalog uses the same input file as we have been using for RealGalaxyCatalogs # along with a second file called real_galaxy_catalog_23.5_examples_fits.fits, which stores # the information about the parameteric fits. There is no need to specify the second file # name, since the name is derivable from the name of the main catalog. if True: # The catalog we distribute with the GalSim code only has 100 galaxies. # The galaxies will typically be reused several times here. 
cat_file_name = 'real_galaxy_catalog_23.5_example.fits' dir = 'data' cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir) else: # If you've run galsim_download_cosmos, you can leave out the cat_file_name and dir # to use the full COSMOS catalog with 56,000 galaxies in it. cosmos_cat = galsim.COSMOSCatalog() logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects) # Setup the PowerSpectrum object we'll be using: # To do this, we first have to read in the tabulated shear power spectrum, often denoted # C_ell(ell), where ell has units of inverse angle and C_ell has units of angle^2. However, # GalSim works in the flat-sky approximation, so we use this notation interchangeably with # P(k). GalSim does not calculate shear power spectra for users, who must be able to provide # their own (or use the examples in the repository). # # Here we use a tabulated power spectrum from iCosmo (http://icosmo.org), with the following # cosmological parameters and survey design: # H_0 = 70 km/s/Mpc # Omega_m = 0.25 # Omega_Lambda = 0.75 # w_0 = -1.0 # w_a = 0.0 # n_s = 0.96 # sigma_8 = 0.8 # Smith et al. prescription for the non-linear power spectrum. # Eisenstein & Hu transfer function with wiggles. # Default dN/dz with z_med = 1.0 # The file has, as required, just two columns which are k and P(k). However, iCosmo works in # terms of ell and C_ell; ell is inverse radians and C_ell in radians^2. Since GalSim tends to # work in terms of arcsec, we have to tell it that the inputs are radians^-1 so it can convert # to store in terms of arcsec^-1. pk_file = os.path.join('data', 'cosmo-fid.zmed1.00.out') ps = galsim.PowerSpectrum(pk_file, units=galsim.radians) # The argument here is "e_power_function" which defines the E-mode power to use. logger.info('Set up power spectrum from tabulated P(k)') # Now let's read in the PSF. It's a real SDSS PSF, which means pixel scale of 0.396". However, # the typical seeing is 1.2" and we want to simulate better seeing, so we will just tell GalSim # that the pixel scale is 0.2". We have to be careful with SDSS PSF images, as they have an # added 'soft bias' of 1000 which has been removed before creation of this file, so that the sky # level is properly zero. Also, the file is bzipped, to demonstrate the ability of GalSim # handle this kind of compressed file (among others). We read the image directly into an # InterpolatedImage GSObject, so we can manipulate it as needed (here, the only manipulation # needed is convolution). The flux is 1 as needed for a PSF. psf_file = os.path.join('data', 'example_sdss_psf_sky0.fits.bz2') psf = galsim.InterpolatedImage(psf_file, scale=pixel_scale, flux=1.) logger.info('Read in PSF image from bzipped FITS file') # Setup the image: full_image = galsim.ImageF(image_size, image_size) # The default convention for indexing an image is to follow the FITS standard where the # lower-left pixel is called (1,1). However, this can be counter-intuitive to people more # used to C or python indexing, where indices start at 0. It is possible to change the # coordinates of the lower-left pixel with the methods `setOrigin`. For this demo, we # switch to 0-based indexing, so the lower-left pixel will be called (0,0). full_image.setOrigin(0, 0) # As for demo10, we use random_seed for the random numbers required for the # whole image. In this case, both the power spectrum realization and the noise on the # full image we apply later. rng = galsim.BaseDeviate(random_seed) # We want to make random positions within our image. 
However, currently for shears from a power # spectrum we first have to get shears on a grid of positions, and then we can choose random # positions within that. So, let's make the grid. We're going to make it as large as the # image, with grid points spaced by 90 arcsec (hence interpolation only happens below 90" # scales, below the interesting scales on which we want the shear power spectrum to be # represented exactly). The lensing engine wants positions in arcsec, so calculate that: ps.buildGrid(grid_spacing=grid_spacing, ngrid=int(math.ceil(image_size_arcsec / grid_spacing)), rng=rng) logger.info('Made gridded shears') # We keep track of how much noise is already in the image from the RealGalaxies. # The default initial value is all pixels = 0. noise_image = galsim.ImageF(image_size, image_size) noise_image.setOrigin(0, 0) # Make a slightly non-trivial WCS. We'll use a slightly rotated coordinate system # and center it at the image center. theta = 0.17 * galsim.degrees # ( dudx dudy ) = ( cos(theta) -sin(theta) ) * pixel_scale # ( dvdx dvdy ) ( sin(theta) cos(theta) ) # Aside: You can call numpy trig functions on Angle objects directly, rather than getting # their values in radians first. Or, if you prefer, you can write things like # theta.sin() or theta.cos(), which are equivalent. dudx = numpy.cos(theta) * pixel_scale dudy = -numpy.sin(theta) * pixel_scale dvdx = numpy.sin(theta) * pixel_scale dvdy = numpy.cos(theta) * pixel_scale image_center = full_image.true_center affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center) # We can also put it on the celestial sphere to give it a bit more realism. # The TAN projection takes a (u,v) coordinate system on a tangent plane and projects # that plane onto the sky using a given point as the tangent point. The tangent # point should be given as a CelestialCoord. sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec) # The third parameter, units, defaults to arcsec, but we make it explicit here. # It sets the angular units of the (u,v) intermediate coordinate system. wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) full_image.wcs = wcs # Now we need to loop over our objects: for k in range(nobj): time1 = time.time() # The usual random number generator using a different seed for each galaxy. ud = galsim.UniformDeviate(random_seed + k + 1) # Choose a random RA, Dec around the sky_center. # Note that for this to come out close to a square shape, we need to account for the # cos(dec) part of the metric: ds^2 = dr^2 + r^2 d(dec)^2 + r^2 cos^2(dec) d(ra)^2 # So need to calculate dec first. dec = center_dec + (ud() - 0.5) * image_size_arcsec * galsim.arcsec ra = center_ra + ( ud() - 0.5) * image_size_arcsec / numpy.cos(dec) * galsim.arcsec world_pos = galsim.CelestialCoord(ra, dec) # We will need the image position as well, so use the wcs to get that image_pos = wcs.toImage(world_pos) # We also need this in the tangent plane, which we call "world coordinates" here, # since the PowerSpectrum class is really defined on that plane, not in (ra,dec). uv_pos = affine.toWorld(image_pos) # Get the reduced shears and magnification at this point g1, g2, mu = ps.getLensing(pos=uv_pos) # Now we will have the COSMOSCatalog make a galaxy profile for us. It can make either # a RealGalaxy using the original HST image and PSF, or a parametric model based on # parametric fits to the light distribution of the HST observation. 
The parametric # models are either a Sersic fit to the data or a bulge + disk fit according to which # one gave the better chisq value. We will select a galaxy at random from the catalog. # One could easily do this by choosing an index = int(ud() * cosmos_cat.nobjects), but # we will instead allow the catalog to choose a random galaxy for us. It will remove any # selection effects involved in postage stamp creation using weights that are stored in # the catalog. (If for some reason you prefer not to do that, you can always choose a # purely random index yourself using int(ud() * cosmos_cat.nobjects).) We employ this # random selection by simply failing to specify an index or identifier for a galaxy, in # which case it chooses a random one. # First determine whether we will make a real galaxy (`gal_type = 'real'`) or a parametric # galaxy (`gal_type = 'parametric'`). The real galaxies take longer to render, so for this # script, we just use them 30% of the time and use parametric galaxies the other 70%. # We could just use `ud()<0.3` for this, but instead we introduce another Deviate type # available in GalSim that we haven't used yet: BinomialDeviate. # It takes an N and p value and returns integers according to a binomial distribution. # i.e. How many heads you get after N flips if each flip has a chance, p, of being heads. binom = galsim.BinomialDeviate(ud, N=1, p=0.3) real = binom() if real: # For real galaxies, we will want to whiten the noise in the image (below). # When whitening the image, we need to make sure the original correlated noise is # present throughout the whole image, otherwise the whitening will do the wrong thing # to the parts of the image that don't include the original image. The RealGalaxy # stores the correct noise profile to use as the gal.noise attribute. This noise # profile is automatically updated as we shear, dilate, convolve, etc. But we need to # tell it how large to pad with this noise by hand. This is a bit complicated for the # code to figure out on its own, so we have to supply the size for noise padding # with the noise_pad_size parameter. # The large galaxies will render fine without any noise padding, but the postage stamp # for the smaller galaxies will be sized appropriately for the PSF, which may make the # stamp larger than the original galaxy image. The psf image is 40 x 40, although # the bright part is much more concentrated than that. If we pad out the galaxy image # to at least 40 x sqrt(2), we should be safe even if the galaxy image is rotated # with respect to the psf image. # noise_pad_size = 40 * sqrt(2) * 0.2 arcsec/pixel = 11.3 arcsec gal = cosmos_cat.makeGalaxy(gal_type='real', rng=ud, noise_pad_size=11.3) else: gal = cosmos_cat.makeGalaxy(gal_type='parametric', rng=ud) # Apply a random rotation theta = ud() * 2.0 * numpy.pi * galsim.radians gal = gal.rotate(theta) # Rescale the flux to match our telescope configuration. # This automatically scales up the noise variance by flux_scaling**2. gal *= flux_scaling # Apply the cosmological (reduced) shear and magnification at this position using a single # GSObject method. gal = gal.lens(g1, g2, mu) # Convolve with the PSF. final = galsim.Convolve(psf, gal) # Account for the fractional part of the position # cf. demo9.py for an explanation of this nominal position stuff. 
x_nominal = image_pos.x + 0.5 y_nominal = image_pos.y + 0.5 ix_nominal = int(math.floor(x_nominal + 0.5)) iy_nominal = int(math.floor(y_nominal + 0.5)) dx = x_nominal - ix_nominal dy = y_nominal - iy_nominal offset = galsim.PositionD(dx, dy) # We use method='no_pixel' here because the SDSS PSF image that we are using includes the # pixel response already. stamp = final.drawImage(wcs=wcs.local(image_pos), offset=offset, method='no_pixel') # Recenter the stamp at the desired position: stamp.setCenter(ix_nominal, iy_nominal) # Find the overlapping bounds: bounds = stamp.bounds & full_image.bounds # Now, if we are using a real galaxy, we want to ether whiten or at least symmetrize the # noise on the postage stamp to avoid having to deal with correlated noise in any kind of # image processing you would want to do on the final image. (Like measure galaxy shapes.) # Galsim automatically propagates the noise correctly from the initial RealGalaxy object # through the applied shear, distortion, rotation, and convolution into the final object's # noise attribute. To make the noise fully white, use the image.whitenNoise() method. # The returned value is the variance of the Gaussian noise that is present after the # whitening process. # However, this is often overkill for many applications. If it is acceptable to merely end # up with noise with some degree of symmetry (say 4-fold or 8-fold symmetry), then you can # instead have GalSim just add enough noise to make the resulting noise have this kind of # symmetry. Usually this requires adding significantly less additional noise, which means # you can have the resulting total variance be somewhat smaller. The returned variance # corresponds to the zero-lag value of the noise correlation function, which will still have # off-diagonal elements. We can do this step using the image.symmetrizeNoise() method. if real: if True: # We use the symmetrizing option here. new_variance = stamp.symmetrizeNoise(final.noise, 8) else: # Here is how you would do it if you wanted to fully whiten the image. new_variance = stamp.whitenNoise(final.noise) # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. noise_image[bounds] += new_variance # Finally, add the stamp to the full image. full_image[bounds] += stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info('Galaxy %d: position relative to center = %s, t=%f s', k, str(uv_pos), tot_time) # We already have some noise in the image, but it isn't uniform. So the first thing to do is # to make the Gaussian noise uniform across the whole image. We have a special noise class # that can do this. VariableGaussianNoise takes an image of variance values and applies # Gaussian noise with the corresponding variance to each pixel. # So all we need to do is build an image with how much noise to add to each pixel to get us # up to the maximum value that we already have in the image. max_current_variance = numpy.max(noise_image.array) noise_image = max_current_variance - noise_image vn = galsim.VariableGaussianNoise(rng, noise_image) full_image.addNoise(vn) # Now max_current_variance is the noise level across the full image. We don't want to add that # twice, so subtract off this much from the intended noise that we want to end up in the image. noise_variance -= max_current_variance # Now add Gaussian noise with this variance to the final image. 
We have to do this step # at the end, rather than adding to individual postage stamps, in order to get the noise # level right in the overlap regions between postage stamps. noise = galsim.GaussianNoise(rng, sigma=math.sqrt(noise_variance)) full_image.addNoise(noise) logger.info('Added noise to final large image') # Now write the image to disk. It is automatically compressed with Rice compression, # since the filename we provide ends in .fz. full_image.write(file_name) logger.info('Wrote image to %r', file_name) # Compute some sky positions of some of the pixels to compare with the values of RA, Dec # that ds9 reports. ds9 always uses (1,1) for the lower left pixel, so the pixel coordinates # of these pixels are different by 1, but you can check that the RA and Dec values are # the same as what GalSim calculates. ra_str = center_ra.hms() dec_str = center_dec.dms() logger.info('Center of image is at RA %sh %sm %ss, DEC %sd %sm %ss', ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3], dec_str[3:5], dec_str[5:]) for (x, y) in [(0, 0), (0, image_size - 1), (image_size - 1, 0), (image_size - 1, image_size - 1)]: world_pos = wcs.toWorld(galsim.PositionD(x, y)) ra_str = world_pos.ra.hms() dec_str = world_pos.dec.dms() logger.info('Pixel (%4d, %4d) is at RA %sh %sm %ss, DEC %sd %sm %ss', x, y, ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3], dec_str[3:5], dec_str[5:]) logger.info( 'ds9 reports these pixels as (1,1), (1,2048), etc. with the same RA, Dec.' )
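The nominal-position bookkeeping above comes up repeatedly in these scripts. Below is a minimal, self-contained sketch of just that step, using an illustrative Gaussian profile and a plain pixel scale instead of the convolved object and TAN WCS used above; the numbers are arbitrary.

import math
import galsim

full_image = galsim.ImageF(256, 256, scale=0.2)   # illustrative image and pixel scale
gal = galsim.Gaussian(sigma=1.5, flux=100.0)      # stand-in for the convolved object

x, y = 123.4, 87.9                                # desired (fractional) pixel position
x_nominal = x + 0.5
y_nominal = y + 0.5
ix_nominal = int(math.floor(x_nominal + 0.5))     # integer part -> stamp center
iy_nominal = int(math.floor(y_nominal + 0.5))
offset = galsim.PositionD(x_nominal - ix_nominal, y_nominal - iy_nominal)  # sub-pixel part

stamp = gal.drawImage(scale=0.2, offset=offset)   # sub-pixel shift applied while drawing
stamp.setCenter(ix_nominal, iy_nominal)           # integer shift applied by recentering
bounds = stamp.bounds & full_image.bounds         # clip to the full image
full_image[bounds] += stamp[bounds]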
def test_focal(): """This test uses 2 input files and two catalogs, but does the interpolation over the whole field of view. """ # Give them different wcs's. # The centers should be separated by ~0.25 arcsec/pixel * 2048 pixels / cos(dec) = 565 arcsec # The actual separation of 10 arcmin gives a bit of a gap between the chips. wcs1 = galsim.TanWCS( galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)), galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees)) wcs2 = galsim.TanWCS( galsim.AffineTransform(0.25, -0.02, 0.01, 0.24, galsim.PositionD(1024, 1024)), galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees)) field_center = galsim.CelestialCoord(0 * galsim.degrees, -25 * galsim.degrees) if __name__ == '__main__': nstars = 20 # per ccd else: nstars = 3 # per ccd rng = np.random.RandomState(1234) x = rng.random_sample(nstars) * 2000 + 24 y = rng.random_sample(nstars) * 2000 + 24 u, v = field_center.project_rad(*wcs1._radec(x.copy(), y.copy()), projection='gnomonic') e1 = 0.02 + 2.e-5 * u - 3.e-9 * u**2 + 2.e-9 * v**2 e2 = -0.04 - 3.e-5 * v + 1.e-9 * u * v + 3.e-9 * v**2 s = 0.3 + 8.e-9 * (u**2 + v**2) - 1.e-9 * u * v data1 = np.array(list(zip(x, y, e1, e2, s)), dtype=[('x', float), ('y', float), ('e1', float), ('e2', float), ('s', float)]) np.testing.assert_array_equal(data1['x'], x) np.testing.assert_array_equal(data1['y'], y) np.testing.assert_array_equal(data1['e1'], e1) np.testing.assert_array_equal(data1['e2'], e2) np.testing.assert_array_equal(data1['s'], s) im1 = drawImage(2048, 2048, wcs1, x, y, e1, e2, s) im1.write('output/test_focal_im1.fits') fitsio.write('output/test_focal_cat1.fits', data1, clobber=True) x = rng.random_sample(nstars) * 2000 + 24 y = rng.random_sample(nstars) * 2000 + 24 u, v = field_center.project_rad(*wcs2._radec(x.copy(), y.copy()), projection='gnomonic') # Same functions of u,v, but using the positions on chip 2 e1 = 0.02 + 2.e-5 * u - 3.e-9 * u**2 + 2.e-9 * v**2 e2 = -0.04 - 3.e-5 * v + 1.e-9 * u * v + 3.e-9 * v**2 s = 0.3 + 8.e-9 * (u**2 + v**2) - 1.e-9 * u * v data2 = np.array(list(zip(x, y, e1, e2, s)), dtype=[('x', float), ('y', float), ('e1', float), ('e2', float), ('s', float)]) im2 = drawImage(2048, 2048, wcs2, x, y, e1, e2, s) im2.write('output/test_focal_im2.fits') fitsio.write('output/test_focal_cat2.fits', data2, clobber=True) # Try to fit with the right model (Moffat) and interpolant (2nd order polyomial) # Should work very well, since no noise. config = { 'input': { 'image_file_name': 'output/test_focal_im?.fits', 'cat_file_name': 'output/test_focal_cat?.fits', 'x_col': 'x', 'y_col': 'y', 'ra': 0., 'dec': -25., }, 'psf': { 'type': 'Simple', 'model': { 'type': 'Moffat', 'beta': 2.5 }, 'interp': { 'type': 'Polynomial', 'order': 2 } } } if __name__ != '__main__': config['verbose'] = 0 psf = piff.process(config) for data, wcs in [(data1, wcs1), (data2, wcs2)]: for k in range(nstars): x = data['x'][k] y = data['y'][k] e1 = data['e1'][k] e2 = data['e2'][k] s = data['s'][k] #print('k,x,y = ',k,x,y) #print(' true s,e1,e2 = ',s,e1,e2) image_pos = galsim.PositionD(x, y) star = piff.Star.makeTarget(x=x, y=y, wcs=wcs, stamp_size=48, pointing=field_center) star = psf.drawStar(star) #print(' fitted s,e1,e2 = ',star.fit.params) np.testing.assert_almost_equal(star.fit.params, [s, e1, e2], decimal=6)
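As a quick check of the chip WCS construction used in these tests (the same numbers as wcs1 above, an arbitrary test position), a TanWCS maps pixel coordinates to celestial coordinates and back:

import galsim

affine = galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024))
wcs = galsim.TanWCS(affine, galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees))

image_pos = galsim.PositionD(1500.3, 620.8)
sky_pos = wcs.toWorld(image_pos)                   # pixel -> (RA, Dec)
back = wcs.toImage(sky_pos)                        # and back again
print(sky_pos.ra / galsim.degrees, sky_pos.dec / galsim.degrees)
print(back.x - image_pos.x, back.y - image_pos.y)  # ~0 to machine precision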
def test_single(): """Same as test_focal, but using the SingleCCD PSF type, which does a separate fit on each CCD. """ wcs1 = galsim.TanWCS( galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)), galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees)) wcs2 = galsim.TanWCS( galsim.AffineTransform(0.25, -0.02, 0.01, 0.24, galsim.PositionD(1024, 1024)), galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees)) field_center = galsim.CelestialCoord(0 * galsim.degrees, -25 * galsim.degrees) if __name__ == '__main__': nstars = 20 # per ccd else: nstars = 6 # per ccd rng = np.random.RandomState(1234) x = rng.random_sample(nstars) * 2000 + 24 y = rng.random_sample(nstars) * 2000 + 24 u, v = field_center.project_rad(*wcs1._radec(x.copy(), y.copy()), projection='gnomonic') e1 = 0.02 + 2.e-5 * u - 3.e-9 * u**2 + 2.e-9 * v**2 e2 = -0.04 - 3.e-5 * v + 1.e-9 * u * v + 3.e-9 * v**2 s = 0.3 + 8.e-9 * (u**2 + v**2) - 1.e-9 * u * v data1 = np.array(list(zip(x, y, e1, e2, s)), dtype=[('x', float), ('y', float), ('e1', float), ('e2', float), ('s', float)]) im1 = drawImage(2048, 2048, wcs1, x, y, e1, e2, s) im1.write('output/test_single_im1.fits') fitsio.write('output/test_single_cat1.fits', data1, clobber=True) x = rng.random_sample(nstars) * 2000 + 24 y = rng.random_sample(nstars) * 2000 + 24 u, v = field_center.project_rad(*wcs2._radec(x.copy(), y.copy()), projection='gnomonic') # Same functions of u,v, but using the positions on chip 2 e1 = 0.02 + 2.e-5 * u - 3.e-9 * u**2 + 2.e-9 * v**2 e2 = -0.04 - 3.e-5 * v + 1.e-9 * u * v + 3.e-9 * v**2 s = 0.3 + 8.e-9 * (u**2 + v**2) - 1.e-9 * u * v data2 = np.array(list(zip(x, y, e1, e2, s)), dtype=[('x', float), ('y', float), ('e1', float), ('e2', float), ('s', float)]) im2 = drawImage(2048, 2048, wcs2, x, y, e1, e2, s) im2.write('output/test_single_im2.fits') fitsio.write('output/test_single_cat2.fits', data2, clobber=True) # Try to fit with the right model (Moffat) and interpolant (2nd order polyomial) # Should work very well, since no noise. config = { 'input': { # A third way to input these same file names. Use GalSim config values and # explicitly specify the number of images to read 'nimages': 2, 'image_file_name': { 'type': 'FormattedStr', 'format': '%s/test_single_im%d.fits', 'items': ['output', '$image_num+1'] }, 'cat_file_name': { 'type': 'FormattedStr', 'format': '%s/test_single_cat%d.fits', 'items': ['output', '$image_num+1'] }, # Use chipnum = 1,2 rather than the default 0,1. 'chipnum': '$image_num+1', 'x_col': 'x', 'y_col': 'y', 'ra': 0., 'dec': -25., }, 'psf': { 'type': 'SingleChip', 'model': { 'type': 'Moffat', 'beta': 2.5 }, 'interp': { 'type': 'Polynomial', 'order': 2 } }, } if __name__ != '__main__': config['verbose'] = 0 psf = piff.process(config) for chipnum, data, wcs in [(1, data1, wcs1), (2, data2, wcs2)]: for k in range(nstars): x = data['x'][k] y = data['y'][k] e1 = data['e1'][k] e2 = data['e2'][k] s = data['s'][k] #print('k,x,y = ',k,x,y) #print(' true s,e1,e2 = ',s,e1,e2) image_pos = galsim.PositionD(x, y) star = piff.Star.makeTarget(x=x, y=y, wcs=wcs, stamp_size=48, pointing=field_center, chipnum=chipnum) star = psf.drawStar(star) #print(' fitted s,e1,e2 = ',star.fit.params) np.testing.assert_almost_equal(star.fit.params, [s, e1, e2], decimal=6)
def test_wrongwcs(): """Same as test_focal, but the images are written out with the wrong wcs. """ wcs1 = galsim.TanWCS( galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)), galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees)) wcs2 = galsim.TanWCS( galsim.AffineTransform(0.25, -0.02, 0.01, 0.24, galsim.PositionD(1024, 1024)), galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees)) wrong_wcs = galsim.TanWCS( galsim.AffineTransform(0.25, 0, 0, 0.25, galsim.PositionD(1024, 1024)), galsim.CelestialCoord(0 * galsim.arcmin, -25 * galsim.degrees)) field_center = galsim.CelestialCoord(0 * galsim.degrees, -25 * galsim.degrees) if __name__ == '__main__': nstars = 20 # per ccd else: nstars = 3 # per ccd rng = np.random.RandomState(1234) x = rng.random_sample(nstars) * 2000 + 24 y = rng.random_sample(nstars) * 2000 + 24 u, v = field_center.project_rad(*wcs1._radec(x.copy(), y.copy()), projection='gnomonic') e1 = 0.02 + 2.e-5 * u - 3.e-9 * u**2 + 2.e-9 * v**2 e2 = -0.04 - 3.e-5 * v + 1.e-9 * u * v + 3.e-9 * v**2 s = 0.3 + 8.e-9 * (u**2 + v**2) - 1.e-9 * u * v data1 = np.array(list(zip(x, y, e1, e2, s)), dtype=[('x', float), ('y', float), ('e1', float), ('e2', float), ('s', float)]) im1 = drawImage(2048, 2048, wcs1, x, y, e1, e2, s) x = rng.random_sample(nstars) * 2000 + 24 y = rng.random_sample(nstars) * 2000 + 24 u, v = field_center.project_rad(*wcs2._radec(x.copy(), y.copy()), projection='gnomonic') # Same functions of u,v, but using the positions on chip 2 e1 = 0.02 + 2.e-5 * u - 3.e-9 * u**2 + 2.e-9 * v**2 e2 = -0.04 - 3.e-5 * v + 1.e-9 * u * v + 3.e-9 * v**2 s = 0.3 + 8.e-9 * (u**2 + v**2) - 1.e-9 * u * v data2 = np.array(list(zip(x, y, e1, e2, s)), dtype=[('x', float), ('y', float), ('e1', float), ('e2', float), ('s', float)]) im2 = drawImage(2048, 2048, wcs2, x, y, e1, e2, s) # Put in the wrong wcs before writing them to files. im1.wcs = im2.wcs = wrong_wcs im1.write('output/test_wrongwcs_im1.fits') im2.write('output/test_wrongwcs_im2.fits') fitsio.write('output/test_wrongwcs_cat1.fits', data1, clobber=True) fitsio.write('output/test_wrongwcs_cat2.fits', data2, clobber=True) config = { 'modules': ['custom_wcs'], 'input': { 'dir': 'output', # Normally more convenient to use a glob string, but an explicit list is also allowed. 'image_file_name': ['test_wrongwcs_im1.fits', 'test_wrongwcs_im2.fits'], 'cat_file_name': ['test_wrongwcs_cat1.fits', 'test_wrongwcs_cat2.fits'], 'x_col': 'x', 'y_col': 'y', 'ra': 0., 'dec': -25., # But here tell Piff the correct WCS to use. This uses a custom WCS builder, # mostly so we can test the 'modules' option. In practice, you might use a # galsim_extra Pixmappy WCS class. Or maybe an LSST DM WCS. 'wcs': { 'type': 'Custom' } }, 'psf': { 'type': 'Simple', 'model': { 'type': 'Moffat', 'beta': 2.5 }, 'interp': { 'type': 'Polynomial', 'order': 2 } }, } if __name__ != '__main__': config['verbose'] = 0 psf = piff.process(config) for data, wcs in [(data1, wcs1), (data2, wcs2)]: for k in range(nstars): x = data['x'][k] y = data['y'][k] e1 = data['e1'][k] e2 = data['e2'][k] s = data['s'][k] #print('k,x,y = ',k,x,y) #print(' true s,e1,e2 = ',s,e1,e2) image_pos = galsim.PositionD(x, y) star = piff.Star.makeTarget(x=x, y=y, wcs=wcs, stamp_size=48, pointing=field_center) star = psf.drawStar(star) #print(' fitted s,e1,e2 = ',star.fit.params) np.testing.assert_almost_equal(star.fit.params, [s, e1, e2], decimal=6)
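A small sketch of what the wrong-WCS setup above means on disk (hypothetical output path, small image for brevity): the FITS header records whatever WCS is attached at write time, which is why the config has to supply the correct WCS separately rather than trusting the header.

import galsim

right_wcs = galsim.TanWCS(
    galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)),
    galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees))
wrong_wcs = galsim.TanWCS(
    galsim.AffineTransform(0.25, 0, 0, 0.25, galsim.PositionD(1024, 1024)),
    galsim.CelestialCoord(0 * galsim.arcmin, -25 * galsim.degrees))

im = galsim.ImageF(64, 64, wcs=right_wcs)
im.wcs = wrong_wcs                                 # overwrite before writing, as in the test
im.write('output/wrongwcs_demo.fits')              # hypothetical file name

im2 = galsim.fits.read('output/wrongwcs_demo.fits')
p = galsim.PositionD(32, 32)
print(im2.wcs.toWorld(p).distanceTo(wrong_wcs.toWorld(p)) / galsim.arcsec)  # ~0
print(im2.wcs.toWorld(p).distanceTo(right_wcs.toWorld(p)) / galsim.arcsec)  # several arcmin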
def runSkyModel(config): # image properties data_path = config.get('pipeline', 'data_path') pixel_scale = config.getfloat('skymodel', 'pixel_scale') * galsim.arcsec fov = config.getfloat('skymodel', 'field_of_view') * galsim.arcmin image_size = int((fov / galsim.arcmin) / (pixel_scale / galsim.arcmin)) ra_field = config.get('field', 'field_ra') ra_field_gs = galsim.HMS_Angle(ra_field) dec_field = config.get('field', 'field_dec') dec_field_gs = galsim.DMS_Angle(dec_field) cat_file_name = config.get('field', 'catalogue') print('Loading catalogue from {0} ...'.format(cat_file_name)) cat = fits.getdata(cat_file_name) nobj = len(cat) cat_wcs = ast_wcs.WCS(naxis=2) cat_wcs.wcs.crpix = [image_size / 2, image_size / 2] cat_wcs.wcs.cdelt = [ pixel_scale / galsim.degrees, pixel_scale / galsim.degrees ] cat_wcs.wcs.crval = [0.e0, 0.e0] cat_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN'] gal_ra = cat['longitude'] # RA is the longitude-like coordinate gal_dec = cat['latitude'] # Dec is the latitude-like coordinate gal_e1 = cat['e1'] gal_e2 = cat['e2'] gal_flux = cat['I1400'] #mJy gal_r0 = cat['size'] / 2. g1 = 0 g2 = 0 print('...done.') full_image = galsim.ImageF(image_size, image_size, scale=pixel_scale / galsim.arcsec) im_center = full_image.bounds.trueCenter() sky_center = galsim.CelestialCoord(ra=ra_field_gs, dec=dec_field_gs) # - on dx's since the ra axis is flipped. dudx = -pixel_scale / galsim.arcsec dudy = 0. dvdx = 0. dvdy = pixel_scale / galsim.arcsec image_center = full_image.trueCenter() affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.trueCenter()) wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) full_image.wcs = wcs tstart = time.time() nobj = 200 # only render the first 200 catalogue sources in this run (overrides len(cat)) for i in range(nobj): sys.stdout.write('\rAdding source {0} of {1} to skymodel...'.format( i + 1, nobj)) gal = galsim.Exponential(scale_radius=gal_r0[i], flux=gal_flux[i]) ellipticity = galsim.Shear(e1=gal_e1[i], e2=gal_e2[i]) shear = galsim.Shear(g1=g1, g2=g2) # g1, g2 are scalar shear components here total_shear = ellipticity + shear gal = gal.shear(total_shear) x, y = cat_wcs.wcs_world2pix(gal_ra[i], gal_dec[i], 0) x = float(x) y = float(y) # Account for the fractional part of the position: ix = int(np.floor(x + 0.5)) iy = int(np.floor(y + 0.5)) offset = galsim.PositionD(x - ix, y - iy) stamp = gal.drawImage(scale=pixel_scale / galsim.arcsec, offset=offset) stamp.setCenter(ix, iy) bounds = stamp.bounds & full_image.bounds full_image[bounds] += stamp[bounds] sys.stdout.flush() tend = time.time() print('\n...done in {0} seconds.'.format(tend - tstart)) all_gals_fname = data_path + config.get('field', 'fitsname') print('Writing image data to {0} ...'.format(all_gals_fname)) image_data = full_image.array write4dImage(all_gals_fname, image_data, pixel_scale / galsim.degrees, obs_ra=ra_field_gs / galsim.degrees, obs_dec=dec_field_gs / galsim.degrees, obs_freq=config.getfloat('observation', 'lowest_frequency')) print('...done.') print('runSkyModel complete.')
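For reference, a minimal version of the astropy TAN WCS built above, showing the world-to-pixel call that places each catalogue source; the centre, pixel scale, and source positions here are illustrative.

import numpy as np
from astropy.wcs import WCS

image_size = 512                                   # illustrative
pixel_scale_deg = 0.26 / 3600.0                    # 0.26 arcsec/pixel in degrees

cat_wcs = WCS(naxis=2)
cat_wcs.wcs.crpix = [image_size / 2, image_size / 2]
cat_wcs.wcs.cdelt = [pixel_scale_deg, pixel_scale_deg]  # the GalSim affine above flips the
                                                        # RA axis with a negative dudx instead
cat_wcs.wcs.crval = [0.0, 0.0]
cat_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']

ra = np.array([0.010, -0.020])                     # hypothetical source positions (deg)
dec = np.array([0.015, -0.010])
x, y = cat_wcs.wcs_world2pix(ra, dec, 0)           # 0-based pixel coordinates
print(x, y)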
def test_withOrigin(): from test_wcs import Cubic # First EuclideanWCS types: scale = 0.2 # matches the 0.2 factor in the ufunc/vfunc lambdas; used by the inverse xfunc/yfunc below wcs_list = [ galsim.OffsetWCS(0.3, galsim.PositionD(1, 1), galsim.PositionD(10, 23)), galsim.OffsetShearWCS(0.23, galsim.Shear(g1=0.1, g2=0.3), galsim.PositionD(12, 43)), galsim.AffineTransform(0.01, 0.26, -0.26, 0.02, galsim.PositionD(12, 43)), galsim.UVFunction(ufunc=lambda x, y: 0.2 * x, vfunc=lambda x, y: 0.2 * y), galsim.UVFunction(ufunc=lambda x, y: 0.2 * x, vfunc=lambda x, y: 0.2 * y, xfunc=lambda u, v: u / scale, yfunc=lambda u, v: v / scale), galsim.UVFunction(ufunc='0.2*x + 0.03*y', vfunc='0.01*x + 0.2*y'), ] color = 0.3 for wcs in wcs_list: name = str(wcs) # label used in the assert messages below # Original version of the shiftOrigin tests in do_nonlocal_wcs using deprecated name. new_origin = galsim.PositionI(123, 321) wcs3 = check_dep(wcs.withOrigin, new_origin) assert wcs != wcs3, name + ' is not != wcs.withOrigin(pos)' wcs4 = wcs.local(wcs.origin, color=color) assert wcs != wcs4, name + ' is not != wcs.local()' assert wcs4 != wcs, name + ' is not != wcs.local() (reverse)' world_origin = wcs.toWorld(wcs.origin, color=color) if wcs.isUniform(): if wcs.world_origin == galsim.PositionD(0, 0): wcs2 = wcs.local(wcs.origin, color=color).withOrigin(wcs.origin) assert wcs == wcs2, name + ' is not equal after wcs.local().withOrigin(origin)' wcs2 = wcs.local(wcs.origin, color=color).withOrigin(wcs.origin, wcs.world_origin) assert wcs == wcs2, name + ' not equal after wcs.local().withOrigin(origin,world_origin)' world_pos1 = wcs.toWorld(galsim.PositionD(0, 0), color=color) wcs3 = check_dep(wcs.withOrigin, new_origin) world_pos2 = wcs3.toWorld(new_origin, color=color) np.testing.assert_almost_equal( world_pos2.x, world_pos1.x, 7, 'withOrigin(new_origin) returned wrong world position') np.testing.assert_almost_equal( world_pos2.y, world_pos1.y, 7, 'withOrigin(new_origin) returned wrong world position') new_world_origin = galsim.PositionD(5352.7, 9234.3) wcs5 = check_dep(wcs.withOrigin, new_origin, new_world_origin, color=color) world_pos3 = wcs5.toWorld(new_origin, color=color) np.testing.assert_almost_equal( world_pos3.x, new_world_origin.x, 7, 'withOrigin(new_origin, new_world_origin) returned wrong position') np.testing.assert_almost_equal( world_pos3.y, new_world_origin.y, 7, 'withOrigin(new_origin, new_world_origin) returned wrong position') # Now some CelestialWCS types cubic_u = Cubic(2.9e-5, 2000., 'u') cubic_v = Cubic(-3.7e-5, 2000., 'v') center = galsim.CelestialCoord(23 * galsim.degrees, -13 * galsim.degrees) radec = lambda x, y: center.deproject_rad( cubic_u(x, y) * 0.2, cubic_v(x, y) * 0.2, projection='lambert') wcs_list = [ galsim.RaDecFunction(radec), galsim.AstropyWCS('1904-66_TAN.fits', dir='fits_files'), galsim.GSFitsWCS('tpv.fits', dir='fits_files'), galsim.FitsWCS('sipsample.fits', dir='fits_files'), ] for wcs in wcs_list: name = str(wcs) # label used in the assert messages below # Original version of the shiftOrigin tests in do_celestial_wcs using deprecated name. new_origin = galsim.PositionI(123, 321) wcs3 = wcs.shiftOrigin(new_origin) assert wcs != wcs3, name + ' is not != wcs.shiftOrigin(pos)' wcs4 = wcs.local(wcs.origin) assert wcs != wcs4, name + ' is not != wcs.local()' assert wcs4 != wcs, name + ' is not != wcs.local() (reverse)' world_pos1 = wcs.toWorld(galsim.PositionD(0, 0)) wcs3 = wcs.shiftOrigin(new_origin) world_pos2 = wcs3.toWorld(new_origin) np.testing.assert_almost_equal( world_pos2.distanceTo(world_pos1) / galsim.arcsec, 0, 7, 'shiftOrigin(new_origin) returned wrong world position')
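The property the test checks can be seen in isolation with any uniform WCS. A minimal sketch (assuming a GalSim version in which withOrigin has been renamed shiftOrigin, as in the second loop above):

import galsim

wcs = galsim.AffineTransform(0.01, 0.26, -0.26, 0.02, galsim.PositionD(12, 43))
new_origin = galsim.PositionI(123, 321)
wcs2 = wcs.shiftOrigin(new_origin)

# The shifted WCS assigns to new_origin the world position that the original
# WCS assigns to (0, 0) -- exactly the assertion made in the test above.
p1 = wcs.toWorld(galsim.PositionD(0, 0))
p2 = wcs2.toWorld(galsim.PositionD(123, 321))
print(p1.x - p2.x, p1.y - p2.y)                    # both ~0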
def test_hsmcatalog(): """Test HSMCatalog stats type. """ if __name__ == '__main__': logger = piff.config.setup_logger(verbose=2) else: logger = piff.config.setup_logger( log_file='output/test_hsmcatalog.log') image_file = os.path.join('output', 'test_stats_image.fits') cat_file = os.path.join('output', 'test_stats_cat.fits') psf_file = os.path.join('output', 'test_starstats.fits') hsm_file = os.path.join('output', 'test_hsmcatalog.fits') config = { 'input': { 'image_file_name': image_file, 'cat_file_name': cat_file, 'stamp_size': 48, 'reserve_frac': 0.2, 'seed': 123 }, 'psf': { 'model': { 'type': 'Gaussian', 'fastfit': True, 'include_pixel': False }, 'interp': { 'type': 'Mean' }, }, 'output': { 'file_name': psf_file, 'stats': [{ 'type': 'HSMCatalog', 'file_name': hsm_file, }] } } piff.piffify(config, logger) assert os.path.isfile(hsm_file) data = fitsio.read(hsm_file) for col in [ 'ra', 'dec', 'x', 'y', 'u', 'v', 'T_data', 'g1_data', 'g2_data', 'T_model', 'g1_model', 'g2_model', 'flux', 'reserve' ]: assert len(data[col]) == 10 true_data = fitsio.read(cat_file) np.testing.assert_allclose(data['x'], true_data['x']) np.testing.assert_allclose(data['y'], true_data['y']) np.testing.assert_allclose(data['flux'], 123.45, atol=0.001) print('reserve = ', data['reserve']) print('nreserve = ', np.sum(data['reserve'])) print('ntot = ', len(data['reserve'])) assert np.sum(data['reserve']) == int(0.2 * len(data['reserve'])) np.testing.assert_allclose(data['T_model'], data['T_data'], rtol=1.e-4) np.testing.assert_allclose(data['g1_model'], data['g1_data'], rtol=1.e-4) np.testing.assert_allclose(data['g2_model'], data['g2_data'], rtol=1.e-4) image = galsim.fits.read(image_file) world = [ image.wcs.toWorld(galsim.PositionD(x, y)) for x, y in zip(data['x'], data['y']) ] np.testing.assert_allclose(data['ra'], [w.ra.deg for w in world], rtol=1.e-4) np.testing.assert_allclose(data['dec'], [w.dec.deg for w in world], rtol=1.e-4) # Repeat with non-Celestial WCS wcs = galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024)) config['input']['wcs'] = wcs piff.piffify(config, logger) data = fitsio.read(hsm_file) np.testing.assert_array_equal(data['ra'], 0.) np.testing.assert_array_equal(data['dec'], 0.) world = [ wcs.toWorld(galsim.PositionD(x, y)) for x, y in zip(data['x'], data['y']) ] np.testing.assert_allclose(data['u'], [w.x for w in world], rtol=1.e-4) np.testing.assert_allclose(data['v'], [w.y for w in world], rtol=1.e-4) # Use class directly, rather than through config. psf = piff.PSF.read(psf_file) stars, _, _ = piff.Input.process(config['input']) hsmcat = piff.stats.HSMCatalogStats() with np.testing.assert_raises(RuntimeError): hsmcat.write('dummy') # Cannot write before compute hsmcat.compute(psf, stars) hsm_file2 = os.path.join('output', 'test_hsmcatalog2.fits') with np.testing.assert_raises(ValueError): hsmcat.write() # Must supply file_name if not given in constructor hsmcat.write(hsm_file2) data2 = fitsio.read(hsm_file2) for key in data.dtype.names: np.testing.assert_allclose(data2[key], data[key], rtol=1.e-5)
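For the non-celestial branch at the end of the test, toWorld maps chip (x, y) straight to focal-plane (u, v); those are the numbers the 'u' and 'v' columns are compared against. A tiny illustration with the same AffineTransform:

import galsim

wcs = galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024, 1024))

for x, y in [(100.0, 200.0), (1024.0, 1024.0)]:
    w = wcs.toWorld(galsim.PositionD(x, y))
    print(x, y, w.x, w.y)                          # (1024, 1024) maps to (0, 0), the origin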
def main(argv): """ Make images using model PSFs and galaxy cluster shear: - The galaxies come from a processed COSMOS 2015 Catalog, scaled to match anticipated SuperBIT 2021 observations - The galaxy shape parameters are assigned in a probabilistic way through matching galaxy fluxes and redshifts to similar GalSim-COSMOS galaxies (see A. Gill+ 2021) """ global logger logging.basicConfig(format="%(message)s", level=logging.DEBUG, stream=sys.stdout) logger = logging.getLogger("mock_superbit_data") M = MPIHelper() # Define some parameters we'll use below. sbparams = SuperBITParameters(argv=argv) # Set up the NFWHalo: nfw = galsim.NFWHalo(mass=sbparams.mass, conc=sbparams.nfw_conc, redshift=sbparams.nfw_z_halo, omega_m=sbparams.omega_m, omega_lam=sbparams.omega_lam) logger.info('Set up NFW halo for lensing') # Read in galaxy catalog, as well as catalog containing # information from COSMOS fits like redshifts, hlr, etc. # cosmos_cat = galsim.COSMOSCatalog(sbparams.cat_file_name, dir=sbparams.cosmosdir) # fitcat = Table.read(os.path.join(sbparams.cosmosdir, sbparams.fit_file_name)) cosmos_cat = Table.read(os.path.join(sbparams.cosmosdir,sbparams.cat_file_name)) logger.info('Read in %d galaxies from catalog and associated fit info', len(cosmos_cat)) cluster_cat = galsim.COSMOSCatalog(sbparams.cluster_cat_name) logger.debug('Read in %d cluster galaxies from catalog' % cosmos_cat.nobjects) ### Now create PSF. First, define Zernicke polynomial component ### note: aberrations were definined for lam = 550, and close to the ### center of the camera. The PSF degrades at the edge of the FOV lam_over_diam = sbparams.lam * 1.e-9 / sbparams.tel_diam # radians lam_over_diam *= 206265. aberrations = numpy.zeros(38) # Set the initial size. aberrations[0] = 0. # First entry must be zero aberrations[1] = -0.00305127 aberrations[4] = -0.02474205 # Noll index 4 = Defocus aberrations[11] = -0.01544329 # Noll index 11 = Spherical aberrations[22] = 0.00199235 aberrations[26] = 0.00000017 aberrations[37] = 0.00000004 logger.info('Calculated lambda over diam = %f arcsec', lam_over_diam) # will store the Zernicke component of the PSF optics = galsim.OpticalPSF(lam=sbparams.lam,diam=sbparams.tel_diam, obscuration=sbparams.obscuration, nstruts=sbparams.nstruts, strut_angle=sbparams.strut_angle, strut_thick=sbparams.strut_thick, aberrations=aberrations) logger.info('Made telescope PSF profile') ### ### MAKE SIMULATED OBSERVATIONS ### ITERATE n TIMES TO MAKE n SEPARATE IMAGES ### for i in numpy.arange(1,sbparams.nexp+1): # get MPI processes in sync at start of each image M.barrier() #rng = galsim.BaseDeviate(sbparams.noise_seed+i) try: timescale=str(sbparams.exp_time) outname=''.join(['superbit_gaussJitter_',str(i).zfill(3),'.fits']) truth_file_name=''.join([sbparams.outdir, '/truth_gaussJitter_', str(i).zfill(3), '.dat']) file_name = os.path.join(sbparams.outdir, outname) except galsim.errors.GalSimError: print("naming failed, check path") pdb.set_trace() # Setting up a truth catalog names = [ 'gal_num', 'x_image', 'y_image', 'ra', 'dec', 'g1_meas', 'g2_meas', 'nfw_mu', 'redshift','flux','truth_fwhm','truth_mom', 'n','hlr','inclination','scale_h_over_r'] types = [ int, float, float, float,float,float, float, float, float, float, float, float, float, float, float, float] truth_catalog = galsim.OutputCatalog(names, types) # Set up the image: full_image = galsim.ImageF(sbparams.image_xsize, sbparams.image_ysize) sky_level = sbparams.exp_time * sbparams.sky_bkg full_image.fill(sky_level) full_image.setOrigin(0,0) # If you 
wanted to make a non-trivial WCS system, could set theta to a non-zero number theta = 0.0 * galsim.degrees dudx = numpy.cos(theta) * sbparams.pixel_scale dudy = -numpy.sin(theta) * sbparams.pixel_scale dvdx = numpy.sin(theta) * sbparams.pixel_scale dvdy = numpy.cos(theta) * sbparams.pixel_scale image_center = full_image.true_center affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center) sky_center = galsim.CelestialCoord(ra=sbparams.center_ra, dec=sbparams.center_dec) wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) full_image.wcs = wcs ## Now let's read in the PSFEx PSF model, if using. ## We read the image directly into an InterpolatedImage GSObject, ## so we can manipulate it as needed #psf_wcs=wcs #psf = galsim.des.DES_PSFEx(psf_filen,wcs=psf_wcs) #logger.info('Constructed PSF object from PSFEx file') ##### ## Loop over galaxy objects: ##### # get local range to iterate over in this process local_start, local_end = M.mpi_local_range(sbparams.nobj) for k in range(local_start, local_end): time1 = time.time() # The usual random number generator using a different seed for each galaxy. ud = galsim.UniformDeviate(sbparams.galobj_seed+k+1) try: # make single galaxy object stamp,truth = make_a_galaxy(ud=ud,wcs=wcs,affine=affine, cosmos_cat=cosmos_cat,optics=optics,nfw=nfw, sbparams=sbparams) # Find the overlapping bounds: bounds = stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. # noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. full_image[bounds] += stamp[bounds] time2 = time.time() tot_time = time2-time1 logger.info('Galaxy %d positioned relative to center t=%f s\n', k, tot_time) this_flux=numpy.sum(stamp.array) row = [ k,truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu,truth.z, this_flux,truth.fwhm, truth.mom_size, truth.n, truth.hlr, truth.inclination, truth.scale_h_over_r] truth_catalog.addRow(row) except galsim.errors.GalSimError: logger.info('Galaxy %d has failed, skipping...',k) ##### ### Inject cluster galaxy objects: ##### center_coords = galsim.CelestialCoord(sbparams.center_ra,sbparams.center_dec) centerpix = wcs.toImage(center_coords) # get local range to iterate over in this process local_start, local_end = M.mpi_local_range(sbparams.nclustergal) for k in range(local_start, local_end): time1 = time.time() # The usual random number generator using a different seed for each galaxy. ud = galsim.UniformDeviate(sbparams.cluster_seed+k+1) try: # make single galaxy object cluster_stamp,truth = make_cluster_galaxy(ud=ud,wcs=wcs,affine=affine, centerpix=centerpix, cluster_cat=cluster_cat, optics=optics, sbparams=sbparams) # Find the overlapping bounds: bounds = cluster_stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. #noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. 
full_image[bounds] += cluster_stamp[bounds] time2 = time.time() tot_time = time2-time1 logger.info('Cluster galaxy %d positioned relative to center t=%f s\n', k, tot_time) this_flux=numpy.sum(cluster_stamp.array) row = [ k,truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu,truth.z, this_flux,truth.fwhm,truth.mom_size, truth.n, truth.hlr, truth.inclination, truth.scale_h_over_r] truth_catalog.addRow(row) except galsim.errors.GalSimError: logger.info('Cluster galaxy %d has failed, skipping...',k) ##### ### Now repeat process for stars! ##### # get local range to iterate over in this process local_start, local_end = M.mpi_local_range(sbparams.nstars) for k in range(local_start, local_end): time1 = time.time() ud = galsim.UniformDeviate(sbparams.stars_seed+k+1) star_stamp,truth = make_a_star(ud=ud, wcs=wcs, affine=affine, optics=optics, sbparams=sbparams) bounds = star_stamp.bounds & full_image.bounds # Add the stamp to the full image. try: full_image[bounds] += star_stamp[bounds] time2 = time.time() tot_time = time2-time1 logger.info('Star %d: positioned relative to center, t=%f s', k, tot_time) this_flux=numpy.sum(star_stamp.array) row = [ k,truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu, truth.z, this_flux,truth.fwhm,truth.mom_size, truth.n, truth.hlr, truth.inclination, truth.scale_h_over_r] truth_catalog.addRow(row) except galsim.errors.GalSimError: logger.info('Star %d has failed, skipping...',k) # Gather results from MPI processes, reduce to single result on root # Using same names on left and right sides is hiding lots of MPI magic full_image = M.gather(full_image) truth_catalog = M.gather(truth_catalog) if M.is_mpi_root(): full_image = reduce(combine_images, full_image) truth_catalog = reduce(combine_catalogs, truth_catalog) else: # do the adding of noise and writing to disk entirely on root # root and the rest meet again at barrier at start of loop continue # The first thing to do is to make the Gaussian noise uniform across the whole image. # Add dark current logger.info('Adding Dark current') dark_noise = sbparams.dark_current * sbparams.exp_time full_image += dark_noise # Add ccd noise logger.info('Adding CCD noise') noise = galsim.CCDNoise( sky_level=0, gain=1/sbparams.gain, read_noise=sbparams.read_noise) full_image.addNoise(noise) logger.debug('Added noise to final output image') if not os.path.exists(os.path.dirname(file_name)): os.makedirs(os.path.dirname(file_name)) full_image.write(file_name) # Write truth catalog to file. truth_catalog.write(truth_file_name) logger.info('Wrote image to %r',file_name) logger.info(' ') logger.info('completed all images') logger.info(' ')
def main(argv): """ Make images using model PSFs and galaxy cluster shear: - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles (like in demo10) and parametric fits to those profiles. - The real galaxy images include some initial correlated noise from the original HST observation. However, we whiten the noise of the final image so the final image has stationary Gaussian noise, rather than correlated noise. """ logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout) global logger logger = logging.getLogger("mock_superbit_data") # Define some parameters we'll use below. # Normally these would be read in from some parameter file. global pixel_scale pixel_scale = 0.206 # arcsec/pixel global image_xsize image_xsize = 6665 # size of image in pixels global image_ysize image_ysize = 4453 # size of image in pixels global image_xsize_arcsec image_xsize_arcsec = image_xsize*pixel_scale # size of big image in each dimension (arcsec) global image_ysize_arcsec image_ysize_arcsec = image_ysize*pixel_scale # size of big image in each dimension (arcsec) global center_ra center_ra = 19.3*galsim.hours # The RA, Dec of the center of the image on the sky global center_dec center_dec = -33.1*galsim.degrees global nobj nobj = 30 # number of galaxies in entire field; this number matches empirical global nstars nstars = 1000 # number of stars in the entire field global flux_scaling # Let's figure out the flux for a 0.5 m class telescope global tel_diam tel_diam = 0.5 global psf_fwhm psf_fwhm = 0.30 global lam lam = 625 # Central wavelength for an airy disk global exp_time exp_time = 300 global noise_variance global sky_level psf_path = '/Users/jemcclea/Research/SuperBIT/superbit-ngmix/scripts/outputs/psfex_output' global nfw # will store the NFWHalo information global cosmos_cat # will store the COSMOS catalog from which we draw objects # Set up the NFWHalo: mass=5E14 # Cluster mass (Msol/h) nfw_conc = 4 # Concentration parameter = virial radius / NFW scale radius nfw_z_halo = 0.17 # redshift of the halo --> correct! nfw_z_source = 0.6 # redshift of the lensed sources; COSMOS galaxies don't have any omega_m = 0.3 # Omega matter for the background cosmology. omega_lam = 0.7 # Omega lambda for the background cosmology. nfw = galsim.NFWHalo(mass=mass, conc=nfw_conc, redshift=nfw_z_halo, omega_m=omega_m, omega_lam=omega_lam) logger.info('Set up NFW halo for lensing') # Read in galaxy catalog """ cat_file_name = 'real_galaxy_catalog_23.5.fits' dir = 'data/COSMOS_23.5_training_sample' #cat_file_name = 'real_galaxy_catalog_23.5_example.fits' #dir = 'data' """ cat_file_name = 'real_galaxy_catalog_25.2.fits' dir = 'data/COSMOS_25.2_training_sample/' cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir) logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects) # The catalog returns objects that are appropriate for HST in 1 second exposures. So for our # telescope we scale up by the relative area and exposure time. # Will also multiply by the gain and relative pixel scales... 
hst_eff_area = 2.4**2 * (1.-0.33**2) sbit_eff_area = tel_diam**2 * (1.-0.3840**2) #sbit_eff_area = tel_diam**2 * (1.-0.1**2) ### ### LOOP OVER PSFs TO MAKE GROUPS OF IMAGES ### WITHIN EACH PSF, ITERATE 5 TIMES TO MAKE 5 SEPARATE IMAGES ### #all_psfs=glob.glob(psf_path+"/*.psf") #all_psfs=glob.glob(psf_path+"/*300*.psf") random_seed = 35609377914 i=0 for psf_filen in range(1): logger.info('Beginning PSF %s...'% psf_filen) rng = galsim.BaseDeviate(random_seed) timescale=str(exp_time) outname=''.join(['debug_0.3FWHM_gaussStar_',timescale,'_',str(i),'.fits']) truth_file_name=''.join(['./output-debug/truth_0.3FWHM_gaussStar_',timescale,'_',str(i),'.dat']) file_name = os.path.join('output-debug',outname) # Set up the image: if timescale=='150': print("Automatically detecting a 150s exposure image, setting flux scale and noise accordingly") #noise_variance=570 # ADU^2 (Just use simple Gaussian noise here.) noise_variance=570 # ADU^2 (Just use simple Gaussian noise here.) sky_level = 51 # ADU exp_time=150. else: print("Automatically detecting a 300s exposure image, setting flux scale and noise accordingly") #noise_variance=400 # ADU^2 (Just use simple Gaussian noise here.) noise_variance=400 # ADU^2 (Just use simple Gaussian noise here.) sky_level = 106 # ADU exp_time=300. flux_scaling = (sbit_eff_area/hst_eff_area) * exp_time * 3.33 * (.206/.05)**2 # Setting up a truth catalog names = [ 'gal_num', 'x_image', 'y_image', 'ra', 'dec', 'g1_nopsf', 'g2_nopsf','g1_meas', 'g2_meas', 'fwhm','final_sigmaSize', 'nopsf_sigmaSize','nfw_g1', 'nfw_g2', 'nfw_mu', 'redshift','flux', 'stamp_sum', 'noisevar'] types = [ int, float, float, float, float, float, float, float, float, float, float, float, float, float,float, float, float,float, float] truth_catalog = galsim.OutputCatalog(names, types) # Set up the image: full_image = galsim.ImageF(image_xsize, image_ysize) full_image.fill(sky_level) full_image.setOrigin(0,0) # We keep track of how much noise is already in the image from the RealGalaxies. noise_image = galsim.ImageF(image_xsize, image_ysize) noise_image.setOrigin(0,0) # Make a slightly non-trivial WCS. We'll use a slightly rotated coordinate system # and center it at the image center. theta = 0.17 * galsim.degrees dudx = numpy.cos(theta) * pixel_scale dudy = -numpy.sin(theta) * pixel_scale dvdx = numpy.sin(theta) * pixel_scale dvdy = numpy.cos(theta) * pixel_scale image_center = full_image.true_center affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center) sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec) wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) full_image.wcs = wcs # Loop over galaxy objects: for k in range(nobj): time1 = time.time() # The usual random number generator using a different seed for each galaxy. ud = galsim.UniformDeviate(random_seed+k+1) try: # make single galaxy object logger.debug("about to make stamp %d...",k) stamp,truth = make_a_galaxy(ud=ud,wcs=wcs,affine=affine) logger.debug("stamp %d is made",k) # Find the overlapping bounds: bounds = stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. 
full_image[bounds] += stamp[bounds] logger.debug("stamp %d added to full image",k) time2 = time.time() tot_time = time2-time1 logger.info('Galaxy %d positioned relative to center t=%f s', k, tot_time) try: g1_real=stamp.FindAdaptiveMom().observed_shape.g1 g2_real=stamp.FindAdaptiveMom().observed_shape.g2 except: g1_real=-9999. g2_real=-9999. logger.debug("Galaxy %d made it past g1/g2_real stage",k) sum_flux=numpy.sum(stamp.array) row = [ k,truth.x, truth.y, truth.ra, truth.dec, truth.g1_nopsf, truth.g2_nopsf, g1_real, g2_real, truth.fwhm, truth.final_sigmaSize, truth.nopsf_sigmaSize,truth.g1,truth.g2, truth.mu, truth.z, truth.flux, sum_flux, truth.variance] truth_catalog.addRow(row) logger.debug("row for galaxy %d added to truth catalog\n\n",k) except: logger.info('Galaxy %d has failed, skipping...',k) #pdb.set_trace() pass ###### Inject cluster galaxy objects: random_seed=892465352 center_coords = galsim.CelestialCoord(center_ra,center_dec) centerpix = wcs.toImage(center_coords) for k in range(40): time1 = time.time() # The usual random number generator using a different seed for each galaxy. ud = galsim.UniformDeviate(random_seed+k+1) try: # make single galaxy object cluster_stamp,truth = make_cluster_galaxy(ud=ud,wcs=wcs,affine=affine,centerpix=centerpix) # Find the overlapping bounds: bounds = cluster_stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. full_image[bounds] += cluster_stamp[bounds] time2 = time.time() tot_time = time2-time1 logger.info('Cluster galaxy %d positioned relative to center t=%f s', k, tot_time) except: logger.info('Cluster galaxy %d has failed, skipping...',k) pdb.set_trace() #### ### Now repeat process for stars! #### random_seed_stars=2308173501873 for k in range(nstars): time1 = time.time() ud = galsim.UniformDeviate(random_seed_stars+k+1) try: star_stamp,truth=make_a_star(ud=ud,wcs=wcs,affine=affine) bounds = star_stamp.bounds & full_image.bounds logger.debug("star stamp & truth catalog made for star %d" %k) # Add the stamp to the full image. full_image[bounds] += star_stamp[bounds] time2 = time.time() tot_time = time2-time1 logger.info('Star %d: positioned relative to center, t=%f s', k, tot_time) try: g1_real=star_stamp.FindAdaptiveMom().observed_shape.g1 g2_real=star_stamp.FindAdaptiveMom().observed_shape.g2 except: g1_real = -9999. g2_real = -9999. this_var = -9999. sum_flux=numpy.sum(star_stamp.array) row = [ k,truth.x, truth.y, truth.ra, truth.dec, truth.g1_nopsf, truth.g2_nopsf, g1_real, g2_real, truth.fwhm, truth.final_sigmaSize, truth.nopsf_sigmaSize, truth.g1, truth.g2, truth.mu, truth.z, truth.flux, sum_flux, truth.variance] truth_catalog.addRow(row) except: logger.info('Star %d has failed, skipping...',k) pdb.set_trace() # We already have some noise in the image, but it isn't uniform. So the first thing to do is # to make the Gaussian noise uniform across the whole image. #max_current_variance = numpy.max(noise_image.array) #noise_image = max_current_variance - noise_image vn = galsim.VariableGaussianNoise(rng, noise_image) full_image.addNoise(vn) # Now max_current_variance is the noise level across the full image. We don't want to add that # twice, so subtract off this much from the intended noise that we want to end up in the image. #noise_variance -= max_current_variance # Now add Gaussian noise with this variance to the final image. 
noise = galsim.GaussianNoise(rng, sigma=math.sqrt(noise_variance)) full_image.addNoise(noise) logger.info('Added noise to final output image') # Now write the image to disk. full_image.write(file_name) # Add a FLUXSCL keyword for later stacking this_hdu=astropy.io.fits.open(file_name) this_hdu[0].header['FLXSCALE'] = 300.0/exp_time this_hdu.writeto(file_name,overwrite='True') logger.info('Wrote image to %r',file_name) # Write truth catalog to file. truth_catalog.write(truth_file_name) i=i+1 logger.info('completed run %d for psf %s',i,psf_filen) logger.info('completed all images')
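The FLXSCALE keyword added above lets later stacking put 150 s and 300 s exposures on a common flux scale. A minimal sketch of the same bookkeeping (hypothetical file name), using astropy's update mode instead of the open/writeto pattern:

import astropy.io.fits as fits

file_name = 'output-debug/example_image.fits'      # hypothetical path to an image written earlier
exp_time = 150.0

with fits.open(file_name, mode='update') as hdul:
    hdul[0].header['FLXSCALE'] = 300.0 / exp_time  # normalize to a 300 s equivalent exposure

Update mode writes the header change back to the same file when the block exits, so the pixel data do not need to be rewritten by hand.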
# Setup the image: fullImage = galsim.ImageF(xsize, ysize) fullImage.setOrigin(0,0) rng = galsim.BaseDeviate(randomSeed) noiseImage = galsim.ImageF(xsize, ysize) noiseImage.setOrigin(0,0) # image projection theta = 0.0 * galsim.degrees dudx = np.cos(theta.rad()) * pixelScale dudy = -np.sin(theta.rad()) * pixelScale dvdx = np.sin(theta.rad()) * pixelScale dvdy = np.cos(theta.rad()) * pixelScale imageCenter = fullImage.trueCenter() affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=imageCenter) skyCenter = galsim.CelestialCoord(ra=raCen*galsim.degrees, dec=decCen*galsim.degrees) wcs = galsim.TanWCS(affine, skyCenter, units=galsim.arcsec) fullImage.wcs = wcs # assume a constant Gaussian PSF and null shear for simulated galaxies psf = galsim.Gaussian(flux=1., fwhm=fwhmPSF) # psf = galsim.TopHat(flux=1., radius=fwhmPSF) g1, g2, mu = 0.0, 0.0, 1.0 # Loop on every galaxy nS = str(ngal) out_uids = np.zeros((ngal), dtype=int) out_gids = np.zeros((ngal), dtype=int) out_mags = np.zeros((ngal))
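The rotated image-to-sky Jacobian set up above (and in several of the other scripts) is just a scaled rotation matrix, so its determinant is the pixel area regardless of theta. A short check with illustrative values:

import numpy as np
import galsim

pixel_scale = 0.206                                # arcsec/pixel
theta = 0.17 * galsim.degrees                      # small rotation of the (u, v) axes

dudx = np.cos(theta / galsim.radians) * pixel_scale
dudy = -np.sin(theta / galsim.radians) * pixel_scale
dvdx = np.sin(theta / galsim.radians) * pixel_scale
dvdy = np.cos(theta / galsim.radians) * pixel_scale

affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy)
print(dudx * dvdy - dudy * dvdx, pixel_scale**2)   # determinant == pixel area (arcsec^2)
print(affine.pixelArea())                          # same number from the WCS itself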
def main(argv): """ Make images using model PSFs and galaxy cluster shear: - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles (like in demo10) and parametric fits to those profiles. We choose 40% of the galaxies to use the images, and the other 60% to use the parametric fits - The real galaxy images include some initial correlated noise from the original HST observation. However, we whiten the noise of the final image so the final image has stationary Gaussian noise, rather than correlated noise. """ global logger logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout) logger = logging.getLogger("mock_superbit_data") # Define some parameters we'll use below. # Normally these would be read in from some parameter file. global pixel_scale pixel_scale = 0.206 # arcsec/pixel global image_xsize image_xsize = 6665 # size of image in pixels global image_ysize image_ysize = 4453 # size of image in pixels global image_xsize_arcsec image_xsize_arcsec = image_xsize * pixel_scale # size of big image in each dimension (arcsec) global image_ysize_arcsec image_ysize_arcsec = image_ysize * pixel_scale # size of big image in each dimension (arcsec) global center_ra center_ra = 19.3 * galsim.hours # The RA, Dec of the center of the image on the sky global center_dec center_dec = -33.1 * galsim.degrees global exp_time exp_time = 300 global sky_bkg # mean sky background from AG's paper sky_bkg = 0.32 # ADU / s / pix global sky_sigma # standard deviation of sky background sky_sigma = 0.16 # ADU / s / pix global nobj nobj = 22 # number of galaxies in entire field global nstars nstars = 300 # number of stars in the entire field global flux_scaling global tel_diam tel_diam = 0.5 global lam lam = 625 # Central wavelength for Airy disk global optics psf_path = '/Users/jemcclea/Research/GalSim/examples/data/fpsc_flight_jitter_psf_oversampled_fixed_10x' global optics # will store the Zernicke component of the PSF global nfw # will store the NFWHalo information global cosmos_cat # will store the COSMOS catalog from which we draw objects global example_cat # also a COSMOS catalog which will contain cluster galaxies # Set up the NFWHalo: mass = 5E14 # Cluster mass (Msol/h) nfw_conc = 4 # Concentration parameter = virial radius / NFW scale radius nfw_z_halo = 0.17 # redshift of the halo omega_m = 0.3 # Omega matter for the background cosmology. omega_lam = 0.7 # Omega lambda for the background cosmology. nfw = galsim.NFWHalo(mass=mass, conc=nfw_conc, redshift=nfw_z_halo, omega_m=omega_m, omega_lam=omega_lam) logger.info('Set up NFW halo for lensing') # Read in galaxy catalog cat_file_name = 'real_galaxy_catalog_25.2.fits' dir = 'data/COSMOS_25.2_training_sample/' cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir) logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects) # Also read in example catalog example_cat_file_name = 'data/real_galaxy_catalog_23.5_example.fits' example_cat = galsim.COSMOSCatalog(example_cat_file_name) # The catalog returns objects that are appropriate for HST in 1 second exposures. So for our # telescope we scale up by the relative area, exposure time and pixel scale hst_eff_area = 2.4**2 * (1. - 0.33**2) sbit_eff_area = tel_diam**2 * (1. - 0.380**2) flux_scaling = (sbit_eff_area / hst_eff_area) * exp_time * (pixel_scale / .05)**2 ### Now create PSF. First, define Zernicke polynomial component lam_over_diam = lam * 1.e-9 / tel_diam # radians lam_over_diam *= 206265 # arcsec aberrations = [0.0] * 12 # Set the initial size. 
aberrations[4] = -0.00725859 # Noll index 4 = Defocus aberrations[5:7] = [0.0, -0.00] # Noll index 5,6 = Astigmatism aberrations[7:9] = [0.07, 0.00] # Noll index 7,8 = Coma aberrations[11] = 0.00133254 # Noll index 11 = Spherical logger.info('Calculated lambda over diam = %f arcsec', lam_over_diam) optics = galsim.OpticalPSF(lam_over_diam, obscuration=0.380, aberrations=aberrations) logger.info('Made telescope PSF profile') ### ### LOOP OVER PSFs TO MAKE GROUPS OF IMAGES ### WITHIN EACH PSF, ITERATE 5 TIMES TO MAKE 5 SEPARATE IMAGES ### all_psfs = glob.glob(psf_path + "/*247530*.psf") # this is 121s logger.info('Beginning loop over jitter/optical psfs') for psf_filen in all_psfs: logger.info('Beginning PSF %s...' % psf_filen) for i in numpy.arange(1, 2): logger.info('Beginning loop %d' % i) random_seed = 23058923781 rng = galsim.BaseDeviate(random_seed) # This is specific to Javier mock PSFs try: root = psf_filen.split('data/')[1].split('/')[0] timescale = psf_filen.split('_10x/')[1].split('.')[0] outname = ''.join([ 'mock_superbit_', root, timescale, str(i).zfill(3), '.fits' ]) truth_file_name = ''.join([ './output/truth_', root, timescale, str(i).zfill(3), '.dat' ]) file_name = os.path.join('output', outname) except: pdb.set_trace() # Setting up a truth catalog names = [ 'gal_num', 'x_image', 'y_image', 'ra', 'dec', 'g1_meas', 'g2_meas', 'nfw_mu', 'redshift', 'flux' ] types = [ int, float, float, float, float, float, float, float, float, float ] truth_catalog = galsim.OutputCatalog(names, types) # Set up the image: full_image = galsim.ImageF(image_xsize, image_ysize) sky_level = exp_time * sky_bkg full_image.fill(sky_level) full_image.setOrigin(0, 0) # We keep track of how much noise is already in the image from the RealGalaxies. noise_image = galsim.ImageF(image_xsize, image_ysize) noise_image.setOrigin(0, 0) # Make a slightly non-trivial WCS. We'll use a slightly rotated coordinate system # and center it at the image center. theta = 0.17 * galsim.degrees dudx = numpy.cos(theta) * pixel_scale dudy = -numpy.sin(theta) * pixel_scale dvdx = numpy.sin(theta) * pixel_scale dvdy = numpy.cos(theta) * pixel_scale image_center = full_image.true_center affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center) sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec) wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) full_image.wcs = wcs # Now let's read in the PSFEx PSF model. We read the image directly into an # InterpolatedImage GSObject, so we can manipulate it as needed psf_wcs = wcs psf_file = os.path.join(psf_path, psf_filen) psf = galsim.des.DES_PSFEx(psf_file, wcs=psf_wcs) logger.info('Constructed PSF object from PSFEx file') # Loop over galaxy objects: for k in range(nobj): time1 = time.time() # The usual random number generator using a different seed for each galaxy. ud = galsim.UniformDeviate(random_seed + k + 1) try: # make single galaxy object stamp, truth = make_a_galaxy(ud=ud, wcs=wcs, psf=psf, affine=affine) # Find the overlapping bounds: bounds = stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. 
full_image[bounds] += stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info( 'Galaxy %d positioned relative to center t=%f s', k, tot_time) this_flux = numpy.sum(stamp.array) row = [ k, truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu, truth.z, this_flux ] truth_catalog.addRow(row) except: logger.info('Galaxy %d has failed, skipping...', k) pdb.set_trace() ###### Inject cluster galaxy objects: random_seed = 892465352 for k in range(50): time1 = time.time() # The usual random number generator using a different seed for each galaxy. ud = galsim.UniformDeviate(random_seed + k + 1) try: # make single galaxy object cluster_stamp, truth = make_cluster_galaxy(ud=ud, wcs=wcs, psf=psf, affine=affine) # Find the overlapping bounds: bounds = cluster_stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. full_image[bounds] += cluster_stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info( 'Cluster galaxy %d positioned relative to center t=%f s', k, tot_time) this_flux = numpy.sum(cluster_stamp.array) row = [ k, truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu, truth.z, this_flux ] truth_catalog.addRow(row) except: logger.info('Cluster galaxy %d has failed, skipping...', k) pdb.set_trace() #### ### Now repeat process for stars! #### random_seed_stars = 2308173501873 for k in range(nstars): time1 = time.time() ud = galsim.UniformDeviate(random_seed_stars + k + 1) star_stamp, truth = make_a_star(ud=ud, wcs=wcs, psf=psf, affine=affine) bounds = star_stamp.bounds & full_image.bounds # Add the stamp to the full image. try: full_image[bounds] += star_stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info( 'Star %d: positioned relative to center, t=%f s', k, tot_time) this_flux = numpy.sum(star_stamp.array) row = [ k, truth.x, truth.y, truth.ra, truth.dec, truth.g1, truth.g2, truth.mu, truth.z, this_flux ] truth_catalog.addRow(row) except: logger.info('Star %d has failed, skipping...', k) pass # If real-type COSMOS galaxies are used, the noise across the image won't be uniform, so the # first thing to do is to make the Gaussian noise uniform across the whole image. max_current_variance = numpy.max(noise_image.array) noise_image = max_current_variance - noise_image vn = galsim.VariableGaussianNoise(rng, noise_image) full_image.addNoise(vn) # Now max_current_variance is the noise level across the full image. We don't want to add that # twice, so subtract off this much from the intended noise that we want to end up in the image. sky_sigma -= numpy.sqrt(max_current_variance) # Regardless of galaxy type, add Gaussian noise with this variance to the final image. this_noise_sigma = sky_sigma * exp_time noise = galsim.GaussianNoise(rng, sigma=this_noise_sigma) full_image.addNoise(noise) logger.debug('Added noise to final output image') full_image.write(file_name) # Write truth catalog to file. truth_catalog.write(truth_file_name) logger.info('Wrote image to %r', file_name) logger.info(' ') logger.info('completed run %d for psf %s', i, psf_filen) i = i + 1 logger.info(' ') logger.info(' ') logger.info('completed all images') logger.info(' ')
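The two-stage noise top-up at the end of that loop is the same pattern used in the other scripts above: VariableGaussianNoise first raises every pixel to the current maximum variance, then uniform GaussianNoise supplies the rest of the budget. A compact sketch with illustrative sizes and variances:

import numpy as np
import galsim

rng = galsim.BaseDeviate(1234)
full_image = galsim.ImageF(64, 64, scale=0.206)
noise_image = galsim.ImageF(64, 64)                # per-pixel variance already present
noise_image.array[20:40, 20:40] = 0.3              # e.g. where whitened stamps landed

max_current_variance = np.max(noise_image.array)
noise_image = max_current_variance - noise_image   # how much each pixel still needs
full_image.addNoise(galsim.VariableGaussianNoise(rng, noise_image))

noise_variance = 1.0                               # illustrative total target variance
noise_variance -= max_current_variance             # don't add the part already there
full_image.addNoise(galsim.GaussianNoise(rng, sigma=np.sqrt(noise_variance)))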
def _process_obs(self): """ add observations as interpolated images also keep track of psfs, variances, and noise realizations """ self.images = [] self.psfs = [] self.weights = np.zeros(len(self.observations)) self.noise_images = [] self._set_coadd_obs() for i, obs in enumerate(self.observations): offset = self._get_offsets(obs.meta['offset_pixels']) #print("offset:",offset) psf_offset = self._get_offsets(obs.psf.meta['offset_pixels']) #print("psf offset:",psf_offset) image_center = self.canonical_center + offset psf_image_center = self.psf_canonical_center + psf_offset # interplated image, shifted to center of the postage stamp jac = obs.jacobian wcs = galsim.TanWCS( affine=galsim.AffineTransform( jac.dudcol, jac.dudrow, jac.dvdcol, jac.dvdrow, origin=image_center, ), world_origin=self.sky_center, ) pjac = obs.psf.jacobian psf_wcs = galsim.TanWCS( affine=galsim.AffineTransform( pjac.dudcol, pjac.dudrow, pjac.dvdcol, pjac.dvdrow, origin=psf_image_center, ), world_origin=self.sky_center, ) image = galsim.InterpolatedImage( galsim.Image(obs.image, wcs=wcs), offset=offset, x_interpolant=self.interp, ) # always normalizing psf psf_image = obs.psf.image.copy() psf_image /= psf_image.sum() psf = galsim.InterpolatedImage( galsim.Image(psf_image, wcs=psf_wcs), offset=psf_offset, x_interpolant=self.interp, ) self.images.append(image) self.psfs.append(psf) # assume variance is constant wt = obs.weight.max() if self.weight_type == 'noise-fwhm': fwhm = measure_fwhm(psf_image) wt /= fwhm**4 self.weights[i] = wt # use input noise image noise_image = galsim.InterpolatedImage( galsim.Image(obs.noise, wcs=wcs), offset=offset, x_interpolant=self.interp, ) self.noise_images.append(noise_image) self.weights /= self.weights.sum()
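A numpy-only sketch of the epoch weighting in _process_obs, assuming constant per-epoch variance taken from the weight-map maximum and the optional 'noise-fwhm' scheme that also downweights epochs with broad PSFs; the helper name is hypothetical.

import numpy as np

def coadd_weights(weight_maps, psf_fwhms=None):
    # One inverse-variance weight per epoch, from the maximum of each weight map.
    wts = np.array([w.max() for w in weight_maps], dtype=float)
    if psf_fwhms is not None:                      # 'noise-fwhm' weighting
        wts /= np.asarray(psf_fwhms) ** 4
    return wts / wts.sum()                         # normalized, as at the end of _process_obs

print(coadd_weights([np.full((32, 32), 2.0), np.full((32, 32), 8.0)], psf_fwhms=[1.0, 1.2]))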
def main(argv): """ Make images using model PSFs and galaxy cluster shear: - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles (like in demo10) and parametric fits to those profiles. We choose 40% of the galaxies to use the images, and the other 60% to use the parametric fits - The real galaxy images include some initial correlated noise from the original HST observation. However, we whiten the noise of the final image so the final image has stationary Gaussian noise, rather than correlated noise. """ logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout) logger = logging.getLogger("mock_superbit_data") # Define some parameters we'll use below. # Normally these would be read in from some parameter file. global pixel_scale pixel_scale = 0.206 # arcsec/pixel global image_xsize image_xsize = 6665 # size of image in pixels global image_ysize image_ysize = 4453 # size of image in pixels global image_xsize_arcsec image_xsize_arcsec = image_xsize * pixel_scale # size of big image in each dimension (arcsec) global image_ysize_arcsec image_ysize_arcsec = image_ysize * pixel_scale # size of big image in each dimension (arcsec) global center_ra center_ra = 19.3 * galsim.hours # The RA, Dec of the center of the image on the sky global center_dec center_dec = -33.1 * galsim.degrees global exp_time exp_time = 3000 # exposing for 1500 seconds to match real, observed galaxy/flux count. global noise_variance noise_variance = 1.8e3 # ADU^2 (Just use simple Gaussian noise here.) -->150s #noise_variance = 2.55e3 # ADU^2 (Just use simple Gaussian noise here.) -->300s global sky_level sky_level = 51 # ADU / arcsec^2 -->150s #sky_level = 106 # ADU / arcsec^2 -->300s global nobj nobj = 1700 # number of galaxies in entire field -- an adjustment to ensure ~1100 detections global nstars nstars = 370 # number of stars in the entire field global flux_scaling # Let's figure out the flux for a 0.5 m class telescope global tel_diam tel_diam = 0.5 global lam lam = 587 # Central wavelength psf_path = '/Users/jemcclea/Research/GalSim/examples/data/fpsc_flight_jitter_psf_oversampled_fixed_10x' global optics # will store the Zernicke component of the PSF global nfw # will store the NFWHalo information global cosmos_cat # will store the COSMOS catalog from which we draw objects # Set up the NFWHalo: mass = 5E14 # Cluster mass (Msol/h) nfw_conc = 4 # Concentration parameter = virial radius / NFW scale radius nfw_z_halo = 0.3 # redshift of the halo nfw_z_source = 0.6 # redshift of the lensed sources omega_m = 0.3 # Omega matter for the background cosmology. omega_lam = 0.7 # Omega lambda for the background cosmology. field_g1 = 0.03 # The field shear is some cosmic shear applied to the whole field, field_g2 = 0.01 # taken to be behind the foreground NFW halo (not needed for now) nfw = galsim.NFWHalo(mass=mass, conc=nfw_conc, redshift=nfw_z_halo, omega_m=omega_m, omega_lam=omega_lam) logger.info('Set up NFW halo for lensing') # Read in galaxy catalog if True: # The catalog we distribute with the GalSim code only has 100 galaxies. # The galaxies will typically be reused several times here. cat_file_name = 'real_galaxy_catalog_23.5_example.fits' dir = 'data' cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir) else: # If you've run galsim_download_cosmos, you can leave out the cat_file_name and dir # to use the full COSMOS catalog with 56,000 galaxies in it. 
cosmos_cat = galsim.COSMOSCatalog() logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects) # The catalog returns objects that are appropriate for HST in 1 second exposures. So for our # telescope we scale up by the relative area and exposure time. # (Both "areas" below are just diameter^2 * (1 - obscuration^2); the missing factor of pi/4 cancels in the ratio.) hst_eff_area = 2.4**2 * (1. - 0.33**2) sbit_eff_area = tel_diam**2 * ( 1. - 0.10**2 ) # For want of something better, operating with 10% obscuration flux_scaling = (sbit_eff_area / hst_eff_area) * exp_time ### Now create PSF. First, define the Zernike polynomial component. lam_over_diam = lam * 1.e-9 / tel_diam # radians lam_over_diam *= 206265 # arcsec aberrations = [0.0] * 12 # Set the initial size. aberrations[4] = -0.00725859 # Noll index 4 = Defocus aberrations[5:7] = [0.0, -0.00] # Noll index 5,6 = Astigmatism aberrations[7:9] = [0.07, 0.00] # Noll index 7,8 = Coma aberrations[11] = 0.00133254 # Noll index 11 = Spherical logger.info('Calculated lambda over diam = %f arcsec', lam_over_diam) optics = galsim.OpticalPSF(lam_over_diam, obscuration=0.10, aberrations=aberrations) logger.info('Made telescope PSF profile') ### ### LOOP OVER PSFs TO MAKE GROUPS OF IMAGES ### WITHIN EACH PSF, ITERATE 5 TIMES TO MAKE 5 SEPARATE IMAGES ### all_psfs = glob.glob(psf_path + "/*.psf") logger.info('Beginning loop over jitter/optical psfs') for psf_filen in all_psfs: logger.info('Beginning PSF %s...' % psf_filen) for i in numpy.arange(1, 6): logger.info('Beginning loop %d' % i) random_seed = numpy.random.randint(low=10000000, high=99999999) rng = galsim.BaseDeviate(random_seed) # This is specific to Javier's mock PSFs try: root = psf_filen.split('data/')[1].split('/')[0] timescale = psf_filen.split('_10x/')[1].split('.')[0] outname = ''.join([ 'mock_superbit_', root, timescale, str(i).zfill(3), '.fits' ]) truth_file_name = ''.join([ './output/truth_', root, timescale, str(i).zfill(3), '.dat' ]) file_name = os.path.join('output', outname) except: pdb.set_trace() # Setting up a truth catalog names = [ 'gal_num', 'x_image', 'y_image', 'ra', 'dec', 'g1_meas', 'g2_meas', 'nfw_g1', 'nfw_g2', 'nfw_mu', 'redshift', 'flux' ] types = [ int, float, float, float, float, float, float, float, float, float, float, float ] truth_catalog = galsim.OutputCatalog(names, types) # Set up the image: full_image = galsim.ImageF(image_xsize, image_ysize) full_image.fill(sky_level) full_image.setOrigin(0, 0) # We keep track of how much noise is already in the image from the RealGalaxies. noise_image = galsim.ImageF(image_xsize, image_ysize) noise_image.setOrigin(0, 0) # Make a slightly non-trivial WCS. We'll use a slightly rotated coordinate system # and center it at the image center. theta = 0.17 * galsim.degrees dudx = numpy.cos(theta) * pixel_scale dudy = -numpy.sin(theta) * pixel_scale dvdx = numpy.sin(theta) * pixel_scale dvdy = numpy.cos(theta) * pixel_scale image_center = full_image.true_center affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center) sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec) wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) full_image.wcs = wcs # Now let's read in the PSFEx PSF model. DES_PSFEx lets us evaluate the PSF model # at any image position, so we can manipulate it as needed. psf_wcs = wcs psf_file = os.path.join(psf_path, psf_filen) psf = galsim.des.DES_PSFEx(psf_file, wcs=psf_wcs) logger.info('Constructed PSF object from PSFEx file') # Loop over galaxy objects: for k in range(nobj): time1 = time.time() # The usual random number generator using a different seed for each galaxy.
ud = galsim.UniformDeviate(random_seed + k + 1) try: # make single galaxy object stamp, truth = make_a_galaxy(ud=ud, wcs=wcs, psf=psf, affine=affine) # Find the overlapping bounds: bounds = stamp.bounds & full_image.bounds # We need to keep track of how much variance we have currently in the image, so when # we add more noise, we can omit what is already there. noise_image[bounds] += truth.variance # Finally, add the stamp to the full image. full_image[bounds] += stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info( 'Galaxy %d positioned relative to center, t=%f s', k, tot_time) #g1_real=stamp.FindAdaptiveMom().observed_shape.g1 #g2_real=stamp.FindAdaptiveMom().observed_shape.g2 g1_real = -9999. g2_real = -9999. this_flux = numpy.sum(stamp.array) row = [ k, truth.x, truth.y, truth.ra, truth.dec, g1_real, g2_real, truth.g1, truth.g2, truth.mu, truth.z, this_flux ] truth_catalog.addRow(row) except: logger.info('Galaxy %d has failed, skipping...', k) #### ### Now repeat process for stars! #### random_seed_stars = numpy.random.randint(low=10000000, high=99999999) for k in range(nstars): time1 = time.time() ud = galsim.UniformDeviate(random_seed_stars + k + 1) star_stamp, truth = make_a_star(ud=ud, wcs=wcs, psf=psf, affine=affine) bounds = star_stamp.bounds & full_image.bounds # Add the stamp to the full image. try: full_image[bounds] += star_stamp[bounds] time2 = time.time() tot_time = time2 - time1 logger.info( 'Star %d: positioned relative to center, t=%f s', k, tot_time) #g1_real=star_stamp.FindAdaptiveMom().observed_shape.g1 --> no longer positive definite :-? #g2_real=star_stamp.FindAdaptiveMom().observed_shape.g2 g1_real = -9999. g2_real = -9999. this_flux = numpy.sum(star_stamp.array) row = [ k, truth.x, truth.y, truth.ra, truth.dec, g1_real, g2_real, truth.g1, truth.g2, truth.mu, truth.z, this_flux ] truth_catalog.addRow(row) except: logger.info('Star %d has failed, skipping...', k) # We already have some noise in the image, but it isn't uniform. So the first thing to do is # to make the Gaussian noise uniform across the whole image. max_current_variance = numpy.max(noise_image.array) noise_image = max_current_variance - noise_image vn = galsim.VariableGaussianNoise(rng, noise_image) full_image.addNoise(vn) # Now max_current_variance is the noise level across the full image. We don't want to add that # twice, so subtract off this much from the intended noise that we want to end up in the image. # (Use a local value rather than decrementing the global noise_variance, which would compound # over the loop iterations.) this_noise_variance = noise_variance - max_current_variance # Now add Gaussian noise with this variance to the final image. noise = galsim.GaussianNoise(rng, sigma=math.sqrt(this_noise_variance)) full_image.addNoise(noise) logger.info('Added noise to final large image') # Now write the image to disk. (GalSim would compress it automatically with Rice compression # if the file name ended in .fz; here it ends in .fits, so it is written uncompressed.) full_image.write(file_name) logger.info('Wrote image to %r', file_name) # Write truth catalog to file. truth_catalog.write(truth_file_name) # Compute some sky positions of some of the pixels to compare with the values of RA, Dec # that ds9 reports. ds9 always uses (1,1) for the lower left pixel, so the pixel coordinates # of these pixels are different by 1, but you can check that the RA and Dec values are # the same as what GalSim calculates.
ra_str = center_ra.hms() dec_str = center_dec.dms() logger.info( 'Center of image is at RA %sh %sm %ss, DEC %sd %sm %ss', ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3], dec_str[3:5], dec_str[5:]) for (x, y) in [(0, 0), (0, image_ysize - 1), (image_xsize - 1, 0), (image_xsize - 1, image_ysize - 1)]: world_pos = wcs.toWorld(galsim.PositionD(x, y)) ra_str = world_pos.ra.hms() dec_str = world_pos.dec.dms() logger.info( 'Pixel (%4d, %4d) is at RA %sh %sm %ss, DEC %sd %sm %ss', x, y, ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3], dec_str[3:5], dec_str[5:]) logger.info( 'ds9 reports these pixel coordinates offset by 1 (its lower-left pixel is (1,1)), but with the same RA, Dec.') logger.info(' ') logger.info('completed run %d for psf %s', i, psf_filen) logger.info('completed all images')
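The noise handling in the script above follows the standard GalSim bookkeeping: keep a per-pixel record of how much variance the RealGalaxy stamps have already deposited, top every pixel up to the current maximum with VariableGaussianNoise so the noise floor is flat, then add plain GaussianNoise for whatever variance is still missing from the target. A minimal sketch of that bookkeeping on a small image, with made-up variance values purely for illustration:

import numpy as np
import galsim

rng = galsim.BaseDeviate(1234)
target_variance = 1.8e3  # total variance wanted in the final image (ADU^2); made-up here

full_image = galsim.ImageF(64, 64, init_value=0.)
# Pretend two stamps already deposited different amounts of noise variance.
noise_image = galsim.ImageF(64, 64, init_value=0.)
noise_image[galsim.BoundsI(1, 32, 1, 32)] += 120.
noise_image[galsim.BoundsI(20, 50, 20, 50)] += 80.

# Step 1: top every pixel up to the current maximum variance, so the existing
# noise becomes uniform across the image.
max_current_variance = float(np.max(noise_image.array))
full_image.addNoise(galsim.VariableGaussianNoise(rng, max_current_variance - noise_image))

# Step 2: add uniform Gaussian noise for whatever variance is still missing.
full_image.addNoise(galsim.GaussianNoise(rng, sigma=np.sqrt(target_variance - max_current_variance)))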
def main(argv): """ Make a simple image with a few galaxies. - Only galaxies. No stars. - PSF is Airy (Euclid-like) - Each galaxy is a single Sersic profile. - Noise is Gaussian using a specified sky value """ timei = time.time() logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout) logger = logging.getLogger("simulator") ###DEFINE CATALOGUE PARAMETERS### # loading the full catalogue # cat_path=argv[1] cat_path = 'data/EUCLID_TU_CATsel_v1.fits' cat_table = Table.read(cat_path, format='fits') # cat_data=cat_table.to_pandas() # slicing the catalogue into patches patchsize = 20 / 60 # patch size in degrees (20 arcmin on a side) patches = patch_selection(cat_table, patchsize) # per-patch parameter lists nobj = [] RAall = [] DECall = [] magall = [] rhDall = [] rhBall = [] nsersicall = [] axisratioall = [] ell1all = [] ell2all = [] shear1all = [] shear2all = [] diskangleall = [] # work with one patch at a time for patch in patches: nobj.append(len(patch)) RAall.append(patch['RA_MAG']) DECall.append(patch['DEC_MAG']) magall.append(patch['VIS']) rhDall.append(patch['DISK_LENGTH']) rhBall.append(patch['BULGE_LENGTH']) # nsersicall is not read at the moment; it will be assigned either 1 or 4 depending on whether the galaxy is a disk or an elliptical # not sure if 'DISK_ANGLE' is in fact the phi angle that sets the ellipticity components axisratioall.append(patch['DISK_AXIS_RATIO']) ell1, ell2 = get_ell_12(patch['DISK_AXIS_RATIO'], patch['DISK_ANGLE']) ell1all.append(ell1) ell2all.append(ell2) shear1all.append(patch['GAMMA1']) shear2all.append(patch['GAMMA2']) diskangleall.append(patch['DISK_ANGLE']) ###DEFINE IMAGE PARAMETERS### num = argv[1] # number to appear in the image name random_seed = 8241574 pixel_scale = 0.1 # arcsec / pixel (size units in input catalog are pixels) xsize = 128 # pixels ysize = 128 # pixels image_size = int(62 * 22 / 0.1) # pixels t_exp = 3 * 565 # s gain = 3.1 # e-/ADU readoutnoise = 4.2 # e- sky_bkg = 22.35 # mag/arcsec2 ZP = 24.0 # mag F_sky = pixel_scale**(2) * t_exp * 10**(-(sky_bkg - ZP) / 2.5) # e-/pixel noise_variance = (numpy.sqrt(readoutnoise**2 + F_sky) / gain)**2 # e- -> ADU by dividing sigma by gain ###### ###DISPLAY IMAGE INFO### logger.info('\nStarting simulator using:') logger.info(' - pixel scale = %.2f arcsec/pixel', pixel_scale) logger.info(' - Image size = %.0f pixels', image_size) logger.info(' - Image ZP = %.2f mag', ZP) logger.info(' - Image exposure time = %.0f s', t_exp) logger.info(' - Image gain = %.2f e-/ADU', gain) logger.info('\n - Sky background = %.2f mag/arcsec2', sky_bkg) logger.info(' - Read-out noise = %.1f e-', readoutnoise) logger.info(' - Gaussian noise (sigma = %.2f ADU/pixel)', numpy.sqrt(noise_variance)) logger.info( '\n - Airy PSF (lam=600,700,800, diam=1.2, obscuration=0.3)') logger.info(' - Sersic galaxies') logger.info(' - Total number of galaxies = %d\n', sum(nobj)) ###### ###MAKE THE WCS COORDINATES (test11)### # Make a slightly non-trivial WCS. We'll use a slightly rotated coordinate system # and center it at the image center.
theta = 0.17 * galsim.degrees #dudx = math.cos(theta.rad()) * pixel_scale #dudy = -math.sin(theta.rad()) * pixel_scale #dvdx = math.sin(theta.rad()) * pixel_scale #dvdy = math.cos(theta.rad()) * pixel_scale dudx = numpy.cos(theta) * pixel_scale dudy = -numpy.sin(theta) * pixel_scale dvdx = numpy.sin(theta) * pixel_scale dvdy = numpy.cos(theta) * pixel_scale # image_center = full_image.true_center # affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center) # # We can also put it on the celestial sphere to give it a bit more realism. # # The TAN projection takes a (u,v) coordinate system on a tangent plane and projects # # that plane onto the sky using a given point as the tangent point. The tangent # # point should be given as a CelestialCoord. # sky_center = galsim.CelestialCoord(ra=3.544151*galsim.hours, dec=-27.791371*galsim.degrees) # # The third parameter, units, defaults to arcsec, but we make it explicit here. # # It sets the angular units of the (u,v) intermediate coordinate system. # wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) # full_image.wcs = wcs ###### ###TUNE THE SPEED OF FFT### # slightly decrease the precision of the Fourier transforms and convolutions to speed things up. # Taken from the Jarvis discussion at https://github.com/GalSim-developers/GalSim/issues/566 gsparams = galsim.GSParams(xvalue_accuracy=2.e-4, kvalue_accuracy=2.e-4, maxk_threshold=5.e-3, folding_threshold=1.e-2) ###### ###BUILD PSF### psf = galsim.Airy(lam=800, diam=1.2, obscuration=0.3, scale_unit=galsim.arcsec, flux=1. / 3) + galsim.Airy(lam=700, diam=1.2, obscuration=0.3, scale_unit=galsim.arcsec, flux=1. / 3) + galsim.Airy( lam=600, diam=1.2, obscuration=0.3, scale_unit=galsim.arcsec, flux=1. / 3) # ###uncomment to write the PSF # logger.info('\nWriting PSF') # image = galsim.ImageF(xsize,ysize,scale=pixel_scale) # psf.drawImage(image=image) # image.write('psf_nonoise.fits') # rng = galsim.BaseDeviate(random_seed) # noise = galsim.GaussianNoise(rng, sigma=math.sqrt(noise_variance)*1./1000) # image.addNoise(noise) # image.write('psf_onethousands_noise.fits') # logger.info('PSF written in psf_nonoise.fits and psf_onethousands_noise.fits') # ### ####### ###PAINT GALAXIES### timeigal = time.time() logger.info('\n\nStarting to simulate galaxies') # go over the patches # for p in range(len(patches)): for p in range(1): ###CREATE OUTPUT IMAGES### file_name = 'output/sim_patch-%s_nonoise.fits' % (p) file_name_noise = 'output/sim_patch-%s_noise.fits' % (p) full_image = galsim.ImageF(image_size, image_size) full_image.setOrigin(1, 1) image_center = full_image.true_center affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center) ra_cent = np.mean(RAall[p]) dec_cent = np.mean(DECall[p]) sky_center = galsim.CelestialCoord(ra=ra_cent * galsim.degrees, dec=dec_cent * galsim.degrees) print('\n sky center', sky_center) wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec) full_image.wcs = wcs logger.info('Image %r and %r created', file_name, file_name_noise) # At the moment the center of the image and its WCS are the same for each patch; realistically this should change.
###### stamps_noise = [] stamps_nonoise = [] galaxy_params = [] for k in range(nobj[p]): # for k in range(300): # Read galaxy parameters from catalog RA = RAall[p][k] DEC = DECall[p][k] # from RA,DEC get pixel position on the image world_pos = galsim.CelestialCoord(RA * galsim.degrees, DEC * galsim.degrees) image_pos = wcs.toImage(world_pos) ## if disk galaxy, rh is the disk length and sersic is 1 ## if elliptical galaxy, rh is the bulge length and sersic is 4 axisratio = axisratioall[p][k] if rhDall[p][k] == 0: half_light_radius = rhBall[p][k] * np.sqrt(axisratio) #* 3.459 nsersic = 4 else: half_light_radius = rhDall[p][k] * np.sqrt(axisratio) * 1.7 nsersic = 1 mag = magall[p][k] ell1 = ell1all[p][k] ell2 = ell2all[p][k] # per-galaxy shear from the catalog (the commented-out alternative would use the mean shear of the patch) shear1 = shear1all[p][k] #np.mean(shear1all[p]) shear2 = shear2all[p][k] #np.mean(shear2all[p]) diskangle = diskangleall[p][k] # ### DISPLAY INFO FOR GALAXIES IN PATCH### # logger.info('\n - world_pos', world_pos) # logger.info(' -image_pos', image_pos) # logger.info(' - Patch', p) # logger.info(' - Galaxy ', k) # logger.info(' - position RA,DEC %.3f,%.3f', RA, DEC) # logger.info(' - magnitude %.2f', mag) # logger.info(' - half-light radius %.2f', half_light_radius) # logger.info(' - sersic index', nsersic) # logger.info(' - ellipticity %.4f,%.4f', ell1,ell2) # logger.info(' - shear %.4f,%.4f\n', shear1,shear2) # Galaxy is a Sersic profile: fluxflux = t_exp / gain * 10**(-(mag - ZP) / 2.5) gal = galsim.Sersic(n=nsersic, half_light_radius=half_light_radius, flux=fluxflux, gsparams=gsparams, trunc=half_light_radius * 4.5) gal = gal.shear(e1=ell1, e2=ell2) gal = gal.shear(g1=shear1, g2=shear2) # Rotate galaxy ang = diskangle gal = gal.rotate(theta=ang * galsim.degrees) # convolve galaxy with PSF final = galsim.Convolve([psf, gal]) # final = gal # offset the center for pixelization (by a random sub-pixel amount) ud = galsim.UniformDeviate(random_seed + k) x_nominal = image_pos.x + 0.5 y_nominal = image_pos.y + 0.5 ix_nominal = int(math.floor(x_nominal + 0.5)) iy_nominal = int(math.floor(y_nominal + 0.5)) dx = (x_nominal - ix_nominal) * (2 * ud() - 1) dy = (y_nominal - iy_nominal) * (2 * ud() - 1) offset = galsim.PositionD(dx, dy) # draw galaxy image = galsim.ImageF(xsize, ysize, scale=pixel_scale) final.drawImage(image=image, wcs=wcs.local(image_pos), offset=offset) image.setCenter(ix_nominal, iy_nominal) image_noise = galsim.Image(image, dtype=numpy.float64, copy=False) # add stamps to single image bounds = image.bounds & full_image.bounds full_image[bounds] += image[bounds] # save the stamps of each galaxy with no noise first stamps_path = 'output/stamps/galaxy_ns' + str( nsersic) + '_stamp_' + str(p) + '-' + str(k) # image.write(stamps_path+'_nonoise.fits') stamps_nonoise.append(image) # ## add noise rng_gal = galsim.BaseDeviate(random_seed) noise_gal = galsim.GaussianNoise(rng_gal, sigma=math.sqrt(noise_variance)) image_noise.addNoise(noise_gal) # image_noise.write(stamps_path+'_noise.fits') stamps_noise.append(image_noise) # save the parameters of the galaxies galaxy_params.append( np.array([ ell1, ell2, shear1, shear2, mag, half_light_radius, axisratio, nsersic, diskangle ])) np.save('output/galaxy_stamps_nonoise_p' + str(p) + '.npy', stamps_nonoise) np.save('output/galaxy_stamps_noise_p' + str(p) + '.npy', stamps_noise) np.save('output/galaxy_params_p' + str(p) + '.npy', galaxy_params) timegal = time.time() logger.info('%d galaxies computed in t=%.2f s', k + 1, timegal - timeigal) ######
###WRITE THE FITS FILE BEFORE NOISE### full_image.write(file_name) logger.info('Image without noise written to fits file %r', file_name) ###### ###ADD NOISE### #add Gaussian noise rng = galsim.BaseDeviate(random_seed) noise = galsim.GaussianNoise(rng, sigma=math.sqrt(noise_variance)) full_image.addNoise(noise) ###### ###WRITE THE FITS FILE WITH NOISE### full_image.write(file_name_noise) logger.info('Image with noise written to fits file %r', file_name_noise) ###### timef = time.time() tot_time = timef - timegal logger.info('Noise added and image written to files in t=%.2f s', tot_time) tot_time = timef - timei logger.info('\nFull simulation run in t=%.2f s', tot_time)
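For reference, the flux and noise arithmetic this simulator relies on: an object's counts in ADU come from its magnitude and the zeropoint (the formula implies that an object at mag = ZP delivers 1 e-/s), scaled by the exposure time and divided by the gain; the per-pixel Gaussian sigma comes from the sky surface brightness converted to electrons per pixel, added in quadrature with the read noise and divided by the gain. A short sketch of that arithmetic using the script's numbers; the helper names and the mag = 24.5 example are illustrative, not part of the simulator:

import numpy as np

def mag_to_counts(mag, zeropoint, t_exp, gain):
    # Object flux in ADU, assuming the zeropoint corresponds to 1 e-/s at mag = ZP.
    return t_exp / gain * 10 ** (-(mag - zeropoint) / 2.5)

def sky_noise_sigma(sky_bkg, zeropoint, t_exp, gain, pixel_scale, readoutnoise):
    # Per-pixel Gaussian sigma in ADU from sky surface brightness plus read noise.
    f_sky = pixel_scale ** 2 * t_exp * 10 ** (-(sky_bkg - zeropoint) / 2.5)  # e-/pixel
    return np.sqrt(readoutnoise ** 2 + f_sky) / gain  # ADU/pixel

sigma = sky_noise_sigma(sky_bkg=22.35, zeropoint=24.0, t_exp=3 * 565,
                        gain=3.1, pixel_scale=0.1, readoutnoise=4.2)
flux = mag_to_counts(mag=24.5, zeropoint=24.0, t_exp=3 * 565, gain=3.1)
print('sigma = %.2f ADU/pixel, flux(VIS=24.5) = %.1f ADU' % (sigma, flux))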