def test_vk_delta():
    """Test a VonKarman with a significant delta-function amplitude"""
    kwargs = {'lam': 1100.0, 'r0': 0.8, 'L0': 5.0, 'flux': 2.2}

    # Try to see if we can catch the warning first
    with assert_warns(galsim.GalSimWarning):
        vk = galsim.VonKarman(**kwargs)

    kwargs['suppress_warning'] = True
    vk = galsim.VonKarman(**kwargs)
    do_pickle(vk)

    # This profile has more than 15% of its flux in the delta-function component.
    assert vk.delta_amplitude > 0.15 * vk.flux
    # If do_delta is False (the default), then the asymptotic kValue should still be zero.
    np.testing.assert_almost_equal(vk.kValue(1e10, 0).real, 0.0)
    # But if we use do_delta=True, then the asymptotic kValue should be that of the delta function.
    vkd = galsim.VonKarman(do_delta=True, **kwargs)
    do_pickle(vkd)
    np.testing.assert_almost_equal(vkd.kValue(1e10, 0).real, vkd.delta_amplitude)
    # Either way, the fluxes should be the same.
    np.testing.assert_almost_equal(vk.flux, vkd.flux)
    assert vk != vkd
    # The half-light-radius of the profile with do_delta=True should be smaller though, as we're
    # accounting for the 15% flux at r=0 in this case.
    assert vkd.half_light_radius < vk.half_light_radius
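
# Quick sanity check of the numbers in test_vk_delta above (illustrative only, not part of the
# test suite).  Using the same delta-amplitude expression that appears as the skip criterion in
# test_vk below, the kwargs lam=1100, r0=0.8, L0=5 put roughly 16% of the flux in the
# delta-function component, i.e. just over the 15% threshold being asserted.  The helper name
# here is ours, not part of GalSim.
import numpy as np

def _vk_delta_fraction(r0, L0):
    # Fraction of the flux in the delta-function component of a VonKarman profile.
    return np.exp(-0.5 * 0.172629 * (r0 / L0)**(-5. / 3.))

print(_vk_delta_fraction(0.8, 5.0))  # ~0.16, so delta_amplitude ~ 0.16 * flux > 0.15 * flux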
def test_vk_r0():
    """Test a special r0 value that resulted in an error, reported in issue #957.

    Note: the resolution of the bug was to add explicit split points for the first several
    j0 zeros.  Without that, the integral in rawXValue can spuriously fail badly, leading to
    an invalid estimate of the total integrated flux within R=pi/stepk.

    Update: With the new Ogata method for doing the Hankel transform, this seems no longer
    to be necessary.  However, we continue to test these r values anyway.
    """
    # The first one was issue #957.
    # Aaron Roodman ran across another, which is now included here as well.
    r0_list = [0.146068884, 0.16879518207956518]
    for r0 in r0_list:
        vk = galsim.VonKarman(L0=25., lam=700., r0=r0)
        check_basic(vk, "VonKarman, r0=%s" % r0)

    if __name__ == '__main__':
        # Josh then tried a bunch more random triples of r0_500, lam, L0 to find more failures,
        # which are given in input/vk_fail.txt.
        r0_500_list, lam_list, L0_list = np.loadtxt('input/vk_fail.txt').T
        for r0_500, lam, L0 in zip(r0_500_list, lam_list, L0_list):
            print(r0_500, lam, L0)
            vk = galsim.VonKarman(L0=L0, lam=lam, r0_500=r0_500)
def test_vk_fitting_formulae():
    #          lam, r0_500, L0
    params = [(650, 0.15, 10.0),
              (450, 0.12, 25.0),
              (900, 0.18, 100.0)]

    def predicted_FWHM_ratio(r0, L0):
        """Fitting formula for VonKarman FWHM / Kolmogorov FWHM from Martinez++2014"""
        return np.sqrt(1 - 2.183 * (r0 / L0)**0.356)

    def predicted_HLR_ratio(r0, L0):
        """Fitting formula for VonKarman HLR / Kolmogorov HLR from Martinez++2014"""
        return np.sqrt(1 - 1.534 * (r0 / L0)**0.347)

    for lam, r0_500, L0 in params:
        print(lam, r0_500, L0)
        r0 = r0_500 * (lam / 500.0)**(6. / 5)
        kolm = galsim.Kolmogorov(lam=lam, r0=r0)
        vk = galsim.VonKarman(lam=lam, r0=r0, L0=L0)
        vk2 = galsim.VonKarman(lam=lam, r0_500=r0_500, L0=L0)
        np.testing.assert_allclose(vk.r0, vk2.r0)
        np.testing.assert_allclose(vk.r0_500, vk2.r0_500)

        for prof in [vk, vk2]:
            HLR_ratio = prof.calculateHLR() / kolm.calculateHLR()
            FWHM_ratio = prof.calculateFWHM() / kolm.calculateFWHM()
            print(HLR_ratio)
            print(FWHM_ratio)
            np.testing.assert_allclose(HLR_ratio, predicted_HLR_ratio(r0, L0), rtol=0.015)
            np.testing.assert_allclose(FWHM_ratio, predicted_FWHM_ratio(r0, L0), rtol=0.015)
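
# Worked example of the Martinez++2014 fitting formulas used above (illustrative only;
# the quoted numbers in the comments are approximate).
import numpy as np
lam, r0_500, L0 = 650.0, 0.15, 10.0
r0 = r0_500 * (lam / 500.0)**(6. / 5)        # ~0.21 m
x = r0 / L0                                  # ~0.021
print(np.sqrt(1 - 2.183 * x**0.356))         # predicted FWHM_vK / FWHM_Kolmogorov, ~0.67
print(np.sqrt(1 - 1.534 * x**0.347))         # predicted HLR_vK / HLR_Kolmogorov,  ~0.78
# So a finite outer scale of 10 m shrinks this PSF appreciably relative to pure Kolmogorov.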
def test_vk(slow=False):
    """Test the generation of VonKarman profiles"""
    if slow:
        lams = [300.0, 500.0, 1100.0]
        r0_500s = [0.05, 0.15, 0.3]
        L0s = [1e10, 25.0, 10.0]
        do_deltas = [False, True]
    else:
        lams = [500.0]
        r0_500s = [0.2]
        L0s = [25.0]
        do_deltas = [False]
    for lam in lams:
        for r0_500 in r0_500s:
            r0 = r0_500 * (lam / 500)**(6. / 5)
            for L0 in L0s:
                for do_delta in do_deltas:
                    kwargs = {'lam': lam, 'r0': r0, 'L0': L0, 'do_delta': do_delta}
                    print(kwargs)

                    delta_amp = np.exp(-0.5 * 0.172629 * (r0 / L0)**(-5. / 3.))
                    if delta_amp > 1.e-3:
                        print("Skip this combination, since delta > maxk_threshold")
                        continue

                    vk = galsim.VonKarman(flux=2.2, **kwargs)
                    np.testing.assert_almost_equal(vk.flux, 2.2)

                    gsp = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)
                    vk2 = galsim.VonKarman(flux=2.2, gsparams=gsp, **kwargs)
                    assert vk2 != vk
                    assert vk2 == vk.withGSParams(gsp)
                    assert vk2 == vk.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)

                    check_basic(vk, "VonKarman")
                    do_pickle(vk)

                    img = galsim.Image(16, 16, scale=0.25)
                    if not do_delta:
                        do_shoot(vk, img, "VonKarman")
                        do_kvalue(vk, img, "VonKarman")

    with np.testing.assert_raises(galsim.GalSimIncompatibleValuesError):
        vk = galsim.VonKarman(lam=500, r0=0.1, r0_500=0.2)
    with np.testing.assert_raises(galsim.GalSimIncompatibleValuesError):
        vk = galsim.VonKarman(lam=500)
def test_vk_shoot():
    """Test VonKarman with photon shooting.  Particularly the flux of the final image."""
    rng = galsim.BaseDeviate(1234)
    obj = galsim.VonKarman(lam=500, r0=0.2, flux=1.e4)
    im = galsim.Image(100, 100, scale=1)
    im.setCenter(0, 0)
    added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
    print('obj.flux = ', obj.flux)
    print('added_flux = ', added_flux)
    print('photon fluxes = ', photons.flux.min(), '..', photons.flux.max())
    print('image flux = ', im.array.sum())
    assert np.isclose(added_flux, obj.flux)
    assert np.isclose(im.array.sum(), obj.flux)
    photons2 = obj.makePhot(poisson_flux=False, rng=rng)
    assert photons2 == photons, "VonKarman makePhot not equivalent to drawPhot"

    obj = galsim.VonKarman(lam=500, r0=0.2, L0=10., flux=1.e4)
    added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
    print('obj.flux = ', obj.flux)
    print('added_flux = ', added_flux)
    print('photon fluxes = ', photons.flux.min(), '..', photons.flux.max())
    print('image flux = ', im.array.sum())
    assert np.isclose(added_flux, obj.flux)
    assert np.isclose(im.array.sum(), obj.flux)
    photons2 = obj.makePhot(poisson_flux=False, rng=rng)
    assert photons2 == photons, "VonKarman makePhot not equivalent to drawPhot"

    obj = galsim.VonKarman(lam=700, r0=0.02, L0=10., flux=1.e4)
    im = galsim.Image(500, 500, scale=1)
    im.setCenter(0, 0)
    added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
    print('obj.flux = ', obj.flux)
    print('added_flux = ', added_flux)
    print('photon fluxes = ', photons.flux.min(), '..', photons.flux.max())
    print('image flux = ', im.array.sum())
    assert np.isclose(added_flux, obj.flux)
    assert np.isclose(im.array.sum(), obj.flux)
    photons2 = obj.makePhot(poisson_flux=False, rng=rng.duplicate())
    assert photons2 == photons, "VonKarman makePhot not equivalent to drawPhot"

    # Can treat the profile as a convolution of a delta function and put it in a photon_ops list.
    delta = galsim.DeltaFunction(flux=1.e4)
    psf = galsim.VonKarman(lam=700, r0=0.02, L0=10.)
    photons3 = delta.makePhot(poisson_flux=False, rng=rng.duplicate(), photon_ops=[psf])
    assert photons3 == photons, "Using VonKarman in photon_ops not equivalent to drawPhot"
def test_vk_gsp():
    """Test that we can construct a vK with non-standard folding_threshold."""
    # The default folding_threshold is 5e-3.
    # We can't go too much smaller than this for such a flat asymptotic profile, but check that
    # a little bit further works.
    gsp1 = galsim.GSParams(folding_threshold=1e-2)
    gsp2 = galsim.GSParams(folding_threshold=2e-3)

    # Just testing that these construct successfully.
    galsim.VonKarman(lam=700, r0=0.1, L0=24.3, gsparams=gsp1)
    galsim.VonKarman(lam=700, r0=0.1, L0=24.3, gsparams=gsp2)
def test_vk_scale():
    """Test vk scale argument"""
    kwargs = {'lam': 500, 'r0': 0.2, 'L0': 25.0, 'flux': 2.2}
    vk_arcsec = galsim.VonKarman(scale_unit=galsim.arcsec, **kwargs)
    vk_arcmin = galsim.VonKarman(scale_unit='arcmin', **kwargs)
    do_pickle(vk_arcmin)

    np.testing.assert_almost_equal(vk_arcsec.flux, vk_arcmin.flux)
    np.testing.assert_almost_equal(vk_arcsec.kValue(0.0, 0.0), vk_arcmin.kValue(0.0, 0.0))
    np.testing.assert_almost_equal(vk_arcsec.kValue(0.0, 10.0), vk_arcmin.kValue(0.0, 600.0))
    # xValue is a surface brightness, so the arcmin-unit value is larger by 60**2 (per arcmin^2
    # vs per arcsec^2).
    np.testing.assert_almost_equal(vk_arcsec.xValue(0.0, 6.0), vk_arcmin.xValue(0.0, 0.1)/60**2)

    img1 = vk_arcsec.drawImage(nx=32, ny=32, scale=0.2)
    img2 = vk_arcmin.drawImage(nx=32, ny=32, scale=0.2/60.0)
    np.testing.assert_almost_equal(img1.array, img2.array)
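
# Unit bookkeeping behind the cross-checks in test_vk_scale above (illustrative only):
# 1 arcmin = 60 arcsec, so positions map as x[arcmin] = x[arcsec] / 60, spatial frequencies as
# k[arcmin^-1] = 60 * k[arcsec^-1], and surface brightness (flux per unit solid angle) picks up
# a factor of 60**2 = 3600 between per-arcsec^2 and per-arcmin^2.
print(10 * 60)    # k = 10 arcsec^-1 corresponds to 600 arcmin^-1
print(6.0 / 60)   # x = 6 arcsec corresponds to 0.1 arcmin
print(60**2)      # ratio of xValue in arcmin units to xValue in arcsec units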
def test_vk_force_stepk():
    """Check that manually forcing stepk works"""
    vk1 = galsim.VonKarman(r0_500=0.1, L0=25.0, lam=750.0)
    vk2 = galsim.VonKarman(r0_500=0.1, L0=25.0, lam=750.0, force_stepk=10.0)

    # Make sure we get the expected stepk.
    assert vk1.stepk != vk2.stepk
    assert vk2.stepk == 10.0

    # Many products will actually be the same for both.
    # Asking for the half_light_radius or xValue will trigger the table build,
    # which is identical for each.
    assert vk1.half_light_radius == vk2.half_light_radius
    assert vk1.xValue(0, 1) == vk2.xValue(0, 1)

    # Images will be the same if you assert specific bounds.
    img1 = vk1.drawImage(nx=50, ny=50, scale=0.2, method='fft')
    img2 = vk2.drawImage(nx=50, ny=50, scale=0.2, method='fft')
    np.testing.assert_equal(img1.array, img2.array)

    # Though "goodImageSize" will differ.
    assert vk1.getGoodImageSize(0.2) != vk2.getGoodImageSize(0.2)

    # Can we pickle?
    do_pickle(vk2)
    do_pickle(vk2, lambda obj: obj.stepk)

    check_basic(vk2, 'vk2', do_x=False)  # x fails b/c stamp size is bad
    img = galsim.Image(50, 50, scale=0.2)
    do_shoot(vk2, img, "VonKarman")

    # Check that it works with a scale_unit.
    vk3 = galsim.VonKarman(r0_500=0.1, L0=25.0, lam=750.0, force_stepk=10.0,
                           scale_unit=galsim.radians)
    assert vk3.stepk == 10.0
    assert vk3.scale_unit == galsim.radians

    # force_stepk is retained through a reflux.
    vk4 = vk3.withFlux(11.0)
    assert vk4.flux == 11.0
    assert vk3.force_stepk == vk4.force_stepk
def test_vk_ne():
    gsp = galsim.GSParams(maxk_threshold=1.1e-3, folding_threshold=5.1e-3)

    objs = [galsim.VonKarman(lam=500.0, r0=0.2),
            galsim.VonKarman(lam=500.0, r0=0.2, L0=20.0),
            galsim.VonKarman(lam=500.0, r0=0.2, L0=20.0, flux=2.2),
            galsim.VonKarman(lam=500.0, r0=0.2, L0=1e11),
            galsim.VonKarman(lam=550.0, r0=0.1, L0=20.0),
            galsim.VonKarman(lam=550.0, r0=0.1, L0=20.0, do_delta=True),
            galsim.VonKarman(lam=550.0, r0=0.1, L0=20.0, scale_unit=galsim.arcmin),
            galsim.VonKarman(lam=550.0, r0=0.1, L0=20.0, gsparams=gsp)]
    all_obj_diff(objs)
def test_limiting_cases():
    """SecondKick has two interesting limiting cases.

    A) When kcrit = 0, SecondKick = Convolve(Airy, VonKarman).
    B) When kcrit = inf, SecondKick = Airy.
    Test these.
    """
    lam = 500.0
    r0 = 0.2
    diam = 8.36
    obscuration = 0.61

    # First kcrit=0
    sk = galsim.SecondKick(lam, r0, diam, obscuration, kcrit=0.0)
    limiting_case = galsim.Convolve(
        galsim.VonKarman(lam, r0, L0=1.e8),
        galsim.Airy(lam=lam, diam=diam, obscuration=obscuration)
    )
    print(sk.stepk, sk.maxk)
    print(limiting_case.stepk, limiting_case.maxk)
    for k in [0.0, 0.1, 0.3, 1.0, 3.0, 10.0, 20.0]:
        print(sk.kValue(0, k).real, limiting_case.kValue(0, k).real)
        np.testing.assert_allclose(
            sk.kValue(0, k).real,
            limiting_case.kValue(0, k).real,
            rtol=1e-3,
            atol=1e-4)

    # Normally, one wouldn't use SecondKick.xValue, since it does a real-space convolution,
    # so it's slow.  But we do allow it, so test it here.
    import time
    t0 = time.time()
    xv_2k = sk.xValue(0, 0)
    print("xValue(0,0) = ", xv_2k)
    t1 = time.time()

    # The VonKarman * Airy xValue is much slower still, so don't do that.
    # Instead compare it to the 'sb' image.
    xv_image = limiting_case.drawImage(nx=1, ny=1, method='sb', scale=0.1)(1, 1)
    print('from image ', xv_image)
    t2 = time.time()
    print('t = ', t1-t0, t2-t1)
    np.testing.assert_almost_equal(xv_2k, xv_image, decimal=3)

    # kcrit=inf
    sk = galsim.SecondKick(lam, r0, diam, obscuration, kcrit=np.inf)
    limiting_case = galsim.Airy(lam=lam, diam=diam, obscuration=obscuration)
    for k in [0.0, 0.1, 0.3, 1.0, 3.0, 10.0, 20.0]:
        print(sk.kValue(0, k).real, limiting_case.kValue(0, k).real)
        np.testing.assert_allclose(
            sk.kValue(0, k).real,
            limiting_case.kValue(0, k).real,
            rtol=1e-3,
            atol=1e-4)
def test_vk_eq_kolm():
    lam = 500.0
    r0 = 0.2
    L0 = 1e10  # Need to make this surprisingly large to make vk -> kolm.
    flux = 3.3

    kolm = galsim.Kolmogorov(lam=lam, r0=r0, flux=flux)
    vk = galsim.VonKarman(lam=lam, r0=r0, L0=L0, flux=flux)

    np.testing.assert_allclose(kolm.xValue(0, 0), vk.xValue(0, 0), rtol=1e-3, atol=0)

    kolm_img = kolm.drawImage(nx=24, ny=24, scale=0.2)
    vk_img = vk.drawImage(nx=24, ny=24, scale=0.2)
    np.testing.assert_allclose(kolm_img.array, vk_img.array, atol=flux*4e-5, rtol=0)
def test_structure_function():
    """Test that AtmosphericScreen generates the right structure function."""
    if __name__ == '__main__':
        L0s = [10.0, 25.0, 100.0]
        screen_size = 300.0
    else:
        L0s = [10.0]
        screen_size = 100.0

    rng = galsim.BaseDeviate(4815162342)
    lam = 500.0
    r0_500 = 0.2
    screen_scale = 0.05

    for L0 in L0s:
        screen = galsim.AtmosphericScreen(screen_size=screen_size, screen_scale=screen_scale,
                                          r0_500=r0_500, L0=L0, rng=rng)
        screen.instantiate()
        vk = galsim.VonKarman(lam=lam, r0=r0_500 * (lam / 500.0)**1.2, L0=L0)

        phase = screen._tab2d.getVals()[:-1, :-1] * 2 * np.pi / 500.0  # nm -> radians
        var = np.var(phase)
        # Conan 2008 eq 16
        # 0.0863 ~= Gamma(11/6) Gamma(5/6) / (2 pi^(8/3)) (24/5 Gamma(6/5))^(5/6)
        expected_var = 0.0863 * (r0_500 / L0)**(-5 / 3.)
        np.testing.assert_allclose(
            var, expected_var, rtol=0.025,
            err_msg="Simulated variance disagrees with expected variance.")

        im = galsim.Image(phase, scale=screen_scale)
        D_sim = galsim.utilities.structure_function(im)

        print("r     D_VK     D_sim")
        for r in [0.1, 1.0, 10.0]:
            analytic_SF = vk._structure_function(r)
            simulated_SF = D_sim(r)
            print(r, analytic_SF, simulated_SF)
            np.testing.assert_allclose(
                analytic_SF, simulated_SF, rtol=0.05,
                err_msg="Simulated structure function not close to prediction.")
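
# Numerical check of the 0.0863 coefficient quoted in test_structure_function above
# (illustrative only; requires scipy, which is not otherwise used here).
import numpy as np
from scipy.special import gamma
coef = gamma(11./6) * gamma(5./6) / (2 * np.pi**(8./3)) * (24./5 * gamma(6./5))**(5./6)
print(coef)  # ~0.0863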
def vk_benchmark():
    import time
    t0 = time.time()
    vk = galsim.VonKarman(lam=700, r0=0.1, L0=24.3)
    vk.drawImage(nx=16, ny=16, scale=0.2)
    t1 = time.time()
    print("Time to create/draw first time: {:6.3f}s".format(t1-t0))  # ~0.7s
    for i in range(10):
        vk.drawImage(nx=16, ny=16, scale=0.2)
    t2 = time.time()
    print("Time to draw 10 more: {:6.3f}s".format(t2-t1))  # ~0.07s
    for i in range(100):
        vk.drawImage(nx=16, ny=16, scale=0.2, method='phot', n_photons=50000)
    t3 = time.time()
    print("Time to photon-shoot 100 more with 50000 photons each: {:6.3f}s".format(t3-t2))  # ~0.9s
def test_structure_function():
    """Test that the SecondKick structure function is equivalent to the vonKarman structure
    function when kcrit=0.  This is nontrivial since the SecondKick structure function is
    numerically integrated, while the vK structure function is evaluated analytically.
    """
    lam = 500.0
    r0 = 0.2
    diam = 8.36
    obscuration = 0.61

    sk = galsim.SecondKick(lam, r0, diam, obscuration, kcrit=0.0)
    vk = galsim.VonKarman(lam, r0, L0=1.e10)

    for rho in [0.01, 0.03, 0.1, 0.3, 1.0, 3.0]:
        sksf = sk._structure_function(rho / r0)
        vksf = vk._structure_function(rho)
        print(sksf, vksf)
        np.testing.assert_allclose(sksf, vksf, rtol=2e-3, atol=1.e-3)
def getProfile(self, params):
    """Get a version of the model as a GalSim GSObject

    :param params:  A np array with [z4, z5, z6...z11]

    :returns: a galsim.GSObject instance
    """
    prof = []
    # gaussian
    if self.sigma is not None:
        gaussian = galsim.Gaussian(sigma=self.sigma)
        prof.append(gaussian)

    # atmosphere
    if len(self.atm_kwargs) > 0:
        if 'L0' in self.atm_kwargs and self.atm_kwargs['L0'] is not None:
            atm = galsim.VonKarman(**self.atm_kwargs)
        else:
            atm = galsim.Kolmogorov(**self.atm_kwargs)
        prof.append(atm)

    # optics
    if params is None or len(params) == 0:
        # no aberrations.  Just the basic opt_kwargs
        optics = galsim.OpticalPSF(**self.opt_kwargs)
    else:
        aberrations = [0, 0, 0, 0] + list(params)
        optics = galsim.OpticalPSF(aberrations=aberrations, **self.opt_kwargs)
    prof.append(optics)

    # convolve together
    if len(prof) == 1:
        prof = prof[0]
    else:
        prof = galsim.Convolve(prof)

    if self.g1 is not None or self.g2 is not None:
        prof = prof.shear(g1=self.g1, g2=self.g2)

    return prof
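
# Minimal standalone sketch of the kind of profile getProfile assembles above (illustrative
# only; the numerical values below are placeholders, not ones used by this model class).
import galsim
atm = galsim.VonKarman(lam=700.0, r0=0.15, L0=25.0)        # finite-outer-scale atmosphere
optics = galsim.OpticalPSF(lam=700.0, diam=4.0,
                           aberrations=[0, 0, 0, 0, 0.1])  # z4 = 0.1 waves of defocus
gauss = galsim.Gaussian(sigma=0.05)                        # small extra instrumental blur
prof = galsim.Convolve([gauss, atm, optics]).shear(g1=0.01, g2=0.0)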
def test_phase_gradient_shoot():
    """Test that photon-shooting PSFs match Fourier optics PSFs when using the same phase screens,
    and also match the expected size from an analytic VonKarman-convolved-with-Airy PSF.
    """
    # Make the atmosphere
    seed = 12345
    r0_500 = 0.15  # m
    L0 = 20.0  # m
    nlayers = 6
    screen_size = 102.4  # m

    # Ideally, we'd use as small a screen scale as possible here.  The runtime for generating
    # phase screens scales like `screen_scale`^-2 though, which is pretty steep, so we use a
    # larger-than-desirable scale for the __name__ != '__main__' branch.  This is known to lead
    # to a bias in PSF size, which we attempt to account for below when actually comparing FFT
    # PSF moments to photon-shooting PSF moments.  Note that we don't need to apply such a
    # correction when comparing the photon-shooting PSF to the analytic VonKarman PSF since these
    # both avoid the screen_scale problem to begin with.  (Even though we do generate screens for
    # the photon-shooting PSF, because we truncate the power spectrum above kcrit, we don't
    # require as high a resolution.)
    if __name__ == '__main__':
        screen_scale = 0.025  # m
    else:
        screen_scale = 0.1  # m
    max_speed = 20  # m/s

    rng = galsim.BaseDeviate(seed)
    u = galsim.UniformDeviate(rng)

    # Use atmospheric weights from the 1998 Gemini site selection process as something reasonably
    # realistic.  (Ellerbroek 2002, JOSA Vol 19 No 9.)
    Ellerbroek_alts = [0.0, 2.58, 5.16, 7.73, 12.89, 15.46]  # km
    Ellerbroek_weights = [0.652, 0.172, 0.055, 0.025, 0.074, 0.022]
    Ellerbroek_interp = galsim.LookupTable(
        Ellerbroek_alts, Ellerbroek_weights, interpolant='linear')

    alts = np.max(Ellerbroek_alts)*np.arange(nlayers)/(nlayers-1)
    weights = Ellerbroek_interp(alts)
    weights /= sum(weights)

    spd = []  # Wind speed in m/s
    dirn = []  # Wind direction in radians
    r0_500s = []  # Fried parameter in m at a wavelength of 500 nm.
    for i in range(nlayers):
        spd.append(u()*max_speed)
        dirn.append(u()*360*galsim.degrees)
        r0_500s.append(r0_500*weights[i]**(-3./5))

    rng2 = rng.duplicate()
    atm = galsim.Atmosphere(r0_500=r0_500, L0=L0, speed=spd, direction=dirn, altitude=alts,
                            rng=rng, screen_size=screen_size, screen_scale=screen_scale)

    # Make a second atmosphere to use for geometric photon-shooting
    atm2 = galsim.Atmosphere(r0_500=r0_500, L0=L0, speed=spd, direction=dirn, altitude=alts,
                             rng=rng2, screen_size=screen_size, screen_scale=screen_scale)

    # These should be equal at the moment, before we've actually instantiated any screens by
    # drawing with them.
    assert atm == atm2

    lam = 500.0
    diam = 4.0
    pad_factor = 0.5
    oversampling = 0.5

    aper = galsim.Aperture(diam=diam, lam=lam, screen_list=atm,
                           pad_factor=pad_factor, oversampling=oversampling)

    xs = np.empty((10,), dtype=float)
    ys = np.empty((10,), dtype=float)
    u.generate(xs)
    u.generate(ys)
    thetas = [(x*galsim.degrees, y*galsim.degrees) for x, y in zip(xs, ys)]

    if __name__ == '__main__':
        exptime = 15.0
        time_step = 0.05
        centroid_tolerance = 0.06
        size_tolerance = 0.06  # absolute
        size_bias = 0.02  # as a fraction
        shape_tolerance = 0.01
    else:
        exptime = 1.0
        time_step = 0.1
        centroid_tolerance = 0.3
        size_tolerance = 0.3
        size_bias = 0.15
        shape_tolerance = 0.04

    psfs = [atm.makePSF(lam, diam=diam, theta=th, exptime=exptime, aper=aper)
            for th in thetas]
    psfs2 = [atm2.makePSF(lam, diam=diam, theta=th, exptime=exptime, aper=aper,
                          time_step=time_step)
             for th in thetas]

    shoot_moments = []
    fft_moments = []

    vk = galsim.VonKarman(lam=lam, r0=r0_500*(lam/500)**1.2, L0=L0)
    airy = galsim.Airy(lam=lam, diam=diam)
    obj = galsim.Convolve(vk, airy)
    vkImg = obj.drawImage(nx=48, ny=48, scale=0.05)
    vkMom = galsim.hsm.FindAdaptiveMom(vkImg)

    for psf, psf2 in zip(psfs, psfs2):
        im_shoot = psf.drawImage(nx=48, ny=48, scale=0.05, method='phot', n_photons=100000,
                                 rng=rng)
        im_fft = psf2.drawImage(nx=48, ny=48, scale=0.05)

        # At this point, the atms should be different.
        assert atm != atm2

        shoot_moment = galsim.hsm.FindAdaptiveMom(im_shoot)
        fft_moment = galsim.hsm.FindAdaptiveMom(im_fft)

        print()
        print()
        print()
        print(shoot_moment.observed_shape.g1)
        print(fft_moment.observed_shape.g1)

        # import matplotlib.pyplot as plt
        # fig, axes = plt.subplots(ncols=2)
        # axes[0].imshow(im_shoot.array)
        # axes[1].imshow(im_fft.array)
        # plt.show()

        np.testing.assert_allclose(
            shoot_moment.moments_centroid.x,
            fft_moment.moments_centroid.x,
            rtol=0, atol=centroid_tolerance,
            err_msg='Phase gradient centroid x not close to fft centroid')

        np.testing.assert_allclose(
            shoot_moment.moments_centroid.y,
            fft_moment.moments_centroid.y,
            rtol=0, atol=centroid_tolerance,
            err_msg='Phase gradient centroid y not close to fft centroid')

        np.testing.assert_allclose(
            shoot_moment.moments_sigma,
            fft_moment.moments_sigma*(1+size_bias),
            rtol=0, atol=size_tolerance,
            err_msg='Phase gradient sigma not close to fft sigma')

        np.testing.assert_allclose(
            shoot_moment.moments_sigma,
            vkMom.moments_sigma,
            rtol=0.1, atol=0,
            err_msg='Phase gradient sigma not close to infinite exposure analytic sigma')

        np.testing.assert_allclose(
            shoot_moment.observed_shape.g1,
            fft_moment.observed_shape.g1,
            rtol=0, atol=shape_tolerance,
            err_msg='Phase gradient shape g1 not close to fft shape')

        np.testing.assert_allclose(
            shoot_moment.observed_shape.g2,
            fft_moment.observed_shape.g2,
            rtol=0, atol=shape_tolerance,
            err_msg='Phase gradient shape g2 not close to fft shape')

        shoot_moments.append(shoot_moment)
        fft_moments.append(fft_moment)

    # I cheated.  Here's code to evaluate how small I could potentially set the tolerances above.
    # I think they're all fine, but this is admittedly a tad bit backwards.
    best_size_bias = np.mean([s1.moments_sigma/s2.moments_sigma
                              for s1, s2 in zip(shoot_moments, fft_moments)])
    print("best_size_bias = ", best_size_bias)
    print("xcentroid")
    print(max(np.abs([s1.moments_centroid.x - s2.moments_centroid.x
                      for s1, s2 in zip(shoot_moments, fft_moments)])))
    print("ycentroid")
    print(max(np.abs([s1.moments_centroid.y - s2.moments_centroid.y
                      for s1, s2 in zip(shoot_moments, fft_moments)])))
    print("size")
    print(max(np.abs([s1.moments_sigma - s2.moments_sigma*(1+size_bias)
                      for s1, s2 in zip(shoot_moments, fft_moments)])))
    print("bestsize")
    print(max(np.abs([s1.moments_sigma - s2.moments_sigma*(best_size_bias)
                      for s1, s2 in zip(shoot_moments, fft_moments)])))
    print("g1")
    print(max(np.abs([s1.observed_shape.g1 - s2.observed_shape.g1
                      for s1, s2 in zip(shoot_moments, fft_moments)])))
    print("g2")
    print(max(np.abs([s1.observed_shape.g2 - s2.observed_shape.g2
                      for s1, s2 in zip(shoot_moments, fft_moments)])))

    # import matplotlib.pyplot as plt
    # fig, ax = plt.subplots(nrows=1, ncols=1)
    # ax.scatter(
    #     [s.observed_shape.g1 for s in shoot_moments],
    #     [s.observed_shape.g1 for s in fft_moments]
    # )
    # xlim = ax.get_xlim()
    # ylim = ax.get_ylim()
    # lim = (min(xlim[0], ylim[0]), max(xlim[1], ylim[1]))
    # ax.set_xlim(lim)
    # ax.set_ylim(lim)
    # ax.plot([-100, 100], [-100, 100])
    # plt.show()

    # Verify that shoot with rng=None runs
    psf.shoot(100, rng=None)

    # Check that second_kick=False and second_kick=GSObject also run, and that we can shoot
    # photons with these settings.
    for second_kick in [False, galsim.Gaussian(fwhm=1)]:
        psf = atm.makePSF(lam=500.0, exptime=10, aper=aper, second_kick=second_kick)
        assert psf.second_kick == second_kick
        img = psf.drawImage(nx=64, ny=64, scale=0.1, method='phot', n_photons=100)

    # Verify that we can phase_gradient_shoot with 0 or 1 photons.
    psf.shoot(0)
    psf.shoot(1)
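
# Illustrative check (not part of the test): the per-layer Fried parameters built in
# test_phase_gradient_shoot above (and again in make_plot below) as r0_500 * weights[i]**(-3./5)
# recombine to the net r0_500, since turbulence strengths add as r0**(-5./3) and the weights sum
# to 1.  For brevity we use the raw Ellerbroek weights here rather than the altitude-interpolated
# ones.
import numpy as np
r0_500 = 0.15
weights = np.array([0.652, 0.172, 0.055, 0.025, 0.074, 0.022])
weights /= weights.sum()
layer_r0s = r0_500 * weights**(-3. / 5)
print(np.sum(layer_r0s**(-5. / 3))**(-3. / 5))  # recovers 0.15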
def make_plot(args):
    # Initiate some GalSim random number generators.
    rng = galsim.BaseDeviate(args.seed)
    u = galsim.UniformDeviate(rng)

    # The GalSim atmospheric simulation code describes turbulence in the 3D atmosphere as a
    # series of 2D turbulent screens.  The galsim.Atmosphere() helper function is useful for
    # constructing this screen list.

    # First, we estimate a weight for each screen, so that the turbulence is dominated by the
    # lower layers consistent with direct measurements.  The specific values we use are from
    # SCIDAR measurements on Cerro Pachon as part of the 1998 Gemini site selection process
    # (Ellerbroek 2002, JOSA Vol 19 No 9).
    Ellerbroek_alts = [0.0, 2.58, 5.16, 7.73, 12.89, 15.46]  # km
    Ellerbroek_weights = [0.652, 0.172, 0.055, 0.025, 0.074, 0.022]
    Ellerbroek_interp = galsim.LookupTable(Ellerbroek_alts, Ellerbroek_weights,
                                           interpolant='linear')

    # Use given number of uniformly spaced altitudes
    alts = np.max(Ellerbroek_alts)*np.arange(args.nlayers)/(args.nlayers-1)
    weights = Ellerbroek_interp(alts)  # interpolate the weights
    weights /= sum(weights)  # and renormalize

    # Each layer can have its own turbulence strength (roughly inversely proportional to the
    # Fried parameter r0), wind speed, wind direction, altitude, and even size and scale (though
    # note that the size of each screen is actually made infinite by "wrapping" the edges of the
    # screen).  The galsim.Atmosphere helper function is useful for constructing this list, and
    # requires lists of parameters for the different layers.
    spd = []  # Wind speed in m/s
    dirn = []  # Wind direction in radians
    r0_500 = []  # Fried parameter in m at a wavelength of 500 nm.
    for i in range(args.nlayers):
        spd.append(u()*args.max_speed)  # Use a random speed between 0 and max_speed
        dirn.append(u()*360*galsim.degrees)  # And an isotropically distributed wind direction.
        # The turbulence strength of each layer is specified through its Fried parameter r0_500,
        # which can be thought of as the diameter of a telescope for which atmospheric turbulence
        # and unaberrated diffraction contribute equally to image resolution (at a wavelength of
        # 500nm).  The weights above are for the refractive index structure function (similar to
        # a variance or covariance), however, so we need to use an appropriate scaling relation
        # to distribute the input "net" Fried parameter into a Fried parameter for each layer.
        # For Kolmogorov turbulence, this is r0_500 ~ (structure function)**(-3/5):
        r0_500.append(args.r0_500*weights[i]**(-3./5))
        print("Adding layer at altitude {:5.2f} km with velocity ({:5.2f}, {:5.2f}) m/s, "
              "and r0_500 {:5.3f} m."
              .format(alts[i], spd[i]*dirn[i].cos(), spd[i]*dirn[i].sin(), r0_500[i]))

    # Apply fudge factor
    r0_500 = [r*args.turb_factor**(-3./5) for r in r0_500]

    # Make sure to use a consistent seed for the atmosphere when varying kcrit.
    # Additionally, we set the screen size and scale.
    atmRng = galsim.BaseDeviate(args.seed+1)
    print("Inflating atmosphere")
    fftAtm = galsim.Atmosphere(r0_500=r0_500, L0=args.L0,
                               speed=spd, direction=dirn, altitude=alts, rng=atmRng,
                               screen_size=args.screen_size, screen_scale=args.screen_scale)
    with ProgressBar(args.nlayers) as bar:
        fftAtm.instantiate(_bar=bar)
    print(fftAtm[0].screen_scale, fftAtm[0].screen_size)
    print(fftAtm[0]._tab2d.f.shape)
    # `fftAtm` is now an instance of a galsim.PhaseScreenList object.

    # Construct an Aperture object for computing the PSF.  The Aperture object describes the
    # illumination pattern of the telescope pupil, and chooses good sampling size and resolution
    # for representing this pattern as an array.
    aper = galsim.Aperture(diam=args.diam, lam=args.lam, obscuration=args.obscuration,
                           screen_list=fftAtm, pad_factor=args.pad_factor,
                           oversampling=args.oversampling)

    print("Drawing with Fourier optics")
    with ProgressBar(args.exptime/args.time_step) as bar:
        fftPSF = fftAtm.makePSF(lam=args.lam, aper=aper, exptime=args.exptime,
                                time_step=args.time_step, _bar=bar)
        fftImg = fftPSF.drawImage(nx=args.nx, ny=args.nx, scale=args.scale)
    fftMom = galsim.hsm.FindAdaptiveMom(fftImg)

    vk = galsim.Convolve(
        galsim.VonKarman(lam=args.lam, r0=args.r0_500*(args.lam/500.0)**(6./5), L0=args.L0),
        galsim.Airy(lam=args.lam, diam=args.diam, obscuration=args.obscuration)
    )
    vkImg = vk.drawImage(nx=args.nx, ny=args.nx, scale=args.scale)
    vkMom = galsim.hsm.FindAdaptiveMom(vkImg)

    # Start output at this point
    fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(8, 8))
    FigureCanvasAgg(fig)
    for ax in axes.ravel():
        ax.set_xticks([])
        ax.set_yticks([])

    kcrits = np.logspace(np.log10(args.kmin), np.log10(args.kmax), 4)
    r0 = args.r0_500*(args.lam/500.0)**(6./5)

    for icol, kcrit in enumerate(kcrits):
        # reset atmRng
        atmRng = galsim.BaseDeviate(args.seed+1)
        print("Inflating atmosphere with kcrit={}".format(kcrit))
        atm = galsim.Atmosphere(r0_500=r0_500, L0=args.L0,
                                speed=spd, direction=dirn, altitude=alts, rng=atmRng,
                                screen_size=args.screen_size, screen_scale=args.screen_scale)
        with ProgressBar(args.nlayers) as bar:
            atm.instantiate(kmax=kcrit/r0, _bar=bar)
        kick1 = atm.makePSF(lam=args.lam, aper=aper, exptime=args.exptime,
                            time_step=args.time_step, second_kick=False)
        r0 = args.r0_500*(args.lam/500)**(6./5)
        kick2 = galsim.SecondKick(lam=args.lam, r0=r0, diam=args.diam,
                                  obscuration=args.obscuration, kcrit=kcrit)

        img1 = kick1.drawImage(nx=args.nx, ny=args.nx, scale=args.scale, method='phot',
                               n_photons=args.nphot)
        try:
            mom1 = galsim.hsm.FindAdaptiveMom(img1)
        except RuntimeError:
            mom1 = None

        img2 = kick2.drawImage(nx=args.nx, ny=args.nx, scale=args.scale, method='phot',
                               n_photons=args.nphot)
        try:
            mom2 = galsim.hsm.FindAdaptiveMom(img2)
        except RuntimeError:
            mom2 = None

        geom = galsim.Convolve(kick1, kick2)
        geomImg = geom.drawImage(nx=args.nx, ny=args.nx, scale=args.scale, method='phot',
                                 n_photons=args.nphot)
        try:
            geomMom = galsim.hsm.FindAdaptiveMom(geomImg)
        except RuntimeError:
            geomMom = None

        axes[0, icol].imshow(fftImg.array)
        axes[0, icol].text(0.5, 0.9, "{:6.3f}".format(fftMom.moments_sigma),
                           transform=axes[0, icol].transAxes, color='w')
        axes[1, icol].imshow(img1.array)
        if mom1:
            axes[1, icol].text(0.5, 0.9, "{:6.3f}".format(mom1.moments_sigma),
                               transform=axes[1, icol].transAxes, color='w')
        axes[2, icol].imshow(img2.array)
        if mom2:
            axes[2, icol].text(0.5, 0.9, "{:6.3f}".format(mom2.moments_sigma),
                               transform=axes[2, icol].transAxes, color='w')
        axes[3, icol].imshow(geomImg.array)
        if geomMom:
            axes[3, icol].text(0.5, 0.9, "{:6.3f}".format(geomMom.moments_sigma),
                               transform=axes[3, icol].transAxes, color='w')
        axes[4, icol].imshow(vkImg.array)
        axes[4, icol].text(0.5, 0.9, "{:6.3f}".format(vkMom.moments_sigma),
                           transform=axes[4, icol].transAxes, color='w')

        axes[0, icol].set_title("{:6.3f}".format(kcrit))

    axes[0, 0].set_ylabel("DFT")
    axes[1, 0].set_ylabel("1st kick")
    axes[2, 0].set_ylabel("2nd kick")
    axes[3, 0].set_ylabel("Geom")
    axes[4, 0].set_ylabel("Von Karman")

    fig.tight_layout()

    dirname, filename = os.path.split(args.outfile)
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    fig.savefig(args.outfile)