Example #1
def test_ccdnoise():
    """Test that the config layer CCD noise adds noise consistent with using a CCDNoise object.
    """
    import logging
    import time
    t1 = time.time()

    gain = 4
    sky = 50
    rn = 5
    size = 2048

    # Use this to turn on logging; it gives more info than we normally need, so generally leave it off.
    #logging.basicConfig(format="%(message)s", level=logging.DEBUG, stream=sys.stdout)
    #logger = logging.getLogger()
    logger = None

    config = {}
    # Either gal or psf is required, so just give it a Gaussian with 0 flux.
    config['gal'] = {'type': 'Gaussian', 'sigma': 1, 'flux': 0}
    config['image'] = {
        'type': 'Single',
        'size': size,
        'pixel_scale': 0.3,
        'random_seed': 123  # Note: this means the seed for the noise will really be 124,
        # since it is applied at the stamp level, so it uses seed + obj_num.
    }
    config['image']['noise'] = {
        'type': 'CCD',
        'sky_level_pixel': sky,
        'gain': gain,
        'read_noise': rn
    }
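    # Rough expectation (assuming gain is in e-/ADU and read_noise is in e- per pixel):
    # the per-pixel variance in ADU should be about sky/gain + (read_noise/gain)**2
    # = 50/4 + (5/4)**2 ~= 14.06, since the zero-flux galaxy contributes nothing.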
    image = galsim.config.BuildImage(config, logger=logger)

    print('config-built image: ')
    print('mean = ', np.mean(image.array))
    print('var = ', np.var(image.array.astype(float)))
    test_var = np.var(image.array.astype(float))

    # Build another image that should have equivalent noise properties.
    image2 = galsim.Image(size, size, scale=0.3, dtype=float)
    rng = galsim.BaseDeviate(124)
    noise = galsim.CCDNoise(rng=rng, gain=gain, read_noise=rn)
    image2 += sky
    image2.addNoise(noise)
    image2 -= sky

    print('manual sky:')
    print('mean = ', np.mean(image2.array))
    print('var = ', np.var(image2.array))
    np.testing.assert_almost_equal(
        np.var(image2.array),
        test_var,
        err_msg="CCDNoise with manual sky failed variance test.")

    # So far this isn't a very stringent test, since the noise module will use a CCDNoise
    # object for this.  In fact, it should do precisely the same calculation.
    # This should be equivalent to letting CCDNoise take the sky level:
    image2.fill(0)
    rng.reset(124)
    noise = galsim.CCDNoise(rng=rng, sky_level=sky, gain=gain, read_noise=rn)
    image2.addNoise(noise)

    print('sky done by CCDNoise:')
    print('mean = ', np.mean(image2.array))
    print('var = ', np.var(image2.array))
    np.testing.assert_almost_equal(
        np.var(image2.array),
        test_var,
        err_msg="CCDNoise using sky failed variance test.")

    # Check that the CCDNoiseBuilder calculates the same variance as CCDNoise
    var1 = noise.getVariance()
    var2 = galsim.config.noise.CCDNoiseBuilder().getNoiseVariance(
        config['image']['noise'], config)
    print('CCDNoise variance = ', var1)
    print('CCDNoiseBuilder variance = ', var2)
    np.testing.assert_almost_equal(
        var1, var2, err_msg="CCDNoiseBuilder calculates the wrong variance")

    # Finally, the config layer also includes its own manual implementation of CCD noise that
    # it uses when there is already some noise in the image.  We want to check that this is
    # consistent with the regular CCDNoise object.

    # This time, we just set the current_var to 1.e-20 to trigger the alternate path, but
    # without any real noise there yet.
    image2.fill(0)
    rng.reset(124)
    galsim.config.noise.CCDNoiseBuilder().addNoise(config['image']['noise'],
                                                   config,
                                                   image2,
                                                   rng,
                                                   current_var=1.e-20,
                                                   draw_method='fft',
                                                   logger=logger)

    print('Use CCDNoiseBuilder with negligible current_var')
    print('mean = ', np.mean(image2.array))
    print('var = ', np.var(image2.array))
    np.testing.assert_almost_equal(
        np.var(image2.array),
        test_var,
        err_msg="CCDNoise with current_var failed variance test.")

    # Here we pre-load the full read noise and tell it it's there with current_var
    image2.fill(0)
    gn = galsim.GaussianNoise(rng=rng, sigma=rn / gain)
    image2.addNoise(gn)
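    # The Gaussian noise just added has variance (rn/gain)**2 in ADU, i.e. exactly the
    # read-noise contribution, so declaring it via current_var below should leave the
    # builder to add only the remaining Poisson (sky) part of the CCD noise.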
    galsim.config.noise.CCDNoiseBuilder().addNoise(config['image']['noise'],
                                                   config,
                                                   image2,
                                                   rng,
                                                   current_var=(rn / gain)**2,
                                                   draw_method='fft',
                                                   logger=logger)

    print('Use CCDNoiseBuilder with current_var == read_noise')
    print('mean = ', np.mean(image2.array))
    print('var = ', np.var(image2.array))
    # So far we've done this to very high accuracy, since we've been using the same rng seed,
    # so the results should be identical, not just close.  However, from here on the values are
    # only close, since they are different noise realizations.  So check to 1 decimal place.
    np.testing.assert_almost_equal(
        np.var(image2.array),
        test_var,
        decimal=1,
        err_msg="CCDNoise w/ current_var==rn failed variance test.")

    # Now we pre-load part of the read-noise, but not all.  It should add the rest as read_noise.
    image2.fill(0)
    gn = galsim.GaussianNoise(rng=rng, sigma=0.5 * rn / gain)
    image2.addNoise(gn)
    galsim.config.noise.CCDNoiseBuilder().addNoise(config['image']['noise'],
                                                   config,
                                                   image2,
                                                   rng,
                                                   current_var=(0.5 * rn /
                                                                gain)**2,
                                                   draw_method='fft',
                                                   logger=logger)

    print('Use CCDNoiseBuilder with current_var < read_noise')
    print('mean = ', np.mean(image2.array))
    print('var = ', np.var(image2.array))
    np.testing.assert_almost_equal(
        np.var(image2.array),
        test_var,
        decimal=1,
        err_msg="CCDNoise w/ current_var < rn failed variance test.")

    # Last, we go beyond the read-noise, so it should remove some of the sky level to compensate.
    image2.fill(0)
    gn = galsim.GaussianNoise(rng=rng, sigma=2. * rn / gain)
    image2.addNoise(gn)
    galsim.config.noise.CCDNoiseBuilder().addNoise(config['image']['noise'],
                                                   config,
                                                   image2,
                                                   rng,
                                                   current_var=(2. * rn /
                                                                gain)**2,
                                                   draw_method='fft',
                                                   logger=logger)

    print('Use CCDNoiseBuilder with current_var > read_noise')
    print('mean = ', np.mean(image2.array))
    print('var = ', np.var(image2.array))
    np.testing.assert_almost_equal(
        np.var(image2.array),
        test_var,
        decimal=1,
        err_msg="CCDNoise w/ current_var > rn failed variance test.")

    t2 = time.time()
    print('time for %s = %.2f' % (funcname(), t2 - t1))
Example #2
def test_real_galaxy_saved():
    """Test accuracy of various calculations with real RealGalaxy vs. stored SHERA result"""
    ind_real = 0  # index of real galaxy in catalog
    shera_file = 'real_comparison_images/shera_result.fits'
    shera_target_PSF_file = 'real_comparison_images/shera_target_PSF.fits'
    shera_target_pixel_scale = 0.24
    shera_target_flux = 1000.0

    # read in real RealGalaxy from file
    # rgc = galsim.RealGalaxyCatalog(catalog_file, dir=image_dir)
    # This is an alternate way to give the directory -- as part of the catalog file name.
    full_catalog_file = os.path.join(image_dir, catalog_file)
    rgc = galsim.RealGalaxyCatalog(full_catalog_file)
    rg = galsim.RealGalaxy(rgc, index=ind_real)

    # read in expected result for some shear
    shera_image = galsim.fits.read(shera_file)
    shera_target_PSF_image = galsim.fits.read(shera_target_PSF_file)
    shera_target_PSF_image.scale = shera_target_pixel_scale

    # simulate the same galaxy with GalSim
    targ_applied_shear1 = 0.06
    targ_applied_shear2 = -0.04
    tmp_gal = rg.withFlux(shera_target_flux).shear(g1=targ_applied_shear1,
                                                   g2=targ_applied_shear2)
    tmp_psf = galsim.InterpolatedImage(shera_target_PSF_image)
    tmp_gal = galsim.Convolve(tmp_gal, tmp_psf)
    sim_image = tmp_gal.drawImage(scale=shera_target_pixel_scale,
                                  method='no_pixel')
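    # Note: method='no_pixel' is used here, presumably because the SHERA target PSF image
    # is an effective PSF that already includes the pixel response, so convolving with the
    # pixel again would double-count it.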

    # there are centroid issues when comparing Shera vs. SBProfile outputs, so compare 2nd moments
    # instead of images
    sbp_res = sim_image.FindAdaptiveMom()
    shera_res = shera_image.FindAdaptiveMom()

    np.testing.assert_almost_equal(
        sbp_res.observed_shape.e1,
        shera_res.observed_shape.e1,
        2,
        err_msg="Error in comparison with SHERA result: e1")
    np.testing.assert_almost_equal(
        sbp_res.observed_shape.e2,
        shera_res.observed_shape.e2,
        2,
        err_msg="Error in comparison with SHERA result: e2")
    np.testing.assert_almost_equal(
        sbp_res.moments_sigma,
        shera_res.moments_sigma,
        2,
        err_msg="Error in comparison with SHERA result: sigma")

    check_basic(rg, "RealGalaxy", approx_maxsb=True)

    # Check picklability
    do_pickle(
        rgc, lambda x: [
            x.getGalImage(ind_real),
            x.getPSFImage(ind_real),
            x.getNoiseProperties(ind_real)
        ])
    do_pickle(
        rgc,
        lambda x: drawNoise(x.getNoise(ind_real, rng=galsim.BaseDeviate(123))))
    do_pickle(
        rg, lambda x: galsim.Convolve([x, galsim.Gaussian(sigma=1.7)]).
        drawImage(nx=20, ny=20, scale=0.7))
    do_pickle(rgc)
    do_pickle(rg)
Example #3
def check_crg_noise(n_sed, n_im, n_trial, tol):
    print("Checking CRG noise for")
    print("n_sed = {}".format(n_sed))
    print("n_im = {}".format(n_im))
    print("n_trial = {}".format(n_trial))
    print("Constructing chromatic PSFs")
    in_PSF = galsim.ChromaticAiry(lam=700., diam=2.4)
    out_PSF = galsim.ChromaticAiry(lam=700., diam=0.6)

    print("Constructing filters and SEDs")
    waves = np.arange(550.0, 900.1, 10.0)
    visband = galsim.Bandpass(galsim.LookupTable(waves,
                                                 np.ones_like(waves),
                                                 interpolant='linear'),
                              wave_type='nm')
    split_points = np.linspace(550.0, 900.0, n_im + 1, endpoint=True)
    bands = [
        visband.truncate(blue_limit=blim, red_limit=rlim)
        for blim, rlim in zip(split_points[:-1], split_points[1:])
    ]

    maxk = max([
        out_PSF.evaluateAtWavelength(waves[0]).maxk,
        out_PSF.evaluateAtWavelength(waves[-1]).maxk
    ])
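    # The larger of the output PSF's maxk values at the two band edges; this is passed to
    # makeFromImages below, presumably so the internal Fourier-space model extends far
    # enough in k to cover the output PSF at every wavelength in the band.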

    SEDs = [
        galsim.SED(galsim.LookupTable(waves, waves**i, interpolant='linear'),
                   flux_type='fphotons',
                   wave_type='nm').withFlux(1.0, visband) for i in range(n_sed)
    ]

    print("Constructing input noise correlation functions")
    rng = galsim.BaseDeviate(57721)
    in_xis = [
        galsim.getCOSMOSNoise(cosmos_scale=0.03,
                              rng=rng).dilate(1 + i * 0.05).rotate(
                                  5 * i * galsim.degrees) for i in range(n_im)
    ]

    print("Creating noise images")
    img_sets = []
    for i in range(n_trial):
        imgs = []
        for xi in in_xis:
            img = galsim.Image(128, 128, scale=0.03)
            img.addNoise(xi)
            imgs.append(img)
        img_sets.append(imgs)

    print("Constructing `ChromaticRealGalaxy`s")
    crgs = []
    for imgs in img_sets:
        crgs.append(
            galsim.ChromaticRealGalaxy.makeFromImages(imgs,
                                                      bands,
                                                      in_PSF,
                                                      in_xis,
                                                      SEDs=SEDs,
                                                      maxk=maxk))

    print("Convolving by output PSF")
    objs = [galsim.Convolve(crg, out_PSF) for crg in crgs]

    with assert_raises(galsim.GalSimError):
        noise = objs[0].noise  # Invalid before drawImage is called

    print("Drawing through output filter")
    out_imgs = [
        obj.drawImage(visband, nx=30, ny=30, scale=0.1) for obj in objs
    ]

    noise = objs[0].noise

    print("Measuring images' correlation functions")
    xi_obs = galsim.correlatednoise.CorrelatedNoise(out_imgs[0])
    for img in out_imgs[1:]:
        xi_obs += galsim.correlatednoise.CorrelatedNoise(img)
    xi_obs /= n_trial
    xi_obs_img = galsim.Image(30, 30, scale=0.1)
    xi_obs.drawImage(xi_obs_img)
    noise_img = galsim.Image(30, 30, scale=0.1)
    noise.drawImage(noise_img)

    print("Predicted/Observed variance:",
          noise.getVariance() / xi_obs.getVariance())
    print("Predicted/Observed xlag-1 covariance:",
          noise_img.array[14, 15] / xi_obs_img.array[14, 15])
    print("Predicted/Observed ylag-1 covariance:",
          noise_img.array[15, 14] / xi_obs_img.array[15, 14])
    # Just test that the covariances for nearest neighbor pixels are accurate.
    np.testing.assert_allclose(noise_img.array[14:17, 14:17],
                               xi_obs_img.array[14:17, 14:17],
                               rtol=0,
                               atol=noise.getVariance() * tol)
Example #4
def main(argv):
    """
    Make images using model PSFs and galaxy cluster shear:
      - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles
        (like in demo10) or parametric fits to those profiles.  We choose 40% of the galaxies
        to use the images, and the other 60% to use the parametric fits.
      - The real galaxy images include some initial correlated noise from the original HST
        observation.  However, we whiten the noise of the final image so the final image has
        stationary Gaussian noise, rather than correlated noise.
    """
    global logger
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("mock_superbit_data")

    # Define some parameters we'll use below.
    # Normally these would be read in from some parameter file.
    global pixel_scale
    pixel_scale = 0.206  # arcsec/pixel
    global image_xsize
    image_xsize = 6665  # size of image in pixels
    global image_ysize
    image_ysize = 4453  # size of image in pixels
    global image_xsize_arcsec
    image_xsize_arcsec = image_xsize * pixel_scale  # size of big image in each dimension (arcsec)
    global image_ysize_arcsec
    image_ysize_arcsec = image_ysize * pixel_scale  # size of big image in each dimension (arcsec)
    global center_ra
    center_ra = 19.3 * galsim.hours  # The RA, Dec of the center of the image on the sky
    global center_dec
    center_dec = -33.1 * galsim.degrees
    global exp_time
    exp_time = 300
    global sky_bkg  # mean sky background from AG's paper
    sky_bkg = 0.32  # ADU / s / pix
    global sky_sigma  # standard deviation of sky background
    sky_sigma = 0.16  # ADU / s / pix
    global nobj
    nobj = 22  # number of galaxies in entire field
    global nstars
    nstars = 300  # number of stars in the entire field
    global flux_scaling
    global tel_diam
    tel_diam = 0.5
    global lam
    lam = 625  # Central wavelength for Airy disk
    global optics  # will store the Zernike component of the PSF
    psf_path = '/Users/jemcclea/Research/GalSim/examples/data/fpsc_flight_jitter_psf_oversampled_fixed_10x'
    global nfw  # will store the NFWHalo information
    global cosmos_cat  # will store the COSMOS catalog from which we draw objects
    global example_cat  # also a COSMOS catalog which will contain cluster galaxies
    # Set up the NFWHalo:
    mass = 5E14  # Cluster mass (Msol/h)
    nfw_conc = 4  # Concentration parameter = virial radius / NFW scale radius
    nfw_z_halo = 0.17  # redshift of the halo
    omega_m = 0.3  # Omega matter for the background cosmology.
    omega_lam = 0.7  # Omega lambda for the background cosmology.

    nfw = galsim.NFWHalo(mass=mass,
                         conc=nfw_conc,
                         redshift=nfw_z_halo,
                         omega_m=omega_m,
                         omega_lam=omega_lam)
    logger.info('Set up NFW halo for lensing')

    # Read in galaxy catalog
    cat_file_name = 'real_galaxy_catalog_25.2.fits'
    dir = 'data/COSMOS_25.2_training_sample/'

    cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir)
    logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects)

    # Also read in example catalog
    example_cat_file_name = 'data/real_galaxy_catalog_23.5_example.fits'
    example_cat = galsim.COSMOSCatalog(example_cat_file_name)

    # The catalog returns objects that are appropriate for HST in 1 second exposures.  So for our
    # telescope we scale up by the relative area, exposure time and pixel scale
    hst_eff_area = 2.4**2 * (1. - 0.33**2)
    sbit_eff_area = tel_diam**2 * (1. - 0.380**2)
    flux_scaling = (sbit_eff_area / hst_eff_area) * exp_time * (pixel_scale /
                                                                .05)**2
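    # i.e. scale the HST fluxes by the ratio of effective collecting areas, the exposure
    # time, and (pixel_scale / 0.05)**2, the ratio of the 0.206" SuperBIT pixel area to the
    # 0.05" scale assumed for the HST input images.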

    ### Now create PSF. First, define Zernike polynomial component
    lam_over_diam = lam * 1.e-9 / tel_diam  # radians
    lam_over_diam *= 206265  # arcsec
    aberrations = [0.0] * 12  # Set the initial size.
    aberrations[4] = -0.00725859  # Noll index 4 = Defocus
    aberrations[5:7] = [0.0, -0.00]  # Noll index 5,6 = Astigmatism
    aberrations[7:9] = [0.07, 0.00]  # Noll index 7,8 = Coma
    aberrations[11] = 0.00133254  # Noll index 11 = Spherical

    logger.info('Calculated lambda over diam = %f arcsec', lam_over_diam)

    optics = galsim.OpticalPSF(lam_over_diam,
                               obscuration=0.380,
                               aberrations=aberrations)
    logger.info('Made telescope PSF profile')

    ###
    ### LOOP OVER PSFs TO MAKE GROUPS OF IMAGES
    ### WITHIN EACH PSF, ITERATE 5 TIMES TO MAKE 5 SEPARATE IMAGES
    ###
    all_psfs = glob.glob(psf_path + "/*247530*.psf")  # this is 121s
    logger.info('Beginning loop over jitter/optical psfs')

    for psf_filen in all_psfs:
        logger.info('Beginning PSF %s...' % psf_filen)

        for i in numpy.arange(1, 2):
            logger.info('Beginning loop %d' % i)

            random_seed = 23058923781
            rng = galsim.BaseDeviate(random_seed)

            # This is specific to Javier mock PSFs
            try:
                root = psf_filen.split('data/')[1].split('/')[0]
                timescale = psf_filen.split('_10x/')[1].split('.')[0]
                outname = ''.join([
                    'mock_superbit_', root, timescale,
                    str(i).zfill(3), '.fits'
                ])
                truth_file_name = ''.join([
                    './output/truth_', root, timescale,
                    str(i).zfill(3), '.dat'
                ])
                file_name = os.path.join('output', outname)
            except:
                pdb.set_trace()

            # Setting up a truth catalog
            names = [
                'gal_num', 'x_image', 'y_image', 'ra', 'dec', 'g1_meas',
                'g2_meas', 'nfw_mu', 'redshift', 'flux'
            ]
            types = [
                int, float, float, float, float, float, float, float, float,
                float
            ]
            truth_catalog = galsim.OutputCatalog(names, types)

            # Set up the image:
            full_image = galsim.ImageF(image_xsize, image_ysize)
            sky_level = exp_time * sky_bkg
            full_image.fill(sky_level)
            full_image.setOrigin(0, 0)

            # We keep track of how much noise is already in the image from the RealGalaxies.
            noise_image = galsim.ImageF(image_xsize, image_ysize)
            noise_image.setOrigin(0, 0)

            # Make a slightly non-trivial WCS.  We'll use a slightly rotated coordinate system
            # and center it at the image center.
            theta = 0.17 * galsim.degrees
            dudx = numpy.cos(theta) * pixel_scale
            dudy = -numpy.sin(theta) * pixel_scale
            dvdx = numpy.sin(theta) * pixel_scale
            dvdy = numpy.cos(theta) * pixel_scale
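            # (dudx, dudy; dvdx, dvdy) is just a rotation by theta times the pixel scale,
            # i.e. the local Jacobian of the world coordinate system.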
            image_center = full_image.true_center
            affine = galsim.AffineTransform(dudx,
                                            dudy,
                                            dvdx,
                                            dvdy,
                                            origin=full_image.true_center)
            sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec)

            wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec)
            full_image.wcs = wcs

            # Now let's read in the PSFEx PSF model.  We read the image directly into an
            # InterpolatedImage GSObject, so we can manipulate it as needed
            psf_wcs = wcs
            psf_file = os.path.join(psf_path, psf_filen)
            psf = galsim.des.DES_PSFEx(psf_file, wcs=psf_wcs)
            logger.info('Constructed PSF object from PSFEx file')

            # Loop over galaxy objects:
            for k in range(nobj):
                time1 = time.time()

                # The usual random number generator using a different seed for each galaxy.
                ud = galsim.UniformDeviate(random_seed + k + 1)

                try:
                    # make single galaxy object
                    stamp, truth = make_a_galaxy(ud=ud,
                                                 wcs=wcs,
                                                 psf=psf,
                                                 affine=affine)
                    # Find the overlapping bounds:
                    bounds = stamp.bounds & full_image.bounds

                    # We need to keep track of how much variance we have currently in the image, so when
                    # we add more noise, we can omit what is already there.

                    noise_image[bounds] += truth.variance

                    # Finally, add the stamp to the full image.

                    full_image[bounds] += stamp[bounds]
                    time2 = time.time()
                    tot_time = time2 - time1
                    logger.info(
                        'Galaxy %d positioned relative to center t=%f s', k,
                        tot_time)
                    this_flux = numpy.sum(stamp.array)
                    row = [
                        k, truth.x, truth.y, truth.ra, truth.dec, truth.g1,
                        truth.g2, truth.mu, truth.z, this_flux
                    ]
                    truth_catalog.addRow(row)
                except:
                    logger.info('Galaxy %d has failed, skipping...', k)
                    pdb.set_trace()

            ###### Inject cluster galaxy objects:

            random_seed = 892465352
            for k in range(50):
                time1 = time.time()

                # The usual random number generator using a different seed for each galaxy.
                ud = galsim.UniformDeviate(random_seed + k + 1)

                try:
                    # make single galaxy object
                    cluster_stamp, truth = make_cluster_galaxy(ud=ud,
                                                               wcs=wcs,
                                                               psf=psf,
                                                               affine=affine)
                    # Find the overlapping bounds:
                    bounds = cluster_stamp.bounds & full_image.bounds

                    # We need to keep track of how much variance we have currently in the image, so when
                    # we add more noise, we can omit what is already there.

                    noise_image[bounds] += truth.variance

                    # Finally, add the stamp to the full image.

                    full_image[bounds] += cluster_stamp[bounds]
                    time2 = time.time()
                    tot_time = time2 - time1
                    logger.info(
                        'Cluster galaxy %d positioned relative to center t=%f s',
                        k, tot_time)
                    this_flux = numpy.sum(cluster_stamp.array)
                    row = [
                        k, truth.x, truth.y, truth.ra, truth.dec, truth.g1,
                        truth.g2, truth.mu, truth.z, this_flux
                    ]
                    truth_catalog.addRow(row)
                except:
                    logger.info('Cluster galaxy %d has failed, skipping...', k)
                    pdb.set_trace()

            ####
            ### Now repeat process for stars!
            ####

            random_seed_stars = 2308173501873

            for k in range(nstars):
                time1 = time.time()
                ud = galsim.UniformDeviate(random_seed_stars + k + 1)

                star_stamp, truth = make_a_star(ud=ud,
                                                wcs=wcs,
                                                psf=psf,
                                                affine=affine)
                bounds = star_stamp.bounds & full_image.bounds

                # Add the stamp to the full image.
                try:
                    full_image[bounds] += star_stamp[bounds]

                    time2 = time.time()
                    tot_time = time2 - time1

                    logger.info(
                        'Star %d: positioned relative to center, t=%f s', k,
                        tot_time)
                    this_flux = numpy.sum(star_stamp.array)
                    row = [
                        k, truth.x, truth.y, truth.ra, truth.dec, truth.g1,
                        truth.g2, truth.mu, truth.z, this_flux
                    ]
                    truth_catalog.addRow(row)

                except:
                    logger.info('Star %d has failed, skipping...', k)
                    pass

            # If real-type COSMOS galaxies are used, the noise across the image won't be
            # uniform, since each RealGalaxy stamp contributes its own noise (tracked in
            # noise_image above).
            #
            # So the first thing to do is to make the Gaussian noise uniform across the whole image.

            max_current_variance = numpy.max(noise_image.array)
            noise_image = max_current_variance - noise_image
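            # After this flip, noise_image holds, pixel by pixel, the extra variance needed
            # to bring every pixel up to the same total variance, max_current_variance.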

            vn = galsim.VariableGaussianNoise(rng, noise_image)
            full_image.addNoise(vn)

            # Now max_current_variance is the noise level across the full image.  We don't want to add that
            # twice, so subtract off this much from the intended noise that we want to end up in the image.
            sky_sigma -= numpy.sqrt(max_current_variance)

            # Regardless of galaxy type, add Gaussian noise with this variance to the final image.
            this_noise_sigma = sky_sigma * exp_time
            noise = galsim.GaussianNoise(rng, sigma=this_noise_sigma)
            full_image.addNoise(noise)

            logger.debug('Added noise to final output image')
            full_image.write(file_name)

            # Write truth catalog to file.
            truth_catalog.write(truth_file_name)
            logger.info('Wrote image to %r', file_name)

            logger.info(' ')
            logger.info('completed run %d for psf %s', i, psf_filen)
            i = i + 1
            logger.info(' ')

        logger.info(' ')
        logger.info('completed all images')
        logger.info(' ')
Example #5
# Compute LSST focus curves including effects of Silicon absorption length and
# refraction, and the fast f/1.2 beam.
# Compare this to figure 8 from O'Connor++06 "Study of silicon sensor thickness
# optimization for LSST".  GalSim results below are grossly consistent, but
# differ in the exact shape and values that the focus curves take.

import numpy as np
import matplotlib.pyplot as plt
import galsim

bd = galsim.BaseDeviate(12)
depths = np.linspace(-25, 25, 41)  # microns
obj = galsim.Gaussian(sigma=1e-4)
sed = galsim.SED("1", wave_type='nm', flux_type='flambda')

fig, ax = plt.subplots()
oversampling = 16
for filter in ['g', 'z', 'y']:
    bandpass = galsim.Bandpass("LSST_{}.dat".format(filter), wave_type='nm')
    Ts = []
    for depth in depths:
        depth_pix = depth / 10
        surface_ops = [
            galsim.WavelengthSampler(sed, bandpass, rng=bd),
            galsim.FRatioAngles(1.234, 0.606, rng=bd),
            galsim.FocusDepth(depth_pix),
            galsim.Refraction(3.9)  # approx number for Silicon
        ]
        img = obj.drawImage(
            sensor=galsim.SiliconSensor(),
            method='phot',
Example #6
def main(argv):
    """
    Getting reasonably close to including all the principal features of an image from a
    ground-based telescope:
      - Use a bulge plus disk model for the galaxy
      - Both galaxy components are Sersic profiles (n=3.5 and n=1.5 respectively)
      - Let the PSF have both atmospheric and optical components.
      - The atmospheric component is a Kolmogorov spectrum.
      - The optical component has some defocus, coma, and astigmatism.
      - Add both Poisson noise to the image and Gaussian read noise.
      - Let the pixels be slightly distorted relative to the sky.
    """
    # We do some fancier logging for demo3, just to demonstrate that we can:
    # - we log to both stdout and to a log file
    # - the log file has a lot more (mostly redundant) information
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    if not os.path.isdir('output'):
        os.mkdir('output')
    logFile = logging.FileHandler(os.path.join("output", "script3.log"))
    logFile.setFormatter(logging.Formatter("%(name)s[%(levelname)s] %(asctime)s: %(message)s"))
    logging.getLogger("demo3").addHandler(logFile)
    logger = logging.getLogger("demo3")

    gal_flux = 1.e6        # ADU  ("Analog-to-digital units", the units of the numbers on a CCD)
    bulge_n = 3.5          #
    bulge_re = 2.3         # arcsec
    disk_n = 1.5           #
    disk_r0 = 0.85         # arcsec (corresponds to half_light_radius of ~3.7 arcsec)
    bulge_frac = 0.3       #
    gal_q = 0.73           # (axis ratio 0 < q < 1)
    gal_beta = 23          # degrees (position angle on the sky)
    atmos_fwhm=2.1         # arcsec
    atmos_e = 0.13         #
    atmos_beta = 0.81      # radians
    opt_defocus=0.53       # wavelengths
    opt_a1=-0.29           # wavelengths
    opt_a2=0.12            # wavelengths
    opt_c1=0.64            # wavelengths
    opt_c2=-0.33           # wavelengths
    opt_obscuration=0.3    # linear scale size of secondary mirror obscuration
    lam = 800              # nm    NB: don't use lambda - that's a reserved word.
    tel_diam = 4.          # meters
    pixel_scale = 0.23     # arcsec / pixel
    image_size = 64        # n x n pixels
    wcs_g1 = -0.02         #
    wcs_g2 = 0.01          #
    sky_level = 2.5e4      # ADU / arcsec^2
    gain = 1.7             # e- / ADU
                           # Note: here we assume 1 photon -> 1 e-, ignoring QE.  If you wanted,
                           # you could include the QE factor as part of the gain.
    read_noise = 0.3       # e- / pixel

    random_seed = 1314662

    logger.info('Starting demo script 3 using:')
    logger.info('    - Galaxy is bulge plus disk, flux = %.1e',gal_flux)
    logger.info('       - Bulge is Sersic (n = %.1f, re = %.2f), frac = %.1f',
                bulge_n,bulge_re,bulge_frac)
    logger.info('       - Disk is Sersic (n = %.1f, r0 = %.2f), frac = %.1f',
                disk_n,disk_r0,1-bulge_frac)
    logger.info('       - Shape is q,beta (%.2f,%.2f deg)', gal_q, gal_beta)
    logger.info('    - Atmospheric PSF is Kolmogorov with fwhm = %.2f',atmos_fwhm)
    logger.info('       - Shape is e,beta (%.2f,%.2f rad)', atmos_e, atmos_beta)
    logger.info('    - Optical PSF has defocus = %.2f, astigmatism = (%.2f,%.2f),',
                opt_defocus, opt_a1, opt_a2)
    logger.info('          coma = (%.2f,%.2f), lambda = %.0f nm, D = %.1f m',
                opt_c1, opt_c2, lam, tel_diam)
    logger.info('          obscuration linear size = %.1f',opt_obscuration)
    logger.info('    - pixel scale = %.2f,',pixel_scale)
    logger.info('    - WCS distortion = (%.2f,%.2f),',wcs_g1,wcs_g2)
    logger.info('    - Poisson noise (sky level = %.1e, gain = %.1f).',sky_level, gain)
    logger.info('    - Gaussian read noise (sigma = %.2f).',read_noise)

    # Initialize the (pseudo-)random number generator that we will be using below.
    rng = galsim.BaseDeviate(random_seed+1)

    # Define the galaxy profile.
    # Normally Sersic profiles are specified by half-light radius, the radius that
    # encloses half of the total flux.  However, for some purposes, it can be
    # preferable to instead specify the scale radius, where the surface brightness
    # drops to 1/e of the central peak value.
    bulge = galsim.Sersic(bulge_n, half_light_radius=bulge_re)
    disk = galsim.Sersic(disk_n, scale_radius=disk_r0)

    # Objects may be multiplied by a scalar (which means scaling the flux) and also
    # added to each other.
    gal = bulge_frac * bulge + (1-bulge_frac) * disk
    # Could also have written the following, which does the same thing:
    #   gal = galsim.Add([ bulge.withFlux(bulge_frac) , disk.withFlux(1-bulge_frac) ])
    # Both syntaxes work with more than two summands as well.

    # Set the overall flux of the combined object.
    gal = gal.withFlux(gal_flux)
    # Since the total flux of the components was 1, we could also have written:
    #   gal *= gal_flux
    # The withFlux method will always set the flux to the given value, while `gal *= flux`
    # will multiply whatever the current flux is by the given factor.

    # Set the shape of the galaxy according to axis ratio and position angle
    # Note: All angles in GalSim must have explicit units.  Options are:
    #       galsim.radians
    #       galsim.degrees
    #       galsim.arcmin
    #       galsim.arcsec
    #       galsim.hours
    gal_shape = galsim.Shear(q=gal_q, beta=gal_beta*galsim.degrees)
    gal = gal.shear(gal_shape)
    logger.debug('Made galaxy profile')

    # Define the atmospheric part of the PSF.
    # Note: the flux here is the default flux=1.
    atmos = galsim.Kolmogorov(fwhm=atmos_fwhm)
    # For the PSF shape here, we use ellipticity rather than axis ratio.
    # And the position angle can be either degrees or radians.  Here we chose radians.
    atmos = atmos.shear(e=atmos_e, beta=atmos_beta*galsim.radians)
    logger.debug('Made atmospheric PSF profile')

    # Define the optical part of the PSF:
    # The first argument of OpticalPSF below is lambda/diam (wavelength of light / telescope
    # diameter), which needs to be in the same units used to specify the image scale.  We are using
    # arcsec for that, so we have to self-consistently use arcsec here, using the following
    # calculation:
    lam_over_diam = lam * 1.e-9 / tel_diam # radians
    lam_over_diam *= 206265  # arcsec
    # Note that we could also have made GalSim do the conversion for us if we did not know the right
    # factor:
    # lam_over_diam = lam * 1.e-9 / tel_diam * galsim.radians
    # lam_over_diam = lam_over_diam / galsim.arcsec
    logger.debug('Calculated lambda over diam = %f arcsec', lam_over_diam)
    # The rest of the values should be given in units of the wavelength of the incident light.
    optics = galsim.OpticalPSF(lam_over_diam,
                               defocus = opt_defocus,
                               coma1 = opt_c1, coma2 = opt_c2,
                               astig1 = opt_a1, astig2 = opt_a2,
                               obscuration = opt_obscuration)
    logger.debug('Made optical PSF profile')

    # So far, our coordinate transformation between image and sky coordinates has been just a
    # scaling of the units between pixels and arcsec, which we have defined as the "pixel scale".
    # This is fine for many purposes, so we have made it easy to treat the coordinate systems
    # this way via the `scale` parameter to commands like drawImage.  However, in general, the
    # transformation between the two coordinate systems can be more complicated than that,
    # including distortions, rotations, variation in pixel size, and so forth.  GalSim can
    # model a number of different "World Coordinate System" (WCS) transformations.  See the
    # docstring for BaseWCS for more information.

    # In this case, we use a WCS that includes a distortion (specified as g1,g2 in this case),
    # which we call a ShearWCS.
    wcs = galsim.ShearWCS(scale=pixel_scale, shear=galsim.Shear(g1=wcs_g1, g2=wcs_g2))
    logger.debug('Made the WCS')

    # Next we will convolve the components in world coordinates.
    psf = galsim.Convolve([atmos, optics])
    final = galsim.Convolve([psf, gal])
    logger.debug('Convolved components into final profile')

    # This time we specify a particular size for the image rather than let GalSim
    # choose the size automatically.  GalSim has several kinds of images that it can use:
    #   ImageF uses 32-bit floats    (like a C float, aka numpy.float32)
    #   ImageD uses 64-bit floats    (like a C double, aka numpy.float64)
    #   ImageS uses 16-bit integers  (usually like a C short, aka numpy.int16)
    #   ImageI uses 32-bit integers  (usually like a C int, aka numpy.int32)
    # If you let the GalSim drawImage command create the image for you, it will create an ImageF.
    # However, you can make a different type if you prefer.  In this case, we still use
    # ImageF, since 32-bit floats are fine.  We just want to set the size explicitly.
    image = galsim.ImageF(image_size, image_size)
    # Draw the image with the given WCS.  Note that we use wcs rather than scale when the
    # WCS is more complicated than just a pixel scale.
    final.drawImage(image=image, wcs=wcs)

    # Also draw the effective PSF by itself and the optical PSF component alone.
    image_epsf = galsim.ImageF(image_size, image_size)
    psf.drawImage(image_epsf, wcs=wcs)

    # We also draw the optical part of the PSF at its own Nyquist-sampled pixel size
    # in order to better see the features of the (highly structured) profile.
    # In this case, we draw a "surface brightness image" using method='sb'.  Rather than
    # integrate the flux over the area of each pixel, this method just samples the surface
    # brightness value at the locations of the pixel centers.  We will encounter a few other
    # drawing methods as we go through this sequence of demos.  cf. demos 7, 8, 10, and 11.
    image_opticalpsf = optics.drawImage(method='sb')
    logger.debug('Made image of the profile')

    # Add a constant sky level to the image.
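    # (sky_level is in ADU/arcsec^2, so multiplying by pixel_scale**2, the pixel area in
    # arcsec^2, gives the sky level in ADU per pixel.)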
    image += sky_level * pixel_scale**2

    # This time, we use CCDNoise to model the real noise in a CCD image.  It takes a sky level,
    # gain, and read noise, so it can be a bit more realistic than the simpler GaussianNoise
    # or PoissonNoise that we used in demos 1 and 2.
    #
    # The gain is in units of e-/ADU.  Technically, one should also account for quantum efficiency
    # (QE) of the detector. An ideal CCD has one electron per incident photon, but real CCDs have
    # QE less than 1, so not every photon triggers an electron.  We are essentially folding
    # the quantum efficiency (and filter transmission and anything else like that) into the gain.
    # The read_noise value is given as e-/pixel.  This is modeled as a pure Gaussian noise
    # added to the image after applying the pure Poisson noise.
    image.addNoise(galsim.CCDNoise(rng, gain=gain, read_noise=read_noise))

    # Subtract off the sky.
    image -= sky_level * pixel_scale**2
    logger.debug('Added Gaussian and Poisson noise')

    # Write the images to files.
    file_name = os.path.join('output', 'demo3.fits')
    file_name_epsf = os.path.join('output','demo3_epsf.fits')
    file_name_opticalpsf = os.path.join('output','demo3_opticalpsf.fits')
    image.write(file_name)
    image_epsf.write(file_name_epsf)
    image_opticalpsf.write(file_name_opticalpsf)
    logger.info('Wrote image to %r', file_name)
    logger.info('Wrote effective PSF image to %r', file_name_epsf)
    logger.info('Wrote optics-only PSF image (Nyquist sampled) to %r', file_name_opticalpsf)

    # Check that the HSM package, which is bundled with GalSim, finds a good estimate
    # of the shear.
    results = galsim.hsm.EstimateShear(image, image_epsf)

    logger.info('HSM reports that the image has observed shape and size:')
    logger.info('    e1 = %.3f, e2 = %.3f, sigma = %.3f (pixels)', results.observed_shape.e1,
                results.observed_shape.e2, results.moments_sigma)
    logger.info('When carrying out Regaussianization PSF correction, HSM reports')
    logger.info('    e1, e2 = %.3f, %.3f',
                results.corrected_e1, results.corrected_e2)
    logger.info('Expected values in the limit that noise and non-Gaussianity are negligible:')
    # Convention for shear addition is to apply the second term initially followed by the first.
    # So this needs to be the WCS shear + the galaxy shape in that order.
    total_shape = galsim.Shear(g1=wcs_g1, g2=wcs_g2) + gal_shape
    logger.info('    e1, e2 = %.3f, %.3f', total_shape.e1, total_shape.e2)
Example #7
    def __init__(self,
                 *,
                 rng,
                 im_width,
                 buff,
                 scale,
                 trunc=1,
                 variation_factor=10,
                 fwhm=0.8):
        self._rng = rng
        self._im_cen = (im_width - 1) / 2
        self._scale = scale
        self._tot_width = im_width + 2 * buff
        self._x_scale = 2.0 / self._tot_width / scale
        self._buff = buff
        self._variation_factor = variation_factor
        self._median_seeing = fwhm

        # set the power spectrum and PSF params
        # Heymans et al, 2012 found L0 ~= 3 arcmin, given as 180 arcsec here.
        def _pf(k):
            return (k**2 +
                    (1. / 180)**2)**(-11. / 6.) * np.exp(-(k * trunc)**2)
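        # _pf is a von Karman-like power spectrum: the (k**2 + (1/180)**2)**(-11/6) term has
        # an outer scale of 180 arcsec (~3 arcmin), and exp(-(k*trunc)**2) truncates high k.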

        self._ps = galsim.PowerSpectrum(e_power_function=_pf,
                                        b_power_function=_pf)
        ng = 128
        gs = max(self._tot_width * self._scale / ng, 1)
        self.ng = ng
        self.gs = gs
        seed = self._rng.randint(1, 2**30)
        self._ps.buildGrid(grid_spacing=gs,
                           ngrid=ng,
                           get_convergence=True,
                           variance=(0.01 * variation_factor)**2,
                           rng=galsim.BaseDeviate(seed))

        # cache the galsim LookupTable2D objects by hand to speed computations
        g1_grid, g2_grid, mu_grid = galsim.lensing_ps.theoryToObserved(
            self._ps.im_g1.array, self._ps.im_g2.array,
            self._ps.im_kappa.array)

        self._lut_g1 = galsim.table.LookupTable2D(
            self._ps.x_grid,
            self._ps.y_grid,
            g1_grid.T,
            edge_mode='wrap',
            interpolant=galsim.Lanczos(5))
        self._lut_g2 = galsim.table.LookupTable2D(
            self._ps.x_grid,
            self._ps.y_grid,
            g2_grid.T,
            edge_mode='wrap',
            interpolant=galsim.Lanczos(5))
        self._lut_mu = galsim.table.LookupTable2D(
            self._ps.x_grid,
            self._ps.y_grid,
            mu_grid.T - 1,
            edge_mode='wrap',
            interpolant=galsim.Lanczos(5))

        self._g1_mean = self._rng.normal() * 0.01 * variation_factor
        self._g2_mean = self._rng.normal() * 0.01 * variation_factor

        def _getlogmnsigma(mean, sigma):
            logmean = np.log(mean) - 0.5 * np.log(1 + sigma**2 / mean**2)
            logvar = np.log(1 + sigma**2 / mean**2)
            logsigma = np.sqrt(logvar)
            return logmean, logsigma
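        # _getlogmnsigma converts a linear-space mean and sigma into the parameters of the
        # underlying normal distribution of a lognormal; used below to draw a lognormal FWHM.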

        lm, ls = _getlogmnsigma(self._median_seeing, 0.1)
        self._fwhm_central = np.exp(self._rng.normal() * ls + lm)
Example #8
def main(argv):
    """
    Make images using model PSFs and galaxy cluster shear:
      - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles
        (like in demo10) or parametric fits to those profiles.
      - The real galaxy images include some initial correlated noise from the original HST
        observation.  However, we whiten the noise of the final image so the final image has
        stationary Gaussian noise, rather than correlated noise.
    """
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    global logger
    logger = logging.getLogger("mock_superbit_data")

    # Define some parameters we'll use below.
    # Normally these would be read in from some parameter file.
    global pixel_scale
    pixel_scale = 0.206                   # arcsec/pixel
    global image_xsize 
    image_xsize = 6665                    # size of image in pixels
    global image_ysize
    image_ysize = 4453                    # size of image in pixels
    global image_xsize_arcsec
    image_xsize_arcsec = image_xsize*pixel_scale # size of big image in each dimension (arcsec)
    global image_ysize_arcsec
    image_ysize_arcsec = image_ysize*pixel_scale # size of big image in each dimension (arcsec)
    global center_ra
    center_ra = 19.3*galsim.hours         # The RA, Dec of the center of the image on the sky
    global center_dec
    center_dec = -33.1*galsim.degrees
    global nobj
    nobj = 30                        # number of galaxies in entire field; this number matches empirical
    global nstars
    nstars = 1000                         # number of stars in the entire field
    global flux_scaling                  # Let's figure out the flux for a 0.5 m class telescope
    global tel_diam
    tel_diam = 0.5
    global psf_fwhm
    psf_fwhm = 0.30
    global lam
    lam = 625                            # Central wavelength for an airy disk
    global exp_time
    exp_time = 300
    global noise_variance
    global sky_level
   
    psf_path = '/Users/jemcclea/Research/SuperBIT/superbit-ngmix/scripts/outputs/psfex_output'
    global nfw                        # will store the NFWHalo information
    global cosmos_cat                 # will store the COSMOS catalog from which we draw objects
    
    # Set up the NFWHalo:
    mass=5E14              # Cluster mass (Msol/h)
    nfw_conc = 4           # Concentration parameter = virial radius / NFW scale radius
    nfw_z_halo = 0.17     # redshift of the halo --> correct!
    nfw_z_source = 0.6     # redshift of the lensed sources; COSMOS galaxies don't have any
    omega_m = 0.3          # Omega matter for the background cosmology.
    omega_lam = 0.7        # Omega lambda for the background cosmology.
    
    nfw = galsim.NFWHalo(mass=mass, conc=nfw_conc, redshift=nfw_z_halo,
                             omega_m=omega_m, omega_lam=omega_lam)
    logger.info('Set up NFW halo for lensing')

    # Read in galaxy catalog
    """
    cat_file_name = 'real_galaxy_catalog_23.5.fits'
    dir = 'data/COSMOS_23.5_training_sample'
    #cat_file_name = 'real_galaxy_catalog_23.5_example.fits'
    #dir = 'data'
    """
    cat_file_name = 'real_galaxy_catalog_25.2.fits'
    dir = 'data/COSMOS_25.2_training_sample/'

    cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir)
    logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects)
    
    # The catalog returns objects that are appropriate for HST in 1 second exposures.  So for our
    # telescope we scale up by the relative area and exposure time.
    # Will also multiply by the gain and relative pixel scales...
    hst_eff_area = 2.4**2 * (1.-0.33**2)
    sbit_eff_area = tel_diam**2 * (1.-0.3840**2) 
    #sbit_eff_area = tel_diam**2 * (1.-0.1**2) 
  
    ###
    ### LOOP OVER PSFs TO MAKE GROUPS OF IMAGES
    ### WITHIN EACH PSF, ITERATE 5 TIMES TO MAKE 5 SEPARATE IMAGES
    ###
    #all_psfs=glob.glob(psf_path+"/*.psf")
    #all_psfs=glob.glob(psf_path+"/*300*.psf")

    random_seed = 35609377914
    
    i=0
    for psf_filen in range(1):
        
        logger.info('Beginning PSF %s...'% psf_filen)
        rng = galsim.BaseDeviate(random_seed)

        timescale=str(exp_time)
       
        outname=''.join(['debug_0.3FWHM_gaussStar_',timescale,'_',str(i),'.fits'])
        truth_file_name=''.join(['./output-debug/truth_0.3FWHM_gaussStar_',timescale,'_',str(i),'.dat'])
        file_name = os.path.join('output-debug',outname)

        # Set up the image:
        if timescale=='150':
            print("Automatically detecting a 150s exposure image, setting flux scale and noise accordingly")
            #noise_variance=570               # ADU^2  (Just use simple Gaussian noise here.)
            noise_variance=570               # ADU^2  (Just use simple Gaussian noise here.) 
            sky_level = 51                   # ADU 
            exp_time=150.
           
        else:
            print("Automatically detecting a 300s exposure image, setting flux scale and noise accordingly")
            #noise_variance=400              # ADU^2  (Just use simple Gaussian noise here.) 
            noise_variance=400              # ADU^2  (Just use simple Gaussian noise here.) 
            sky_level = 106                 # ADU  
            exp_time=300.
            
        flux_scaling = (sbit_eff_area/hst_eff_area) * exp_time * 3.33 * (.206/.05)**2
                
        # Setting up a truth catalog
        names = [ 'gal_num', 'x_image', 'y_image',
                      'ra', 'dec', 'g1_nopsf', 'g2_nopsf','g1_meas', 'g2_meas', 'fwhm','final_sigmaSize',
                      'nopsf_sigmaSize','nfw_g1', 'nfw_g2', 'nfw_mu', 'redshift','flux', 'stamp_sum', 'noisevar']
        types = [ int, float, float, float, float, float,
                      float, float, float, float, float, float,
                      float, float,float, float, float,float, float]
        truth_catalog = galsim.OutputCatalog(names, types)

        # Set up the image:
        
        full_image = galsim.ImageF(image_xsize, image_ysize)
        full_image.fill(sky_level)
        full_image.setOrigin(0,0)
               
        # We keep track of how much noise is already in the image from the RealGalaxies.
        noise_image = galsim.ImageF(image_xsize, image_ysize)
        noise_image.setOrigin(0,0)

        
        # Make a slightly non-trivial WCS.  We'll use a slightly rotated coordinate system
        # and center it at the image center.        
        theta = 0.17 * galsim.degrees
        dudx = numpy.cos(theta) * pixel_scale
        dudy = -numpy.sin(theta) * pixel_scale
        dvdx = numpy.sin(theta) * pixel_scale
        dvdy = numpy.cos(theta) * pixel_scale
        
        image_center = full_image.true_center
        affine = galsim.AffineTransform(dudx, dudy, dvdx, dvdy, origin=full_image.true_center)
        sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec)
        
        wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec)
        full_image.wcs = wcs

        
        # Loop over galaxy objects:

        for k in range(nobj):
            time1 = time.time()
                
            # The usual random number generator using a different seed for each galaxy.
            ud = galsim.UniformDeviate(random_seed+k+1)

            try: 
                # make single galaxy object
                logger.debug("about to make stamp %d...",k)
                stamp,truth = make_a_galaxy(ud=ud,wcs=wcs,affine=affine)
                logger.debug("stamp %d is made",k)
                # Find the overlapping bounds:
                bounds = stamp.bounds & full_image.bounds
                    
                # We need to keep track of how much variance we have currently in the image, so when
                # we add more noise, we can omit what is already there.
                noise_image[bounds] += truth.variance
            
                # Finally, add the stamp to the full image.
                full_image[bounds] += stamp[bounds]
                logger.debug("stamp %d added to full image",k)
                time2 = time.time()
                tot_time = time2-time1
                logger.info('Galaxy %d positioned relative to center t=%f s',
                                k, tot_time)
                try:
                    g1_real=stamp.FindAdaptiveMom().observed_shape.g1 
                    g2_real=stamp.FindAdaptiveMom().observed_shape.g2
                except:
                    g1_real=-9999.
                    g2_real=-9999.
                logger.debug("Galaxy %d made it past g1/g2_real stage",k)
                sum_flux=numpy.sum(stamp.array)
                row = [ k,truth.x, truth.y, truth.ra, truth.dec, truth.g1_nopsf, truth.g2_nopsf, g1_real, g2_real, truth.fwhm, truth.final_sigmaSize, truth.nopsf_sigmaSize,truth.g1,truth.g2, truth.mu, truth.z, truth.flux, sum_flux, truth.variance]
                truth_catalog.addRow(row)
                logger.debug("row for galaxy %d added to truth catalog\n\n",k)
                
            except:
                logger.info('Galaxy %d has failed, skipping...',k)
                #pdb.set_trace()
                pass
        
        ###### Inject cluster galaxy objects:
        
        random_seed=892465352
        center_coords = galsim.CelestialCoord(center_ra,center_dec)
        centerpix = wcs.toImage(center_coords)

        for k in range(40):
            time1 = time.time()
            # The usual random number generator using a different seed for each galaxy.
            ud = galsim.UniformDeviate(random_seed+k+1)
            
            try: 
                # make single galaxy object
                cluster_stamp,truth = make_cluster_galaxy(ud=ud,wcs=wcs,affine=affine,centerpix=centerpix)                
                # Find the overlapping bounds:
                bounds = cluster_stamp.bounds & full_image.bounds
                
                # We need to keep track of how much variance we have currently in the image, so when
                # we add more noise, we can omit what is already there.
            
                noise_image[bounds] += truth.variance
            
                # Finally, add the stamp to the full image.
                
                full_image[bounds] += cluster_stamp[bounds]
                time2 = time.time()
                tot_time = time2-time1
                logger.info('Cluster galaxy %d positioned relative to center t=%f s',
                                k, tot_time)
            except:
                logger.info('Cluster galaxy %d has failed, skipping...',k)
                pdb.set_trace()
        
        ####
        ### Now repeat process for stars!
        ####
    
        random_seed_stars=2308173501873
        
        for k in range(nstars):
            time1 = time.time()
            ud = galsim.UniformDeviate(random_seed_stars+k+1)
            try:

                star_stamp,truth=make_a_star(ud=ud,wcs=wcs,affine=affine)
                bounds = star_stamp.bounds & full_image.bounds
                logger.debug("star stamp & truth catalog made for star %d" %k)
                # Add the stamp to the full image.
                full_image[bounds] += star_stamp[bounds]
            
                time2 = time.time()
                tot_time = time2-time1
            
                logger.info('Star %d: positioned relative to center, t=%f s',
                                k,  tot_time)

                try:
                    g1_real=star_stamp.FindAdaptiveMom().observed_shape.g1
                    g2_real=star_stamp.FindAdaptiveMom().observed_shape.g2
                except:
                    g1_real = -9999.
                    g2_real = -9999.
                this_var = -9999.
                sum_flux=numpy.sum(star_stamp.array)
                row = [ k,truth.x, truth.y, truth.ra, truth.dec, 
                           truth.g1_nopsf, truth.g2_nopsf, g1_real, g2_real,
                           truth.fwhm, truth.final_sigmaSize, truth.nopsf_sigmaSize, truth.g1,
                            truth.g2, truth.mu, truth.z, truth.flux, sum_flux, truth.variance]
                truth_catalog.addRow(row)
                            
            except Exception:
                logger.info('Star %d has failed, skipping...',k)
                #pdb.set_trace()
                
                    
            
        # We already have some noise in the image, but it isn't uniform.  In the GalSim demos
        # the next step would be to top every pixel up to the maximum current variance (the
        # commented-out lines below); here that step is disabled, and we instead add Gaussian
        # noise whose per-pixel variance is the per-object variance tracked in noise_image.
        
        #max_current_variance = numpy.max(noise_image.array)
        #noise_image = max_current_variance - noise_image
       
        vn = galsim.VariableGaussianNoise(rng, noise_image)
        full_image.addNoise(vn)

        
        # If the uniform-variance step above were enabled, max_current_variance would already
        # be in the image, and it would need to be subtracted from the noise added next:
        #noise_variance -= max_current_variance

        # Now add Gaussian noise with this variance to the final image.
        noise = galsim.GaussianNoise(rng, sigma=math.sqrt(noise_variance))
        full_image.addNoise(noise)
        logger.info('Added noise to final output image')

        
        # Now write the image to disk.  
        full_image.write(file_name)

        
        # Add a FLXSCALE keyword for later stacking
        this_hdu = astropy.io.fits.open(file_name)
        this_hdu[0].header['FLXSCALE'] = 300.0/exp_time
        this_hdu.writeto(file_name, overwrite=True)
        logger.info('Wrote image to %r',file_name)

        
        # Write truth catalog to file. 
        truth_catalog.write(truth_file_name)
        
        i=i+1
        logger.info('completed run %d for psf %s',i,psf_filen)
        
    logger.info('completed all images')
Example #9
def generate_sample(args):
    """Generate one valid sample and write it to the destination arrays.

    Args (packed as a tuple):
        i: index of the current sample
        sersic_index: an optional value of the Sersic Index (default: None, i.e. random)
        psf_re: an optional PSF (default: None, i.e. random)
        noise: an optional Gaussian noise level (default: None, i.e. random)
    """
    # Unpack the arguments
    i, sersic_index, psf_re, noise = args
    global counter

    # Loop until a sample satisfies all criteria
    while True:

        with counter.get_lock():
            # Increment the global iteration counter
            counter.value += 1
            # Initialize the random number generators
            random.seed(random_seed + counter.value)
            rng = galsim.BaseDeviate(random_seed + counter.value + 1)

        # PSF Moffat FWHM in arcsec: fixed vs random
        psf_re = random.uniform(0.5, 1) if psf_re is None else psf_re

        # Gaussian noise level: fixed vs random
        noise = random.randint(200, 400) if noise is None else noise

        # Sersic index: discrete vs continuous
        bulge_n = random.uniform(0.5,
                                 6) if sersic_index is None else sersic_index

        # Sersic radius, unit arcsec
        bulge_re = random.uniform(0.1, 0.6)

        # q is the axis ratio (b/a) and beta is the orientation angle.
        # You could predict q and beta directly, but beta would have a discontinuity
        # issue: a jump from 180 degrees back to 0 degrees.
        # Instead, sample the reduced-shear components g1 and g2, which encode the
        # ellipticity and orientation without any discontinuity.
        A = random.uniform(
            0, 0.67)  # A = (1-q)/(1+q), so gal_q = b/a ranges in (0.2, 1)
        gal_q = (1 - A) / (1 + A)
        gal_beta = random.uniform(0, 3.14)  # radians
        g_1 = A * np.cos(2 * gal_beta)
        g_2 = A * np.sin(2 * gal_beta)

        gal_flux = 1e5 * random.uniform(0.3, 4)

        gal = galsim.Sersic(bulge_n, half_light_radius=bulge_re)
        gal = gal.withFlux(gal_flux)
        gal = gal.shear(g1=g_1, g2=g_2)
        psf = galsim.Moffat(beta=psf_beta, flux=1.0, fwhm=psf_re)
        final = galsim.Convolve([psf, gal])
        image = galsim.ImageF(image_size, image_size, scale=pixel_scale)
        final.drawImage(image=image)
        image_nonoise = copy.deepcopy(image.array)

        # signal to noise ratio
        snr = np.sqrt((image.array**2).sum()) / noise

        image.addNoise(galsim.PoissonNoise(rng, sky_level=0.0))
        # add background Gaussian noise
        image.addNoise(galsim.GaussianNoise(rng, sigma=noise))

        # Optionally: generate a PSF image
        # psf_image = galsim.ImageF(image_size, image_size, scale=pixel_scale)
        # psf.drawImage(image=psf_image)

        # After generating the data, preserve only that with SNR [10, 100]
        if 10 <= snr <= 100:
            break

    data["img"][i] = image.array  # final noised image
    data["img_nonoise"][i] = image_nonoise  # noiseless image
    # Optionally: save the PSF image
    # data['psf_img'][i] = psf_image.array
    data["gal_flux"][i] = gal_flux
    data["bulge_re"][i] = bulge_re
    data["bulge_n"][i] = bulge_n
    data["gal_q"][i] = gal_q
    data["gal_beta"][i] = gal_beta
    data["psf_r"][i] = psf_re
    data["snr"][i] = snr
    data["sigma"][i] = noise
    data["g_1"][i] = g_1
    data["g_2"][i] = g_2
Example #10
def main(argv):
    """
    Make a fits image cube using parameters from an input catalog
      - The number of images in the cube matches the number of rows in the catalog.
      - Each image size is computed automatically by GalSim based on the Nyquist size.
      - Only galaxies.  No stars.
      - PSF is Moffat
      - Each galaxy is bulge plus disk: deVaucouleurs + Exponential.
      - A fraction of the disk flux is placed into point sources, which can model
        knots of star formation.
      - The catalog's columns are:
         0 PSF beta (Moffat exponent)
         1 PSF FWHM
         2 PSF e1
         3 PSF e2
         4 PSF trunc
         5 Disc half-light-radius
         6 Disc e1
         7 Disc e2
         8 Bulge half-light-radius
         9 Bulge e1
        10 Bulge e2
        11 Galaxy dx (the two components have same center)
        12 Galaxy dy
      - Applied shear is the same for each galaxy
      - Noise is Poisson using a nominal sky value of 1.e6
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo4")

    # Define some parameters we'll use below and make directories if needed.
    cat_file_name = os.path.join('input', 'galsim_default_input.asc')
    if not os.path.isdir('output'):
        os.mkdir('output')
    multi_file_name = os.path.join('output', 'multi.fits')

    random_seed = 8241573
    sky_level = 1.e6  # ADU / arcsec^2
    pixel_scale = 1.0  # arcsec / pixel  (size units in input catalog are pixels)
    gal_flux = 1.e6  # arbitrary choice, makes nice (not too) noisy images
    gal_g1 = -0.009  #
    gal_g2 = 0.011  #

    # the fraction of flux in each component
    # 40% is in the bulge, 60% in a disk.  70% of that disk light is placed
    # into point sources distributed as a random walk

    bulge_frac = 0.4
    disk_frac = 0.6
    knot_frac = 0.42
    smooth_disk_frac = 0.18

    # number of knots of star formation.  To simulate a nice irregular galaxy (one where
    # all the flux is in knots) we find ~100 is the minimum number needed, but we will
    # just use 10 here to make the demo run fast.

    n_knots = 10

    xsize = 64  # pixels
    ysize = 64  # pixels

    logger.info('Starting demo script 4 using:')
    logger.info('    - parameters taken from catalog %r', cat_file_name)
    logger.info('    - Moffat PSF (parameters from catalog)')
    logger.info('    - pixel scale = %.2f', pixel_scale)
    logger.info('    - Bulge + Disc galaxies (parameters from catalog)')
    logger.info('    - %d point sources, distributed as a random walk', n_knots)
    logger.info('    - Applied gravitational shear = (%.3f,%.3f)', gal_g1,
                gal_g2)
    logger.info('    - Poisson noise (sky level = %.1e).', sky_level)

    # Read in the input catalog
    cat = galsim.Catalog(cat_file_name)

    # save a list of the galaxy images in the "images" list variable:
    images = []
    for k in range(cat.nobjects):
        # Initialize the (pseudo-)random number generator that we will be using below.
        # Use a different random seed for each object to get different noise realizations.
        # Using sequential random seeds here is safer than it sounds.  We use Mersenne Twister
        # random number generators that are designed to be used with this kind of seeding.
        # However, to be extra safe, we actually initialize one random number generator with this
        # seed, generate and throw away two random values with that, and then use the next value
        # to seed a completely different Mersenne Twister RNG.  The result is that successive
        # RNGs created this way produce very independent random number streams.
        rng = galsim.BaseDeviate(random_seed + k + 1)

        # Take the Moffat beta from the first column (called 0) of the input catalog:
        # Note: cat.get(k,col) returns a string.  To get the value as a float, use either
        #       cat.getFloat(k,col) or float(cat.get(k,col))
        beta = cat.getFloat(k, 0)
        # A Moffat's size may be either scale_radius, fwhm, or half_light_radius.
        # Here we use fwhm, taking from the catalog as well.
        fwhm = cat.getFloat(k, 1)
        # A Moffat profile may be truncated if desired
        # The units for this are expected to be arcsec (or specifically -- whatever units
        # you are using for all the size values as defined by the pixel_scale).
        trunc = cat.getFloat(k, 4)
        # Note: You may omit the flux, since the default is flux=1.
        psf = galsim.Moffat(beta=beta, fwhm=fwhm, trunc=trunc)

        # Take the (e1, e2) shape parameters from the catalog as well.
        psf = psf.shear(e1=cat.getFloat(k, 2), e2=cat.getFloat(k, 3))

        # Galaxy is a bulge + disk(+knots) with parameters taken from the catalog:

        # put some fraction of the disk light into knots of star formation

        disk_hlr = cat.getFloat(k, 5)
        disk_e1 = cat.getFloat(k, 6)
        disk_e2 = cat.getFloat(k, 7)
        bulge_hlr = cat.getFloat(k, 8)
        bulge_e1 = cat.getFloat(k, 9)
        bulge_e2 = cat.getFloat(k, 10)

        smooth_disk = galsim.Exponential(flux=smooth_disk_frac,
                                         half_light_radius=disk_hlr)

        knots = galsim.RandomKnots(n_knots,
                                   half_light_radius=disk_hlr,
                                   flux=knot_frac,
                                   rng=rng)

        disk = galsim.Add([smooth_disk, knots])
        disk = disk.shear(e1=disk_e1, e2=disk_e2)

        # the rest of the light goes into the bulge
        bulge = galsim.DeVaucouleurs(flux=bulge_frac,
                                     half_light_radius=bulge_hlr)
        bulge = bulge.shear(e1=bulge_e1, e2=bulge_e2)

        # The flux of an Add object is the sum of the component fluxes.
        # Note that in demo3.py, a similar addition was performed by the binary operator "+".
        gal = galsim.Add([disk, bulge])

        # This flux may be overridden by withFlux.  The relative fluxes of the components
        # remain the same, but the total flux is set to gal_flux.
        gal = gal.withFlux(gal_flux)
        gal = gal.shear(g1=gal_g1, g2=gal_g2)

        # The center of the object is normally placed at the center of the postage stamp image.
        # You can change that with shift:
        gal = gal.shift(dx=cat.getFloat(k, 11), dy=cat.getFloat(k, 12))

        final = galsim.Convolve([psf, gal])

        # Draw the profile
        image = galsim.ImageF(xsize, ysize)
        final.drawImage(image, scale=pixel_scale)

        # Add Poisson noise to the image:
        image.addNoise(galsim.PoissonNoise(rng, sky_level * pixel_scale**2))

        logger.info('Drew image for object at row %d in the input catalog', k)

        # Add the image to our list of images
        images.append(image)

    # Now write the images to a multi-extension fits file.  Each image will be in its own HDU.
    galsim.fits.writeMulti(images, multi_file_name)
    logger.info('Images written to multi-extension fits file %r',
                multi_file_name)
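
As a quick check (not part of the demo), the cube written above can be read back with galsim.fits.readMulti, which returns one galsim.Image per HDU:

# Read the image cube back in; assumes the file written above exists.
import galsim
images = galsim.fits.readMulti('output/multi.fits')
print('Read %d postage stamps of %d x %d pixels' % (
    len(images), images[0].array.shape[1], images[0].array.shape[0]))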
Example #11
import numpy as np
import galsim

# The reference data for this test was generated with this script, using the version of the code on
# branch #304 at commit df4b15e.

# random seed, etc.
outfile = 'shearfield_reference.dat'
rng = galsim.BaseDeviate(14136)

# make grid params
n = 10
dx = 1.

# define power spectrum
ps = galsim.lensing.PowerSpectrum(e_power_function="k**0.5",
                                  b_power_function="k")
# get shears and convergences
g1, g2, kappa = ps.buildGrid(grid_spacing=dx,
                             ngrid=n,
                             rng=rng,
                             get_convergence=True)

# write to file
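# The snippet ends at the comment above.  One plausible completion -- an assumption about
# the reference-data format, not the original code -- is to flatten the grids and save
# them as whitespace-separated columns:
data = np.column_stack([g1.flatten(), g2.flatten(), kappa.flatten()])
np.savetxt(outfile, data, header='g1 g2 kappa')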
Example #12
def main(argv):
    """
    A little bit more sophisticated, but still pretty basic:
      - Use a sheared, exponential profile for the galaxy.
      - Convolve it by a circular Moffat PSF.
      - Add Poisson noise to the image.
    """
    # In non-script code, use getLogger(__name__) at module scope instead.    
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    logger = logging.getLogger("demo2") 

    gal_flux = 1.e5    # counts
    gal_r0 = 2.7       # arcsec
    g1 = 0.1           #
    g2 = 0.2           #
    psf_beta = 5       #
    psf_re = 1.0       # arcsec
    pixel_scale = 0.2  # arcsec / pixel
    sky_level = 2.5e3  # counts / arcsec^2

    # This time use a particular seed, so the image is deterministic.
    # This is the same seed that is used in demo2.yaml, which means the images produced
    # by the two methods will be precisely identical.
    random_seed = 1534225


    logger.info('Starting demo script 2 using:')
    logger.info('    - sheared (%.2f,%.2f) exponential galaxy (flux = %.1e, scale radius = %.2f),',
                g1, g2, gal_flux, gal_r0)
    logger.info('    - circular Moffat PSF (beta = %.1f, re = %.2f),', psf_beta, psf_re)
    logger.info('    - pixel scale = %.2f,', pixel_scale)
    logger.info('    - Poisson noise (sky level = %.1e).', sky_level)

    # Initialize the (pseudo-)random number generator that we will be using below.
    # For a technical reason that will be explained later (demo9.py), we add 1 to the 
    # given random seed here.
    rng = galsim.BaseDeviate(random_seed+1)

    # Define the galaxy profile.
    gal = galsim.Exponential(flux=gal_flux, scale_radius=gal_r0)

    # Shear the galaxy by some value.
    # There are quite a few ways you can use to specify a shape.
    # q, beta      Axis ratio and position angle: q = b/a, 0 < q < 1
    # e, beta      Ellipticity and position angle: |e| = (1-q^2)/(1+q^2)
    # g, beta      ("Reduced") Shear and position angle: |g| = (1-q)/(1+q)
    # eta, beta    Conformal shear and position angle: eta = ln(1/q)
    # e1,e2        Ellipticity components: e1 = e cos(2 beta), e2 = e sin(2 beta)
    # g1,g2        ("Reduced") shear components: g1 = g cos(2 beta), g2 = g sin(2 beta)
    # eta1,eta2    Conformal shear components: eta1 = eta cos(2 beta), eta2 = eta sin(2 beta)
    gal = gal.shear(g1=g1, g2=g2)
    logger.debug('Made galaxy profile')

    # Define the PSF profile.
    psf = galsim.Moffat(beta=psf_beta, flux=1., half_light_radius=psf_re)
    logger.debug('Made PSF profile')

    # Final profile is the convolution of these.
    final = galsim.Convolve([gal, psf])
    logger.debug('Convolved components into final profile')

    # Draw the image with a particular pixel scale.
    image = final.drawImage(scale=pixel_scale)
    # The "effective PSF" is the PSF as drawn on an image, which includes the convolution
    # by the pixel response.  We label it epsf here.
    image_epsf = psf.drawImage(scale=pixel_scale)
    logger.debug('Made image of the profile')

    # To get Poisson noise on the image, we will use a class called PoissonNoise.
    # However, we want the noise to correspond to what you would get with a significant
    # flux from the sky.  This is done by telling PoissonNoise to add noise from a
    # sky level in addition to the counts currently in the image.
    #
    # One wrinkle here is that the PoissonNoise class needs the sky level in each pixel,
    # while we have a sky_level in counts per arcsec^2.  So we need to convert:
    sky_level_pixel = sky_level * pixel_scale**2
    noise = galsim.PoissonNoise(rng, sky_level=sky_level_pixel)
    image.addNoise(noise)
    logger.debug('Added Poisson noise')

    # Write the image to a file.
    if not os.path.isdir('output'):
        os.mkdir('output')
    file_name = os.path.join('output', 'demo2.fits')
    file_name_epsf = os.path.join('output','demo2_epsf.fits')
    image.write(file_name)
    image_epsf.write(file_name_epsf)
    logger.info('Wrote image to %r',file_name)
    logger.info('Wrote effective PSF image to %r',file_name_epsf)

    results = galsim.hsm.EstimateShear(image, image_epsf)

    logger.info('HSM reports that the image has observed shape and size:')
    logger.info('    e1 = %.3f, e2 = %.3f, sigma = %.3f (pixels)', results.observed_shape.e1,
                results.observed_shape.e2, results.moments_sigma)
    logger.info('When carrying out Regaussianization PSF correction, HSM reports distortions')
    logger.info('    e1, e2 = %.3f, %.3f', 
                results.corrected_e1, results.corrected_e2)
    logger.info('Expected values in the limit that noise and non-Gaussianity are negligible:')
    exp_shear = galsim.Shear(g1=g1, g2=g2)
    logger.info('    e1, e2 = %.3f, %.3f', exp_shear.e1, exp_shear.e2)
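
The comment block above lists several equivalent ways to specify a shape.  A small check (not part of demo2) that the (g1, g2) and (q, beta) parameterizations describe the same galsim.Shear:

import galsim
s = galsim.Shear(g1=0.1, g2=0.2)
q = (1. - s.g) / (1. + s.g)          # |g| = (1-q)/(1+q)  =>  q = (1-|g|)/(1+|g|)
s2 = galsim.Shear(q=q, beta=s.beta)  # same axis ratio and position angle
print('g1 = %.4f, g2 = %.4f' % (s2.g1, s2.g2))   # ~0.1000, ~0.2000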
Example #13
    def __init__(self,
                 image,
                 x_interpolant=None,
                 k_interpolant=None,
                 normalization='flux',
                 scale=None,
                 wcs=None,
                 flux=None,
                 pad_factor=4.,
                 noise_pad_size=0,
                 noise_pad=0.,
                 rng=None,
                 pad_image=None,
                 calculate_stepk=True,
                 calculate_maxk=True,
                 use_cache=True,
                 use_true_center=True,
                 offset=None,
                 gsparams=None,
                 dx=None,
                 _force_stepk=0.,
                 _force_maxk=0.,
                 _serialize_stepk=None,
                 _serialize_maxk=None,
                 hdu=None):

        # Check for obsolete dx parameter
        if dx is not None and scale is None:
            from galsim.deprecated import depr
            depr('dx', 1.1, 'scale')
            scale = dx

        # If the "image" is not actually an image, try to read the image as a file.
        if not isinstance(image, galsim.Image):
            image = galsim.fits.read(image, hdu=hdu)

        # make sure image is really an image and has a float type
        if image.dtype != np.float32 and image.dtype != np.float64:
            raise ValueError(
                "Supplied image does not have dtype of float32 or float64!")

        # it must have well-defined bounds, otherwise seg fault in SBInterpolatedImage constructor
        if not image.bounds.isDefined():
            raise ValueError("Supplied image does not have bounds defined!")

        # check what normalization was specified for the image: is it an image of surface
        # brightness, or flux?
        if not normalization.lower() in ("flux", "f", "surface brightness",
                                         "sb"):
            raise ValueError((
                "Invalid normalization requested: '%s'. Expecting one of 'flux', "
                + "'f', 'surface brightness', or 'sb'.") % normalization)

        # set up the interpolants if none was provided by user, or check that the user-provided ones
        # are of a valid type
        if x_interpolant is None:
            self.x_interpolant = galsim.Quintic(tol=1e-4)
        else:
            self.x_interpolant = galsim.utilities.convert_interpolant(
                x_interpolant)
        if k_interpolant is None:
            self.k_interpolant = galsim.Quintic(tol=1e-4)
        else:
            self.k_interpolant = galsim.utilities.convert_interpolant(
                k_interpolant)

        # Store the image as an attribute and make sure we don't change the original image
        # in anything we do here.  (e.g. set scale, etc.)
        self.image = image.view()
        self.use_cache = use_cache

        # Set the wcs if necessary
        if scale is not None:
            if wcs is not None:
                raise TypeError(
                    "Cannot provide both scale and wcs to InterpolatedImage")
            self.image.wcs = galsim.PixelScale(scale)
        elif wcs is not None:
            if not isinstance(wcs, galsim.BaseWCS):
                raise TypeError(
                    "wcs parameter is not a galsim.BaseWCS instance")
            self.image.wcs = wcs
        elif self.image.wcs is None:
            raise ValueError(
                "No information given with Image or keywords about pixel scale!"
            )

        # Set up a BaseDeviate if one was not provided, or check that the user-provided one
        # is of a valid type.
        if rng is None:
            if noise_pad: rng = galsim.BaseDeviate()
        elif not isinstance(rng, galsim.BaseDeviate):
            raise TypeError(
                "rng provided to InterpolatedImage constructor is not a BaseDeviate"
            )

        # Check that given pad_image is valid:
        if pad_image:
            if isinstance(pad_image, str):
                pad_image = galsim.fits.read(pad_image)
            if not isinstance(pad_image, galsim.Image):
                raise ValueError("Supplied pad_image is not an Image!")
            if pad_image.dtype != np.float32 and pad_image.dtype != np.float64:
                raise ValueError(
                    "Supplied pad_image is not one of the allowed types!")

        # Check that the given noise_pad is valid:
        try:
            noise_pad = float(noise_pad)
        except (TypeError, ValueError):
            pass
        if isinstance(noise_pad, float):
            if noise_pad < 0.:
                raise ValueError("Noise variance cannot be negative!")
        # There are other options for noise_pad, the validity of which will be checked in
        # the helper function self.buildNoisePadImage()

        # This will be passed to SBInterpolatedImage, so make sure it is the right type.
        pad_factor = float(pad_factor)
        if pad_factor <= 0.:
            raise ValueError("Invalid pad_factor <= 0 in InterpolatedImage")

        if use_true_center:
            im_cen = self.image.bounds.trueCenter()
        else:
            im_cen = self.image.bounds.center()

        local_wcs = self.image.wcs.local(image_pos=im_cen)
        self.min_scale = local_wcs.minLinearScale()
        self.max_scale = local_wcs.maxLinearScale()

        # Make sure the image fits in the noise pad image:
        if noise_pad_size:
            import math
            # Convert from arcsec to pixels according to the local wcs.
            # Use the minimum scale, since we want to make sure noise_pad_size is
            # as large as we need in any direction.
            noise_pad_size = int(math.ceil(noise_pad_size / self.min_scale))
            # Round up to a good size for doing FFTs
            noise_pad_size = galsim._galsim.goodFFTSize(noise_pad_size)
            if noise_pad_size <= min(self.image.array.shape):
                # Don't need any noise padding in this case.
                noise_pad_size = 0
            elif noise_pad_size < max(self.image.array.shape):
                noise_pad_size = max(self.image.array.shape)

        # See if we need to pad out the image with either a pad_image or noise_pad
        if noise_pad_size:
            new_pad_image = self.buildNoisePadImage(noise_pad_size, noise_pad,
                                                    rng)

            if pad_image:
                # if both noise_pad and pad_image are set, then we need to build up a larger
                # pad_image and place the given pad_image in the center.

                # We will change the bounds here, so make a new view to avoid modifying the
                # input pad_image.
                pad_image = pad_image.view()
                pad_image.setCenter(0, 0)
                new_pad_image.setCenter(0, 0)
                if new_pad_image.bounds.includes(pad_image.bounds):
                    new_pad_image[pad_image.bounds] = pad_image
                else:
                    new_pad_image = pad_image

            pad_image = new_pad_image

        elif pad_image:
            # Just make sure pad_image is the right type
            pad_image = galsim.Image(pad_image, dtype=image.dtype)

        # Now place the given image in the center of the padding image:
        if pad_image:
            pad_image.setCenter(0, 0)
            self.image.setCenter(0, 0)
            if pad_image.bounds.includes(self.image.bounds):
                pad_image[self.image.bounds] = self.image
                pad_image.wcs = self.image.wcs
            else:
                # If padding was smaller than original image, just use the original image.
                pad_image = self.image
        else:
            pad_image = self.image

        # GalSim cannot automatically know what stepK and maxK are appropriate for the
        # input image.  So it is usually worth it to do a manual calculation (below).
        #
        # However, there is also a hidden option to force it to use specific values of stepK and
        # maxK (caveat user!).  The values of _force_stepk and _force_maxk should be provided in
        # terms of physical scale, e.g., for images that have a scale length of 0.1 arcsec, the
        # stepK and maxK should be provided in units of 1/arcsec.  Then we convert to the 1/pixel
        # units required by the C++ layer below.  Also note that profile recentering for even-sized
        # images (see the ._fix_center step below) leads to automatic reduction of stepK slightly
        # below what is provided here, while maxK is preserved.
        if _force_stepk > 0.:
            calculate_stepk = False
            _force_stepk *= self.min_scale
        if _force_maxk > 0.:
            calculate_maxk = False
            _force_maxk *= self.max_scale

        # Due to floating point rounding errors, for pickling it's necessary to store the exact
        # _force_maxk and _force_stepk used to create the SBInterpolatedImage, as opposed to the
        # values before being scaled by self.min_scale and self.max_scale.  So we do that via the
        # _serialize_maxk and _serialize_stepk hidden kwargs, which should only get used during
        # pickling.
        if _serialize_stepk is not None:
            calculate_stepk = False
            _force_stepk = _serialize_stepk
        if _serialize_maxk is not None:
            calculate_maxk = False
            _force_maxk = _serialize_maxk

        # Save these values for pickling
        self._pad_image = pad_image
        self._pad_factor = pad_factor
        self._gsparams = gsparams

        # Make the SBInterpolatedImage out of the image.
        sbii = galsim._galsim.SBInterpolatedImage(pad_image.image,
                                                  self.x_interpolant,
                                                  self.k_interpolant,
                                                  pad_factor, _force_stepk,
                                                  _force_maxk, gsparams)

        # I think the only things that will mess up if getFlux() == 0 are the
        # calculateStepK and calculateMaxK functions, and rescaling the flux to some value.
        if (calculate_stepk or calculate_maxk
                or flux is not None) and sbii.getFlux() == 0.:
            raise RuntimeError(
                "This input image has zero total flux. "
                "It does not define a valid surface brightness profile.")

        if calculate_stepk:
            if calculate_stepk is True:
                sbii.calculateStepK()
            else:
                # If not a bool, then value is max_stepk
                sbii.calculateStepK(max_stepk=calculate_stepk)
        if calculate_maxk:
            if calculate_maxk is True:
                sbii.calculateMaxK()
            else:
                # If not a bool, then value is max_maxk
                sbii.calculateMaxK(max_maxk=calculate_maxk)

        # If the user specified a surface brightness normalization for the input Image, then
        # need to rescale flux by the pixel area to get proper normalization.
        if flux is None and normalization.lower() in [
                'surface brightness', 'sb'
        ]:
            flux = sbii.getFlux() * local_wcs.pixelArea()

        # Save this intermediate profile
        self._sbii = sbii
        self._stepk = sbii.stepK() / self.min_scale
        self._maxk = sbii.maxK() / self.max_scale
        self._flux = flux

        self._serialize_stepk = sbii.stepK()
        self._serialize_maxk = sbii.maxK()

        prof = GSObject(sbii)

        # Make sure offset is a PositionD
        offset = prof._parse_offset(offset)

        # Apply the offset, and possibly fix the centering for even-sized images
        # Note reverse=True, since we want to fix the center in the opposite sense of what the
        # draw function does.
        prof = prof._fix_center(self.image.array.shape,
                                offset,
                                use_true_center,
                                reverse=True)

        # Save the offset we will need when pickling.
        if hasattr(prof, 'offset'):
            self._offset = -prof.offset
        else:
            self._offset = None

        # Bring the profile from image coordinates into world coordinates
        prof = local_wcs.toWorld(prof)

        # If the user specified a flux, then set to that flux value.
        if flux is not None:
            prof = prof.withFlux(float(flux))

        # Now, in order for these to pickle correctly if they are the "original" object in a
        # Transform object, we need to hide the current transformation.  An easy way to do that
        # is to hide the SBProfile in an SBAdd object.
        sbp = galsim._galsim.SBAdd([prof.SBProfile])

        GSObject.__init__(self, sbp)
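
A minimal usage sketch for the constructor above; the file name, pixel scale, and flux are placeholders, not values from the source:

import galsim
img = galsim.fits.read('some_stamp.fits')           # hypothetical input postage stamp
profile = galsim.InterpolatedImage(img, scale=0.2,   # treat pixels as 0.2 arcsec
                                    flux=1.e4)       # rescale the total flux
stamp = profile.drawImage(nx=64, ny=64, scale=0.2)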
Example #14
def compare_dft_vs_photon_object(gsobject, psf_object=None, rng=None, pixel_scale=1., size=512,
                                 wmult=4., abs_tol_ellip=1.e-5, abs_tol_size=1.e-5,
                                 n_trials_per_iter=32, n_photons_per_trial=1e7, moments=True,
                                 hsm=False):
    """Take an input object (with optional PSF) and render it in two ways comparing results at high
    precision.

    Using both photon shooting (via drawShoot()) and Discrete Fourier Transform (via draw()) to
    render images, we compare the numerical values of adaptive moments estimates of size and
    ellipticity to check consistency.

    This function takes actual GSObjects as its input, but because these are not yet picklable this
    means that the internals cannot be parallelized using the Python multiprocessing module.  For
    a parallelized function, that instead uses a config dictionary to specify the test objects, see
    the function compare_dft_vs_photon_config() in this module.

    We generate successive sets of `n_trials_per_iter` photon-shot images, using 
    `n_photons_per_trial` photons in each image, until the standard error on the mean absolute size
    and ellipticity drop below `abs_tol_size` and `abs_tol_ellip`.  We then output a
    ComparisonShapeData object which stores the results.

    Note that `n_photons_per_trial` should be large (>~ 1e6) to ensure that any biases detected
    between the photon shooting and DFT-drawn images are due to numerical differences rather than
    biases on adaptive moments due to noise itself, a generic feature in this work.  This can be
    verified with a convergence test.

    @param gsobject         The GSObject for which this test is to be performed (prior
                            to PSF convolution if a PSF is also supplied via `psf_object`).
                            Note that this function will automatically handle integration over
                            a Pixel of width `pixel_scale`, so a Pixel should not be included in
                            the supplied `gsobject` (unless you really mean to include it, which
                            will be very rare in normal usage).
    @param psf_object       Optional additional PSF for tests of convolved objects, also a
                            GSObject.  Note that this function will automatically handle
                            integration over a Pixel of width `pixel_scale`, so this should not
                            be included in the supplied `psf_object`.  [default: None]
    @param rng              A BaseDeviate or derived deviate class instance to provide
                            the pseudo random numbers for the photon shooting.  [default: None]
    @param pixel_scale      The pixel scale to use in the test images. [default: 1]
    @param size             The size of the images in the rendering tests - all test images
                            are currently square. [default: 512]
    @param wmult            The `wmult` parameter used in .draw() (see the GSObject .draw()
                            method docs via `help(galsim.GSObject.draw)` for more details).
                            [default: 4]
    @param abs_tol_ellip    The test will keep iterating, adding ever greater numbers of
                            trials, until estimates of the 1-sigma standard error on mean 
                            ellipticity moments from photon-shot images are smaller than this
                            param value. [default: 1.e-5]
    @param abs_tol_size     The test will keep iterating, adding ever greater numbers of
                            trials, until estimates of the 1-sigma standard error on mean 
                            size moments from photon-shot images are smaller than this param
                            value. [default: 1.e-5]
    @param n_trials_per_iter  Number of trial images used to estimate (or successively
                            re-estimate) the standard error on the delta quantities above for
                            each iteration of the tests. [default: 32]
    @param n_photons_per_trial  Number of photons shot in drawShoot() for each trial.  This should
                            be large enough that any noise bias (a.k.a. noise rectification
                            bias) on moments estimates is small. [default: 1e7]
    @param moments          Set True to compare rendered images using adaptive moments
                            estimates of the observed shapes and sizes. [default: True]
    @param hsm              Should the rendered images be compared using HSM shear estimates?
                            (i.e. including a PSF correction for shears) [not implemented]
    """
    import sys
    import logging
    import time     

    # Some sanity checks on inputs
    if hsm is True:
        if psf_object is None:
            raise ValueError('An input psf_object is required for HSM shear estimate testing.')
        else:
            # Raise an apologetic exception about the HSM not yet being implemented!
            raise NotImplementedError('Sorry, HSM tests not yet implemented!')

    if rng is None:
        rng = galsim.BaseDeviate()

    # Then define some convenience functions for handling lists and multiple trial operations
    def _mean(array_like):
        return np.mean(np.asarray(array_like))

    def _stderr(array_like):
        return np.std(np.asarray(array_like)) / np.sqrt(len(array_like))

    def _shoot_trials_single(gsobject, ntrials, dx, imsize, rng, n_photons):
        """Convenience function to run `ntrials` and collect the results, uses only a single core.

        Uses a Python for loop but this is very unlikely to be a rate determining factor provided
        n_photons is suitably large (>1e6).
        """
        g1obslist = []
        g2obslist = []
        sigmalist = []
        im = galsim.ImageF(imsize, imsize)
        for i in xrange(ntrials):
            gsobject.drawShoot(im, dx=dx, n_photons=n_photons, rng=rng)
            res = im.FindAdaptiveMom()
            g1obslist.append(res.observed_shape.g1)
            g2obslist.append(res.observed_shape.g2)
            sigmalist.append(res.moments_sigma)
            logging.debug('Completed '+str(i + 1)+'/'+str(ntrials)+' trials in this iteration')
            #im.write('check_shoot_trial'+str(i + 1)) CHECK IMAGE
        return g1obslist, g2obslist, sigmalist

    # OK, that's the end of the helper functions-within-helper functions, back to the main unit

    # Start the timer
    t1 = time.time()

    # If a PSF is supplied, do the convolution, otherwise just use the gal_object
    if psf_object is None:
        logging.info('No psf_object supplied, running tests using input gsobject only')
        test_object = gsobject
    else:
        logging.info('Generating test_object by convolving gsobject with input psf_object')
        test_object = galsim.Convolve([gsobject, psf_object])

    # Draw the FFT image, only needs to be done once
    # For the FFT drawn image we need to include the galsim.Pixel, for the photon shooting we don't!
    test_object_pixelized = galsim.Convolve([test_object, galsim.Pixel(pixel_scale)])
    im_draw = galsim.ImageF(size, size)
    test_object_pixelized.draw(im_draw, dx=pixel_scale, wmult=wmult)
    res_draw = im_draw.FindAdaptiveMom()
    sigma_draw = res_draw.moments_sigma
    g1obs_draw = res_draw.observed_shape.g1
    g2obs_draw = res_draw.observed_shape.g2

    # Setup storage lists for the trial shooting results
    sigma_shoot_list = []
    g1obs_shoot_list = []
    g2obs_shoot_list = [] 
    sigmaerr = 666. # Slightly kludgy but will not accidentally fail the first `while` condition
    g1obserr = 666.
    g2obserr = 666.

    # Initialize iteration counter
    itercount = 0

    # Then begin while loop, farming out sets of n_trials_per_iter trials until we get the
    # statistical accuracy we require 
    while (g1obserr > abs_tol_ellip) or (g2obserr > abs_tol_ellip) or (sigmaerr > abs_tol_size):

        # Run the trials using helper function
        g1obs_list_tmp, g2obs_list_tmp, sigma_list_tmp = _shoot_trials_single(
            test_object, n_trials_per_iter, pixel_scale, size, rng, n_photons_per_trial)

        # Collect results and calculate new standard error
        g1obs_shoot_list.extend(g1obs_list_tmp)
        g2obs_shoot_list.extend(g2obs_list_tmp)
        sigma_shoot_list.extend(sigma_list_tmp)
        g1obserr = _stderr(g1obs_shoot_list)
        g2obserr = _stderr(g2obs_shoot_list)
        sigmaerr = _stderr(sigma_shoot_list)
        itercount += 1
        sys.stdout.write(".") # This doesn't add a carriage return at the end of the line, nice!
        logging.debug('Completed '+str(itercount)+' iterations')
        logging.debug(
            '(g1obserr, g2obserr, sigmaerr) = '+str(g1obserr)+', '+str(g2obserr)+', '+str(sigmaerr))

    sys.stdout.write("\n")

    # Take the runtime and collate results into a ComparisonShapeData
    runtime = time.time() - t1
    results = ComparisonShapeData(
        g1obs_draw, g2obs_draw, sigma_draw,
        _mean(g1obs_shoot_list), _mean(g2obs_shoot_list), _mean(sigma_shoot_list),
        g1obserr, g2obserr, sigmaerr, size, pixel_scale, wmult, itercount, n_trials_per_iter,
        n_photons_per_trial, runtime, gsobject=gsobject, psf_object=psf_object)

    logging.info('\n'+str(results))
    return results
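
A usage sketch (not from this module, and assuming its other module-level definitions such as ComparisonShapeData and np are available): the tolerances are loosened here so the iteration terminates quickly, whereas the defaults above target much higher precision.

import galsim
gal = galsim.Exponential(half_light_radius=1.0).shear(g1=0.03, g2=0.0)
psf = galsim.Moffat(beta=3.0, fwhm=0.8)
results = compare_dft_vs_photon_object(
    gal, psf_object=psf, rng=galsim.BaseDeviate(1234),
    pixel_scale=0.2, size=64,
    abs_tol_ellip=2.e-4, abs_tol_size=2.e-4,
    n_trials_per_iter=8, n_photons_per_trial=1e5)
print(results)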
Example #15
def test_PSE_basic():
    """Basic test of power spectrum estimation.
    """

    # Here are some parameters that define array sizes and other such things.
    array_size = 300
    e_tolerance = 0.10     # 10% error allowed because of finite grid effects, noise fluctuations,
                           # and other things.  This unit test is just for a basic sanity test.
    b_tolerance = 0.15     # B-mode is slightly less accurate.
    zero_tolerance = 0.03  # For power that should be zero

    n_ell = 8
    grid_spacing = 0.1 # degrees
    ps_file = os.path.join(datapath, 'cosmo-fid.zmed1.00.out')
    rand_seed = 2718

    # Begin by setting up the PowerSpectrum and generating shears.
    tab = galsim.LookupTable.from_file(ps_file)
    ps = galsim.PowerSpectrum(tab, units=galsim.radians)
    g1, g2 = ps.buildGrid(grid_spacing=grid_spacing, ngrid=array_size, units=galsim.degrees,
                          rng=galsim.BaseDeviate(rand_seed))

    # Then initialize the PSE object.
    pse = galsim.pse.PowerSpectrumEstimator(N=array_size,
                                            sky_size_deg=array_size*grid_spacing,
                                            nbin=n_ell)

    do_pickle(pse)

    # Estimate the power spectrum using the PSE, without weighting.
    ell, P_e1, P_b1, P_eb1 = pse.estimate(g1, g2)

    # To check: P_E is right (to within the desired tolerance); P_B and P_EB are consistent
    # with zero (within zero_tolerance relative to P_E).
    P_theory = np.zeros_like(ell)
    for ind in range(len(ell)):
        P_theory[ind] = tab(ell[ind])
    # Note: we don't check the first element because at low ell the tests can fail more
    # spectacularly for reasons that are well understood.
    np.testing.assert_allclose(P_e1[1:], P_theory[1:], rtol=e_tolerance,
                               err_msg='PSE returned wrong E power')
    np.testing.assert_allclose(P_b1[1:]/P_theory[1:], 0., atol=zero_tolerance,
                               err_msg='PSE found B power')
    np.testing.assert_allclose(P_eb1[1:]/P_theory[1:], 0., atol=zero_tolerance,
                               err_msg='PSE found EB cross-power')

    # Test theory_func
    ell, P_e1, P_b1, P_eb1, t = pse.estimate(g1, g2, theory_func=tab)
    # This isn't super accurate.  I think just because of binning.  But I'm not sure.
    np.testing.assert_allclose(t, P_theory, rtol=0.3,
                               err_msg='PSE returned wrong theory binning')

    # Also check the case where P_e=P_b.
    ps = galsim.PowerSpectrum(tab, tab, units=galsim.radians)
    g1, g2 = ps.buildGrid(grid_spacing=grid_spacing, ngrid=array_size, units=galsim.degrees,
                          rng=galsim.BaseDeviate(rand_seed))
    ell, P_e2, P_b2, P_eb2 = pse.estimate(g1, g2)
    np.testing.assert_allclose(P_e2[1:], P_theory[1:], rtol=e_tolerance,
                               err_msg='PSE returned wrong E power')
    np.testing.assert_allclose(P_b2[1:], P_theory[1:], rtol=b_tolerance,
                               err_msg='PSE returned wrong B power')
    np.testing.assert_allclose(P_eb2[1:]/P_theory[1:], 0., atol=zero_tolerance,
                               err_msg='PSE found EB cross-power')

    # And check the case where P_b is nonzero and P_e is zero.
    ps = galsim.PowerSpectrum(e_power_function=None, b_power_function=tab,
                              units=galsim.radians)
    g1, g2 = ps.buildGrid(grid_spacing=grid_spacing, ngrid=array_size, units=galsim.degrees,
                          rng=galsim.BaseDeviate(rand_seed))
    ell, P_e3, P_b3, P_eb3 = pse.estimate(g1, g2)
    np.testing.assert_allclose(P_e3[1:]/P_theory[1:], 0., atol=zero_tolerance,
                               err_msg='PSE found E power when it should be zero')
    np.testing.assert_allclose(P_b3[1:], P_theory[1:], rtol=b_tolerance,
                               err_msg='PSE returned wrong B power')
    np.testing.assert_allclose(P_eb3[1:]/P_theory[1:], 0., atol=zero_tolerance,
                               err_msg='PSE found EB cross-power')

    assert_raises(ValueError, pse.estimate, g1[:3,:3], g2)
    assert_raises(ValueError, pse.estimate, g1[:3,:8], g2[:3,:8])
    assert_raises(ValueError, pse.estimate, g1[:8,:8], g2[:8,:8])
Example #16
def test_integer_shift_photon():
    """Test if shift works correctly for integer shifts using method=phot.
    """
    n_photons_low = 10
    seed = 10

    gal = galsim.Gaussian(sigma=test_sigma)
    psf = galsim.Airy(lam_over_diam=test_hlr)

    # shift galaxy only

    final = galsim.Convolve([gal, psf])
    img_center = galsim.ImageD(n_pix_x, n_pix_y)
    test_deviate = galsim.BaseDeviate(seed)
    final.drawImage(img_center,
                    scale=1,
                    rng=test_deviate,
                    n_photons=n_photons_low,
                    method='phot')

    gal = gal.shift(dx=int_shift_x, dy=int_shift_y)
    final = galsim.Convolve([gal, psf])
    img_shift = galsim.ImageD(n_pix_x, n_pix_y)
    test_deviate = galsim.BaseDeviate(seed)
    final.drawImage(img_shift,
                    scale=1,
                    rng=test_deviate,
                    n_photons=n_photons_low,
                    method='phot')

    sub_center = img_center.array[(n_pix_y - delta_sub) //
                                  2:(n_pix_y + delta_sub) // 2,
                                  (n_pix_x - delta_sub) //
                                  2:(n_pix_x + delta_sub) // 2]
    sub_shift = img_shift.array[(n_pix_y - delta_sub) // 2 +
                                int_shift_y:(n_pix_y + delta_sub) // 2 +
                                int_shift_y, (n_pix_x - delta_sub) // 2 +
                                int_shift_x:(n_pix_x + delta_sub) // 2 +
                                int_shift_x]

    np.testing.assert_array_almost_equal(
        sub_center,
        sub_shift,
        decimal=image_decimal_precise,
        err_msg=
        "Integer shift failed for FFT rendered Gaussian GSObject with shifted Galaxy only"
    )

    # shift PSF only

    gal = galsim.Gaussian(sigma=test_sigma)
    psf = psf.shift(dx=int_shift_x, dy=int_shift_y)
    final = galsim.Convolve([gal, psf])
    img_shift = galsim.ImageD(n_pix_x, n_pix_y)
    test_deviate = galsim.BaseDeviate(seed)
    final.drawImage(img_shift,
                    scale=1,
                    rng=test_deviate,
                    n_photons=n_photons_low,
                    method='phot')

    sub_center = img_center.array[(n_pix_y - delta_sub) //
                                  2:(n_pix_y + delta_sub) // 2,
                                  (n_pix_x - delta_sub) //
                                  2:(n_pix_x + delta_sub) // 2]
    sub_shift = img_shift.array[(n_pix_y - delta_sub) // 2 +
                                int_shift_y:(n_pix_y + delta_sub) // 2 +
                                int_shift_y, (n_pix_x - delta_sub) // 2 +
                                int_shift_x:(n_pix_x + delta_sub) // 2 +
                                int_shift_x]
    np.testing.assert_array_almost_equal(
        sub_center,
        sub_shift,
        decimal=image_decimal_precise,
        err_msg=
        "Integer shift failed for FFT rendered Gaussian GSObject with only PSF shifted "
    )
Example #17
def make_movie(args):
    """Actually make the movie of the atmosphere given command line arguments stored in `args`.
    """

    # Initiate some GalSim random number generators.
    rng = galsim.BaseDeviate(args.seed)
    u = galsim.UniformDeviate(rng)

    # The GalSim atmospheric simulation code describes turbulence in the 3D atmosphere as a series
    # of 2D turbulent screens.  The galsim.Atmosphere() helper function is useful for constructing
    # this screen list.

    # First, we estimate a weight for each screen, so that the turbulence is dominated by the lower
    # layers consistent with direct measurements.  The specific values we use are from SCIDAR
    # measurements on Cerro Pachon as part of the 1998 Gemini site selection process
    # (Ellerbroek 2002, JOSA Vol 19 No 9).

    Ellerbroek_alts = [0.0, 2.58, 5.16, 7.73, 12.89, 15.46]  # km
    Ellerbroek_weights = [0.652, 0.172, 0.055, 0.025, 0.074, 0.022]
    Ellerbroek_interp = galsim.LookupTable(Ellerbroek_alts, Ellerbroek_weights,
                                           interpolant='linear')

    # Use given number of uniformly spaced altitudes
    alts = np.max(Ellerbroek_alts)*np.arange(args.nlayers)/(args.nlayers-1)
    weights = Ellerbroek_interp(alts)  # interpolate the weights
    weights /= sum(weights)  # and renormalize

    # Each layer can have its own turbulence strength (roughly inversely proportional to the Fried
    # parameter r0), wind speed, wind direction, altitude, and even size and scale (though note that
    # the size of each screen is actually made infinite by "wrapping" the edges of the screen.)  The
    # galsim.Atmosphere helper function is useful for constructing this list, and requires lists of
    # parameters for the different layers.

    spd = []  # Wind speed in m/s
    dirn = [] # Wind direction in radians
    r0_500 = [] # Fried parameter in m at a wavelength of 500 nm.
    for i in range(args.nlayers):
        spd.append(u()*args.max_speed)  # Use a random speed between 0 and max_speed
        dirn.append(u()*360*galsim.degrees)  # And an isotropically distributed wind direction.
        # The turbulence strength of each layer is specified through its Fried parameter r0_500,
        # which can be thought of as the diameter of a telescope for which atmospheric turbulence
        # and unaberrated diffraction contribute equally to image resolution (at a wavelength of
        # 500nm).  The weights above are for the refractive index structure function (similar to a
        # variance or covariance), however, so we need to use an appropriate scaling relation to
        # distribute the input "net" Fried parameter into a Fried parameter for each layer.  For
        # Kolmogorov turbulence, this is r0_500 ~ (structure function)**(-3/5):
        r0_500.append(args.r0_500*weights[i]**(-3./5))
        print("Adding layer at altitude {:5.2f} km with velocity ({:5.2f}, {:5.2f}) m/s, "
              "and r0_500 {:5.3f} m."
              .format(alts[i], spd[i]*dirn[i].cos(), spd[i]*dirn[i].sin(), r0_500[i]))

    # Additionally, we set the screen size and scale.
    atm = galsim.Atmosphere(r0_500=r0_500, speed=spd, direction=dirn, altitude=alts, rng=rng,
                            screen_size=args.screen_size, screen_scale=args.screen_scale)
    # `atm` is now an instance of a galsim.PhaseScreenList object.

    # Place to store the cumulative PSF image if args.accumulate is set.
    psf_img_sum = galsim.ImageD(args.psf_nx, args.psf_nx, scale=args.psf_scale)

    # Field angle (angle on the sky wrt the telescope boresight) at which to compute the PSF.
    theta = (args.x*galsim.arcmin, args.y*galsim.arcmin)

    # Construct an Aperture object for computing the PSF.  The Aperture object describes the
    # illumination pattern of the telescope pupil, and chooses good sampling size and resolution
    # for representing this pattern as an array.
    aper = galsim.Aperture(diam=args.diam, lam=args.lam, obscuration=args.obscuration,
                           nstruts=args.nstruts, strut_thick=args.strut_thick,
                           strut_angle=args.strut_angle*galsim.degrees,
                           screen_list=atm, pad_factor=args.pad_factor,
                           oversampling=args.oversampling)

    # Code to setup the Matplotlib animation.
    metadata = dict(title='Wavefront Movie', artist='Matplotlib')
    writer = anim.FFMpegWriter(fps=15, bitrate=5000, metadata=metadata)

    # For the animation code, we essentially draw a single figure first, and then use various
    # `set_XYZ` methods to update each successive frame.
    fig = Figure(facecolor='k', figsize=(11, 6))
    FigureCanvasAgg(fig)

    # Axis for the PSF image on the left.
    psf_ax = fig.add_axes([0.08, 0.15, 0.35, 0.7])
    psf_ax.set_xlabel("Arcsec")
    psf_ax.set_ylabel("Arcsec")
    psf_im = psf_ax.imshow(np.ones((128, 128), dtype=np.float64), animated=True,
                           vmin=0.0, vmax=args.psf_vmax, cmap='hot',
                           extent=np.r_[-1, 1, -1, 1]*0.5*args.psf_nx*args.psf_scale)

    # Axis for the wavefront image on the right.
    wf_ax = fig.add_axes([0.51, 0.15, 0.35, 0.7])
    wf_ax.set_xlabel("Meters")
    wf_ax.set_ylabel("Meters")
    wf_im = wf_ax.imshow(np.ones((128, 128), dtype=np.float64), animated=True,
                         vmin=-args.wf_vmax, vmax=args.wf_vmax, cmap='YlGnBu',
                         extent=np.r_[-1, 1, -1, 1]*0.5*aper.pupil_plane_size)
    cbar_ax = fig.add_axes([0.88, 0.175, 0.03, 0.65])
    cbar_ax.set_ylabel("Radians")
    fig.colorbar(wf_im, cax=cbar_ax)

    # Overlay an alpha-mask on the wavefront image showing which parts are actually illuminated.
    ilum = np.ma.masked_greater(aper.illuminated, 0.5)
    wf_ax.imshow(ilum, alpha=0.4, extent=np.r_[-1, 1, -1, 1]*0.5*aper.pupil_plane_size)

    # Color items white to show up on black background
    for ax in [psf_ax, wf_ax, cbar_ax]:
        for _, spine in ax.spines.items():
            spine.set_color('w')
        ax.title.set_color('w')
        ax.xaxis.label.set_color('w')
        ax.yaxis.label.set_color('w')
        ax.tick_params(axis='both', colors='w')

    etext = psf_ax.text(0.05, 0.92, '', transform=psf_ax.transAxes)
    etext.set_color('w')

    nstep = int(args.exptime / args.time_step)
    t0 = 0.0
    # Use astropy ProgressBar to keep track of progress and show an estimate for time to completion.
    with ProgressBar(nstep) as bar:
        with writer.saving(fig, args.outfile, 100):
            for i in range(nstep):
                # The wavefront() method accepts pupil plane coordinates `u` and `v` in meters, a
                # time `t` in seconds, and possibly a field angle `theta`.  It returns the wavefront
                # lag or lead in nanometers with respect to the "perfect" planar wavefront at the
                # specified location angle and time.  In normal use for computing atmospheric PSFs,
                # this is just an implementation detail.  In this script, however, we include the
                # wavefront in the visualization.
                wf = atm.wavefront(aper.u, aper.v, t0, theta=theta) * 2*np.pi/args.lam  # radians
                # To make an actual PSF GSObject, we use the makePSF() method, including arguments
                # for the wavelength `lam`, the field angle `theta`, the aperture `aper`, the
                # starting time t0, and the exposure time `exptime`.  Here, since we're making a
                # movie, we set the exptime equal to just a single timestep, though normally we'd
                # want to set this to the full exposure time.
                psf = atm.makePSF(lam=args.lam, theta=theta, aper=aper,
                                  t0=t0, exptime=args.time_step)
                # `psf` is now just like an any other GSObject, ready to be convolved, drawn, or
                # transformed.  Here, we just draw it into an image to add to our movie.
                psf_img0 = psf.drawImage(nx=args.psf_nx, ny=args.psf_nx, scale=args.psf_scale)

                if args.accumulate:
                    psf_img_sum += psf_img0
                    psf_img = psf_img_sum/(i+1)
                else:
                    psf_img = psf_img0

                # Calculate simple estimate of size and ellipticity
                e = galsim.utilities.unweighted_shape(psf_img)

                # Update t0 for the next movie frame.
                t0 += args.time_step

                # Matplotlib code updating plot elements
                wf_im.set_array(wf)
                wf_ax.set_title("t={:5.2f} s".format(i*args.time_step))
                psf_im.set_array(psf_img.array)
                etext.set_text("$e_1$={:6.3f}, $e_2$={:6.3f}, $r^2$={:6.3f}".format(
                        e['e1'], e['e2'], e['rsqr']*args.psf_scale**2))
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    writer.grab_frame(facecolor=fig.get_facecolor())
                bar.update()
Example #18
import cmath as cm
import math
import random
import sys

import galsim
import scipy
from scipy.stats import norm
from astropy.io import fits

from cosmos_params import *

import photutils
from photutils.centroids import centroid_com

sys.path.insert(0, '../tools_for_VAE/')
from tools_for_VAE import utils

rng = galsim.BaseDeviate(None)

############ PARAMETER MEASUREMENTS

center_brightest = True

beta_prime_parameters = (
    4.449289034920325, 12.777961268954916, -0.009031368689570645,
    0.2840353229095878
)  #(14.022429614276358, 6.922508843325913, -0.0247188726955977, 0.04994196562063914)


def get_scale_radius(gal):
    """
    Return the scale radius of the created galaxy
    
Example #19
def main(argv):
    """
    Make images using constant PSF and variable shear:
      - The main image is 2048 x 2048 pixels.
      - Pixel scale is 0.2 arcsec/pixel, hence the image is about 0.11 degrees on a side.
      - Applied shear is from a cosmological power spectrum read in from file.
      - The PSF is a real one from SDSS, and corresponds to a convolution of atmospheric PSF,
        optical PSF, and pixel response, which has been sampled at pixel centers.  We used a PSF
        from SDSS in order to have a PSF profile that could correspond to what you see with a real
        telescope. However, in order that the galaxy resolution not be too poor, we tell GalSim that
        the pixel scale for that PSF image is 0.2" rather than 0.396".  We are simultaneously lying
        about the intrinsic size of the PSF and about the pixel scale when we do this.
      - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles
        (like in demo10) or parametric fits to those profiles.  We choose 30% of the galaxies
        to use the real images, and the other 70% to use the parametric fits.
      - The real galaxy images include some initial correlated noise from the original HST
        observation.  However, we whiten the noise of the final image so the final image has
        stationary Gaussian noise, rather than correlated noise.
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo11")

    # Define some parameters we'll use below.
    # Normally these would be read in from some parameter file.

    pixel_scale = 0.2  # arcsec/pixel
    image_size = 2048  # size of image in pixels
    image_size_arcsec = image_size * pixel_scale  # size of big image in each dimension (arcsec)
    noise_variance = 5.e4  # ADU^2  (Just use simple Gaussian noise here.)
    nobj = 288  # number of galaxies in entire field
    # (This corresponds to 8 galaxies / arcmin^2)
    grid_spacing = 90.0  # The spacing between the samples for the power spectrum
    # realization (arcsec)
    tel_diam = 4  # Let's figure out the flux for a 4 m class telescope
    exp_time = 300  # exposing for 300 seconds.
    center_ra = 19.3 * galsim.hours  # The RA, Dec of the center of the image on the sky
    center_dec = -33.1 * galsim.degrees

    # The catalog returns objects that are appropriate for HST in 1 second exposures.  So for our
    # telescope we scale up by the relative area and exposure time.  Note that what is important is
    # the *effective* area after taking into account obscuration.  For HST, the telescope diameter
    # is 2.4 but there is obscuration (a linear factor of 0.33).  Here, we assume that the telescope
    # we're simulating effectively has no obscuration factor.  We're also ignoring the pi/4 factor
    # since it appears in the numerator and denominator, so we use area = diam^2.
    hst_eff_area = 2.4**2 * (1. - 0.33**2)
    flux_scaling = (tel_diam**2 / hst_eff_area) * exp_time
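    # For the numbers above this works out to roughly
    #     flux_scaling = (4**2 / (2.4**2 * (1 - 0.33**2))) * 300 ~ 935,
    # i.e. each catalog galaxy's flux gets scaled up by about a factor of 935.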

    # random_seed is used for both the power spectrum realization and the random properties
    # of the galaxies.
    random_seed = 24783923

    file_name = os.path.join('output', 'tabulated_power_spectrum.fits.fz')

    logger.info('Starting demo script 11')

    # Read in galaxy catalog
    # The COSMOSCatalog uses the same input file as we have been using for RealGalaxyCatalogs
    # along with a second file called real_galaxy_catalog_23.5_examples_fits.fits, which stores
    # the information about the parameteric fits.  There is no need to specify the second file
    # name, since the name is derivable from the name of the main catalog.
    if True:
        # The catalog we distribute with the GalSim code only has 100 galaxies.
        # The galaxies will typically be reused several times here.
        cat_file_name = 'real_galaxy_catalog_23.5_example.fits'
        dir = 'data'
        cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir)
    else:
        # If you've run galsim_download_cosmos, you can leave out the cat_file_name and dir
        # to use the full COSMOS catalog with 56,000 galaxies in it.
        cosmos_cat = galsim.COSMOSCatalog()
    logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects)

    # Setup the PowerSpectrum object we'll be using:
    # To do this, we first have to read in the tabulated shear power spectrum, often denoted
    # C_ell(ell), where ell has units of inverse angle and C_ell has units of angle^2.  However,
    # GalSim works in the flat-sky approximation, so we use this notation interchangeably with
    # P(k).  GalSim does not calculate shear power spectra for users, who must be able to provide
    # their own (or use the examples in the repository).
    #
    # Here we use a tabulated power spectrum from iCosmo (http://icosmo.org), with the following
    # cosmological parameters and survey design:
    # H_0 = 70 km/s/Mpc
    # Omega_m = 0.25
    # Omega_Lambda = 0.75
    # w_0 = -1.0
    # w_a = 0.0
    # n_s = 0.96
    # sigma_8 = 0.8
    # Smith et al. prescription for the non-linear power spectrum.
    # Eisenstein & Hu transfer function with wiggles.
    # Default dN/dz with z_med = 1.0
    # The file has, as required, just two columns which are k and P(k).  However, iCosmo works in
    # terms of ell and C_ell; ell is inverse radians and C_ell in radians^2.  Since GalSim tends to
    # work in terms of arcsec, we have to tell it that the inputs are radians^-1 so it can convert
    # to store in terms of arcsec^-1.
    pk_file = os.path.join('data', 'cosmo-fid.zmed1.00.out')
    ps = galsim.PowerSpectrum(pk_file, units=galsim.radians)
    # The argument here is "e_power_function" which defines the E-mode power to use.
    logger.info('Set up power spectrum from tabulated P(k)')

    # Now let's read in the PSF.  It's a real SDSS PSF, which means pixel scale of 0.396".  However,
    # the typical seeing is 1.2" and we want to simulate better seeing, so we will just tell GalSim
    # that the pixel scale is 0.2".  We have to be careful with SDSS PSF images, as they have an
    # added 'soft bias' of 1000 which has been removed before creation of this file, so that the sky
    # level is properly zero.  Also, the file is bzipped, to demonstrate the ability of GalSim
    # to handle this kind of compressed file (among others).  We read the image directly into an
    # InterpolatedImage GSObject, so we can manipulate it as needed (here, the only manipulation
    # needed is convolution).  The flux is 1 as needed for a PSF.
    psf_file = os.path.join('data', 'example_sdss_psf_sky0.fits.bz2')
    psf = galsim.InterpolatedImage(psf_file, scale=pixel_scale, flux=1.)
    logger.info('Read in PSF image from bzipped FITS file')

    # Setup the image:
    full_image = galsim.ImageF(image_size, image_size)

    # The default convention for indexing an image is to follow the FITS standard where the
    # lower-left pixel is called (1,1).  However, this can be counter-intuitive to people more
    # used to C or python indexing, where indices start at 0.  It is possible to change the
    # coordinates of the lower-left pixel with the `setOrigin` method.  For this demo, we
    # switch to 0-based indexing, so the lower-left pixel will be called (0,0).
    full_image.setOrigin(0, 0)

    # As for demo10, we use random_seed for the random numbers required for the
    # whole image.  In this case, that covers both the power spectrum realization and the
    # noise we apply to the full image later.
    rng = galsim.BaseDeviate(random_seed)

    # We want to make random positions within our image.  However, currently for shears from a power
    # spectrum we first have to get shears on a grid of positions, and then we can choose random
    # positions within that.  So, let's make the grid.  We're going to make it as large as the
    # image, with grid points spaced by 90 arcsec (hence interpolation only happens below 90"
    # scales, below the interesting scales on which we want the shear power spectrum to be
    # represented exactly).  The lensing engine wants positions in arcsec, so calculate that:
    ps.buildGrid(grid_spacing=grid_spacing,
                 ngrid=int(math.ceil(image_size_arcsec / grid_spacing)),
                 rng=rng)
    logger.info('Made gridded shears')

    # We keep track of how much noise is already in the image from the RealGalaxies.
    # The default initial value is all pixels = 0.
    noise_image = galsim.ImageF(image_size, image_size)
    noise_image.setOrigin(0, 0)

    # Make a slightly non-trivial WCS.  We'll use a slightly rotated coordinate system
    # and center it at the image center.
    theta = 0.17 * galsim.degrees
    # ( dudx  dudy ) = ( cos(theta)  -sin(theta) ) * pixel_scale
    # ( dvdx  dvdy )   ( sin(theta)   cos(theta) )
    # Aside: You can call numpy trig functions on Angle objects directly, rather than getting
    #        their values in radians first.  Or, if you prefer, you can write things like
    #        theta.sin() or theta.cos(), which are equivalent.
    dudx = numpy.cos(theta) * pixel_scale
    dudy = -numpy.sin(theta) * pixel_scale
    dvdx = numpy.sin(theta) * pixel_scale
    dvdy = numpy.cos(theta) * pixel_scale
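    # Equivalently, using the Angle methods mentioned in the aside above, e.g.:
    #     dudx = theta.cos() * pixel_scale
    # and similarly for the other three matrix elements.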
    image_center = full_image.true_center
    affine = galsim.AffineTransform(dudx,
                                    dudy,
                                    dvdx,
                                    dvdy,
                                    origin=full_image.true_center)

    # We can also put it on the celestial sphere to give it a bit more realism.
    # The TAN projection takes a (u,v) coordinate system on a tangent plane and projects
    # that plane onto the sky using a given point as the tangent point.  The tangent
    # point should be given as a CelestialCoord.
    sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec)

    # The third parameter, units, defaults to arcsec, but we make it explicit here.
    # It sets the angular units of the (u,v) intermediate coordinate system.
    wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec)
    full_image.wcs = wcs

    # Now we need to loop over our objects:
    for k in range(nobj):
        time1 = time.time()
        # The usual random number generator using a different seed for each galaxy.
        ud = galsim.UniformDeviate(random_seed + k + 1)

        # Choose a random RA, Dec around the sky_center.
        # Note that for this to come out close to a square shape, we need to account for the
        # cos(dec) part of the metric: ds^2 = dr^2 + r^2 d(dec)^2 + r^2 cos^2(dec) d(ra)^2
        # So need to calculate dec first.
        dec = center_dec + (ud() - 0.5) * image_size_arcsec * galsim.arcsec
        ra = center_ra + (
            ud() - 0.5) * image_size_arcsec / numpy.cos(dec) * galsim.arcsec
        world_pos = galsim.CelestialCoord(ra, dec)

        # We will need the image position as well, so use the wcs to get that
        image_pos = wcs.toImage(world_pos)

        # We also need this in the tangent plane, which we call "world coordinates" here,
        # since the PowerSpectrum class is really defined on that plane, not in (ra,dec).
        uv_pos = affine.toWorld(image_pos)

        # Get the reduced shears and magnification at this point
        g1, g2, mu = ps.getLensing(pos=uv_pos)

        # Now we will have the COSMOSCatalog make a galaxy profile for us.  It can make either
        # a RealGalaxy using the original HST image and PSF, or a parametric model based on
        # parametric fits to the light distribution of the HST observation.  The parametric
        # models are either a Sersic fit to the data or a bulge + disk fit according to which
        # one gave the better chisq value.  We will select a galaxy at random from the catalog.
        # One could easily do this by choosing an index = int(ud() * cosmos_cat.nobjects), but
        # we will instead allow the catalog to choose a random galaxy for us.  It will remove any
        # selection effects involved in postage stamp creation using weights that are stored in
        # the catalog.  (If for some reason you prefer not to do that, you can always choose a
        # purely random index yourself using int(ud() * cosmos_cat.nobjects).)  We employ this
        # random selection by simply failing to specify an index or identifier for a galaxy, in
        # which case it chooses a random one.
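        # As a sketch of the alternative mentioned above, picking an explicit random index
        # yourself would look like:
        #     index = int(ud() * cosmos_cat.nobjects)
        #     gal = cosmos_cat.makeGalaxy(index=index, gal_type='parametric', rng=ud)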

        # First determine whether we will make a real galaxy (`gal_type = 'real'`) or a parametric
        # galaxy (`gal_type = 'parametric'`).  The real galaxies take longer to render, so for this
        # script, we just use them 30% of the time and use parametric galaxies the other 70%.

        # We could just use `ud()<0.3` for this, but instead we introduce another Deviate type
        # available in GalSim that we haven't used yet: BinomialDeviate.
        # It takes an N and p value and returns integers according to a binomial distribution.
        # i.e. How many heads you get after N flips if each flip has a chance, p, of being heads.
        binom = galsim.BinomialDeviate(ud, N=1, p=0.3)
        real = binom()
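        # With N=1 this is just a Bernoulli draw: real is 1 with probability p=0.3, else 0.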

        if real:
            # For real galaxies, we will want to whiten the noise in the image (below).
            # When whitening the image, we need to make sure the original correlated noise is
            # present throughout the whole image, otherwise the whitening will do the wrong thing
            # to the parts of the image that don't include the original image.  The RealGalaxy
            # stores the correct noise profile to use as the gal.noise attribute.  This noise
            # profile is automatically updated as we shear, dilate, convolve, etc.  But we need to
            # tell it how large to pad with this noise by hand.  This is a bit complicated for the
            # code to figure out on its own, so we have to supply the size for noise padding
            # with the noise_pad_size parameter.

            # The large galaxies will render fine without any noise padding, but the postage stamp
            # for the smaller galaxies will be sized appropriately for the PSF, which may make the
            # stamp larger than the original galaxy image.  The psf image is 40 x 40, although
            # the bright part is much more concentrated than that.  If we pad out the galaxy image
            # to at least 40 x sqrt(2), we should be safe even if the galaxy image is rotated
            # with respect to the psf image.
            #     noise_pad_size = 40 * sqrt(2) * 0.2 arcsec/pixel = 11.3 arcsec
            gal = cosmos_cat.makeGalaxy(gal_type='real',
                                        rng=ud,
                                        noise_pad_size=11.3)
        else:
            gal = cosmos_cat.makeGalaxy(gal_type='parametric', rng=ud)

        # Apply a random rotation
        theta = ud() * 2.0 * numpy.pi * galsim.radians
        gal = gal.rotate(theta)

        # Rescale the flux to match our telescope configuration.
        # This automatically scales up the noise variance by flux_scaling**2.
        gal *= flux_scaling

        # Apply the cosmological (reduced) shear and magnification at this position using a single
        # GSObject method.
        gal = gal.lens(g1, g2, mu)

        # Convolve with the PSF.
        final = galsim.Convolve(psf, gal)

        # Account for the fractional part of the position
        # cf. demo9.py for an explanation of this nominal position stuff.
        x_nominal = image_pos.x + 0.5
        y_nominal = image_pos.y + 0.5
        ix_nominal = int(math.floor(x_nominal + 0.5))
        iy_nominal = int(math.floor(y_nominal + 0.5))
        dx = x_nominal - ix_nominal
        dy = y_nominal - iy_nominal
        offset = galsim.PositionD(dx, dy)

        # We use method='no_pixel' here because the SDSS PSF image that we are using includes the
        # pixel response already.
        stamp = final.drawImage(wcs=wcs.local(image_pos),
                                offset=offset,
                                method='no_pixel')

        # Recenter the stamp at the desired position:
        stamp.setCenter(ix_nominal, iy_nominal)

        # Find the overlapping bounds:
        bounds = stamp.bounds & full_image.bounds

        # Now, if we are using a real galaxy, we want to either whiten or at least symmetrize the
        # noise on the postage stamp to avoid having to deal with correlated noise in any kind of
        # image processing you would want to do on the final image.  (Like measure galaxy shapes.)

        # Galsim automatically propagates the noise correctly from the initial RealGalaxy object
        # through the applied shear, distortion, rotation, and convolution into the final object's
        # noise attribute.  To make the noise fully white, use the image.whitenNoise() method.
        # The returned value is the variance of the Gaussian noise that is present after the
        # whitening process.

        # However, this is often overkill for many applications.  If it is acceptable to merely end
        # up with noise with some degree of symmetry (say 4-fold or 8-fold symmetry), then you can
        # instead have GalSim just add enough noise to make the resulting noise have this kind of
        # symmetry.  Usually this requires adding significantly less additional noise, which means
        # you can have the resulting total variance be somewhat smaller.  The returned variance
        # corresponds to the zero-lag value of the noise correlation function, which will still have
        # off-diagonal elements.  We can do this step using the image.symmetrizeNoise() method.
        if real:
            if True:
                # We use the symmetrizing option here.
                new_variance = stamp.symmetrizeNoise(final.noise, 8)
            else:
                # Here is how you would do it if you wanted to fully whiten the image.
                new_variance = stamp.whitenNoise(final.noise)

            # We need to keep track of how much variance we have currently in the image, so when
            # we add more noise, we can omit what is already there.
            noise_image[bounds] += new_variance

        # Finally, add the stamp to the full image.
        full_image[bounds] += stamp[bounds]

        time2 = time.time()
        tot_time = time2 - time1
        logger.info('Galaxy %d: position relative to center = %s, t=%f s', k,
                    str(uv_pos), tot_time)

    # We already have some noise in the image, but it isn't uniform.  So the first thing to do is
    # to make the Gaussian noise uniform across the whole image.  We have a special noise class
    # that can do this.  VariableGaussianNoise takes an image of variance values and applies
    # Gaussian noise with the corresponding variance to each pixel.
    # So all we need to do is build an image with how much noise to add to each pixel to get us
    # up to the maximum value that we already have in the image.
    max_current_variance = numpy.max(noise_image.array)
    noise_image = max_current_variance - noise_image
    vn = galsim.VariableGaussianNoise(rng, noise_image)
    full_image.addNoise(vn)

    # Now max_current_variance is the noise level across the full image.  We don't want to add that
    # twice, so subtract off this much from the intended noise that we want to end up in the image.
    noise_variance -= max_current_variance

    # Now add Gaussian noise with this variance to the final image.  We have to do this step
    # at the end, rather than adding to individual postage stamps, in order to get the noise
    # level right in the overlap regions between postage stamps.
    noise = galsim.GaussianNoise(rng, sigma=math.sqrt(noise_variance))
    full_image.addNoise(noise)
    logger.info('Added noise to final large image')

    # Now write the image to disk.  It is automatically compressed with Rice compression,
    # since the filename we provide ends in .fz.
    full_image.write(file_name)
    logger.info('Wrote image to %r', file_name)

    # Compute some sky positions of some of the pixels to compare with the values of RA, Dec
    # that ds9 reports.  ds9 always uses (1,1) for the lower left pixel, so the pixel coordinates
    # of these pixels are different by 1, but you can check that the RA and Dec values are
    # the same as what GalSim calculates.
    ra_str = center_ra.hms()
    dec_str = center_dec.dms()
    logger.info('Center of image    is at RA %sh %sm %ss, DEC %sd %sm %ss',
                ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3],
                dec_str[3:5], dec_str[5:])
    for (x, y) in [(0, 0), (0, image_size - 1), (image_size - 1, 0),
                   (image_size - 1, image_size - 1)]:
        world_pos = wcs.toWorld(galsim.PositionD(x, y))
        ra_str = world_pos.ra.hms()
        dec_str = world_pos.dec.dms()
        logger.info('Pixel (%4d, %4d) is at RA %sh %sm %ss, DEC %sd %sm %ss',
                    x, y, ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3],
                    dec_str[3:5], dec_str[5:])
    logger.info(
        'ds9 reports these pixels as (1,1), (1,2048), etc. with the same RA, Dec.'
    )
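
# A typical entry point for running this demo as a script (a sketch; the module-level
# imports such as sys, os, math, time, logging, numpy, and galsim are assumed):
#     if __name__ == "__main__":
#         main(sys.argv)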
Example #20
def image_generator(cosmos_cat_dir,
                    training_or_test,
                    isolated_or_blended,
                    constants_dir,
                    used_idx=None,
                    nmax_blend=4,
                    max_try=3,
                    mag_cut=28.,
                    method_first_shift='noshift',
                    do_peak_detection=True,
                    do_add_shear=False):
    """
    Return numpy arrays: noiseless and noisy image of single galaxy and of blended galaxies as well as the pandaframe including data about the image and the shifts in the test sample generation configuration
    
    Parameters:
    ----------
    cosmos_cat_dir: COSMOS catalog directory
    training_or_test: choice for generating a training or testing dataset
    isolated_or_blended: choice for generation of samples of isolated galaxy images or blended galaxies images
    constants_dir: directory where normalization constants are saved
    used_idx: indexes to use in the catalog (to use different parts of the catalog for training/validation/test)
    nmax_blend: maximum number of galaxies in a blended galaxies image
    max_try: maximum number of try before leaving the function (to avoir infinite loop)
    mag_cut: cut in magnitude to select function below this magnitude
    method_first_shift: chosen method for shifting the centered galaxy
    do_peak_detection: boolean to do the peak detection
    """
    # Import the COSMOS catalog
    cosmos_cat = galsim.COSMOSCatalog('real_galaxy_catalog_25.2.fits',
                                      dir=cosmos_cat_dir)
    counter = 0
    np.random.seed()  # important for multiprocessing !

    assert training_or_test in ['training', 'validation', 'test']
    assert isolated_or_blended in ['blended', 'isolated']

    while counter < max_try:
        try:
            ud = galsim.UniformDeviate()

            nb_blended_gal = np.random.randint(nmax_blend) + 1
            data = {}
            galaxies = []
            mag = []
            mag_ir = []
            j = 0
            while j < nb_blended_gal:
                # Choose the part of the catalog used for generation
                if used_idx is not None:
                    idx = np.random.choice(used_idx)
                else:
                    idx = np.random.randint(cosmos_cat.nobjects)
                # Generate galaxy
                gal = cosmos_cat.makeGalaxy(idx,
                                            gal_type='parametric',
                                            chromatic=True,
                                            noise_pad_size=0)
                # Compute the magnitude of the galaxy
                _mag_temp = gal.calculateMagnitude(
                    filters['r'].withZeropoint(28.13))
                # Magnitude cut
                if _mag_temp < mag_cut:
                    gal = gal.rotate(ud() * 360. * galsim.degrees)
                    galaxies.append(gal)
                    mag.append(_mag_temp)
                    mag_ir.append(
                        gal.calculateMagnitude(
                            filters['H'].withZeropoint(24.92 -
                                                       22.35 * coeff_noise_h)))
                    j += 1

            # Optionally, find the brightest and put it first in the list
            if center_brightest:
                _idx = np.argmin(mag)
                galaxies.insert(0, galaxies.pop(_idx))
                mag.insert(0, mag.pop(_idx))
                mag_ir.insert(0, mag_ir.pop(_idx))

            # Shift the galaxies
            shift = np.zeros((nmax_blend, 2))
            # Shift the lowest magnitude galaxy
            galaxies[0], shift[0] = shift_gal(galaxies[0],
                                              method=method_first_shift,
                                              max_dx=0.1)
            # Shift all the other galaxies
            for j, gal in enumerate(galaxies[1:]):
                galaxies[j + 1], shift[j + 1] = shift_gal(gal,
                                                          shift_x0=shift[0, 0],
                                                          shift_y0=shift[0, 1],
                                                          min_r=0.65 / 2.,
                                                          max_r=1.5,
                                                          method='annulus')

            # Compute squared distances of the neighbouring galaxies to the lowest-magnitude galaxy
            if nb_blended_gal > 1:
                distances = [
                    shift[j][0]**2 + shift[j][1]**2
                    for j in range(1, nb_blended_gal)
                ]
                idx_closest_to_peak_galaxy = np.argmin(distances) + 1
            else:
                idx_closest_to_peak_galaxy = 0

            if do_add_shear:
                galaxy_noiseless = np.zeros(
                    (50, max_stamp_size, max_stamp_size))
                blend_noisy = np.zeros((50, max_stamp_size, max_stamp_size))
            else:
                galaxy_noiseless = np.zeros(
                    (10, max_stamp_size, max_stamp_size))
                blend_noisy = np.zeros((10, max_stamp_size, max_stamp_size))

            # Realize peak detection in r-band filter if asked
            if do_peak_detection:
                band = 6
                galaxies_psf = [
                    galsim.Convolve([gal * coeff_exp[band], PSF[band]])
                    for gal in galaxies
                ]

                images, blend_img = draw_images(galaxies_psf, band,
                                                max_stamp_size * 2, 'r',
                                                sky_level_pixel[band])
                blend_noisy_temp = blend_img.array.data
                peak_detection_output = peak_detection(blend_noisy_temp,
                                                       band,
                                                       shift,
                                                       max_stamp_size * 2,
                                                       4,
                                                       nb_blended_gal,
                                                       training_or_test,
                                                       dist_cut=0.65 / 2.)
                if not peak_detection_output:
                    print('No peak detected')
                    raise RuntimeError
                else:
                    idx_closest_to_peak, idx_closest_to_peak_galaxy, center_pix_x, center_pix_y, center_arc_x, center_arc_y, n_peak = peak_detection_output

                # Modify galaxies and shift accordingly
                galaxies = [
                    gal.shift(-center_arc_x, -center_arc_y) for gal in galaxies
                ]
                shift[:nb_blended_gal] -= np.array(
                    [center_arc_x, center_arc_y])

            # Now draw image in all filters
            for i, filter_name in enumerate(filter_names_all):
                shear_list = [(0, 0), (0.01, 0), (-0.01, 0), (0, 0.01),
                              (0, -0.01)]
                if not do_add_shear:
                    shear_list = [shear_list[0]]
                    r_int = None
                else:
                    r_int = np.random.randint(10000000)
                for s, shear in enumerate(shear_list):
                    rng_shear = galsim.BaseDeviate(r_int)
                    galaxies_psf = [
                        galsim.Convolve([
                            gal.shear(g1=shear[0], g2=shear[1]) * coeff_exp[i],
                            PSF[i]
                        ]) for gal in galaxies
                    ]
                    images, blend_img = draw_images(galaxies_psf,
                                                    i,
                                                    max_stamp_size,
                                                    filter_name,
                                                    sky_level_pixel[i],
                                                    rng_shear=rng_shear)
                    if isolated_or_blended == 'isolated' or not do_peak_detection:
                        idx_closest_to_peak = 0
                        n_peak = 1
                    galaxy_noiseless[
                        i + s * 10] = images[idx_closest_to_peak].array.data
                    blend_noisy[i + s * 10] = blend_img.array.data

                    if s == 0:
                        # get data for the test sample, data are computed in the 'r' filter
                        if training_or_test == 'test' and filter_name == 'r':
                            # need psf to compute ellipticities
                            psf_image = PSF[i].drawImage(nx=max_stamp_size,
                                                         ny=max_stamp_size,
                                                         scale=pixel_scale[i])
                            data['redshift'], data['moment_sigma'], data[
                                'e1'], data['e2'], data['mag'] = get_data(
                                    galaxies[idx_closest_to_peak],
                                    images[idx_closest_to_peak], psf_image)

                            # Compute data and blendedness
                            if nb_blended_gal > 1:
                                data['closest_redshift'], data[
                                    'closest_moment_sigma'], data[
                                        'closest_e1'], data['closest_e2'], data[
                                            'closest_mag'] = get_data(
                                                galaxies[
                                                    idx_closest_to_peak_galaxy],
                                                images[
                                                    idx_closest_to_peak_galaxy],
                                                psf_image)
                                img_central = images[idx_closest_to_peak].array
                                img_others = np.zeros_like(img_central)
                                for _h, image in enumerate(images):
                                    if _h != idx_closest_to_peak:
                                        img_others += image.array
                                #img_others = np.array([image.array.data for _h, image in enumerate(images) if _h!=idx_closest_to_peak]).sum(axis = 0)
                                img_closest_neighbour = images[
                                    idx_closest_to_peak_galaxy].array  # np.array(images[idx_closest_to_peak_galaxy].array.data)
                                data[
                                    'blendedness_total_lsst'] = utils.compute_blendedness_total(
                                        img_central, img_others)
                                data[
                                    'blendedness_closest_lsst'] = utils.compute_blendedness_single(
                                        img_central, img_closest_neighbour)
                                data[
                                    'blendedness_aperture_lsst'] = utils.compute_blendedness_aperture(
                                        img_central, img_others,
                                        data['moment_sigma'])
                            else:
                                data['closest_redshift'] = np.nan
                                data['closest_moment_sigma'] = np.nan
                                data['closest_e1'] = np.nan
                                data['closest_e2'] = np.nan
                                data['closest_mag'] = np.nan
                                data['blendedness_total_lsst'] = np.nan
                                data['blendedness_closest_lsst'] = np.nan
                                data['blendedness_aperture_lsst'] = np.nan
            break

        except RuntimeError as e:
            print(e)

    # For training/validation, return normalized images only
    if training_or_test in ['training', 'validation']:
        galaxy_noiseless = utils.norm(galaxy_noiseless[None, :],
                                      bands=range(10),
                                      path=constants_dir)[0]
        blend_noisy = utils.norm(blend_noisy[None, :],
                                 bands=range(10),
                                 path=constants_dir)[0]
        return galaxy_noiseless, blend_noisy

    # For testing, return unnormalized images and data
    elif training_or_test == 'test':
        data['nb_blended_gal'] = nb_blended_gal
        data['mag'] = mag[0]
        data['mag_ir'] = mag_ir[0]
        if nb_blended_gal > 1:
            data['closest_mag'] = mag[idx_closest_to_peak_galaxy]
            data['closest_mag_ir'] = mag_ir[idx_closest_to_peak_galaxy]
            data['closest_x'] = shift[idx_closest_to_peak_galaxy][0]
            data['closest_y'] = shift[idx_closest_to_peak_galaxy][1]
        else:
            data['closest_mag'] = np.nan
            data['closest_mag_ir'] = np.nan
            data['closest_x'] = np.nan
            data['closest_y'] = np.nan
        data['idx_closest_to_peak'] = idx_closest_to_peak
        data['n_peak_detected'] = n_peak
        data['SNR'] = utils.SNR(galaxy_noiseless, sky_level_pixel, band=6)[1]
        data['SNR_peak'] = utils.SNR_peak(galaxy_noiseless,
                                          sky_level_pixel,
                                          band=6)[1]
        return galaxy_noiseless, blend_noisy, data, shift
    else:
        raise ValueError
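
# Minimal usage sketch (paths are illustrative; the module-level constants presumably
# provided by cosmos_params, e.g. filters, PSF, coeff_exp, sky_level_pixel, pixel_scale,
# and max_stamp_size, must be available):
#     gal_noiseless, blend_noisy = image_generator(
#         cosmos_cat_dir='/path/to/COSMOS_25.2_training_sample',
#         training_or_test='training',
#         isolated_or_blended='blended',
#         constants_dir='/path/to/constants')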
Example #21
def main(argv):
    """
    Make images using variable PSF and shear:
      - The main image is 10 x 10 postage stamps.
      - Each postage stamp is 48 x 48 pixels.
      - The second HDU has the corresponding PSF image.
      - Applied shear is from a power spectrum P(k) ~ k^1.8.
      - Galaxies are real galaxies oriented in a ring test of 20 each.
      - The PSF is Gaussian with FWHM, ellipticity and position angle functions of (x,y)
      - Noise is Poisson using a nominal sky value of 1.e6.
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo10")

    # Define some parameters we'll use below.
    # Normally these would be read in from some parameter file.

    n_tiles = 10  # number of tiles in each direction.
    stamp_size = 48  # pixels

    pixel_scale = 0.44  # arcsec / pixel
    sky_level = 1.e6  # ADU / arcsec^2

    # The random seed is used for both the power spectrum realization and the random properties
    # of the galaxies.
    random_seed = 3339201

    # Make output directory if not already present.
    if not os.path.isdir('output'):
        os.mkdir('output')

    file_name = os.path.join('output', 'power_spectrum.fits')

    # These will be created for each object below.  The values we'll use will be functions
    # of (x,y) relative to the center of the image.  (r = sqrt(x^2+y^2))
    # psf_fwhm = 0.9 + 0.5 * (r/100)^2  -- arcsec
    # psf_e = 0.4 * (r/100)^1.5         -- large value at the edge, so visible by eye.
    # psf_beta = atan2(y,x) + pi/2      -- tangential pattern

    gal_dilation = 3  # Make the galaxies a bit larger than their original size.
    gal_signal_to_noise = 100  # Pretty high.
    psf_signal_to_noise = 1000  # Even higher.

    logger.info('Starting demo script 10')

    # Read in galaxy catalog
    cat_file_name = 'real_galaxy_catalog_example.fits'
    dir = 'data'
    real_galaxy_catalog = galsim.RealGalaxyCatalog(cat_file_name, dir=dir)
    logger.info('Read in %d real galaxies from catalog',
                real_galaxy_catalog.nobjects)

    # List of IDs to use.  We select 5 particularly irregular galaxies for this demo.
    # Then we'll choose randomly from this list.
    id_list = [106416, 106731, 108402, 116045, 116448]

    # Make the 5 galaxies we're going to use here rather than remake them each time.
    # This means the Fourier transforms of the real galaxy images don't need to be recalculated
    # each time, so it's a bit more efficient.
    gal_list = [
        galsim.RealGalaxy(real_galaxy_catalog, id=id) for id in id_list
    ]

    # Make the galaxies a bit larger than their original observed size.
    gal_list = [gal.dilate(gal_dilation) for gal in gal_list]

    # Setup the PowerSpectrum object we'll be using:
    ps = galsim.PowerSpectrum(lambda k: k**1.8)
    # The argument here is "e_power_function" which defines the E-mode power to use.

    # There is also a b_power_function if you want to include any B-mode power:
    #     ps = galsim.PowerSpectrum(e_power_function, b_power_function)

    # You may even omit the e_power_function argument and have a pure B-mode power spectrum.
    #     ps = galsim.PowerSpectrum(b_power_function = b_power_function)

    # All the random number generator classes derive from BaseDeviate.
    # When we construct another kind of deviate class from any other
    # kind of deviate class, the two share the same underlying random number
    # generator.  Sometimes it can be clearer to just construct a BaseDeviate
    # explicitly and then construct anything else you need from that.
    # Note: A BaseDeviate cannot be used to generate any values.  It can
    # only be used in the constructor for other kinds of deviates.
    # The seeds for the objects are random_seed..random_seed+nobj-1 (which comes later),
    # so use the next one.
    nobj = n_tiles * n_tiles
    rng = galsim.BaseDeviate(random_seed + nobj)
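    # For instance, galsim.UniformDeviate(rng) or galsim.GaussianDeviate(rng) would each
    # draw from the same underlying random number stream as rng itself.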

    # Setup the images:
    gal_image = galsim.ImageF(stamp_size * n_tiles, stamp_size * n_tiles)
    psf_image = galsim.ImageF(stamp_size * n_tiles, stamp_size * n_tiles)

    # Update the image WCS to use the image center as the origin of the WCS.
    # The class that acts like a PixelScale except for this offset is called OffsetWCS.
    im_center = gal_image.bounds.trueCenter()
    wcs = galsim.OffsetWCS(scale=pixel_scale, origin=im_center)
    gal_image.wcs = wcs
    psf_image.wcs = wcs

    # We will place the tiles in a random order.  To do this, we make two lists for the
    # ix and iy values.  Then we apply a random permutation to the lists (in tandem).
    ix_list = []
    iy_list = []
    for ix in range(n_tiles):
        for iy in range(n_tiles):
            ix_list.append(ix)
            iy_list.append(iy)
    # This next function will use the given random number generator, rng, and use it to
    # randomly permute any number of lists.  All lists will have the same random permutation
    # applied.
    galsim.random.permute(rng, ix_list, iy_list)

    # Now have the PowerSpectrum object build a grid of shear values for us to use.
    # Also, because of some technical details about how the config stuff handles the random
    # number generator here, we need to duplicate the rng object if we want to have the
    # two output files match.  This means that technically, the same sequence of random numbers
    # will be used in building the grid as will be used by the other uses of rng (permuting the
    # postage stamps and adding noise).  But since they are used in such completely different
    # ways, it is hard to imagine how this could lead to any kind of bias in the images.
    grid_g1, grid_g2 = ps.buildGrid(grid_spacing=stamp_size * pixel_scale,
                                    ngrid=n_tiles,
                                    rng=rng.duplicate())

    # Build each postage stamp:
    for k in range(nobj):
        # The usual random number generator using a different seed for each galaxy.
        rng = galsim.BaseDeviate(random_seed + k)

        # Determine the bounds for this stamp and its center position.
        ix = ix_list[k]
        iy = iy_list[k]
        b = galsim.BoundsI(ix * stamp_size + 1, (ix + 1) * stamp_size,
                           iy * stamp_size + 1, (iy + 1) * stamp_size)
        sub_gal_image = gal_image[b]
        sub_psf_image = psf_image[b]

        pos = wcs.toWorld(b.trueCenter())
        # The image comes out as about 211 arcsec across, so we define our variable
        # parameters in terms of (r/100 arcsec), so roughly the scale size of the image.
        r = math.sqrt(pos.x**2 + pos.y**2) / 100
        psf_fwhm = 0.9 + 0.5 * r**2  # arcsec
        psf_e = 0.4 * r**1.5
        psf_beta = (math.atan2(pos.y, pos.x) + math.pi / 2) * galsim.radians

        # Define the PSF profile
        psf = galsim.Gaussian(fwhm=psf_fwhm)
        psf = psf.shear(e=psf_e, beta=psf_beta)

        # Define the galaxy profile:

        # For this demo, we are doing a ring test where the same galaxy profile is drawn at many
        # orientations stepped uniformly in angle, making a ring in e1-e2 space.
        # We're drawing each profile at 20 different orientations and then skipping to the
        # next galaxy in the list.  So theta steps by 1/20 * 360 degrees:
        theta = k / 20. * 360. * galsim.degrees

        # The index needs to increment every 20 objects, so we use integer (floor) division.
        index = k // 20
        gal = gal_list[index]

        # This makes a new copy so we're not changing the object in the gal_list.
        gal = gal.rotate(theta)

        # Apply the shear from the power spectrum.  We should either turn the gridded shears
        # grid_g1[iy, ix] and grid_g2[iy, ix] into gridded reduced shears using a utility called
        # galsim.lensing.theoryToObserved, or use ps.getShear() which by default gets the reduced
        # shear.  ps.getShear() is also more flexible because it can get the shear at positions that
        # are not on the original grid, as long as they are contained within the bounds of the full
        # grid. So in this example we'll use ps.getShear().
        alt_g1, alt_g2 = ps.getShear(pos)
        gal = gal.shear(g1=alt_g1, g2=alt_g2)

        # Apply half-pixel shift in a random direction.
        shift_r = pixel_scale * 0.5
        ud = galsim.UniformDeviate(rng)
        theta = ud() * 2. * math.pi
        dx = shift_r * math.cos(theta)
        dy = shift_r * math.sin(theta)
        gal = gal.shift(dx, dy)

        # Make the final image, convolving with the psf
        final = galsim.Convolve([psf, gal])

        # Draw the image
        final.drawImage(sub_gal_image)

        # Now add noise to get our desired S/N
        # See demo5.py for more info about how this works.
        sky_level_pixel = sky_level * pixel_scale**2
        noise = galsim.PoissonNoise(rng, sky_level=sky_level_pixel)
        sub_gal_image.addNoiseSNR(noise, gal_signal_to_noise)

        # For the PSF image, we also shift the PSF by the same amount.
        psf = psf.shift(dx, dy)

        # Draw the PSF image:
        # We use real space integration over the pixels to avoid some of the
        # artifacts that can show up with Fourier convolution.
        # The level of the artifacts is quite low, but when drawing with
        # so little noise, they are apparent with ds9's zscale viewing.
        psf.drawImage(sub_psf_image, method='real_space')

        # Again, add noise, but at higher S/N this time.
        sub_psf_image.addNoiseSNR(noise, psf_signal_to_noise)

        logger.info('Galaxy (%d,%d): position relative to center = %s', ix, iy,
                    str(pos))

    logger.info('Done making images of postage stamps')

    # Now write the images to disk.
    images = [gal_image, psf_image]
    galsim.fits.writeMulti(images, file_name)
    logger.info('Wrote image to %r', file_name)
Example #22
def test_interp():
    """First test of use with interpolator.  Make a bunch of noisy
    versions of the same PSF, interpolate them with constant interp
    to get an average PSF
    """
    influx = 150.
    if __name__ == '__main__':
        fiducial_list = [
            fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat
        ]
        niter = 3
        npos = 10
    else:
        fiducial_list = [fiducial_moffat]
        niter = 1  # Not actually any need for iterating in this case.
        npos = 4
    for fiducial in fiducial_list:
        print()
        print("fiducial = ", fiducial)
        print()
        mod = piff.GSObjectModel(fiducial, include_pixel=False)
        g1 = g2 = u0 = v0 = 0.0

        # Interpolator will be simple mean
        interp = piff.Polynomial(order=0)

        # Draw stars on a 2d grid of "focal plane" with 0<=u,v<=1
        positions = np.linspace(0., 1., npos)
        stars = []
        rng = galsim.BaseDeviate(1234)
        for u in positions:
            for v in positions:
                s = make_data(fiducial,
                              1.0,
                              g1,
                              g2,
                              u0,
                              v0,
                              influx,
                              noise=0.1,
                              pix_scale=0.5,
                              fpu=u,
                              fpv=v,
                              rng=rng,
                              include_pixel=False)
                s = mod.initialize(s)
                stars.append(s)

        # Also store away a noiseless copy of the PSF, origin of focal plane
        s0 = make_data(fiducial,
                       1.0,
                       g1,
                       g2,
                       u0,
                       v0,
                       influx,
                       pix_scale=0.5,
                       include_pixel=False)
        s0 = mod.initialize(s0)

        # Polynomial doesn't need this, but it should work nonetheless.
        interp.initialize(stars)

        # Iterate solution using interpolator
        for iteration in range(niter):
            # Refit PSFs star by star:
            for i, s in enumerate(stars):
                stars[i] = mod.fit(s)
            # Run the interpolator
            interp.solve(stars)
            # Install interpolator solution into each
            # star, recalculate flux, report chisq
            chisq = 0.
            dof = 0
            for i, s in enumerate(stars):
                s = interp.interpolate(s)
                s = mod.reflux(s)
                chisq += s.fit.chisq
                dof += s.fit.dof
                stars[i] = s
            print('iteration', iteration, 'chisq=', chisq, 'dof=', dof)

        # Now use the interpolator to produce a noiseless rendering
        s1 = interp.interpolate(s0)
        s1 = mod.reflux(s1)
        print('Flux, ctr, chisq after interpolation: ', s1.fit.flux,
              s1.fit.center, s1.fit.chisq)
        np.testing.assert_almost_equal(s1.fit.flux / influx, 1.0, decimal=3)

        s1 = mod.draw(s1)
        print('max image abs diff = ',
              np.max(np.abs(s1.image.array - s0.image.array)))
        print('max image abs value = ', np.max(np.abs(s0.image.array)))
        peak = np.max(np.abs(s0.image.array))
        np.testing.assert_almost_equal(s1.image.array / peak,
                                       s0.image.array / peak,
                                       decimal=3)
Example #23
def test_CRG_noise(args):
    """Test noise propagation in ChromaticRealGalaxy
    """
    t0 = time.time()

    print("Constructing chromatic PSFs")
    in_PSF = galsim.ChromaticAiry(lam=700., diam=2.4)
    out_PSF = galsim.ChromaticAiry(lam=700., diam=1.2)

    print("Constructing filters and SEDs")
    waves = np.arange(550.0, 900.1, 10.0)
    visband = galsim.Bandpass(galsim.LookupTable(waves,
                                                 np.ones_like(waves),
                                                 interpolant='linear'),
                              wave_type='nm')
    split_points = np.linspace(550.0, 900.0, args.Nim + 1, endpoint=True)
    bands = [
        visband.truncate(blue_limit=blim, red_limit=rlim)
        for blim, rlim in zip(split_points[:-1], split_points[1:])
    ]

    maxk = max([
        out_PSF.evaluateAtWavelength(waves[0]).maxK(),
        out_PSF.evaluateAtWavelength(waves[-1]).maxK()
    ])

    SEDs = [
        galsim.SED(galsim.LookupTable(waves, waves**i, interpolant='linear'),
                   flux_type='fphotons',
                   wave_type='nm').withFlux(1.0, visband)
        for i in range(args.NSED)
    ]

    print("Constructing input noise correlation functions")
    rng = galsim.BaseDeviate(args.seed)
    in_xis = [
        galsim.getCOSMOSNoise(cosmos_scale=args.in_scale,
                              rng=rng).dilate(1 + i * 0.05).rotate(
                                  5 * i * galsim.degrees)
        for i in range(args.Nim)
    ]

    print("Creating noise images")
    img_sets = []
    for i in range(args.Ntrial):
        imgs = []
        for j, xi in enumerate(in_xis):
            img = galsim.Image(args.in_Nx, args.in_Nx, scale=args.in_scale)
            img.addNoise(xi)
            imgs.append(img)
        img_sets.append(imgs)

    print("Constructing `ChromaticRealGalaxy`s")
    crgs = []
    with ProgressBar(len(img_sets)) as bar:
        for imgs in img_sets:
            crgs.append(
                galsim.ChromaticRealGalaxy.makeFromImages(imgs,
                                                          bands,
                                                          in_PSF,
                                                          in_xis,
                                                          SEDs=SEDs,
                                                          maxk=maxk))
            bar.update()

    print("Convolving by output PSF")
    objs = [galsim.Convolve(crg, out_PSF) for crg in crgs]

    print("Drawing through output filter")
    out_imgs = [
        obj.drawImage(visband,
                      nx=args.out_Nx,
                      ny=args.out_Nx,
                      scale=args.out_scale,
                      iimult=args.iimult) for obj in objs
    ]

    noise = objs[0].noise

    print("Measuring images' correlation functions")
    xi_obs = galsim.correlatednoise.CorrelatedNoise(out_imgs[0])
    for img in out_imgs[1:]:
        xi_obs += galsim.correlatednoise.CorrelatedNoise(img)
    xi_obs /= args.Ntrial
    xi_obs_img = galsim.Image(args.out_Nx, args.out_Nx, scale=args.out_scale)
    xi_obs.drawImage(xi_obs_img)

    print("Observed image variance: ", xi_obs.getVariance())
    print("Predicted image variance: ", noise.getVariance())
    print("Predicted/Observed variance:",
          noise.getVariance() / xi_obs.getVariance())

    print("Took {} seconds".format(time.time() - t0))

    if args.plot:
        import matplotlib.pyplot as plt
        out_array = (np.arange(args.out_Nx) - args.out_Nx / 2) * args.out_scale
        out_extent = [
            -args.out_Nx * args.out_scale / 2,
            args.out_Nx * args.out_scale / 2,
            -args.out_Nx * args.out_scale / 2, args.out_Nx * args.out_scale / 2
        ]

        fig = plt.figure(figsize=(5, 5))

        # Sample image
        ax = fig.add_subplot(111)
        ax.imshow(out_imgs[0].array, extent=out_extent)
        ax.set_title("sample output image")
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        # ax.colorbar()
        fig.show()

        # 2D correlation functions
        fig = plt.figure(figsize=(10, 10))
        ax1 = fig.add_subplot(221)
        noise_img = galsim.Image(args.out_Nx,
                                 args.out_Nx,
                                 scale=args.out_scale)
        noise.drawImage(noise_img)
        ax1.imshow(np.log10(np.abs(noise_img.array)), extent=out_extent)
        ax1.set_title("predicted covariance function")
        ax1.set_xlabel(r"$\Delta x$")
        ax1.set_ylabel(r"$\Delta y$")
        ax2 = fig.add_subplot(222)
        ax2.imshow(np.log10(np.abs(xi_obs_img.array)), extent=out_extent)
        ax2.set_title("observed covariance function")
        ax2.set_xlabel(r"$\Delta x$")
        ax2.set_ylabel(r"$\Delta y$")

        # 1D slice through the correlation functions
        ax3 = fig.add_subplot(223)
        ax3.plot(out_array,
                 noise_img.array[args.out_Nx // 2, :],
                 label="prediction",
                 color='red')
        ax3.plot(out_array,
                 xi_obs_img.array[args.out_Nx // 2, :],
                 label="observation",
                 color='blue')
        ax3.legend(loc='best')
        ax3.set_xlabel(r"$\Delta x$")
        ax3.set_ylabel(r"$\xi$")

        ax4 = fig.add_subplot(224)
        ax4.plot(out_array,
                 noise_img.array[args.out_Nx // 2, :],
                 label="prediction",
                 color='red')
        ax4.plot(out_array,
                 xi_obs_img.array[args.out_Nx // 2, :],
                 label="observation",
                 color='blue')
        ax4.plot(out_array,
                 -noise_img.array[args.out_Nx // 2, :],
                 ls=':',
                 color='red')
        ax4.plot(out_array,
                 -xi_obs_img.array[args.out_Nx // 2, :],
                 ls=':',
                 color='blue')
        ax4.legend(loc='best')
        ax4.set_yscale('log')
        ax4.set_xlabel(r"$\Delta x$")
        ax4.set_ylabel(r"$\xi$")

        plt.tight_layout()
        plt.show()
Example #24
def test_missing():
    """Next: fit mean PSF to multiple images, with missing pixels.
    """
    if __name__ == '__main__':
        fiducial_list = [
            fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat
        ]
    else:
        fiducial_list = [fiducial_moffat]
    for fiducial in fiducial_list:
        print()
        print("fiducial = ", fiducial)
        print()
        mod = piff.GSObjectModel(fiducial, include_pixel=False)
        g1 = g2 = u0 = v0 = 0.0

        # Draw stars on a 2d grid of "focal plane" with 0<=u,v<=1
        positions = np.linspace(0., 1., 4)
        influx = 150.
        stars = []
        np_rng = np.random.RandomState(1234)
        rng = galsim.BaseDeviate(1234)
        for u in positions:
            for v in positions:
                # Draw stars in focal plane positions around a unit ring
                s = make_data(fiducial,
                              1.0,
                              g1,
                              g2,
                              u0,
                              v0,
                              influx,
                              noise=0.1,
                              pix_scale=0.5,
                              fpu=u,
                              fpv=v,
                              rng=rng,
                              include_pixel=False)
                s = mod.initialize(s)
                # Kill 10% of each star's pixels
                bad = np_rng.rand(*s.image.array.shape) < 0.1
                s.weight.array[bad] = 0.
                s.image.array[bad] = -999.
                s = mod.reflux(s,
                               fit_center=False)  # Start with a sensible flux
                stars.append(s)

        # Also store away a noiseless copy of the PSF, origin of focal plane
        s0 = make_data(fiducial,
                       1.0,
                       g1,
                       g2,
                       u0,
                       v0,
                       influx,
                       pix_scale=0.5,
                       include_pixel=False)
        s0 = mod.initialize(s0)

        interp = piff.Polynomial(order=0)
        interp.initialize(stars)

        oldchisq = 0.
        # Iterate solution using interpolator
        for iteration in range(40):
            # Refit PSFs star by star:
            for i, s in enumerate(stars):
                stars[i] = mod.fit(s)
            # Run the interpolator
            interp.solve(stars)
            # Install interpolator solution into each
            # star, recalculate flux, report chisq
            chisq = 0.
            dof = 0
            for i, s in enumerate(stars):
                s = interp.interpolate(s)
                s = mod.reflux(s)
                chisq += s.fit.chisq
                dof += s.fit.dof
                stars[i] = s
                ###print('   chisq=',s.fit.chisq, 'dof=',s.fit.dof)
            print('iteration', iteration, 'chisq=', chisq, 'dof=', dof)
            if oldchisq > 0 and chisq < oldchisq and oldchisq - chisq < dof / 10.:
                break
            else:
                oldchisq = chisq

        # Now use the interpolator to produce a noiseless rendering
        s1 = interp.interpolate(s0)
        s1 = mod.reflux(s1)
        print('Flux, ctr after interpolation: ', s1.fit.flux, s1.fit.center,
              s1.fit.chisq)
        # Note: only about 3 decimal places of accuracy here.
        np.testing.assert_almost_equal(s1.fit.flux / influx, 1.0, decimal=3)

        s1 = mod.draw(s1)
        print('max image abs diff = ',
              np.max(np.abs(s1.image.array - s0.image.array)))
        print('max image abs value = ', np.max(np.abs(s0.image.array)))
        peak = np.max(np.abs(s0.image.array))
        np.testing.assert_almost_equal(s1.image.array / peak,
                                       s0.image.array / peak,
                                       decimal=3)
Example #25
def test_real_galaxy_ideal():
    """Test accuracy of various calculations with fake Gaussian RealGalaxy vs. ideal expectations"""
    ind_fake = 1  # index of mock galaxy (Gaussian) in catalog
    fake_gal_fwhm = 0.7  # arcsec
    fake_gal_shear1 = 0.29  # shear representing intrinsic shape component 1
    fake_gal_shear2 = -0.21  # shear representing intrinsic shape component 2
    # Note: the shape is non-round, to detect possible x<->y issues or other effects that might
    # not show up with a circular galaxy.

    fake_gal_flux = 1000.0
    fake_gal_orig_PSF_fwhm = 0.1  # arcsec
    fake_gal_orig_PSF_shear1 = 0.0
    fake_gal_orig_PSF_shear2 = -0.07

    # read in faked Gaussian RealGalaxy from file
    rgc = galsim.RealGalaxyCatalog(catalog_file, dir=image_dir)
    assert len(rgc) == rgc.getNObjects() == rgc.nobjects == len(rgc.cat)
    rg = galsim.RealGalaxy(rgc, index=ind_fake)
    # as a side note, make sure it behaves okay given a legit RNG and a bad RNG
    # or when trying to specify the galaxy too many ways
    rg_1 = galsim.RealGalaxy(rgc, index=ind_fake, rng=galsim.BaseDeviate(1234))
    rg_2 = galsim.RealGalaxy(rgc, random=True)

    assert_raises(TypeError, galsim.RealGalaxy, rgc, index=ind_fake, rng='foo')
    assert_raises(TypeError, galsim.RealGalaxy, rgc)
    assert_raises(TypeError,
                  galsim.RealGalaxy,
                  rgc,
                  index=ind_fake,
                  flux=12,
                  flux_rescale=2)

    assert_raises(ValueError, galsim.RealGalaxy, rgc, index=ind_fake, id=0)
    assert_raises(ValueError,
                  galsim.RealGalaxy,
                  rgc,
                  index=ind_fake,
                  random=True)
    assert_raises(ValueError, galsim.RealGalaxy, rgc, id=0, random=True)

    # Different RNGs give different random galaxies.
    rg_3 = galsim.RealGalaxy(rgc, random=True, rng=galsim.BaseDeviate(12345))
    rg_4 = galsim.RealGalaxy(rgc, random=True, rng=galsim.BaseDeviate(67890))
    assert rg_3.index != rg_4.index, 'Different seeds did not give different random objects!'

    check_basic(rg, "RealGalaxy", approx_maxsb=True)
    check_basic(rg_1, "RealGalaxy", approx_maxsb=True)
    check_basic(rg_2, "RealGalaxy", approx_maxsb=True)

    do_pickle(
        rgc, lambda x: [
            x.getGalImage(ind_fake),
            x.getPSFImage(ind_fake),
            x.getNoiseProperties(ind_fake)
        ])
    do_pickle(
        rgc,
        lambda x: drawNoise(x.getNoise(ind_fake, rng=galsim.BaseDeviate(123))))
    do_pickle(rgc)
    do_pickle(
        rg, lambda x: [
            x.gal_image, x.psf_image,
            repr(x.noise), x.original_psf.flux, x.original_gal.flux, x.flux
        ])
    do_pickle(rg, lambda x: x.drawImage(nx=20, ny=20, scale=0.7))
    do_pickle(rg_1, lambda x: x.drawImage(nx=20, ny=20, scale=0.7))
    do_pickle(rg)
    do_pickle(rg_1)

    ## To generate the ideal answer, we need to combine the intrinsic shape of the galaxy with the
    ## applied lensing shear using the (admittedly ugly) rule for the addition of distortions:
    targ_pixel_scale = [0.18, 0.25]  # arcsec
    targ_PSF_fwhm = [0.7, 1.0]  # arcsec
    targ_PSF_shear1 = [-0.03, 0.0]
    targ_PSF_shear2 = [0.05, -0.08]
    targ_applied_shear1 = 0.06
    targ_applied_shear2 = -0.04

    fwhm_to_sigma = 1.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))

    (d1, d2) = galsim.utilities.g1g2_to_e1e2(fake_gal_shear1, fake_gal_shear2)
    (d1app, d2app) = galsim.utilities.g1g2_to_e1e2(targ_applied_shear1,
                                                   targ_applied_shear2)
    denom = 1.0 + d1 * d1app + d2 * d2app
    dapp_sq = d1app**2 + d2app**2
    d1tot = (d1 + d1app + d2app / dapp_sq * (1.0 - np.sqrt(1.0 - dapp_sq)) *
             (d2 * d1app - d1 * d2app)) / denom
    d2tot = (d2 + d2app + d1app / dapp_sq * (1.0 - np.sqrt(1.0 - dapp_sq)) *
             (d1 * d2app - d2 * d1app)) / denom

    # Convolve with a range of Gaussians, with and without shear.  (Note: for this test all the
    # original and target ePSFs are Gaussian and there is no separate pixel response, so everything
    # can be calculated analytically.)
    for tps in targ_pixel_scale:
        for tpf in targ_PSF_fwhm:
            for tps1 in targ_PSF_shear1:
                for tps2 in targ_PSF_shear2:
                    print('tps,tpf,tps1,tps2 = ', tps, tpf, tps1, tps2)
                    # make target PSF
                    targ_PSF = galsim.Gaussian(fwhm=tpf).shear(g1=tps1,
                                                               g2=tps2)
                    # simulate image
                    tmp_gal = rg.withFlux(fake_gal_flux).shear(
                        g1=targ_applied_shear1, g2=targ_applied_shear2)
                    final_tmp_gal = galsim.Convolve(targ_PSF, tmp_gal)
                    sim_image = final_tmp_gal.drawImage(scale=tps,
                                                        method='no_pixel')
                    # galaxy sigma, in units of pixels on the final image
                    sigma_ideal = (fake_gal_fwhm / tps) * fwhm_to_sigma
                    # compute analytically the expected galaxy moments
                    # (a sketch of these moment helpers appears after this test):
                    mxx_gal, myy_gal, mxy_gal = ellip_to_moments(
                        d1tot, d2tot, sigma_ideal)
                    # compute analytically the expected PSF moments:
                    targ_PSF_e1, targ_PSF_e2 = galsim.utilities.g1g2_to_e1e2(
                        tps1, tps2)
                    targ_PSF_sigma = (tpf / tps) * fwhm_to_sigma
                    mxx_PSF, myy_PSF, mxy_PSF = ellip_to_moments(
                        targ_PSF_e1, targ_PSF_e2, targ_PSF_sigma)
                    # get expected e1, e2, sigma for the PSF-convolved image
                    tot_e1, tot_e2, tot_sigma = moments_to_ellip(
                        mxx_gal + mxx_PSF, myy_gal + myy_PSF,
                        mxy_gal + mxy_PSF)

                    # compare with images that are expected
                    expected_gaussian = galsim.Gaussian(flux=fake_gal_flux,
                                                        sigma=tps * tot_sigma)
                    expected_gaussian = expected_gaussian.shear(e1=tot_e1,
                                                                e2=tot_e2)
                    expected_image = galsim.ImageD(sim_image.array.shape[0],
                                                   sim_image.array.shape[1])
                    expected_gaussian.drawImage(expected_image,
                                                scale=tps,
                                                method='no_pixel')
                    printval(expected_image, sim_image)
                    np.testing.assert_array_almost_equal(
                        sim_image.array,
                        expected_image.array,
                        decimal=3,
                        err_msg=
                        "Error in comparison of ideal Gaussian RealGalaxy calculations"
                    )
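
# The helpers ellip_to_moments and moments_to_ellip used above are defined elsewhere in the
# test suite.  A minimal sketch of what they might look like, assuming the standard distortion
# convention e1 = (Mxx-Myy)/(Mxx+Myy), e2 = 2*Mxy/(Mxx+Myy), sigma = (Mxx*Myy - Mxy**2)**0.25
# (the actual test-suite implementations may differ):
import numpy as np

def ellip_to_moments(e1, e2, sigma):
    # Invert the distortion definition to get second moments (Mxx, Myy, Mxy).
    T = 2.0 * sigma**2 / np.sqrt(1.0 - e1**2 - e2**2)   # T = Mxx + Myy
    mxx = 0.5 * T * (1.0 + e1)
    myy = 0.5 * T * (1.0 - e1)
    mxy = 0.5 * T * e2
    return mxx, myy, mxy

def moments_to_ellip(mxx, myy, mxy):
    # Forward direction of the same convention.
    T = mxx + myy
    e1 = (mxx - myy) / T
    e2 = 2.0 * mxy / T
    sigma = (mxx * myy - mxy**2)**0.25
    return e1, e2, sigma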
Example #26
def test_gradient_center():
    """Next: fit spatially-varying PSF, with spatially-varying centers to multiple images.
    """
    if __name__ == '__main__':
        fiducial_list = [
            fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat
        ]
    else:
        fiducial_list = [fiducial_moffat]
    for fiducial in fiducial_list:
        print()
        print("fiducial = ", fiducial)
        print()
        mod = piff.GSObjectModel(fiducial, include_pixel=False)

        # Interpolator will be linear
        interp = piff.Polynomial(order=1)

        # Draw stars on a 2d grid of "focal plane" with 0<=u,v<=1
        positions = np.linspace(0., 1., 4)
        influx = 150.
        stars = []
        rng = galsim.BaseDeviate(1234)
        for u in positions:
            # Put gradient in pixel size
            for v in positions:
                # Draw a star at this grid position in the focal plane, with
                # spatially-varying fwhm, g1, g2.
                s = make_data(fiducial,
                              1.0 + u * 0.1 + 0.1 * v,
                              0.1 * u,
                              0.1 * v,
                              0.5 * u,
                              0.5 * v,
                              influx,
                              noise=0.1,
                              pix_scale=0.5,
                              fpu=u,
                              fpv=v,
                              rng=rng,
                              include_pixel=False)
                s = mod.initialize(s)
                stars.append(s)

        # import matplotlib.pyplot as plt
        # fig, axes = plt.subplots(4, 4)
        # for star, ax in zip(stars, axes.ravel()):
        #     ax.imshow(star.data.image.array)
        # plt.show()

        # Also store away a noiseless copy of the PSF, origin of focal plane
        s0 = make_data(fiducial,
                       1.0,
                       0.,
                       0.,
                       0.,
                       0.,
                       influx,
                       pix_scale=0.5,
                       include_pixel=False)
        s0 = mod.initialize(s0)

        # Polynomial doesn't need this, but it should work nonetheless.
        interp.initialize(stars)

        oldchisq = 0.
        # Iterate solution using interpolator
        for iteration in range(40):
            # Refit PSFs star by star:
            for i, s in enumerate(stars):
                stars[i] = mod.fit(s)
            # Run the interpolator
            interp.solve(stars)
            # Install interpolator solution into each
            # star, recalculate flux, report chisq
            chisq = 0.
            dof = 0
            for i, s in enumerate(stars):
                s = interp.interpolate(s)
                s = mod.reflux(s)
                chisq += s.fit.chisq
                dof += s.fit.dof
                stars[i] = s
                ###print('   chisq=',s.fit.chisq, 'dof=',s.fit.dof)
            print('iteration', iteration, 'chisq=', chisq, 'dof=', dof)
            if oldchisq > 0 and np.abs(oldchisq - chisq) < dof / 10.:
                break
            else:
                oldchisq = chisq

        for i, s in enumerate(stars):
            print(i, s.fit.center, s.fit.params[0:2])

        # Now use the interpolator to produce a noiseless rendering
        s1 = interp.interpolate(s0)
        s1 = mod.reflux(s1)
        print('Flux, ctr, chisq after interpolation: ', s1.fit.flux,
              s1.fit.center, s1.fit.chisq)
        # Only about 2 decimal places of accuracy here.
        np.testing.assert_almost_equal(s1.fit.flux / influx, 1.0, decimal=2)

        s1 = mod.draw(s1)
        print('max image abs diff = ',
              np.max(np.abs(s1.image.array - s0.image.array)))
        print('max image abs value = ', np.max(np.abs(s0.image.array)))
        peak = np.max(np.abs(s0.image.array))
        np.testing.assert_almost_equal(s1.image.array / peak,
                                       s0.image.array / peak,
                                       decimal=2)
Example #27
def test_area_norm():
    """Check that area_norm works as expected"""
    f606w_cat = galsim.RealGalaxyCatalog('AEGIS_F606w_catalog.fits',
                                         dir=image_dir)
    f814w_cat = galsim.RealGalaxyCatalog('AEGIS_F814w_catalog.fits',
                                         dir=image_dir)

    psf = galsim.Gaussian(fwhm=0.6)

    rng = galsim.BaseDeviate(5772)
    crg1 = galsim.ChromaticRealGalaxy([f606w_cat, f814w_cat],
                                      random=True,
                                      rng=rng.duplicate())
    crg2 = galsim.ChromaticRealGalaxy([f606w_cat, f814w_cat],
                                      random=True,
                                      rng=rng.duplicate(),
                                      area_norm=galsim.real.HST_area)
    assert crg1 != crg2
    LSST_i = galsim.Bandpass(os.path.join(bppath, "LSST_r.dat"), 'nm')
    obj1 = galsim.Convolve(crg1, psf)
    obj2 = galsim.Convolve(crg2, psf)
    im1 = obj1.drawImage(LSST_i, exptime=1, area=1)
    im2 = obj2.drawImage(LSST_i, exptime=1, area=galsim.real.HST_area)
    printval(im1, im2)
    np.testing.assert_array_almost_equal(im1.array, im2.array)
    np.testing.assert_almost_equal(
        obj1.noise.getVariance(),
        obj2.noise.getVariance() * galsim.real.HST_area**2)

    # area_norm is equivalent to an overall scaling
    crg3 = galsim.ChromaticRealGalaxy([f606w_cat, f814w_cat],
                                      random=True,
                                      rng=rng.duplicate())
    crg3 /= galsim.real.HST_area
    obj3 = galsim.Convolve(crg3, psf)
    im3 = obj3.drawImage(LSST_i, exptime=1, area=galsim.real.HST_area)
    np.testing.assert_array_almost_equal(im3.array, im2.array)
    np.testing.assert_almost_equal(obj3.noise.getVariance(),
                                   obj2.noise.getVariance())

    rg1 = galsim.RealGalaxy(f606w_cat, index=1)
    rg2 = galsim.RealGalaxy(f606w_cat, index=1, area_norm=galsim.real.HST_area)
    assert rg1 != rg2
    obj1 = galsim.Convolve(rg1, psf)
    obj2 = galsim.Convolve(rg2, psf)
    im1 = obj1.drawImage()
    im2 = obj2.drawImage(exptime=1, area=galsim.real.HST_area)
    printval(im1, im2)
    np.testing.assert_array_almost_equal(im1.array, im2.array)
    np.testing.assert_almost_equal(
        obj1.noise.getVariance(),
        obj2.noise.getVariance() * galsim.real.HST_area**2)

    # area_norm is equivalent to an overall scaling
    rg3 = galsim.RealGalaxy(f606w_cat, index=1)
    rg3 /= galsim.real.HST_area
    obj3 = galsim.Convolve(rg3, psf)
    im3 = obj3.drawImage(exptime=1, area=galsim.real.HST_area)
    np.testing.assert_array_almost_equal(im3.array, im2.array)
    np.testing.assert_almost_equal(obj3.noise.getVariance(),
                                   obj2.noise.getVariance())
Example #28
def test_PSE_weight():
    """Test of power spectrum estimation with weights.
    """
    array_size = 300
    n_ell = 8
    grid_spacing = 0.1
    ps_file = os.path.join(datapath, 'cosmo-fid.zmed1.00.out')
    rand_seed = 2718

    tab = galsim.LookupTable.from_file(ps_file)
    ps = galsim.PowerSpectrum(tab, units=galsim.radians)
    g1, g2 = ps.buildGrid(grid_spacing=grid_spacing, ngrid=array_size, units=galsim.degrees,
                          rng=galsim.BaseDeviate(rand_seed))

    pse = galsim.pse.PowerSpectrumEstimator(N=array_size,
                                            sky_size_deg=array_size*grid_spacing,
                                            nbin=n_ell)

    ell, P_e1, P_b1, P_eb1, P_theory = pse.estimate(g1, g2, weight_EE=True, weight_BB=True,
                                                    theory_func=tab)
    print('P_e1 = ',P_e1)
    print('rel_diff = ',(P_e1-P_theory)/P_theory)
    print('rel_diff using P[1] = ',(P_e1-P_theory)/P_theory[1])
    # The agreement here is fairly loose; it is not obvious that these should match more closely.
    eb_tolerance = 0.4
    zero_tolerance = 0.03

    np.testing.assert_allclose(P_e1[1:], P_theory[1:], rtol=eb_tolerance,
                               err_msg='Weighted PSE returned wrong E power')

    np.testing.assert_allclose(P_b1/P_theory, 0., atol=zero_tolerance,
                               err_msg='Weighted PSE found B power')
    print(P_eb1/P_theory)
    np.testing.assert_allclose(P_eb1/P_theory, 0., atol=zero_tolerance,
                               err_msg='Weighted PSE found EB cross-power')

    # Also check the case where P_e=P_b.
    ps = galsim.PowerSpectrum(tab, tab, units=galsim.radians)
    g1, g2 = ps.buildGrid(grid_spacing=grid_spacing, ngrid=array_size, units=galsim.degrees,
                          rng=galsim.BaseDeviate(rand_seed))
    ell, P_e2, P_b2, P_eb2 = pse.estimate(g1, g2, weight_EE=True, weight_BB=True)
    np.testing.assert_allclose(P_e2[1:], P_theory[1:], rtol=eb_tolerance,
                               err_msg='Weighted PSE returned wrong E power')
    np.testing.assert_allclose(P_b2[1:], P_theory[1:], rtol=eb_tolerance,
                               err_msg='Weighted PSE returned wrong B power')
    np.testing.assert_allclose(P_eb2[1:]/P_theory[1:], 0., atol=zero_tolerance,
                               err_msg='Weighted PSE found EB cross-power')

    # And check the case where P_b is nonzero and P_e is zero.
    ps = galsim.PowerSpectrum(e_power_function=None, b_power_function=tab,
                              units=galsim.radians)
    g1, g2 = ps.buildGrid(grid_spacing=grid_spacing, ngrid=array_size, units=galsim.degrees,
                          rng=galsim.BaseDeviate(rand_seed))
    ell, P_e3, P_b3, P_eb3 = pse.estimate(g1, g2, weight_EE=True, weight_BB=True)
    np.testing.assert_allclose(P_e3[1:]/P_theory[1:], 0., atol=zero_tolerance,
                               err_msg='Weighted PSE found E power when it should be zero')
    np.testing.assert_allclose(P_b3[1:], P_theory[1:], rtol=eb_tolerance,
                               err_msg='Weighted PSE returned wrong B power')
    np.testing.assert_allclose(P_eb3[1:]/P_theory[1:], 0., atol=zero_tolerance,
                               err_msg='Weighted PSE found EB cross-power')

    assert_raises(TypeError, pse.estimate, g1, g2, weight_EE=8)
    assert_raises(TypeError, pse.estimate, g1, g2, weight_BB='yes')

    # If N is fairly small, then we can get zeros in the counts, which raises an error
    array_size = 5
    g1, g2 = ps.buildGrid(grid_spacing=grid_spacing, ngrid=array_size, units=galsim.degrees,
                          rng=galsim.BaseDeviate(rand_seed))
    pse = galsim.pse.PowerSpectrumEstimator(N=array_size,
                                            sky_size_deg=array_size*grid_spacing,
                                            nbin=n_ell)
    with assert_raises(galsim.GalSimError):
        pse.estimate(g1,g2)
Example #29
def test_rescale():
    """Test the flux rescaling of a Sersic profile against a known result.
    """
    savedImg = galsim.fits.read(os.path.join(imgdir, "sersic_doubleflux.fits"))
    dx = 0.2
    myImg = galsim.ImageF(savedImg.bounds, scale=dx)
    myImg.setCenter(0,0)

    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1)
    sersic.withFlux(2).drawImage(myImg,scale=dx, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
            myImg.array, savedImg.array, 5,
            err_msg="Using GSObject withFlux disagrees with expected result")
    np.testing.assert_almost_equal(
            myImg.array.max(), sersic.withFlux(2).max_sb, 5,
            err_msg="rescaled profile max_sb did not match maximum pixel value")

    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1)
    sersic *= 2
    sersic.drawImage(myImg,scale=dx, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
            myImg.array, savedImg.array, 5,
            err_msg="Using GSObject *= 2 disagrees with expected result")
    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1)
    sersic2 = sersic * 2
    sersic2.drawImage(myImg,scale=dx, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
            myImg.array, savedImg.array, 5,
            err_msg="Using GSObject obj * 2 disagrees with expected result")
    sersic2 = 2 * sersic
    sersic2.drawImage(myImg,scale=dx, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
            myImg.array, savedImg.array, 5,
            err_msg="Using GSObject 2 * obj disagrees with expected result")

    # Check with default_params
    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1, gsparams=default_params)
    sersic *= 2
    sersic.drawImage(myImg,scale=dx, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
            myImg.array, savedImg.array, 5,
            err_msg="Using GSObject *= 2 with default_params disagrees with expected result")
    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1, gsparams=galsim.GSParams())
    sersic *= 2
    sersic.drawImage(myImg,scale=dx, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
            myImg.array, savedImg.array, 5,
            err_msg="Using GSObject *= 2 with GSParams() disagrees with expected result")

    # Can also get a flux of 2 by drawing flux=1 twice with add_to_image=True
    sersic = galsim.Sersic(n=3, flux=1, half_light_radius=1)
    sersic.drawImage(myImg,scale=dx, method="sb", use_true_center=False)
    sersic.drawImage(myImg,scale=dx, method="sb",add_to_image=True,
                use_true_center=False)
    np.testing.assert_array_almost_equal(
            myImg.array, savedImg.array, 5,
            err_msg="Drawing with add_to_image=True disagrees with expected result")

    # With lower folding_threshold and maxk_threshold, the calculated flux should come out right
    # so long as we also convolve by a pixel:
    gsp1 = galsim.GSParams(folding_threshold=1.e-3, maxk_threshold=5.e-4)
    sersic_acc = galsim.Sersic(n=3, flux=1, half_light_radius=1, gsparams=gsp1)
    myImg2 = sersic_acc.drawImage(scale=dx, use_true_center=False)
    print(myImg2.array.sum(), myImg2.added_flux)
    np.testing.assert_almost_equal(myImg2.array.sum(), 1., 3,
            err_msg="Drawing with gsp1 results in wrong flux")
    np.testing.assert_almost_equal(myImg2.added_flux, 1., 3,
            err_msg="Drawing with gsp1 returned wrong added_flux")
    myImg2 = sersic_acc.drawImage(myImg2, add_to_image=True, use_true_center=False)
    print(myImg2.array.sum(), myImg2.added_flux)
    np.testing.assert_almost_equal(myImg2.array.sum(), 2., 3,
            err_msg="Drawing with add_to_image=True results in wrong flux")
    np.testing.assert_almost_equal(myImg2.added_flux, 1., 3,
            err_msg="Drawing with add_to_image=True returned wrong added_flux")

    # Check that the flux works out when adding multiple times.
    # With a Gaussian, we can take the thresholds even lower and get another digit of accuracy.
    gsp2 = galsim.GSParams(folding_threshold=1.e-5, maxk_threshold=1.e-5)
    gauss = galsim.Gaussian(flux=1.e5, sigma=2., gsparams=gsp2)
    myImg2 = gauss.drawImage(scale=dx, use_true_center=False)
    print('image size = ',myImg2.array.shape)
    print(myImg2.array.sum(), myImg2.added_flux)
    np.testing.assert_almost_equal(myImg2.array.sum()/1.e5, 1., 4,
            err_msg="Drawing Gaussian results in wrong flux")
    np.testing.assert_almost_equal(myImg2.added_flux/1.e5, 1., 4,
            err_msg="Drawing Gaussian returns wrong added_flux")
    myImg2 = gauss.drawImage(myImg2, add_to_image=True, use_true_center=False)
    print(myImg2.array.sum(), myImg2.added_flux)
    np.testing.assert_almost_equal(myImg2.array.sum()/1.e5, 2., 4,
            err_msg="Drawing Gaussian with add_to_image=True results in wrong flux")
    np.testing.assert_almost_equal(myImg2.added_flux/1.e5, 1., 4,
            err_msg="Drawing Gaussian with add_to_image=True returns wrong added_flux")
    rng = galsim.BaseDeviate(12345)
    myImg2 = gauss.drawImage(myImg2, add_to_image=True, poisson_flux=False, rng=rng, method='phot')
    print(myImg2.array.sum(), myImg2.added_flux)
    np.testing.assert_almost_equal(myImg2.array.sum()/1.e5, 3., 4,
            err_msg="Drawing Gaussian with method=phot, add_to_image=True, poisson_flux=False "+
                    "results in wrong flux")
    np.testing.assert_almost_equal(myImg2.added_flux/1.e5, 1., 4,
            err_msg="Drawing Gaussian with method=phot, add_to_image=True, poisson_flux=False "+
                    "returned wrong added_flux")
    myImg2 = gauss.drawImage(myImg2, add_to_image=True, rng=rng, method='phot')
    print(myImg2.array.sum(), myImg2.added_flux)
    np.testing.assert_almost_equal(myImg2.array.sum()/1.e5, 4., 1,
            err_msg="Drawing Gaussian with method=phot, add_to_image=True, poisson_flux=True "+
                    "results in wrong flux")
    np.testing.assert_almost_equal(myImg2.added_flux/1.e5, 1., 1,
            err_msg="Drawing Gaussian with method=phot, add_to_image=True, poisson_flux=True "+
                    "returned wrong added_flux")
    np.testing.assert_almost_equal(myImg2.array.sum()/1.e5, 3.+myImg2.added_flux/1.e5, 4,
            err_msg="Drawing Gaussian with method=phot, add_to_image=True results in wrong flux "+
                    "according to the returned added_flux")

    # Can also get a flux of 2 using gain = 0.5
    sersic.drawImage(myImg, scale=dx, gain=0.5, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
            myImg.array, savedImg.array, 5,
            err_msg="Drawing with gain=0.5 disagrees with expected result")
    myImg2 = sersic_acc.drawImage(scale=dx, gain=0.5, use_true_center=False)
    np.testing.assert_almost_equal(myImg2.array.sum(), 2., 3,
            err_msg="Drawing with gain=0.5 results in wrong flux")
    myImg2 = sersic_acc.drawImage(scale=dx, gain=4., use_true_center=False)
    np.testing.assert_almost_equal(myImg2.array.sum(), 0.25, 3,
            err_msg="Drawing with gain=4. results in wrong flux")
    # Check add_to_image in conjunction with gain
    sersic_acc.drawImage(myImg2, gain=4., add_to_image=True, use_true_center=False)
    np.testing.assert_almost_equal(myImg2.array.sum(), 0.5, 3,
            err_msg="Drawing with gain=4. results in wrong flux")

    # Convolve with a small gaussian to smooth out the central peak.
    sersic_smooth = galsim.Convolve(sersic2, galsim.Gaussian(sigma=0.3))
    check_basic(sersic_smooth, "scaled Sersic")

    # Test photon shooting.
    do_shoot(sersic_smooth,myImg,"scaled Sersic")

    # Test kvalues
    do_kvalue(sersic2,myImg, "scaled Sersic")

    # Check picklability
    do_pickle(sersic2, lambda x: x.drawImage())
    do_pickle(sersic2)
Example #30
def main(argv):
    # Where to find and output data.
    path, filename = os.path.split(__file__)
    outpath = os.path.abspath(os.path.join(path, "output/"))

    # Just use a few galaxies, to save time.  Note that we are going to put 4000 galaxy images into
    # our big image, so if we have n_use=10, each galaxy will appear 400 times.  Users who want a
    # more interesting image with greater variation in the galaxy population can change `n_use` to
    # something larger (but it should be <=100, the number of galaxies in this small example
    # catalog).  With 4000 galaxies in a 4k x 4k image with the WFIRST pixel scale, the effective
    # galaxy number density is 74/arcmin^2.  This is not the number density that is expected for a
    # sample that is so bright (I<23.5) but it makes the image more visually interesting.  One could
    # think of it as what you'd get if you added up several images at once, making the images for a
    # sample that is much deeper have the same S/N as that for an I<23.5 sample in a single image.
    n_use = 10
    n_tot = 4000

    # Default is to use all filters.  Specify e.g. 'YJH' to only do Y106, J129, and H158.
    use_filters = None

    # quick and dirty command line parsing.
    for var in argv:
        if var.startswith('data='): datapath = var[5:]
        if var.startswith('out='): outpath = var[4:]
        if var.startswith('nuse='): n_use = int(var[5:])
        if var.startswith('ntot='): n_tot = int(var[5:])
        if var.startswith('filters='): use_filters = var[8:].upper()

    # Make output directory if not already present.
    if not os.path.isdir(outpath):
        os.mkdir(outpath)

    # In non-script code, use getLogger(__name__) at module scope instead.
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo13")

    # Initialize (pseudo-)random number generator.
    random_seed = 123456
    rng = galsim.BaseDeviate(random_seed)

    # Generate a Poisson noise model.
    poisson_noise = galsim.PoissonNoise(rng)
    logger.info('Poisson noise model created.')

    # Read in the WFIRST filters, setting an AB zeropoint appropriate for this telescope given its
    # diameter and (since we didn't use any keyword arguments to modify this) using the typical
    # exposure time for WFIRST images.  By default, this routine truncates the parts of the
    # bandpasses that are near 0 at the edges, and thins them by the default amount.
    filters = wfirst.getBandpasses(AB_zeropoint=True)
    logger.debug('Read in WFIRST imaging filters.')

    logger.info('Reading from a parametric COSMOS catalog.')
    # Read in a galaxy catalog - just a random subsample of 100 galaxies for F814W<23.5 from COSMOS.
    cat_file_name = 'real_galaxy_catalog_23.5_example_fits.fits'
    dir = 'data'
    # Use the routine that can take COSMOS real or parametric galaxy information, and tell it we
    # want parametric galaxies that represent an I<23.5 sample.
    cat = galsim.COSMOSCatalog(cat_file_name, dir=dir, use_real=False)
    logger.info('Read in %d galaxies from catalog' % cat.nobjects)

    # Here we carry out the initial steps that are necessary to get a fully chromatic PSF.  We use
    # the getPSF() routine in the WFIRST module, which knows all about the telescope parameters
    # (diameter, bandpasses, obscuration, etc.).  Note that we arbitrarily choose a single SCA
    # (Sensor Chip Assembly) rather than all of them, for faster calculations, and use a simple
    # representation of the struts for faster calculations.  To do a more exact calculation of the
    # chromaticity and pupil plane configuration, remove the `approximate_struts` and the `n_waves`
    # keyword from the call to getPSF():
    use_SCA = 7  # This could be any number from 1...18
    logger.info('Doing expensive pre-computation of PSF.')
    t1 = time.time()
    logger.setLevel(logging.DEBUG)
    # Need to make a separate PSF for each filter.  We are, however, ignoring the
    # position-dependence of the PSF within each SCA, just using the PSF at the center of the SCA
    # (default kwargs).
    PSFs = {}
    for filter_name, filter_ in filters.items():
        logger.info('PSF pre-computation for SCA %d, filter %s.' %
                    (use_SCA, filter_name))
        PSFs[filter_name] = wfirst.getPSF(use_SCA,
                                          filter_name,
                                          approximate_struts=True,
                                          n_waves=10,
                                          logger=logger)
    logger.setLevel(logging.INFO)
    t2 = time.time()
    logger.info('Done PSF precomputation in %.1f seconds!' % (t2 - t1))
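
    # As a sketch of the more exact alternative mentioned above, the call inside the loop
    # could instead omit the approximation keywords (at the cost of a much slower
    # pre-computation), e.g.:
    #     PSFs[filter_name] = wfirst.getPSF(use_SCA, filter_name, logger=logger)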

    # Define the size of the postage stamp that we use for each individual galaxy within the larger
    # image, and for the PSF images.
    stamp_size = 256

    # We choose a particular (RA, dec) location on the sky for our observation.
    ra_targ = 90. * galsim.degrees
    dec_targ = -10. * galsim.degrees
    targ_pos = galsim.CelestialCoord(ra=ra_targ, dec=dec_targ)
    # Get the WCS for an observation at this position.  We are not supplying a date, so the routine
    # will assume it's the vernal equinox.  We are also not supplying a position angle for the
    # observatory, which means that it will just find the optimal one (the one that has the solar
    # panels pointed most directly towards the Sun given this targ_pos and date).  The output of
    # this routine is a dict of WCS objects, one for each SCA.  We then take the WCS for the SCA
    # that we are using.
    wcs_list = wfirst.getWCS(world_pos=targ_pos, SCAs=use_SCA)
    wcs = wcs_list[use_SCA]
    # We need to find the center position for this SCA.  We'll tell it to give us a CelestialCoord
    # corresponding to (X, Y) = (wfirst.n_pix/2, wfirst.n_pix/2).
    SCA_cent_pos = wcs.toWorld(
        galsim.PositionD(wfirst.n_pix / 2, wfirst.n_pix / 2))

    # We randomly distribute points in (X, Y) on the CCD.
    # If we had a real galaxy catalog with positions in terms of RA, dec we could use wcs.toImage()
    # to find where those objects should be in terms of (X, Y).
    pos_rng = galsim.UniformDeviate(random_seed)
    # Make a list of (X, Y, F814W magnitude, n_rot, flip) values.
    # (X, Y) give the position of the galaxy centroid (or the center of the postage stamp into which
    # we draw the galaxy) in the big image.
    # F814W magnitudes are randomly drawn from the catalog, and are used to create a more realistic
    # flux distribution for the galaxies instead of just having the 10 flux values for the galaxies
    # we have chosen to draw.
    # n_rot says how many 90 degree rotations to include for a given realization of each galaxy, so
    # it doesn't appear completely identical each time we put it in the image.
    # flip is a random number that will determine whether we include an x-y flip for this appearance
    # of the galaxy or not.
    x_stamp = []
    y_stamp = []
    mag_stamp = []
    n_rot_stamp = []
    flip_stamp = []
    for i_gal in range(n_tot):
        x_stamp.append(pos_rng() * wfirst.n_pix)
        y_stamp.append(pos_rng() * wfirst.n_pix)
        # Note that we could use wcs.toWorld() to get the (RA, dec) for these (x, y) positions.  Or,
        # if we had started with (RA, dec) positions, we could have used wcs.toImage() to get the
        # CCD coordinates for those positions.
        mag_stamp.append(cat.param_cat['mag_auto'][int(pos_rng() *
                                                       cat.nobjects)])
        n_rot_stamp.append(int(4 * pos_rng()))
        flip_stamp.append(pos_rng())
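
    # As a sketch of the note above (not used below): converting one of these pixel positions
    # to sky coordinates and back with this WCS could look like
    #     world_pos_example = wcs.toWorld(galsim.PositionD(x_stamp[0], y_stamp[0]))
    #     image_pos_example = wcs.toImage(world_pos_example)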

    # Make the 2-component parametric GSObjects for each object, including chromaticity (roughly
    # appropriate SEDs per galaxy component, at the appropriate galaxy redshift).  Note that since
    # the PSF is position-independent within the SCA, we can simply do the convolution with that PSF
    # now instead of using a different one for each position.  We also have to include the correct
    # flux scaling: The catalog returns objects that would be observed by HST in 1 second
    # exposures. So for our telescope we scale up by the relative area and exposure time.  Note that
    # what is important is the *effective* area after taking into account obscuration.
    logger.info(
        'Processing the objects in the catalog to get GSObject representations'
    )
    # Choose a random set of unique indices in the catalog (will be the same each time script is
    # run, due to use of the same random seed):
    rand_indices = []
    while len(rand_indices) < n_use:
        tmp_ind = int(pos_rng() * cat.nobjects)
        if tmp_ind not in rand_indices:
            rand_indices.append(tmp_ind)
    obj_list = cat.makeGalaxy(rand_indices,
                              chromatic=True,
                              gal_type='parametric')
    hst_eff_area = 2.4**2 * (1. - 0.33**2)
    wfirst_eff_area = galsim.wfirst.diameter**2 * (
        1. - galsim.wfirst.obscuration**2)
    flux_scaling = (wfirst_eff_area / hst_eff_area) * wfirst.exptime
    mag_list = []
    for ind in range(len(obj_list)):
        # First, let's check what magnitude this object has in F814W.  We want to do this because
        # (to inject some variety into our images) we are going to rescale the fluxes in all bands
        # for different instances of this galaxy in the final image in order to get a reasonable S/N
        # distribution.  So we need to save the original magnitude in F814W, to compare with a
        # randomly drawn one from the catalog.  This is not something that most users would need to
        # do.
        mag_list.append(
            cat.param_cat['mag_auto'][cat.orig_index[rand_indices[ind]]])

    # Calculate the sky level for each filter, and draw the PSF and the galaxies through the
    # filters.
    for filter_name, filter_ in filters.items():
        if use_filters is not None and filter_name[0] not in use_filters:
            logger.info('Skipping filter {0}.'.format(filter_name))
            continue

        logger.info('Beginning work for {0}.'.format(filter_name))

        # Drawing PSF.  Note that the PSF object intrinsically has a flat SED, so if we convolve it
        # with a galaxy, it will properly take on the SED of the galaxy.  For the sake of this demo,
        # we will simply convolve with a 'star' that has a flat SED and unit flux in this band, so
        # that the PSF image will be normalized to unit flux. This does mean that the PSF image
        # being drawn here is not quite the right PSF for the galaxy.  Indeed, the PSF for the
        # galaxy effectively varies within it, since it differs for the bulge and the disk.  To make
        # a real image, one would have to choose SEDs for stars and convolve with a star that has a
        # reasonable SED, but we just draw with a flat SED for this demo.
        out_filename = os.path.join(outpath,
                                    'demo13_PSF_{0}.fits'.format(filter_name))
        # Generate a point source.
        point = galsim.DeltaFunction(flux=1.)
        # Use a flat SED here, but could use something else.  A stellar SED for instance.
        # Or a typical galaxy SED.  Depending on your purpose for drawing the PSF.
        star_sed = galsim.SED(lambda x: 1, 'nm', 'flambda').withFlux(
            1., filter_)  # Give it unit flux in this filter.
        star = galsim.Convolve(point * star_sed, PSFs[filter_name])
        img_psf = galsim.ImageF(64, 64)
        star.drawImage(bandpass=filter_,
                       image=img_psf,
                       scale=wfirst.pixel_scale)
        img_psf.write(out_filename)
        logger.debug(
            'Created PSF with flat SED for {0}-band'.format(filter_name))
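
        # As a sketch of the alternative described above: to draw the PSF with a more realistic
        # stellar SED, one could swap the flat SED for a stellar one.  The file name below is
        # hypothetical; use whatever SED data you have available, e.g.:
        #     stellar_sed = galsim.SED('some_star.sed', wave_type='nm',
        #                              flux_type='flambda').withFlux(1., filter_)
        #     star = galsim.Convolve(point * stellar_sed, PSFs[filter_name])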

        # Set up the full image that will contain all the individual galaxy images, with information
        # about WCS:
        final_image = galsim.ImageF(wfirst.n_pix, wfirst.n_pix, wcs=wcs)

        # Draw the galaxies into the image.
        for i_gal in range(n_use):
            logger.info(
                'Drawing image for the object at row %d in the input catalog' %
                i_gal)

            # We want to only draw the galaxy once (for speed), not over and over with different
            # sub-pixel offsets.  For this reason we ignore the sub-pixel offset entirely.  Note
            # that we are setting the postage stamp to have the average WFIRST pixel scale.  This is
            # simply an approximation for the purpose of speed; really, one should draw directly
            # into final_image, which has the appropriate WCS for WFIRST.  In that case, the image
            # of the galaxy might look different in different parts of the detector due to the WCS
            # (including distortion), and we would have to re-draw each time.  To keep the demo
            # relatively quick, we are just using the approximate average pixel scale and drawing
            # once.
            stamp = galsim.Image(stamp_size,
                                 stamp_size,
                                 scale=wfirst.pixel_scale)

            # Convolve the chromatic galaxy and the chromatic PSF for this bandpass, and rescale flux.
            final = galsim.Convolve(flux_scaling * obj_list[i_gal],
                                    PSFs[filter_name])
            final.drawImage(filter_, image=stamp)
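
            # As a sketch of the more exact approach described above, one could re-draw the
            # galaxy at each placement using the local WCS at that position (inside the
            # placement loop below), e.g.:
            #     local_wcs = wcs.local(image_pos=galsim.PositionD(ix, iy))
            #     stamp = final.drawImage(filter_, nx=stamp_size, ny=stamp_size,
            #                             wcs=local_wcs)
            # at the cost of one draw per placement instead of one per galaxy.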

            # Have to find where to place it:
            for i_gal_use in range(i_gal * n_tot // n_use,
                                   (i_gal + 1) * n_tot // n_use):
                # Account for the fractional part of the position:
                ix = int(math.floor(x_stamp[i_gal_use] + 0.5))
                iy = int(math.floor(y_stamp[i_gal_use] + 0.5))
                # We don't actually use this offset.
                offset = galsim.PositionD(x_stamp[i_gal_use] - ix,
                                          y_stamp[i_gal_use] - iy)

                # Create a nominal bound for the postage stamp given the integer part of the
                # position.
                stamp_bounds = galsim.BoundsI(ix - 0.5 * stamp_size,
                                              ix + 0.5 * stamp_size - 1,
                                              iy - 0.5 * stamp_size,
                                              iy + 0.5 * stamp_size - 1)

                # Find the overlapping bounds between the large image and the individual postage
                # stamp.
                bounds = stamp_bounds & final_image.bounds

                # Just to inject a bit of variety into the image, so it isn't *quite* as obvious
                # that we've repeated the same 10 objects over and over, we randomly rotate the
                # postage stamp by some factor of 90 degrees and possibly include a random flip.
                if flip_stamp[i_gal_use] > 0.5:
                    new_arr = numpy.ascontiguousarray(
                        numpy.rot90(stamp.array, n_rot_stamp[i_gal_use]))
                else:
                    new_arr = numpy.ascontiguousarray(
                        numpy.fliplr(
                            numpy.rot90(stamp.array, n_rot_stamp[i_gal_use])))
                stamp_rot = galsim.Image(new_arr, scale=stamp.scale)
                stamp_rot.setOrigin(
                    galsim.PositionI(stamp_bounds.xmin, stamp_bounds.ymin))

                # Rescale the flux to match that of a randomly chosen galaxy in the catalog, but
                # keeping the same SED as for this particular galaxy.  This gives a bit more
                # variety in the flux values and SNR of the galaxies in the image without having
                # to render images of many more objects.  (Use a distinct variable name here so
                # the telescope flux_scaling defined above is not clobbered.)
                mag_flux_scaling = 10**(-0.4 *
                                        (mag_stamp[i_gal_use] - mag_list[i_gal]))

                # Copy the image into the right place in the big image.
                final_image[bounds] += mag_flux_scaling * stamp_rot[bounds]

        # Now we're done with the per-galaxy drawing for this image.  The rest will be done for the
        # entire image at once.
        logger.info(
            'Postage stamps of all galaxies drawn on a single big image for this filter.'
        )
        logger.info('Adding the sky level, noise and detector non-idealities.')

        # First we get the amount of zodiacal light for a position corresponding to the center of
        # this SCA.  The results are provided in units of e-/arcsec^2, using the default WFIRST
        # exposure time since we did not explicitly specify one.  Then we multiply this by a factor
        # >1 to account for the amount of stray light that is expected.  If we do not provide a date
        # for the observation, then it will assume that it's the vernal equinox (sun at (0,0) in
        # ecliptic coordinates) in 2025.
        sky_level = wfirst.getSkyLevel(filters[filter_name],
                                       world_pos=SCA_cent_pos)
        sky_level *= (1.0 + wfirst.stray_light_fraction)
        # Make an image of the sky that takes into account the spatially variable pixel scale.  Note
        # that makeSkyImage() takes a bit of time.  If you do not care about the variable pixel
        # scale, you could simply compute an approximate sky level in e-/pix by multiplying
        # sky_level by wfirst.pixel_scale**2, and add that to final_image.
        sky_image = final_image.copy()
        wcs.makeSkyImage(sky_image, sky_level)
        # This image is in units of e-/pix.  Finally we add the expected thermal backgrounds in this
        # band.  These are provided in e-/pix/s, so we have to multiply by the exposure time.
        sky_image += wfirst.thermal_backgrounds[filter_name] * wfirst.exptime
        # Adding sky level to the image.
        final_image += sky_image
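
        # As a sketch of the shortcut mentioned above: if the spatially variable pixel scale
        # were not of interest, an approximate flat sky level could be added instead, e.g.:
        #     approx_sky_level_pix = sky_level * wfirst.pixel_scale**2
        #     final_image += approx_sky_level_pix
        # which avoids the (slower) makeSkyImage() call.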

        # Now that all sources of signal (from astronomical objects and background) have been added
        # to the image, we can start adding noise and detector effects.  There is a utility,
        # galsim.wfirst.allDetectorEffects(), that can apply ALL implemented noise and detector
        # effects in the proper order.  Here we step through the process and explain these in a bit
        # more detail without using that utility.

        # First, we include the expected Poisson noise:
        final_image.addNoise(poisson_noise)
        # At this point in the image generation process, an integer number of photons gets
        # detected, unless any of the pre-noise values were > 2^30. That's when our Poisson
        # implementation switches over to the Gaussian approximation, which won't necessarily
        # produce integers.  This situation does not arise in practice for this demo, but if it did,
        # we could use final_image.quantize() to enforce integer pixel values.

        # The subsequent steps account for the non-ideality of the detectors.

        # 1) Reciprocity failure:
        # Reciprocity, in the context of photography, is the inverse relationship between the
        # incident flux (I) of a source object and the exposure time (t) required to produce a given
        # response (p) in the detector, i.e., p = I*t. However, in NIR detectors this relation does
        # not always hold. The pixel response to a high flux is larger than its response to a low
        # flux. This flux-dependent non-linearity is known as 'reciprocity failure', and the
        # approximate amount of reciprocity failure for the WFIRST detectors is known, so we can
        # include this detector effect in our images.

        if diff_mode:
            # Save the image before applying the transformation to see the difference
            save_image = final_image.copy()

        # If we had wanted to, we could have specified a different exposure time than the default
        # one for WFIRST, but otherwise the following routine does not take any arguments.
        wfirst.addReciprocityFailure(final_image)
        logger.debug('Included reciprocity failure in {0}-band image'.format(
            filter_name))

        if diff_mode:
            # Isolate the changes due to reciprocity failure.
            diff = final_image - save_image

            out_filename = os.path.join(
                outpath, 'demo13_RecipFail_{0}.fits'.format(filter_name))
            final_image.write(out_filename)
            out_filename = os.path.join(
                outpath, 'demo13_diff_RecipFail_{0}.fits'.format(filter_name))
            diff.write(out_filename)

        # 2) Adding dark current to the image:
        # Even when the detector is unexposed to any radiation, the electron-hole pairs that
        # are generated within the depletion region due to finite temperature are swept by the
        # high electric field at the junction of the photodiode. This small reverse bias
        # leakage current is referred to as 'dark current'. It is specified by the average
        # number of electrons reaching the detectors per unit time and has an associated
        # Poisson noise since it is a random event.
        dark_current = wfirst.dark_current * wfirst.exptime
        dark_noise = galsim.DeviateNoise(
            galsim.PoissonDeviate(rng, dark_current))
        final_image.addNoise(dark_noise)

        # NOTE: Sky level and dark current might appear to be a constant background that can be
        # simply subtracted. However, they contribute to the shot noise and matter for the
        # non-linear effects that follow. Hence, these must be included at this stage of the
        # image generation process. We subtract these backgrounds in the end.

        # 3) Applying a quadratic non-linearity:
        # In order to convert the units from electrons to ADU, we must use the gain factor. The gain
        # has a weak dependency on the charge present in each pixel. This dependency is accounted
        # for by changing the pixel values (in electrons) and applying a constant nominal gain
        # later, which is unity in our demo.

        # Save the image before applying the transformation to see the difference:
        if diff_mode:
            save_image = final_image.copy()

        # Apply the WFIRST nonlinearity routine, which knows all about the nonlinearity expected in
        # the WFIRST detectors.
        wfirst.applyNonlinearity(final_image)
        # Note that users who wish to apply some other nonlinearity function (perhaps for other NIR
        # detectors, or for CCDs) can use the more general nonlinearity routine, which uses the
        # following syntax:
        # final_image.applyNonlinearity(NLfunc=NLfunc)
        # with NLfunc being a callable function that specifies how the output image pixel values
        # should relate to the input ones.
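        # A sketch of a hypothetical NLfunc (the quadratic coefficient below is made up for
        # illustration, not a measured detector property):
        #     NLfunc = lambda x: x - 3.e-7 * x**2
        #     final_image.applyNonlinearity(NLfunc=NLfunc)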
        logger.debug(
            'Applied nonlinearity to {0}-band image'.format(filter_name))
        if diff_mode:
            diff = final_image - save_image

            out_filename = os.path.join(
                outpath, 'demo13_NL_{0}.fits'.format(filter_name))
            final_image.write(out_filename)
            out_filename = os.path.join(
                outpath, 'demo13_diff_NL_{0}.fits'.format(filter_name))
            diff.write(out_filename)

            # Save this image to do the diff after applying IPC.
            save_image = final_image.copy()

        # 4) Including Interpixel capacitance:
        # The voltage read at a given pixel location is influenced by the charges present in the
        # neighboring pixel locations due to capacitive coupling of sense nodes. This interpixel
        # capacitance effect is modeled as a linear effect that is described as a convolution of a
        # 3x3 kernel with the image.  The WFIRST IPC routine knows about the kernel already, so the
        # user does not have to supply it.
        wfirst.applyIPC(final_image)
        logger.debug('Applied interpixel capacitance to {0}-band image'.format(
            filter_name))

        if diff_mode:
            # Isolate the changes due to the interpixel capacitance effect.
            diff = final_image - save_image

            out_filename = os.path.join(
                outpath, 'demo13_IPC_{0}.fits'.format(filter_name))
            final_image.write(out_filename)
            out_filename = os.path.join(
                outpath, 'demo13_diff_IPC_{0}.fits'.format(filter_name))
            diff.write(out_filename)

        # 5) Adding read noise:
        # Read noise is the noise due to the on-chip amplifier that converts the charge into an
        # analog voltage.  We already applied the Poisson noise due to the sky level, so read noise
        # should just be added as Gaussian noise:
        read_noise = galsim.GaussianNoise(rng, sigma=wfirst.read_noise)
        final_image.addNoise(read_noise)
        logger.debug('Added readnoise to {0}-band image'.format(filter_name))

        # We divide by the gain to convert from e- to ADU. Currently, the gain value in the WFIRST
        # module is just set to 1, since we don't know what the exact gain will be, although it is
        # expected to be approximately 1. Eventually, this may change when the camera is assembled,
        # and there may be a different value for each SCA. For now, there is just a single number,
        # which is equal to 1.
        final_image /= wfirst.gain

        # Finally, the analog-to-digital converter reads in an integer value.
        final_image.quantize()
        # Note that the image type after this step is still a float.  If we want to actually
        # get integer values, we can do new_img = galsim.Image(final_image, dtype=int)

        # Since many people are used to viewing background-subtracted images, we provide a
        # version with the background subtracted (also rounding that to an int).
        sky_image.quantize()
        tot_sky_image = (sky_image + round(dark_current)) / wfirst.gain
        tot_sky_image.quantize()
        final_image -= tot_sky_image

        logger.debug(
            'Subtracted background for {0}-band image'.format(filter_name))
        # Write the final image to a file.
        out_filename = os.path.join(outpath,
                                    'demo13_{0}.fits'.format(filter_name))
        final_image.write(out_filename)

        logger.info('Completed {0}-band image.'.format(filter_name))

    logger.info(
        'You can display the output in ds9 with a command line that looks something like:'
    )
    logger.info('ds9 -zoom 0.5 -scale limits -500 1000 -rgb ' +
                '-red output/demo13_H158.fits ' +
                '-green output/demo13_J129.fits ' +
                '-blue output/demo13_Y106.fits')
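

# A standard entry point is not shown above; assuming this is run as a stand-alone script
# (e.g. demo13.py), it would presumably end with something like:
#     if __name__ == "__main__":
#         import sys
#         main(sys.argv[1:])
# and be invoked as, e.g.:  python demo13.py nuse=20 ntot=1000 filters=YJH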