Code example #1
File: fft_vs_geom_movie.py  Project: LiuDezi/GalSim
def make_movie(args):
    rng = galsim.BaseDeviate(args.seed)
    u = galsim.UniformDeviate(rng)
    # Generate 1D Gaussian random fields for each aberration.
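    # Recipe: FFT the (shifted) correlation function to get its power spectrum, attach
    # uniformly random phases, and inverse-FFT back to real space.  The resulting time
    # series are then rescaled to have standard deviation args.sigma and zero mean.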
    t = np.arange(-args.n / 2, args.n / 2)
    corr = np.exp(-0.5 * t**2 / args.ell**2)
    pk = np.fft.fft(np.fft.fftshift(corr))
    ak = np.sqrt(2 * pk)
    phi = np.random.uniform(size=(args.n, args.jmax))
    zk = ak[:, None] * np.exp(2j * np.pi * phi)
    aberrations = args.n / 2 * np.fft.ifft(zk, axis=0).real
    measured_std = np.mean(np.std(aberrations, axis=0))
    aberrations *= args.sigma / measured_std
    aberrations -= np.mean(aberrations, axis=0)

    # For the atmosphere screens, we first estimate weights so that the turbulence is dominated by
    # the lower layers, consistent with direct measurements.  The specific values we use are from
    # SCIDAR measurements on Cerro Pachon as part of the 1998 Gemini site selection process
    # (Ellerbroek 2002, JOSA Vol 19 No 9).
    Ellerbroek_alts = [0.0, 2.58, 5.16, 7.73, 12.89, 15.46]  # km
    Ellerbroek_weights = [0.652, 0.172, 0.055, 0.025, 0.074, 0.022]
    Ellerbroek_interp = galsim.LookupTable(Ellerbroek_alts,
                                           Ellerbroek_weights,
                                           interpolant='linear')
    alts = np.max(Ellerbroek_alts) * np.arange(
        args.nlayers) / (args.nlayers - 1)
    weights = Ellerbroek_interp(alts)  # interpolate the weights
    weights /= sum(weights)  # and renormalize
    spd = []  # Wind speed in m/s
    dirn = []  # Wind direction in radians
    r0_500 = []  # Fried parameter in m at a wavelength of 500 nm.
    for i in range(args.nlayers):
        spd.append(
            u() *
            args.max_speed)  # Use a random speed between 0 and args.max_speed
        dirn.append(
            u() * 360 *
            galsim.degrees)  # And an isotropically distributed wind direction.
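        # Kolmogorov turbulence strengths add as r0^(-5/3), so assigning a fraction
        # weights[i] of the total turbulence to this layer corresponds to a layer Fried
        # parameter of r0_500 * weights[i]**(-3/5).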
        r0_500.append(args.r0_500 * weights[i]**(-3. / 5))
        print(
            "Adding layer at altitude {:5.2f} km with velocity ({:5.2f}, {:5.2f}) m/s, "
            "and r0_500 {:5.3f} m.".format(alts[i], spd[i] * dirn[i].cos(),
                                           spd[i] * dirn[i].sin(), r0_500[i]))
    if args.nlayers > 0:
        atm = galsim.Atmosphere(r0_500=r0_500,
                                speed=spd,
                                direction=dirn,
                                altitude=alts,
                                rng=rng,
                                screen_size=args.screen_size,
                                screen_scale=args.screen_scale)
    else:
        atm = galsim.PhaseScreenList()

    # Setup Fourier and geometric apertures
    fft_aper = galsim.Aperture(args.diam,
                               args.lam,
                               obscuration=args.obscuration,
                               pad_factor=args.pad_factor,
                               oversampling=args.oversampling,
                               nstruts=args.nstruts,
                               strut_thick=args.strut_thick,
                               strut_angle=args.strut_angle * galsim.degrees)
    geom_aper = galsim.Aperture(args.diam,
                                args.lam,
                                obscuration=args.obscuration,
                                pad_factor=args.geom_oversampling,
                                oversampling=0.5,
                                nstruts=args.nstruts,
                                strut_thick=args.strut_thick,
                                strut_angle=args.strut_angle * galsim.degrees)
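    # (Roughly speaking, pad_factor and oversampling control how widely and how finely
    # the pupil plane is sampled.  The geometric PSF below is rendered by photon
    # shooting, so it can tolerate the coarser oversampling=0.5 used here.)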

    scale = args.size / args.nx
    extent = np.r_[-1, 1, -1, 1] * args.size / 2

    fft_img_sum = galsim.ImageD(args.nx, args.nx, scale=scale)
    geom_img_sum = galsim.ImageD(args.nx, args.nx, scale=scale)

    # Code to setup the Matplotlib animation.
    metadata = dict(title="FFT vs geom movie", artist='Matplotlib')
    writer = anim.FFMpegWriter(fps=15, bitrate=10000, metadata=metadata)

    fig = Figure(facecolor='k', figsize=(16, 9))
    FigureCanvasAgg(fig)

    fft_ax = fig.add_axes([0.07, 0.08, 0.36, 0.9])
    fft_ax.set_xlabel("Arcsec")
    fft_ax.set_ylabel("Arcsec")
    fft_ax.set_title("Fourier Optics")
    fft_im = fft_ax.imshow(np.ones((args.nx, args.nx), dtype=float),
                           animated=True,
                           extent=extent,
                           vmin=0.0,
                           vmax=args.vmax)

    # Axis for the wavefront image on the right.
    geom_ax = fig.add_axes([0.50, 0.08, 0.36, 0.9])
    geom_ax.set_xlabel("Arcsec")
    geom_ax.set_ylabel("Arcsec")
    geom_ax.set_title("Geometric Optics")
    geom_im = geom_ax.imshow(np.ones((args.nx, args.nx), dtype=float),
                             animated=True,
                             extent=extent,
                             vmin=0.0,
                             vmax=args.vmax)

    # Color items white to show up on black background
    for ax in [fft_ax, geom_ax]:
        for _, spine in ax.spines.items():
            spine.set_color('w')
        ax.title.set_color('w')
        ax.xaxis.label.set_color('w')
        ax.yaxis.label.set_color('w')
        ax.tick_params(axis='both', colors='w')

    ztext = []
    for i in range(2, args.jmax + 1):
        x = 0.88
        y = 0.1 + (args.jmax - i) / args.jmax * 0.8
        ztext.append(fig.text(x, y, "Z{:d} = {:5.3f}".format(i, 0.0)))
        ztext[-1].set_color('w')

    M_fft = fft_ax.text(0.02, 0.955, '', transform=fft_ax.transAxes)
    M_fft.set_color('w')
    M_geom = geom_ax.text(0.02, 0.955, '', transform=geom_ax.transAxes)
    M_geom.set_color('w')

    etext_fft = fft_ax.text(0.02, 0.91, '', transform=fft_ax.transAxes)
    etext_fft.set_color('w')
    etext_geom = geom_ax.text(0.02, 0.91, '', transform=geom_ax.transAxes)
    etext_geom.set_color('w')

    fft_mom = np.empty((args.n, 8), dtype=float)
    geom_mom = np.empty((args.n, 8), dtype=float)

    fullpath = args.out + "movie.mp4"
    subdir, filename = os.path.split(fullpath)
    if subdir and not os.path.isdir(subdir):
        os.makedirs(subdir)

    with ProgressBar(args.n) as bar:
        with writer.saving(fig, fullpath, 100):
            t0 = 0.0
            for i, aberration in enumerate(aberrations):
                optics = galsim.OpticalScreen(args.diam,
                                              obscuration=args.obscuration,
                                              aberrations=[0] +
                                              aberration.tolist())
                psl = galsim.PhaseScreenList(atm._layers + [optics])
                fft_psf = psl.makePSF(lam=args.lam,
                                      aper=fft_aper,
                                      t0=t0,
                                      exptime=args.time_step)
                geom_psf = psl.makePSF(lam=args.lam,
                                       aper=geom_aper,
                                       t0=t0,
                                       exptime=args.time_step)

                fft_img0 = fft_psf.drawImage(nx=args.nx,
                                             ny=args.nx,
                                             scale=scale)

                geom_img0 = geom_psf.drawImage(nx=args.nx,
                                               ny=args.nx,
                                               scale=scale,
                                               method='phot',
                                               n_photons=100000)
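                # method='phot' renders by photon shooting; for this PSF that is what
                # realizes the geometric-optics approximation being compared against
                # the FFT-based image above.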

                t0 += args.time_step

                if args.accumulate:
                    fft_img_sum += fft_img0
                    geom_img_sum += geom_img0
                    fft_img = fft_img_sum / (i + 1)
                    geom_img = geom_img_sum / (i + 1)
                else:
                    fft_img = fft_img0
                    geom_img = geom_img0

                fft_im.set_array(fft_img.array)
                geom_im.set_array(geom_img.array)

                for j, ab in enumerate(aberration):
                    if j == 0:
                        continue
                    ztext[j - 1].set_text("Z{:d} = {:5.3f}".format(j + 1, ab))

                # Calculate simple estimate of ellipticity
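                # (unweighted_shape converts the second moments into distortion-style
                # e1, e2 plus a size proxy rsqr = Mxx + Myy; see galsim.utilities for
                # the exact definitions.)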
                mom_fft = galsim.utilities.unweighted_moments(
                    fft_img, origin=fft_img.trueCenter())
                mom_geom = galsim.utilities.unweighted_moments(
                    geom_img, origin=geom_img.trueCenter())
                e_fft = galsim.utilities.unweighted_shape(mom_fft)
                e_geom = galsim.utilities.unweighted_shape(mom_geom)

                Is = ("$M_x$={: 6.4f}, $M_y$={: 6.4f}, $M_{{xx}}$={:6.4f},"
                      " $M_{{yy}}$={:6.4f}, $M_{{xy}}$={: 6.4f}")
                M_fft.set_text(
                    Is.format(mom_fft['Mx'] * fft_img.scale,
                              mom_fft['My'] * fft_img.scale,
                              mom_fft['Mxx'] * fft_img.scale**2,
                              mom_fft['Myy'] * fft_img.scale**2,
                              mom_fft['Mxy'] * fft_img.scale**2))
                M_geom.set_text(
                    Is.format(mom_geom['Mx'] * geom_img.scale,
                              mom_geom['My'] * geom_img.scale,
                              mom_geom['Mxx'] * geom_img.scale**2,
                              mom_geom['Myy'] * geom_img.scale**2,
                              mom_geom['Mxy'] * geom_img.scale**2))
                etext_fft.set_text(
                    "$e_1$={: 6.4f}, $e_2$={: 6.4f}, $r^2$={:6.4f}".format(
                        e_fft['e1'], e_fft['e2'],
                        e_fft['rsqr'] * fft_img.scale**2))
                etext_geom.set_text(
                    "$e_1$={: 6.4f}, $e_2$={: 6.4f}, $r^2$={:6.4f}".format(
                        e_geom['e1'], e_geom['e2'],
                        e_geom['rsqr'] * geom_img.scale**2))

                fft_mom[i] = (mom_fft['Mx'] * fft_img.scale, mom_fft['My'] *
                              fft_img.scale, mom_fft['Mxx'] * fft_img.scale**2,
                              mom_fft['Myy'] * fft_img.scale**2,
                              mom_fft['Mxy'] * fft_img.scale**2, e_fft['e1'],
                              e_fft['e2'], e_fft['rsqr'] * fft_img.scale**2)

                geom_mom[i] = (mom_geom['Mx'] * geom_img.scale,
                               mom_geom['My'] * geom_img.scale,
                               mom_geom['Mxx'] * geom_img.scale**2,
                               mom_geom['Myy'] * geom_img.scale**2,
                               mom_geom['Mxy'] * geom_img.scale**2,
                               e_geom['e1'], e_geom['e2'],
                               e_geom['rsqr'] * geom_img.scale**2)

                writer.grab_frame(facecolor=fig.get_facecolor())

                bar.update()

    def symmetrize_axis(ax):
        xlim = ax.get_xlim()
        ylim = ax.get_ylim()
        lim = min(xlim[0], ylim[0]), max(xlim[1], ylim[1])
        ax.set_xlim(lim)
        ax.set_ylim(lim)
        ax.plot(lim, lim)

    # Centroid plot
    fig = Figure(figsize=(10, 6))
    FigureCanvasAgg(fig)
    axes = []
    axes.append(fig.add_subplot(1, 2, 1))
    axes.append(fig.add_subplot(1, 2, 2))
    axes[0].scatter(fft_mom[:, 0], geom_mom[:, 0])
    axes[1].scatter(fft_mom[:, 1], geom_mom[:, 1])
    axes[0].set_title("Mx")
    axes[1].set_title("My")
    for ax in axes:
        ax.set_xlabel("Fourier Optics")
        ax.set_ylabel("Geometric Optics")
        symmetrize_axis(ax)
    fig.tight_layout()
    fig.savefig(args.out + "centroid.png", dpi=300)

    # Second moment plot
    fig = Figure(figsize=(16, 6))
    FigureCanvasAgg(fig)
    axes = []
    axes.append(fig.add_subplot(1, 3, 1))
    axes.append(fig.add_subplot(1, 3, 2))
    axes.append(fig.add_subplot(1, 3, 3))
    axes[0].scatter(fft_mom[:, 2], geom_mom[:, 2])
    axes[1].scatter(fft_mom[:, 3], geom_mom[:, 3])
    axes[2].scatter(fft_mom[:, 4], geom_mom[:, 4])
    axes[0].set_title("Mxx")
    axes[1].set_title("Myy")
    axes[2].set_title("Mxy")
    for ax in axes:
        ax.set_xlabel("Fourier Optics")
        ax.set_ylabel("Geometric Optics")
        symmetrize_axis(ax)
    fig.tight_layout()
    fig.savefig(args.out + "2ndMoment.png", dpi=300)

    # Ellipticity plot
    fig = Figure(figsize=(16, 6))
    FigureCanvasAgg(fig)
    axes = []
    axes.append(fig.add_subplot(1, 3, 1))
    axes.append(fig.add_subplot(1, 3, 2))
    axes.append(fig.add_subplot(1, 3, 3))
    axes[0].scatter(fft_mom[:, 5], geom_mom[:, 5])
    axes[1].scatter(fft_mom[:, 6], geom_mom[:, 6])
    axes[2].scatter(fft_mom[:, 7], geom_mom[:, 7])
    axes[0].set_title("e1")
    axes[1].set_title("e2")
    axes[2].set_title("rsqr")
    for ax in axes:
        ax.set_xlabel("Fourier Optics")
        ax.set_ylabel("Geometric Optics")
        symmetrize_axis(ax)
    fig.tight_layout()
    fig.savefig(args.out + "ellipticity.png", dpi=300)
Code example #2
def main(argv):
    """
    Make images similar to that done for the Great08 challenge:
      - Each fits file is 10 x 10 postage stamps.
        (The real Great08 images are 100x100, but in the interest of making the Demo
         script a bit quicker, we only build 100 stars and 100 galaxies.)
      - Each postage stamp is 40 x 40 pixels.
      - One image is all stars.
      - A second image is all galaxies.
      - Applied shear is the same for each galaxy.
      - Galaxies are oriented randomly, but in pairs to cancel shape noise.
      - Noise is Poisson using a nominal sky value of 1.e6.
      - Galaxies are Exponential profiles.
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo5")

    # Define some parameters we'll use below.
    # Normally these would be read in from some parameter file.

    nx_tiles = 10  #
    ny_tiles = 10  #
    stamp_xsize = 40  #
    stamp_ysize = 40  #

    random_seed = 6424512  #

    pixel_scale = 1.0  # arcsec / pixel
    sky_level = 1.e6  # ADU / arcsec^2

    # Make output directory if not already present.
    if not os.path.isdir('output'):
        os.mkdir('output')
    psf_file_name = os.path.join('output', 'g08_psf.fits')
    psf_beta = 3  #
    psf_fwhm = 2.85  # arcsec (=pixels)
    psf_trunc = 2. * psf_fwhm  # arcsec (=pixels)
    psf_e1 = -0.019  #
    psf_e2 = -0.007  #

    gal_file_name = os.path.join('output', 'g08_gal.fits')
    gal_signal_to_noise = 200  # Great08 "LowNoise" run
    gal_resolution = 0.98  # r_gal / r_psf (use r = half_light_radius)
    # Note: Great08 defined their resolution as r_obs / r_psf, using the convolved
    #       size rather than the pre-convolved size.
    #       Therefore, our r_gal/r_psf = 0.98 approximately corresponds to
    #       their r_obs / r_psf = 1.4.

    gal_ellip_rms = 0.2  # using "distortion" definition of ellipticity:
    #   e = (a^2-b^2)/(a^2+b^2), where a and b are the
    #   semi-major and semi-minor axes, respectively.
    gal_ellip_max = 0.6  # Maximum value of e, to avoid getting near e=1.
    gal_g1 = 0.013  # Applied shear, using normal shear definition:
    gal_g2 = -0.008  #   g = (a-b)/(a+b)
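    # (For the same axis ratio, the two definitions are related by e = 2g / (1 + g^2).)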

    shift_radius = 1.0  # arcsec (=pixels)

    logger.info('Starting demo script 5 using:')
    logger.info('    - image with %d x %d postage stamps', nx_tiles, ny_tiles)
    logger.info('    - postage stamps of size %d x %d pixels', stamp_xsize,
                stamp_ysize)
    logger.info('    - Moffat PSF (beta = %.1f, FWHM = %.2f, trunc = %.2f),',
                psf_beta, psf_fwhm, psf_trunc)
    logger.info('    - PSF ellip = (%.3f,%.3f)', psf_e1, psf_e2)
    logger.info('    - Exponential galaxies')
    logger.info('    - Resolution (r_gal / r_psf) = %.2f', gal_resolution)
    logger.info('    - Ellipticities have rms = %.1f, max = %.1f',
                gal_ellip_rms, gal_ellip_max)
    logger.info('    - Applied gravitational shear = (%.3f,%.3f)', gal_g1,
                gal_g2)
    logger.info('    - Poisson noise (sky level = %.1e).', sky_level)
    logger.info('    - Centroid shifts up to = %.2f pixels', shift_radius)

    # Define the PSF profile
    psf = galsim.Moffat(beta=psf_beta, fwhm=psf_fwhm, trunc=psf_trunc)

    # When something can be constructed from multiple sizes, e.g. Moffat, then
    # you can get any size out even if it wasn't the way the object was constructed.
    # In this case, we extract the half-light radius, even though we built it with fwhm.
    # We'll use this later to set the galaxy's half-light radius in terms of a resolution.
    psf_re = psf.half_light_radius

    psf = psf.shear(e1=psf_e1, e2=psf_e2)
    logger.debug('Made PSF profile')

    # Define the galaxy profile

    # First figure out the size we need from the resolution
    gal_re = psf_re * gal_resolution

    # Make the galaxy profile starting with flux = 1.
    gal = galsim.Exponential(flux=1., half_light_radius=gal_re)
    logger.debug('Made galaxy profile')

    # This profile is placed with different orientations and noise realizations
    # at each postage stamp in the gal image.
    gal_image = galsim.ImageF(stamp_xsize * nx_tiles - 1,
                              stamp_ysize * ny_tiles - 1,
                              scale=pixel_scale)
    psf_image = galsim.ImageF(stamp_xsize * nx_tiles - 1,
                              stamp_ysize * ny_tiles - 1,
                              scale=pixel_scale)

    shift_radius_sq = shift_radius**2

    first_in_pair = True  # Make pairs that are rotated by 90 degrees

    k = 0
    for iy in range(ny_tiles):
        for ix in range(nx_tiles):
            # The normal procedure for setting random numbers in GalSim is to start a new
            # random number generator for each object using sequential seed values.
            # This sounds weird at first (especially if you were indoctrinated by Numerical
            # Recipes), but for the boost random number generator we use, the "random"
            # number sequences produced from sequential initial seeds are highly uncorrelated.
            #
            # The reason for this procedure is that when we use multiple processes to build
            # our images, we want to make sure that the results are deterministic regardless
            # of the way the objects get parcelled out to the different processes.
            #
            # Of course, this script isn't using multiple processes, so it isn't required here.
            # However, we do it nonetheless in order to get the same results as the config
            # version of this demo script (demo5.yaml).
            ud = galsim.UniformDeviate(random_seed + k + 1)

            # Any kind of random number generator can take another RNG as its first
            # argument rather than a seed value.  This makes both objects use the same
            # underlying generator for their pseudo-random values.
            gd = galsim.GaussianDeviate(ud, sigma=gal_ellip_rms)

            # The -1's in the next line are to provide a border of
            # 1 pixel between postage stamps
            b = galsim.BoundsI(ix * stamp_xsize + 1,
                               (ix + 1) * stamp_xsize - 1,
                               iy * stamp_ysize + 1,
                               (iy + 1) * stamp_ysize - 1)
            sub_gal_image = gal_image[b]
            sub_psf_image = psf_image[b]

            # Great08 randomized the locations of the two galaxies in each pair,
            # but for simplicity, we just do them in sequential postage stamps.
            if first_in_pair:
                # Use a random orientation:
                beta = ud() * 2. * math.pi * galsim.radians

                # Determine the ellipticity to use for this galaxy.
                ellip = 1
                while (ellip > gal_ellip_max):
                    # Don't do `ellip = math.fabs(gd())`
                    # Python basically implements this as a macro, so gd() is called twice!
                    val = gd()
                    ellip = math.fabs(val)

                # Make a new copy of the galaxy with an applied e1/e2-type distortion
                # by specifying the ellipticity and a real-space position angle
                ellip_gal = gal.shear(e=ellip, beta=beta)

                first_in_pair = False
            else:
                # Use the previous ellip_gal profile and rotate it by 90 degrees
                ellip_gal = ellip_gal.rotate(90 * galsim.degrees)

                first_in_pair = True

            # Apply the gravitational reduced shear by specifying g1/g2
            this_gal = ellip_gal.shear(g1=gal_g1, g2=gal_g2)

            # Apply a random shift_radius:
            rsq = 2 * shift_radius_sq
            while (rsq > shift_radius_sq):
                dx = (2 * ud() - 1) * shift_radius
                dy = (2 * ud() - 1) * shift_radius
                rsq = dx**2 + dy**2

            this_gal = this_gal.shift(dx, dy)
            # Note that the shifted psf that we create here is purely for the purpose of being able
            # to draw a separate, shifted psf image.  We do not use it when convolving the galaxy
            # with the psf.
            this_psf = psf.shift(dx, dy)

            # Make the final image, convolving with the (unshifted) psf
            final_gal = galsim.Convolve([psf, this_gal])

            # Draw the image
            final_gal.drawImage(sub_gal_image)

            # Now add an appropriate amount of noise to get our desired S/N
            # There are lots of definitions of S/N, but here is the one used by Great08
            # We use a weighted integral of the flux:
            #   S = sum W(x,y) I(x,y) / sum W(x,y)
            #   N^2 = Var(S) = sum W(x,y)^2 Var(I(x,y)) / (sum W(x,y))^2
            # Now we assume that Var(I(x,y)) is constant so
            #   Var(I(x,y)) = noise_var
            # We also assume that we are using a matched filter for W, so W(x,y) = I(x,y).
            # Then a few things cancel and we find that
            # S/N = sqrt( sum I(x,y)^2 / noise_var )
            #
            # The above procedure is encapsulated in the function image.addNoiseSNR which
            # sets the flux appropriately given the variance of the noise model.
            # In our case, noise_var = sky_level_pixel
            sky_level_pixel = sky_level * pixel_scale**2
            noise = galsim.PoissonNoise(ud, sky_level=sky_level_pixel)
            sub_gal_image.addNoiseSNR(noise, gal_signal_to_noise)

            # Draw the PSF image
            # No noise on PSF images.  Just draw it as is.
            this_psf.drawImage(sub_psf_image)

            # For first instance, measure moments
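            # (FindAdaptiveMom returns the measured shape in observed_shape; scaling
            # its distortion-style e1, e2 by |g|/|e| converts them to the reduced-shear
            # g1, g2 values logged below.)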
            if ix == 0 and iy == 0:
                psf_shape = sub_psf_image.FindAdaptiveMom()
                temp_e = psf_shape.observed_shape.e
                if temp_e > 0.0:
                    g_to_e = psf_shape.observed_shape.g / temp_e
                else:
                    g_to_e = 0.0
                logger.info(
                    'Measured best-fit elliptical Gaussian for first PSF image: '
                )
                logger.info('  g1, g2, sigma = %7.4f, %7.4f, %7.4f (pixels)',
                            g_to_e * psf_shape.observed_shape.e1,
                            g_to_e * psf_shape.observed_shape.e2,
                            psf_shape.moments_sigma)

            x = b.center.x
            y = b.center.y
            logger.info(
                'Galaxy (%d,%d): center = (%.0f,%.0f)  (e,beta) = (%.4f,%.3f)',
                ix, iy, x, y, ellip, beta / galsim.radians)
            k = k + 1

    logger.info('Done making images of postage stamps')

    # Now write the images to disk.
    psf_image.write(psf_file_name)
    logger.info('Wrote PSF file %s', psf_file_name)

    gal_image.write(gal_file_name)
    logger.info('Wrote image to %r',
                gal_file_name)  # using %r adds quotes around filename for us
Code example #3
File: demo11.py  Project: LiuDezi/GalSim
def main(argv):
    """
    Make images using constant PSF and variable shear:
      - The main image is 2048 x 2048 pixels.
      - Pixel scale is 0.2 arcsec/pixel, hence the image is about 0.11 degrees on a side.
      - Applied shear is from a cosmological power spectrum read in from file.
      - The PSF is a real one from SDSS, and corresponds to a convolution of atmospheric PSF,
        optical PSF, and pixel response, which has been sampled at pixel centers.  We used a PSF
        from SDSS in order to have a PSF profile that could correspond to what you see with a real
        telescope. However, in order that the galaxy resolution not be too poor, we tell GalSim that
        the pixel scale for that PSF image is 0.2" rather than 0.396".  We are simultaneously lying
        about the intrinsic size of the PSF and about the pixel scale when we do this.
      - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles
        (like in demo10) or parametric fits to those profiles.  We choose 30% of the galaxies
        to use the real images, and the other 70% to use the parametric fits.
      - The real galaxy images include some initial correlated noise from the original HST
        observation.  However, we whiten the noise of the final image so the final image has
        stationary Gaussian noise, rather than correlated noise.
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo11")

    # Define some parameters we'll use below.
    # Normally these would be read in from some parameter file.

    pixel_scale = 0.2  # arcsec/pixel
    image_size = 2048  # size of image in pixels
    image_size_arcsec = image_size * pixel_scale  # size of big image in each dimension (arcsec)
    noise_variance = 5.e4  # ADU^2  (Just use simple Gaussian noise here.)
    nobj = 288  # number of galaxies in entire field
    # (This corresponds to 8 galaxies / arcmin^2)
    grid_spacing = 90.0  # The spacing between the samples for the power spectrum
    # realization (arcsec)
    tel_diam = 4  # Let's figure out the flux for a 4 m class telescope
    exp_time = 300  # exposing for 300 seconds.
    center_ra = 19.3 * galsim.hours  # The RA, Dec of the center of the image on the sky
    center_dec = -33.1 * galsim.degrees

    # The catalog returns objects that are appropriate for HST in 1 second exposures.  So for our
    # telescope we scale up by the relative area and exposure time.  Note that what is important is
    # the *effective* area after taking into account obscuration.  For HST, the telescope diameter
    # is 2.4 but there is obscuration (a linear factor of 0.33).  Here, we assume that the telescope
    # we're simulating effectively has no obscuration factor.  We're also ignoring the pi/4 factor
    # since it appears in the numerator and denominator, so we use area = diam^2.
    hst_eff_area = 2.4**2 * (1. - 0.33**2)
    flux_scaling = (tel_diam**2 / hst_eff_area) * exp_time

    # random_seed is used for both the power spectrum realization and the random properties
    # of the galaxies.
    random_seed = 24783923

    file_name = os.path.join('output', 'tabulated_power_spectrum.fits.fz')

    logger.info('Starting demo script 11')

    # Read in galaxy catalog
    # The COSMOSCatalog uses the same input file as we have been using for RealGalaxyCatalogs
    # along with a second file called real_galaxy_catalog_23.5_example_fits.fits, which stores
    # the information about the parametric fits.  There is no need to specify the second file
    # name, since the name is derivable from the name of the main catalog.
    if True:
        # The catalog we distribute with the GalSim code only has 100 galaxies.
        # The galaxies will typically be reused several times here.
        cat_file_name = 'real_galaxy_catalog_23.5_example.fits'
        dir = 'data'
        cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir)
    else:
        # If you've run galsim_download_cosmos, you can leave out the cat_file_name and dir
        # to use the full COSMOS catalog with 56,000 galaxies in it.
        cosmos_cat = galsim.COSMOSCatalog()
    logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects)

    # Setup the PowerSpectrum object we'll be using:
    # To do this, we first have to read in the tabulated shear power spectrum, often denoted
    # C_ell(ell), where ell has units of inverse angle and C_ell has units of angle^2.  However,
    # GalSim works in the flat-sky approximation, so we use this notation interchangeably with
    # P(k).  GalSim does not calculate shear power spectra for users, who must be able to provide
    # their own (or use the examples in the repository).
    #
    # Here we use a tabulated power spectrum from iCosmo (http://icosmo.org), with the following
    # cosmological parameters and survey design:
    # H_0 = 70 km/s/Mpc
    # Omega_m = 0.25
    # Omega_Lambda = 0.75
    # w_0 = -1.0
    # w_a = 0.0
    # n_s = 0.96
    # sigma_8 = 0.8
    # Smith et al. prescription for the non-linear power spectrum.
    # Eisenstein & Hu transfer function with wiggles.
    # Default dN/dz with z_med = 1.0
    # The file has, as required, just two columns which are k and P(k).  However, iCosmo works in
    # terms of ell and C_ell; ell is inverse radians and C_ell in radians^2.  Since GalSim tends to
    # work in terms of arcsec, we have to tell it that the inputs are radians^-1 so it can convert
    # to store in terms of arcsec^-1.
    pk_file = os.path.join('data', 'cosmo-fid.zmed1.00.out')
    ps = galsim.PowerSpectrum(pk_file, units=galsim.radians)
    # The argument here is "e_power_function" which defines the E-mode power to use.
    logger.info('Set up power spectrum from tabulated P(k)')

    # Now let's read in the PSF.  It's a real SDSS PSF, which means pixel scale of 0.396".  However,
    # the typical seeing is 1.2" and we want to simulate better seeing, so we will just tell GalSim
    # that the pixel scale is 0.2".  We have to be careful with SDSS PSF images, as they have an
    # added 'soft bias' of 1000 which has been removed before creation of this file, so that the sky
    # level is properly zero.  Also, the file is bzipped, to demonstrate the ability of GalSim
    # to handle this kind of compressed file (among others).  We read the image directly into an
    # InterpolatedImage GSObject, so we can manipulate it as needed (here, the only manipulation
    # needed is convolution).  The flux is 1 as needed for a PSF.
    psf_file = os.path.join('data', 'example_sdss_psf_sky0.fits.bz2')
    psf = galsim.InterpolatedImage(psf_file, scale=pixel_scale, flux=1.)
    logger.info('Read in PSF image from bzipped FITS file')

    # Setup the image:
    full_image = galsim.ImageF(image_size, image_size)

    # The default convention for indexing an image is to follow the FITS standard where the
    # lower-left pixel is called (1,1).  However, this can be counter-intuitive to people more
    # used to C or python indexing, where indices start at 0.  It is possible to change the
    # coordinates of the lower-left pixel with the methods `setOrigin`.  For this demo, we
    # switch to 0-based indexing, so the lower-left pixel will be called (0,0).
    full_image.setOrigin(0, 0)

    # As for demo10, we use random_seed for the random numbers required for the
    # whole image.  In this case, both the power spectrum realization and the noise on the
    # full image we apply later.
    rng = galsim.BaseDeviate(random_seed)

    # We want to make random positions within our image.  However, currently for shears from a power
    # spectrum we first have to get shears on a grid of positions, and then we can choose random
    # positions within that.  So, let's make the grid.  We're going to make it as large as the
    # image, with grid points spaced by 90 arcsec (hence interpolation only happens below 90"
    # scales, below the interesting scales on which we want the shear power spectrum to be
    # represented exactly).  The lensing engine wants positions in arcsec, so calculate that:
    ps.buildGrid(grid_spacing=grid_spacing,
                 ngrid=int(math.ceil(image_size_arcsec / grid_spacing)),
                 rng=rng)
    logger.info('Made gridded shears')

    # We keep track of how much noise is already in the image from the RealGalaxies.
    # The default initial value is all pixels = 0.
    noise_image = galsim.ImageF(image_size, image_size)
    noise_image.setOrigin(0, 0)

    # Make a slightly non-trivial WCS.  We'll use a slightly rotated coordinate system
    # and center it at the image center.
    theta = 0.17 * galsim.degrees
    # ( dudx  dudy ) = ( cos(theta)  -sin(theta) ) * pixel_scale
    # ( dvdx  dvdy )   ( sin(theta)   cos(theta) )
    dudx = math.cos(theta.rad()) * pixel_scale
    dudy = -math.sin(theta.rad()) * pixel_scale
    dvdx = math.sin(theta.rad()) * pixel_scale
    dvdy = math.cos(theta.rad()) * pixel_scale
    image_center = full_image.trueCenter()
    affine = galsim.AffineTransform(dudx,
                                    dudy,
                                    dvdx,
                                    dvdy,
                                    origin=full_image.trueCenter())

    # We can also put it on the celestial sphere to give it a bit more realism.
    # The TAN projection takes a (u,v) coordinate system on a tangent plane and projects
    # that plane onto the sky using a given point as the tangent point.  The tangent
    # point should be given as a CelestialCoord.
    sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec)

    # The third parameter, units, defaults to arcsec, but we make it explicit here.
    # It sets the angular units of the (u,v) intermediate coordinate system.
    wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec)
    full_image.wcs = wcs

    # Now we need to loop over our objects:
    for k in range(nobj):
        time1 = time.time()
        # The usual random number generator using a different seed for each galaxy.
        ud = galsim.UniformDeviate(random_seed + k + 1)

        # Choose a random RA, Dec around the sky_center.
        # Note that for this to come out close to a square shape, we need to account for the
        # cos(dec) part of the metric: ds^2 = dr^2 + r^2 d(dec)^2 + r^2 cos^2(dec) d(ra)^2
        # So need to calculate dec first.
        dec = center_dec + (ud() - 0.5) * image_size_arcsec * galsim.arcsec
        ra = center_ra + (ud() - 0.5) * image_size_arcsec / math.cos(
            dec.rad()) * galsim.arcsec
        world_pos = galsim.CelestialCoord(ra, dec)

        # We will need the image position as well, so use the wcs to get that
        image_pos = wcs.toImage(world_pos)

        # We also need this in the tangent plane, which we call "world coordinates" here,
        # since the PowerSpectrum class is really defined on that plane, not in (ra,dec).
        uv_pos = affine.toWorld(image_pos)

        # Get the reduced shears and magnification at this point
        g1, g2, mu = ps.getLensing(pos=uv_pos)

        # Now we will have the COSMOSCatalog make a galaxy profile for us.  It can make either
        # a RealGalaxy using the original HST image and PSF, or a parametric model based on
        # parametric fits to the light distribution of the HST observation.  The parametric
        # models are either a Sersic fit to the data or a bulge + disk fit according to which
        # one gave the better chisq value.  We will select a galaxy at random from the catalog.
        # One could easily do this by choosing an index = int(ud() * cosmos_cat.nobjects), but
        # we will instead allow the catalog to choose a random galaxy for us.  It will remove any
        # selection effects involved in postage stamp creation using weights that are stored in
        # the catalog.  (If for some reason you prefer not to do that, you can always choose a
        # purely random index yourself using int(ud() * cosmos_cat.nobjects).)  We employ this
        # random selection by simply failing to specify an index or identifier for a galaxy, in
        # which case it chooses a random one.

        # First determine whether we will make a real galaxy (`gal_type = 'real'`) or a parametric
        # galaxy (`gal_type = 'parametric'`).  The real galaxies take longer to render, so for this
        # script, we just use them 30% of the time and use parametric galaxies the other 70%.

        # We could just use `ud()<0.3` for this, but instead we introduce another Deviate type
        # available in GalSim that we haven't used yet: BinomialDeviate.
        # It takes an N and p value and returns integers according to a binomial distribution.
        # i.e. How many heads you get after N flips if each flip has a chance, p, of being heads.
        binom = galsim.BinomialDeviate(ud, N=1, p=0.3)
        real = binom()

        if real:
            # For real galaxies, we will want to whiten the noise in the image (below).
            # When whitening the image, we need to make sure the original correlated noise is
            # present throughout the whole image, otherwise the whitening will do the wrong thing
            # to the parts of the image that don't include the original image.  The RealGalaxy
            # stores the correct noise profile to use as the gal.noise attribute.  This noise
            # profile is automatically updated as we shear, dilate, convolve, etc.  But we need to
            # tell it how large to pad with this noise by hand.  This is a bit complicated for the
            # code to figure out on its own, so we have to supply the size for noise padding
            # with the noise_pad_size parameter.

            # The large galaxies will render fine without any noise padding, but the postage stamp
            # for the smaller galaxies will be sized appropriately for the PSF, which may make the
            # stamp larger than the original galaxy image.  The psf image is 40 x 40, although
            # the bright part is much more concentrated than that.  If we pad out the galaxy image
            # to at least 40 x sqrt(2), we should be safe even if the galaxy image is rotated
            # with respect to the psf image.
            #     noise_pad_size = 40 * sqrt(2) * 0.2 arcsec/pixel = 11.3 arcsec
            gal = cosmos_cat.makeGalaxy(gal_type='real',
                                        rng=ud,
                                        noise_pad_size=11.3)
        else:
            gal = cosmos_cat.makeGalaxy(gal_type='parametric', rng=ud)

        # Apply a random rotation
        theta = ud() * 2.0 * numpy.pi * galsim.radians
        gal = gal.rotate(theta)

        # Rescale the flux to match our telescope configuration.
        # This automatically scales up the noise variance by flux_scaling**2.
        gal *= flux_scaling

        # Apply the cosmological (reduced) shear and magnification at this position using a single
        # GSObject method.
        gal = gal.lens(g1, g2, mu)

        # Convolve with the PSF.
        final = galsim.Convolve(psf, gal)

        # Account for the fractional part of the position
        # cf. demo9.py for an explanation of this nominal position stuff.
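        # In short: round to the nearest integer pixel to place the stamp, and pass the
        # leftover sub-pixel fraction to drawImage as an offset so the object lands at
        # the exact (non-integer) image position.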
        x_nominal = image_pos.x + 0.5
        y_nominal = image_pos.y + 0.5
        ix_nominal = int(math.floor(x_nominal + 0.5))
        iy_nominal = int(math.floor(y_nominal + 0.5))
        dx = x_nominal - ix_nominal
        dy = y_nominal - iy_nominal
        offset = galsim.PositionD(dx, dy)

        # We use method='no_pixel' here because the SDSS PSF image that we are using includes the
        # pixel response already.
        stamp = final.drawImage(wcs=wcs.local(image_pos),
                                offset=offset,
                                method='no_pixel')

        # Recenter the stamp at the desired position:
        stamp.setCenter(ix_nominal, iy_nominal)

        # Find the overlapping bounds:
        bounds = stamp.bounds & full_image.bounds

        # Now, if we are using a real galaxy, we want to either whiten or at least symmetrize the
        # noise on the postage stamp to avoid having to deal with correlated noise in any kind of
        # image processing you would want to do on the final image.  (Like measure galaxy shapes.)

        # GalSim automatically propagates the noise correctly from the initial RealGalaxy object
        # through the applied shear, distortion, rotation, and convolution into the final object's
        # noise attribute.  To make the noise fully white, use the image.whitenNoise() method.
        # The returned value is the variance of the Gaussian noise that is present after the
        # whitening process.

        # However, this is often overkill for many applications.  If it is acceptable to merely end
        # up with noise with some degree of symmetry (say 4-fold or 8-fold symmetry), then you can
        # instead have GalSim just add enough noise to make the resulting noise have this kind of
        # symmetry.  Usually this requires adding significantly less additional noise, which means
        # you can have the resulting total variance be somewhat smaller.  The returned variance
        # corresponds to the zero-lag value of the noise correlation function, which will still have
        # off-diagonal elements.  We can do this step using the image.symmetrizeNoise() method.
        if real:
            if True:
                # We use the symmetrizing option here.
                new_variance = stamp.symmetrizeNoise(final.noise, 8)
            else:
                # Here is how you would do it if you wanted to fully whiten the image.
                new_variance = stamp.whitenNoise(final.noise)

            # We need to keep track of how much variance we have currently in the image, so when
            # we add more noise, we can omit what is already there.
            noise_image[bounds] += new_variance

        # Finally, add the stamp to the full image.
        full_image[bounds] += stamp[bounds]

        time2 = time.time()
        tot_time = time2 - time1
        logger.info('Galaxy %d: position relative to center = %s, t=%f s', k,
                    str(uv_pos), tot_time)

    # We already have some noise in the image, but it isn't uniform.  So the first thing to do is
    # to make the Gaussian noise uniform across the whole image.  We have a special noise class
    # that can do this.  VariableGaussianNoise takes an image of variance values and applies
    # Gaussian noise with the corresponding variance to each pixel.
    # So all we need to do is build an image with how much noise to add to each pixel to get us
    # up to the maximum value that we already have in the image.
    max_current_variance = numpy.max(noise_image.array)
    noise_image = max_current_variance - noise_image
    vn = galsim.VariableGaussianNoise(rng, noise_image)
    full_image.addNoise(vn)

    # Now max_current_variance is the noise level across the full image.  We don't want to add that
    # twice, so subtract off this much from the intended noise that we want to end up in the image.
    noise_variance -= max_current_variance

    # Now add Gaussian noise with this variance to the final image.  We have to do this step
    # at the end, rather than adding to individual postage stamps, in order to get the noise
    # level right in the overlap regions between postage stamps.
    noise = galsim.GaussianNoise(rng, sigma=math.sqrt(noise_variance))
    full_image.addNoise(noise)
    logger.info('Added noise to final large image')

    # Now write the image to disk.  It is automatically compressed with Rice compression,
    # since the filename we provide ends in .fz.
    full_image.write(file_name)
    logger.info('Wrote image to %r', file_name)

    # Compute some sky positions of some of the pixels to compare with the values of RA, Dec
    # that ds9 reports.  ds9 always uses (1,1) for the lower left pixel, so the pixel coordinates
    # of these pixels are different by 1, but you can check that the RA and Dec values are
    # the same as what GalSim calculates.
    ra_str = center_ra.hms()
    dec_str = center_dec.dms()
    logger.info('Center of image    is at RA %sh %sm %ss, DEC %sd %sm %ss',
                ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3],
                dec_str[3:5], dec_str[5:])
    for (x, y) in [(0, 0), (0, image_size - 1), (image_size - 1, 0),
                   (image_size - 1, image_size - 1)]:
        world_pos = wcs.toWorld(galsim.PositionD(x, y))
        ra_str = world_pos.ra.hms()
        dec_str = world_pos.dec.dms()
        logger.info('Pixel (%4d, %4d) is at RA %sh %sm %ss, DEC %sd %sm %ss',
                    x, y, ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3],
                    dec_str[3:5], dec_str[5:])
    logger.info(
        'ds9 reports these pixels as (1,1), (1,2048), etc. with the same RA, Dec.'
    )
Code example #4
def test_simplegeometry():
    """Test charge deflection model for image with charges in only the central pixel(s).
    """
    size = 50
    center = 25
    shiftcoeff = 1.e-7
    # shift coefficients in DECam are of that order
    # note that this is fully degenerate with the gain, i.e. the flux level in the simulations

    level = 1.e5

    # create otherwise empty image with the central pixel set to `level`
    i0 = galsim.Image(size, size, dtype=np.float64, init_value=0)
    i0.setValue(center, center, level)

    # create otherwise empty images with three central pixels set to `level`
    # central row
    ir = galsim.Image(size, size, dtype=np.float64, init_value=0)
    ir.setValue(center - 1, center, level)
    ir.setValue(center, center, level)
    ir.setValue(center + 1, center, level)
    # central column
    it = galsim.Image(size, size, dtype=np.float64, init_value=0)
    it.setValue(center, center - 1, level)
    it.setValue(center, center, level)
    it.setValue(center, center + 1, level)

    # set up models, images
    cdr0 = galsim.cdmodel.PowerLawCD(2, shiftcoeff, 0, 0, 0, 0, 0, 0)
    i0cdr0 = cdr0.applyForward(i0)

    cdt0 = galsim.cdmodel.PowerLawCD(2, 0, shiftcoeff, 0, 0, 0, 0, 0)
    i0cdt0 = cdt0.applyForward(i0)
    cdrx = galsim.cdmodel.PowerLawCD(2, 0, 0, shiftcoeff, 0, 0, 0, 0)
    cdtx = galsim.cdmodel.PowerLawCD(2, 0, 0, 0, shiftcoeff, 0, 0, 0)

    # these should do something
    ircdtx = cdtx.applyForward(ir)
    itcdrx = cdrx.applyForward(it)

    # these shouldn't do anything
    itcdtx = cdtx.applyForward(it)
    ircdrx = cdrx.applyForward(ir)

    # R0, T0
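    # Expected values: a central charge of `level` pushes a fraction level*shiftcoeff of
    # its flux out of the central pixel, half going into each neighbouring pixel along
    # the relevant direction.  The decimal precision is scaled with the flux level.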
    np.testing.assert_almost_equal(i0cdr0(center, center),
                                   level * (1. - level * shiftcoeff),
                                   13 - int(np.log10(level)),
                                   "Central pixel wrong in test_onepixel R0")
    np.testing.assert_almost_equal(i0cdt0(center, center),
                                   level * (1. - level * shiftcoeff),
                                   13 - int(np.log10(level)),
                                   "Central pixel wrong in test_onepixel T0")

    np.testing.assert_almost_equal(
        i0cdr0(center + 1, center), level * (level * shiftcoeff / 2.),
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel R0")
    np.testing.assert_almost_equal(
        i0cdr0(center - 1, center), level * (level * shiftcoeff / 2.),
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel R0")

    np.testing.assert_almost_equal(
        i0cdt0(center, center + 1), level * (level * shiftcoeff / 2.),
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel T0")
    np.testing.assert_almost_equal(
        i0cdt0(center, center - 1), level * (level * shiftcoeff / 2.),
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel T0")

    # Tx
    np.testing.assert_almost_equal(ircdtx(center, center),
                                   level * (1. - 2. * level * shiftcoeff),
                                   13 - int(np.log10(level)),
                                   "Central pixel wrong in test_onepixel TX")
    np.testing.assert_almost_equal(
        ircdtx(center - 1, center), level * (1. - level * shiftcoeff),
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel TX")
    np.testing.assert_almost_equal(
        ircdtx(center + 1, center), level * (1. - level * shiftcoeff),
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel TX")

    np.testing.assert_almost_equal(ircdtx(center, center + 1),
                                   level * level * shiftcoeff,
                                   13 - int(np.log10(level)),
                                   "Central pixel wrong in test_onepixel TX")
    np.testing.assert_almost_equal(
        ircdtx(center - 1, center + 1), level * level * shiftcoeff / 2.,
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel TX")
    np.testing.assert_almost_equal(
        ircdtx(center + 1, center + 1), level * level * shiftcoeff / 2.,
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel TX")

    np.testing.assert_almost_equal(ircdtx(center, center - 1),
                                   level * level * shiftcoeff,
                                   13 - int(np.log10(level)),
                                   "Central pixel wrong in test_onepixel TX")
    np.testing.assert_almost_equal(
        ircdtx(center - 1, center - 1), level * level * shiftcoeff / 2.,
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel TX")
    np.testing.assert_almost_equal(
        ircdtx(center + 1, center - 1), level * level * shiftcoeff / 2.,
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel TX")

    # Rx
    np.testing.assert_almost_equal(itcdrx(center, center),
                                   level * (1. - 2. * level * shiftcoeff),
                                   13 - int(np.log10(level)),
                                   "Central pixel wrong in test_onepixel RX")
    np.testing.assert_almost_equal(
        itcdrx(center, center - 1), level * (1. - level * shiftcoeff),
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel RX")
    np.testing.assert_almost_equal(
        itcdrx(center, center + 1), level * (1. - level * shiftcoeff),
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel RX")

    np.testing.assert_almost_equal(itcdrx(center + 1,
                                          center), level * level * shiftcoeff,
                                   13 - int(np.log10(level)),
                                   "Central pixel wrong in test_onepixel RX")
    np.testing.assert_almost_equal(
        itcdrx(center + 1, center - 1), level * level * shiftcoeff / 2.,
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel RX")
    np.testing.assert_almost_equal(
        itcdrx(center + 1, center + 1), level * level * shiftcoeff / 2.,
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel RX")

    np.testing.assert_almost_equal(itcdrx(center - 1,
                                          center), level * level * shiftcoeff,
                                   13 - int(np.log10(level)),
                                   "Central pixel wrong in test_onepixel RX")
    np.testing.assert_almost_equal(
        itcdrx(center - 1, center - 1), level * level * shiftcoeff / 2.,
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel RX")
    np.testing.assert_almost_equal(
        itcdrx(center - 1, center + 1), level * level * shiftcoeff / 2.,
        13 - int(np.log10(level)),
        "Off-center pixel wrong in test_onepixel RX")

    # a model that should not change anything here
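    # (rseed is not defined in this excerpt; it is a module-level seed in the original
    # test file.)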
    u = galsim.UniformDeviate(rseed)

    cdnull = galsim.cdmodel.PowerLawCD(2, 0, 0, shiftcoeff * u(),
                                       shiftcoeff * u(), shiftcoeff * u(),
                                       shiftcoeff * u(), 0)
    i0cdnull = cdnull.applyForward(i0)

    # setting all pixels to 0 that we expect to be not 0...
    i0.setValue(center, center, 0)
    i0cdnull.setValue(center, center, 0)
    i0cdr0.setValue(center, center, 0)
    i0cdr0.setValue(center + 1, center, 0)
    i0cdr0.setValue(center - 1, center, 0)
    i0cdt0.setValue(center, center, 0)
    i0cdt0.setValue(center, center + 1, 0)
    i0cdt0.setValue(center, center - 1, 0)

    ircdtx.subImage(
        galsim.BoundsI(center - 1, center + 1, center - 1, center + 1)).fill(0)
    itcdrx.subImage(
        galsim.BoundsI(center - 1, center + 1, center - 1, center + 1)).fill(0)

    ircdrx.subImage(galsim.BoundsI(center - 1, center + 1, center,
                                   center)).fill(0)
    itcdtx.subImage(galsim.BoundsI(center, center, center - 1,
                                   center + 1)).fill(0)

    # ... and comparing
    np.testing.assert_array_almost_equal(
        i0cdnull.array, i0.array, 10,
        "i0cdnull array is not 0 where it should be")
    np.testing.assert_array_almost_equal(
        i0cdr0.array, i0.array, 10, "i0cdr0 array is not 0 where it should be")
    np.testing.assert_array_almost_equal(
        i0cdt0.array, i0.array, 10, "i0cdt0 array is not 0 where it should be")
    np.testing.assert_array_almost_equal(
        ircdtx.array, i0.array, 10, "ircdtx array is not 0 where it should be")
    np.testing.assert_array_almost_equal(
        ircdrx.array, i0.array, 10, "ircdrx array is not 0 where it should be")
    np.testing.assert_array_almost_equal(
        itcdtx.array, i0.array, 10, "itcdtx array is not 0 where it should be")
    np.testing.assert_array_almost_equal(
        itcdrx.array, i0.array, 10, "itcdrx array is not 0 where it should be")
Code example #5
def main(argv):
    # Where to find and output data.
    path, filename = os.path.split(__file__)
    datapath = os.path.abspath(os.path.join(path, "data/"))
    outpath = os.path.abspath(os.path.join(path, "output/"))

    # In non-script code, use getLogger(__name__) at module scope instead.
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo13")

    # Initialize (pseudo-)random number generator.
    random_seed = 123456
    rng = galsim.BaseDeviate(random_seed)

    # Generate a Poisson noise model.
    poisson_noise = galsim.PoissonNoise(rng)
    logger.info('Poisson noise model created.')

    # Read in the WFIRST filters, setting an AB zeropoint appropriate for this telescope given its
    # diameter and (since we didn't use any keyword arguments to modify this) using the typical
    # exposure time for WFIRST images.
    filters = wfirst.getBandpasses(AB_zeropoint=True)
    logger.debug('Read in WFIRST imaging filters.')

    logger.info('Reading from a parametric COSMOS catalog.')
    # Read in a galaxy catalog - just a random subsample of 100 galaxies for F814W<23.5 from COSMOS.
    cat_file_name = 'real_galaxy_catalog_example_fits.fits'
    dir = 'data'
    # Use the routine that can take COSMOS real or parametric galaxy information, and tell it we
    # want parametric galaxies that represent an I<23.5 sample.
    cat = galsim.COSMOSCatalog(cat_file_name, dir=dir, use_real=False)
    logger.info('Read in %d galaxies from catalog' % cat.nobjects)
    # Just use a few galaxies, to save time.  Note that we are going to put 4000 galaxy images into
    # our big image, so if we have n_use=10, each galaxy will appear 400 times.  Users who want a
    # more interesting image with greater variation in the galaxy population can change `n_use` to
    # something larger (but it should be <=100, the number of galaxies in this small example
    # catalog).  With 4000 galaxies in a 4k x 4k image with the WFIRST pixel scale, the effective
    # galaxy number density is 74/arcmin^2.  This is not the number density that is expected for a
    # sample that is so bright (I<23.5) but it makes the image more visually interesting.  One could
    # think of it as what you'd get if you added up several images at once, making the images for a
    # sample that is much deeper have the same S/N as that for an I<23.5 sample in a single image.
    n_use = 10
    n_tot = 4000

    # Here we carry out the initial steps that are necessary to get a fully chromatic PSF.  We use
    # the getPSF() routine in the WFIRST module, which knows all about the telescope parameters
    # (diameter, bandpasses, obscuration, etc.).  Note that we arbitrarily choose a single SCA
    # (Sensor Chip Assembly) rather than all of them, for faster calculations, and use a simple
    # representation of the struts for faster calculations.  To do a more exact calculation of the
    # chromaticity and pupil plane configuration, remove the `approximate_struts` and the `n_waves`
    # keyword from the call to getPSF():
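    # (That more exact variant would look roughly like
    #     PSFs = wfirst.getPSF(SCAs=use_SCA, logger=logger)
    #  i.e. the same call as below but without those two keywords; it is considerably slower.)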
    use_SCA = 7  # This could be any number from 1...18
    logger.info('Doing expensive pre-computation of PSF.')
    t1 = time.time()
    logger.setLevel(logging.DEBUG)
    PSFs = wfirst.getPSF(SCAs=use_SCA,
                         approximate_struts=True,
                         n_waves=10,
                         logger=logger)
    logger.setLevel(logging.INFO)
    PSF = PSFs[use_SCA]
    t2 = time.time()
    logger.info('Done PSF precomputation in %.1f seconds!' % (t2 - t1))

    # Define the size of the postage stamp that we use for each individual galaxy within the larger
    # image, and for the PSF images.
    stamp_size = 256

    # We choose a particular (RA, dec) location on the sky for our observation.
    ra_targ = 90. * galsim.degrees
    dec_targ = -10. * galsim.degrees
    targ_pos = galsim.CelestialCoord(ra=ra_targ, dec=dec_targ)
    # Get the WCS for an observation at this position.  We are not supplying a date, so the routine
    # will assume it's the vernal equinox.  We are also not supplying a position angle for the
    # observatory, which means that it will just find the optimal one (the one that has the solar
    # panels pointed most directly towards the Sun given this targ_pos and date).  The output of
    # this routine is a dict of WCS objects, one for each SCA.  We then take the WCS for the SCA
    # that we are using.
    wcs_list = wfirst.getWCS(world_pos=targ_pos, SCAs=use_SCA)
    wcs = wcs_list[use_SCA]
    # We need to find the center position for this SCA.  We'll tell it to give us a CelestialCoord
    # corresponding to (X, Y) = (wfirst.n_pix/2, wfirst.n_pix/2).
    SCA_cent_pos = wcs.toWorld(
        galsim.PositionD(wfirst.n_pix / 2, wfirst.n_pix / 2))

    # We randomly distribute points in (X, Y) on the CCD.
    # If we had a real galaxy catalog with positions in terms of RA, dec we could use wcs.toImage()
    # to find where those objects should be in terms of (X, Y).
    pos_rng = galsim.UniformDeviate(random_seed)
    # Make a list of (X, Y, F814W magnitude, n_rot, flip) values.
    # (X, Y) give the position of the galaxy centroid (or the center of the postage stamp into which
    # we draw the galaxy) in the big image.
    # F814W magnitudes are randomly drawn from the catalog, and are used to create a more realistic
    # flux distribution for the galaxies instead of just having the 10 flux values for the galaxies
    # we have chosen to draw.
    # n_rot says how many 90 degree rotations to include for a given realization of each galaxy, so
    # it doesn't appear completely identical each time we put it in the image.
    # flip is a random number that will determine whether we include an x-y flip for this appearance
    # of the galaxy or not.
    x_stamp = []
    y_stamp = []
    mag_stamp = []
    n_rot_stamp = []
    flip_stamp = []
    for i_gal in xrange(n_tot):
        x_stamp.append(pos_rng() * wfirst.n_pix)
        y_stamp.append(pos_rng() * wfirst.n_pix)
        # Note that we could use wcs.toWorld() to get the (RA, dec) for these (x, y) positions.  Or,
        # if we had started with (RA, dec) positions, we could have used wcs.toImage() to get the
        # CCD coordinates for those positions.
        mag_stamp.append(cat.param_cat.mag_auto[int(pos_rng() * cat.nobjects)])
        n_rot_stamp.append(int(4 * pos_rng()))
        flip_stamp.append(pos_rng())

    # Make the 2-component parametric GSObjects for each object, including chromaticity (roughly
    # appropriate SEDs per galaxy component, at the appropriate galaxy redshift).  Note that since
    # the PSF is position-independent within the SCA, we can simply do the convolution with that PSF
    # now instead of using a different one for each position.  We also have to include the correct
    # flux scaling: The catalog returns objects that would be observed by HST in 1 second
    # exposures. So for our telescope we scale up by the relative area and exposure time.  Note that
    # what is important is the *effective* area after taking into account obscuration.
    logger.info(
        'Processing the objects in the catalog to get GSObject representations'
    )
    # Choose a random set of unique indices in the catalog (will be the same each time script is
    # run, due to use of the same random seed):
    rand_indices = []
    while len(rand_indices) < n_use:
        tmp_ind = int(pos_rng() * cat.nobjects)
        if tmp_ind not in rand_indices:
            rand_indices.append(tmp_ind)
    obj_list = cat.makeGalaxy(rand_indices,
                              chromatic=True,
                              gal_type='parametric',
                              deep=True)
    gal_list = []
    hst_eff_area = 2.4**2 * (1. - 0.33**2)
    wfirst_eff_area = galsim.wfirst.diameter**2 * (
        1. - galsim.wfirst.obscuration**2)
    flux_scaling = (wfirst_eff_area / hst_eff_area) * wfirst.exptime
    mag_list = []
    for ind in range(len(obj_list)):
        # First, let's check what magnitude this object has in F814W.  We want to do this because
        # (to inject some variety into our images) we are going to rescale the fluxes in all bands
        # for different instances of this galaxy in the final image in order to get a reasonable S/N
        # distribution.  So we need to save the original magnitude in F814W, to compare with a
        # randomly drawn one from the catalog.  This is not something that most users would need to
        # do.
        mag_list.append(
            cat.param_cat.mag_auto[cat.orig_index[rand_indices[ind]]])

        # Convolve the chromatic galaxy and the chromatic PSF, and rescale flux.
        final = galsim.Convolve(flux_scaling * obj_list[ind], PSF)
        logger.debug('Pre-processing for galaxy %d completed.' % ind)
        gal_list.append(final)

    # Calculate the sky level for each filter, and draw the PSF and the galaxies through the
    # filters.
    for filter_name, filter_ in filters.iteritems():
        logger.info('Beginning work for {0}.'.format(filter_name))

        # Drawing PSF.  Note that the PSF object intrinsically has a flat SED, so if we convolve it
        # with a galaxy, it will properly take on the SED of the galaxy.  For the sake of this demo,
        # we will simply convolve with a 'star' that has a flat SED and unit flux in this band, so
        # that the PSF image will be normalized to unit flux. This does mean that the PSF image
        # being drawn here is not quite the right PSF for the galaxy.  Indeed, the PSF for the
        # galaxy effectively varies within it, since it differs for the bulge and the disk.  To make
        # a real image, one would have to choose SEDs for stars and convolve with a star that has a
        # reasonable SED, but we just draw with a flat SED for this demo.
        out_filename = os.path.join(outpath,
                                    'demo13_PSF_{0}.fits'.format(filter_name))
        # Approximate a point source.
        point = galsim.Gaussian(sigma=1.e-8, flux=1.)
        # Use a flat SED here, but one could use something else (a stellar SED, for instance,
        # or a typical galaxy SED), depending on the purpose for drawing the PSF.
        star_sed = galsim.SED(lambda x: 1).withFlux(
            1., filter_)  # Give it unit flux in this filter.
        star = galsim.Convolve(point * star_sed, PSF)
        img_psf = galsim.ImageF(64, 64)
        star.drawImage(bandpass=filter_,
                       image=img_psf,
                       scale=wfirst.pixel_scale)
        img_psf.write(out_filename)
        logger.debug(
            'Created PSF with flat SED for {0}-band'.format(filter_name))

        # Set up the full image that will contain all the individual galaxy images, with information
        # about WCS:
        final_image = galsim.ImageF(wfirst.n_pix, wfirst.n_pix, wcs=wcs)

        # Draw the galaxies into the image.
        for i_gal in xrange(n_use):
            logger.info(
                'Drawing image for the object at row %d in the input catalog' %
                i_gal)

            # We want to only draw the galaxy once (for speed), not over and over with different
            # sub-pixel offsets.  For this reason we ignore the sub-pixel offset entirely.  Note
            # that we are setting the postage stamp to have the average WFIRST pixel scale.  This is
            # simply an approximation for the purpose of speed; really, one should draw directly
            # into final_image, which has the appropriate WCS for WFIRST.  In that case, the image
            # of the galaxy might look different in different parts of the detector due to the WCS
            # (including distortion), and we would have to re-draw each time.  To keep the demo
            # relatively quick, we are just using the approximate average pixel scale and drawing
            # once.
            stamp = galsim.Image(stamp_size,
                                 stamp_size,
                                 scale=wfirst.pixel_scale)
            gal_list[i_gal].drawImage(filter_, image=stamp)

            # Have to find where to place it:
            for i_gal_use in range(i_gal * n_tot / n_use,
                                   (i_gal + 1) * n_tot / n_use):
                # Account for the fractional part of the position:
                ix = int(math.floor(x_stamp[i_gal_use] + 0.5))
                iy = int(math.floor(y_stamp[i_gal_use] + 0.5))
                # We don't actually use this offset.
                offset = galsim.PositionD(x_stamp[i_gal_use] - ix,
                                          y_stamp[i_gal_use] - iy)

                # Create a nominal bound for the postage stamp given the integer part of the
                # position.
                stamp_bounds = galsim.BoundsI(ix - 0.5 * stamp_size,
                                              ix + 0.5 * stamp_size - 1,
                                              iy - 0.5 * stamp_size,
                                              iy + 0.5 * stamp_size - 1)

                # Find the overlapping bounds between the large image and the individual postage
                # stamp.
                bounds = stamp_bounds & final_image.bounds

                # Just to inject a bit of variety into the image, so it isn't *quite* as obvious
                # that we've repeated the same 10 objects over and over, we randomly rotate the
                # postage stamp by some factor of 90 degrees and possibly include a random flip.
                if flip_stamp[i_gal_use] > 0.5:
                    new_arr = numpy.ascontiguousarray(
                        numpy.rot90(stamp.array, n_rot_stamp[i_gal_use]))
                else:
                    new_arr = numpy.ascontiguousarray(
                        numpy.fliplr(
                            numpy.rot90(stamp.array, n_rot_stamp[i_gal_use])))
                stamp_rot = galsim.Image(new_arr, scale=stamp.scale)
                stamp_rot.setOrigin(
                    galsim.PositionI(stamp_bounds.xmin, stamp_bounds.ymin))

                # Rescale the flux to match that of a randomly chosen galaxy in the catalog, but
                # keeping the same SED as for this particular galaxy.  This gives a bit more
                # variety in the flux values and SNR of the galaxies in the image without having
                # to render images of many more objects.
                flux_scaling = 10**(-0.4 *
                                    (mag_stamp[i_gal_use] - mag_list[i_gal]))
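                # (For example, a randomly drawn magnitude 1 mag fainter than the rendered
                # galaxy's gives flux_scaling = 10**(-0.4) ~= 0.40.)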

                # Copy the image into the right place in the big image.
                final_image[bounds] += flux_scaling * stamp_rot[bounds]

        # Now we're done with the per-galaxy drawing for this image.  The rest will be done for the
        # entire image at once.
        logger.info(
            'Postage stamps of all galaxies drawn on a single big image for this filter.'
        )
        logger.info('Adding the sky level, noise and detector non-idealities.')

        # First we get the amount of zodiacal light for a position corresponding to the center of
        # this SCA.  The results are provided in units of e-/arcsec^2, using the default WFIRST
        # exposure time since we did not explicitly specify one.  Then we multiply this by a factor
        # >1 to account for the amount of stray light that is expected.  If we do not provide a date
        # for the observation, then it will assume that it's the vernal equinox (sun at (0,0) in
        # ecliptic coordinates) in 2025.
        sky_level = wfirst.getSkyLevel(filters[filter_name],
                                       world_pos=SCA_cent_pos)
        sky_level *= (1.0 + wfirst.stray_light_fraction)
        # Make an image of the sky that takes into account the spatially variable pixel scale.  Note
        # that makeSkyImage() takes a bit of time.  If you do not care about the variable pixel
        # scale, you could simply compute an approximate sky level in e-/pix by multiplying
        # sky_level by wfirst.pixel_scale**2, and add that to final_image.
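        # That approximate alternative (a sketch only, not used here) would look like:
        #     approx_sky_level_pix = sky_level * wfirst.pixel_scale**2   # e-/pix
        #     final_image += approx_sky_level_pix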
        sky_image = final_image.copy()
        wcs.makeSkyImage(sky_image, sky_level)
        # This image is in units of e-/pix.  Finally we add the expected thermal backgrounds in this
        # band.  These are provided in e-/pix/s, so we have to multiply by the exposure time.
        sky_image += wfirst.thermal_backgrounds[filter_name] * wfirst.exptime
        # Adding sky level to the image.
        final_image += sky_image

        # Now that all sources of signal (from astronomical objects and background) have been added
        # to the image, we can start adding noise and detector effects.  There is a utility,
        # galsim.wfirst.allDetectorEffects(), that can apply ALL implemented noise and detector
        # effects in the proper order.  Here we step through the process and explain these in a bit
        # more detail without using that utility.

        # First, we include the expected Poisson noise:
        final_image.addNoise(poisson_noise)

        # The subsequent steps account for the non-ideality of the detectors.

        # 1) Reciprocity failure:
        # Reciprocity, in the context of photography, is the inverse relationship between the
        # incident flux (I) of a source object and the exposure time (t) required to produce a given
        # response (p) in the detector, i.e., p = I*t.  However, in NIR detectors, this relation
        # does not always hold.  The pixel response to a high flux is larger than its response to a low
        # flux. This flux-dependent non-linearity is known as 'reciprocity failure', and the
        # approximate amount of reciprocity failure for the WFIRST detectors is known, so we can
        # include this detector effect in our images.

        if diff_mode:
            # Save the image before applying the transformation to see the difference
            save_image = final_image.copy()

        # If we had wanted to, we could have specified a different exposure time than the default
        # one for WFIRST, but otherwise the following routine does not take any arguments.
        wfirst.addReciprocityFailure(final_image)
        logger.debug('Included reciprocity failure in {0}-band image'.format(
            filter_name))

        if diff_mode:
            # Isolate the changes due to reciprocity failure.
            diff = final_image - save_image

            out_filename = os.path.join(
                outpath, 'demo13_RecipFail_{0}.fits'.format(filter_name))
            final_image.write(out_filename)
            out_filename = os.path.join(
                outpath, 'demo13_diff_RecipFail_{0}.fits'.format(filter_name))
            diff.write(out_filename)

        # At this point in the image generation process, an integer number of photons gets
        # detected, hence we have to round the pixel values to integers:
        final_image.quantize()

        # 2) Adding dark current to the image:
        # Even when the detector is unexposed to any radiation, the electron-hole pairs that
        # are generated within the depletion region due to finite temperature are swept by the
        # high electric field at the junction of the photodiode. This small reverse bias
        # leakage current is referred to as 'dark current'. It is specified by the average
        # number of electrons reaching the detectors per unit time and has an associated
        # Poisson noise since it is a random event.
        dark_current = wfirst.dark_current * wfirst.exptime
        dark_noise = galsim.DeviateNoise(
            galsim.PoissonDeviate(rng, dark_current))
        final_image.addNoise(dark_noise)

        # NOTE: Sky level and dark current might appear like a constant background that can be
        # simply subtracted. However, these contribute to the shot noise and matter for the
        # non-linear effects that follow. Hence, these must be included at this stage of the
        # image generation process. We subtract these backgrounds in the end.

        # 3) Applying a quadratic non-linearity:
        # In order to convert the units from electrons to ADU, we must use the gain factor. The gain
        # has a weak dependency on the charge present in each pixel. This dependency is accounted
        # for by changing the pixel values (in electrons) and applying a constant nominal gain
        # later, which is unity in our demo.

        # Save the image before applying the transformation to see the difference:
        if diff_mode:
            save_image = final_image.copy()

        # Apply the WFIRST nonlinearity routine, which knows all about the nonlinearity expected in
        # the WFIRST detectors.
        wfirst.applyNonlinearity(final_image)
        # Note that users who wish to apply some other nonlinearity function (perhaps for other NIR
        # detectors, or for CCDs) can use the more general nonlinearity routine, which uses the
        # following syntax:
        # final_image.applyNonlinearity(NLfunc=NLfunc)
        # with NLfunc being a callable function that specifies how the output image pixel values
        # should relate to the input ones.
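        # For example (an illustrative sketch only; the quadratic coefficient here is made up):
        #     NLfunc = lambda x: x - 3.e-7 * x**2
        #     final_image.applyNonlinearity(NLfunc=NLfunc)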
        logger.debug(
            'Applied nonlinearity to {0}-band image'.format(filter_name))
        if diff_mode:
            diff = final_image - save_image

            out_filename = os.path.join(
                outpath, 'demo13_NL_{0}.fits'.format(filter_name))
            final_image.write(out_filename)
            out_filename = os.path.join(
                outpath, 'demo13_diff_NL_{0}.fits'.format(filter_name))
            diff.write(out_filename)

            # Save this image to do the diff after applying IPC.
            save_image = final_image.copy()

        # 4) Including Interpixel capacitance:
        # The voltage read at a given pixel location is influenced by the charges present in the
        # neighboring pixel locations due to capacitive coupling of sense nodes. This interpixel
        # capacitance effect is modeled as a linear effect that is described as a convolution of a
        # 3x3 kernel with the image.  The WFIRST IPC routine knows about the kernel already, so the
        # user does not have to supply it.
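        # (Conceptually, with an illustrative, entirely made-up, 3x3 coupling kernel such as
        #      [[0.00, 0.02, 0.00],
        #       [0.02, 0.92, 0.02],
        #       [0.00, 0.02, 0.00]]
        #  each pixel would keep ~92% of its signal and leak ~2% to each of its four nearest
        #  neighbors.  The WFIRST routine below uses its own measured kernel.)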
        wfirst.applyIPC(final_image)
        logger.debug('Applied interpixel capacitance to {0}-band image'.format(
            filter_name))

        if diff_mode:
            # Isolate the changes due to the interpixel capacitance effect.
            diff = final_image - save_image

            out_filename = os.path.join(
                outpath, 'demo13_IPC_{0}.fits'.format(filter_name))
            final_image.write(out_filename)
            out_filename = os.path.join(
                outpath, 'demo13_diff_IPC_{0}.fits'.format(filter_name))
            diff.write(out_filename)

        # 5) Adding read noise:
        # Read noise is the noise due to the on-chip amplifier that converts the charge into an
        # analog voltage.  We already applied the Poisson noise due to the sky level, so read noise
        # should just be added as Gaussian noise:
        read_noise = galsim.GaussianNoise(rng, sigma=wfirst.read_noise)
        final_image.addNoise(read_noise)
        logger.debug('Added readnoise to {0}-band image'.format(filter_name))

        # We divide by the gain to convert from e- to ADU. Currently, the gain value in the WFIRST
        # module is just set to 1, since we don't know what the exact gain will be, although it is
        # expected to be approximately 1. Eventually, this may change when the camera is assembled,
        # and there may be a different value for each SCA. For now, there is just a single number,
        # which is equal to 1.
        final_image /= wfirst.gain

        # Finally, the analog-to-digital converter reads in an integer value.
        final_image.quantize()
        # Note that the image type after this step is still a float.  If we want to actually
        # get integer values, we can do new_img = galsim.Image(final_image, dtype=int)

        # Since many people are used to viewing background-subtracted images, we provide a
        # version with the background subtracted (also rounding that to an int).
        tot_sky_image = (sky_image + dark_current) / wfirst.gain
        tot_sky_image.quantize()
        final_image -= tot_sky_image

        logger.debug(
            'Subtracted background for {0}-band image'.format(filter_name))
        # Write the final image to a file.
        out_filename = os.path.join(outpath,
                                    'demo13_{0}.fits'.format(filter_name))
        final_image.write(out_filename)

        logger.info('Completed {0}-band image.'.format(filter_name))

    logger.info(
        'You can display the output in ds9 with a command line that looks something like:'
    )
    logger.info('ds9 -zoom 0.5 -scale limits -500 1000 -rgb ' +
                '-red output/demo13_H158.fits ' +
                '-green output/demo13_J129.fits ' +
                '-blue output/demo13_Y106.fits')
Code example #6
File: real.py  Project: kuonanhong/GalSim
    def __init__(self, real_galaxy_catalog, index=None, id=None, random=False,
                 rng=None, x_interpolant=None, k_interpolant=None, flux=None, flux_rescale=None,
                 pad_factor=4, noise_pad_size=0, gsparams=None, logger=None):


        if rng is None:
            self.rng = galsim.BaseDeviate()
        elif not isinstance(rng, galsim.BaseDeviate):
            raise TypeError("The rng provided to RealGalaxy constructor is not a BaseDeviate")
        else:
            self.rng = rng
        self._rng = self.rng.duplicate()  # This is only needed if we want to make sure eval(repr)
                                          # results in the same object.

        if flux is not None and flux_rescale is not None:
            raise TypeError("Cannot supply a flux and a flux rescaling factor!")

        if isinstance(real_galaxy_catalog, tuple):
            # Special (undocumented) way to build a RealGalaxy without needing the rgc directly
            # by providing the things we need from it.  Used by COSMOSGalaxy.
            self.gal_image, self.psf_image, noise_image, pixel_scale, var = real_galaxy_catalog
            use_index = 0  # For the logger statements below.
            if logger:
                logger.debug('RealGalaxy %d: Start RealGalaxy constructor.',use_index)
            self.catalog_file = None
        else:
            # Get the index to use in the catalog
            if index is not None:
                if id is not None or random is True:
                    raise AttributeError('Too many methods for selecting a galaxy!')
                use_index = index
            elif id is not None:
                if random is True:
                    raise AttributeError('Too many methods for selecting a galaxy!')
                use_index = real_galaxy_catalog.getIndexForID(id)
            elif random:
                ud = galsim.UniformDeviate(self.rng)
                use_index = int(real_galaxy_catalog.nobjects * ud())
                if hasattr(real_galaxy_catalog, 'weight'):
                    # If weight factors are available, make sure the random selection uses the
                    # weights to remove the catalog-level selection effects (flux_radius-dependent
                    # probability of making a postage stamp for a given object).
                    while ud() > real_galaxy_catalog.weight[use_index]:
                        # Pick another one to try.
                        use_index = int(real_galaxy_catalog.nobjects * ud())
            else:
                raise AttributeError('No method specified for selecting a galaxy!')
            if logger:
                logger.debug('RealGalaxy %d: Start RealGalaxy constructor.',use_index)

            # Read in the galaxy, PSF images; for now, rely on pyfits to raise I/O errors.
            self.gal_image = real_galaxy_catalog.getGal(use_index)
            if logger:
                logger.debug('RealGalaxy %d: Got gal_image',use_index)

            self.psf_image = real_galaxy_catalog.getPSF(use_index)
            if logger:
                logger.debug('RealGalaxy %d: Got psf_image',use_index)

            #self.noise = real_galaxy_catalog.getNoise(use_index, self.rng, gsparams)
            # We need to duplicate some of the RealGalaxyCatalog.getNoise() function, since we
            # want it to be possible to have the RealGalaxyCatalog in another process, and the
            # BaseCorrelatedNoise object is not picklable.  So we just build it here instead.
            noise_image, pixel_scale, var = real_galaxy_catalog.getNoiseProperties(use_index)
            if logger:
                logger.debug('RealGalaxy %d: Got noise_image',use_index)
            self.catalog_file = real_galaxy_catalog.getFileName()

        if noise_image is None:
            self.noise = galsim.UncorrelatedNoise(var, rng=self.rng, scale=pixel_scale,
                                                  gsparams=gsparams)
        else:
            ii = galsim.InterpolatedImage(noise_image, normalization="sb",
                                          calculate_stepk=False, calculate_maxk=False,
                                          x_interpolant='linear', gsparams=gsparams)
            self.noise = galsim.correlatednoise._BaseCorrelatedNoise(self.rng, ii, noise_image.wcs)
            self.noise = self.noise.withVariance(var)
        if logger:
            logger.debug('RealGalaxy %d: Finished building noise',use_index)

        # Save any other relevant information as instance attributes
        self.catalog = real_galaxy_catalog
        self.index = use_index
        self.pixel_scale = float(pixel_scale)
        self._x_interpolant = x_interpolant
        self._k_interpolant = k_interpolant
        self._pad_factor = pad_factor
        self._noise_pad_size = noise_pad_size
        self._flux = flux
        self._gsparams = gsparams

        # Convert noise_pad to the right noise to pass to InterpolatedImage
        if noise_pad_size:
            noise_pad = self.noise
        else:
            noise_pad = 0.

        # Build the InterpolatedImage of the PSF.
        self.original_psf = galsim.InterpolatedImage(
            self.psf_image, x_interpolant=x_interpolant, k_interpolant=k_interpolant,
            flux=1.0, gsparams=gsparams)
        if logger:
            logger.debug('RealGalaxy %d: Made original_psf',use_index)

        # Build the InterpolatedImage of the galaxy.
        # Use the stepK() value of the PSF as a maximum value for stepK of the galaxy.
        # (Otherwise, low surface brightness galaxies can get a spuriously high stepk, which
        # leads to problems.)
        self.original_gal = galsim.InterpolatedImage(
                self.gal_image, x_interpolant=x_interpolant, k_interpolant=k_interpolant,
                pad_factor=pad_factor, noise_pad_size=noise_pad_size,
                calculate_stepk=self.original_psf.stepK(),
                calculate_maxk=self.original_psf.maxK(),
                noise_pad=noise_pad, rng=self.rng, gsparams=gsparams)
        if logger:
            logger.debug('RealGalaxy %d: Made original_gal',use_index)

        # If flux is None, leave flux as given by original image
        if flux is not None:
            flux_rescale = flux / self.original_gal.getFlux()
        if flux_rescale is not None:
            self.original_gal *= flux_rescale
            self.noise *= flux_rescale**2

        # Calculate the PSF "deconvolution" kernel
        psf_inv = galsim.Deconvolve(self.original_psf, gsparams=gsparams)
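        # (Deconvolving by the original HST PSF is the key step for a RealGalaxy: the resulting
        # profile approximates the pre-seeing galaxy, which can then be convolved with whatever
        # target PSF is being simulated.)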

        # Initialize the SBProfile attribute
        GSObject.__init__(
            self, galsim.Convolve([self.original_gal, psf_inv], gsparams=gsparams))
        if logger:
            logger.debug('RealGalaxy %d: Made gsobject',use_index)

        # Save the noise in the image as an accessible attribute
        self.noise = self.noise.convolvedWith(psf_inv, gsparams)
        if logger:
            logger.debug('RealGalaxy %d: Finished building RealGalaxy',use_index)
Code example #7
    def __init__(self,
                 seed=0,
                 function=None,
                 x_min=None,
                 x_max=None,
                 interpolant=None,
                 npoints=256,
                 _init=True,
                 lseed=None):
        # lseed is an obsolete synonym for seed
        # I think this was the only place that the name lseed was actually used in the docs,
        # so we keep it for now for backwards compatibility.
        if lseed is not None:  # pragma: no cover
            from galsim.deprecated import depr
            depr('lseed', 1.1, 'seed')
            seed = lseed
        import galsim

        # Special internal "private" constructor option that doesn't do any initialization.
        if not _init: return

        # Set up the PRNG
        _galsim.BaseDeviate.__init__(self, seed)
        self._ud = galsim.UniformDeviate(self)

        # Basic input checking and setups
        if not function:
            raise TypeError('You must pass a function to DistDeviate!')

        self._function = function  # Save the inputs to be used in repr
        self._interpolant = interpolant
        self._npoints = npoints
        self._xmin = x_min
        self._xmax = x_max

        # Figure out if a string is a filename or something we should be using in an eval call
        if isinstance(function, str):
            input_function = function
            import os.path
            if os.path.isfile(function):
                if interpolant is None:
                    interpolant = 'linear'
                if x_min or x_max:
                    raise TypeError('Cannot pass x_min or x_max alongside a '
                                    'filename in arguments to DistDeviate')
                function = galsim.LookupTable(file=function,
                                              interpolant=interpolant)
                x_min = function.x_min
                x_max = function.x_max
            else:
                try:
                    function = galsim.utilities.math_eval('lambda x : ' +
                                                          function)
                    if x_min is not None:  # is not None in case x_min=0.
                        function(x_min)
                    else:
                        # Somebody would be silly to pass a string for evaluation without x_min,
                        # but we'd like to throw reasonable errors in that case anyway
                        function(0.6)  # A value unlikely to be a singular point of a function
                except Exception as e:
                    raise ValueError(
                        "String function must either be a valid filename or something that "
                        + "can eval to a function of x.\n" +
                        "Input provided: {0}\n".format(input_function) +
                        "Caught error: {0}".format(e))
        else:
            # Check that the function is actually a function
            if not (isinstance(function, galsim.LookupTable)
                    or hasattr(function, '__call__')):
                raise TypeError(
                    'Keyword function must be a callable function or a string')
            if interpolant:
                raise TypeError(
                    'Cannot provide an interpolant with a callable function argument'
                )
            if isinstance(function, galsim.LookupTable):
                if x_min or x_max:
                    raise TypeError(
                        'Cannot provide x_min or x_max with a LookupTable function '
                        + 'argument')
                x_min = function.x_min
                x_max = function.x_max
            else:
                if x_min is None or x_max is None:
                    raise TypeError(
                        'Must provide x_min and x_max when function argument is a '
                        + 'regular python callable function')

        # Compute the cumulative distribution function
        xarray = x_min + (1. * x_max - x_min) / (npoints - 1) * np.array(
            range(npoints), float)
        # cdf is the cumulative distribution function--just easier to type!
        dcdf = [
            galsim.integ.int1d(function, xarray[i], xarray[i + 1])
            for i in range(npoints - 1)
        ]
        cdf = [sum(dcdf[0:i]) for i in range(npoints)]
        # Quietly renormalize the probability if it wasn't already normalized
        totalprobability = cdf[-1]
        cdf = np.array(cdf) / totalprobability
        # Recompute delta CDF in case of floating-point differences in near-flat probabilities
        dcdf = np.diff(cdf)
        # Check that the probability is nonnegative
        if not np.all(dcdf >= 0):
            raise ValueError('Negative probability passed to DistDeviate: %s' %
                             function)
        # Now get rid of points with dcdf == 0
        elif not np.all(dcdf > 0.):
            # Remove consecutive dx=0 points, except endpoints
            zeroindex = np.where(dcdf == 0)[0]
            # numpy.where returns a tuple containing 1 array, which tends to be annoying for
            # indexing, so the [0] returns the actual array of interest (indices of dcdf==0).
            # Now, we want to remove consecutive dcdf=0 points, leaving the lower end.
            # Zeroindex contains the indices of all the dcdf=0 points, so we look for ones that are
            # only 1 apart; this tells us the *lower* of the two points, but we want to remove the
            # *upper*, so we add 1 to the resultant array.
            dindex = np.where(np.diff(zeroindex) == 1)[0] + 1
            # So dindex contains the indices of the elements of array zeroindex, which tells us the
            # indices that we might want to delete from cdf and xarray, so we delete
            # zeroindex[dindex].
            cdf = np.delete(cdf, zeroindex[dindex])
            xarray = np.delete(xarray, zeroindex[dindex])
            dcdf = np.diff(cdf)
            # Tweak the edges of dx=0 regions so function is always increasing
            for index in np.where(
                    dcdf == 0)[0][::-1]:  # reverse in case we need to delete
                if index + 2 < len(cdf):
                    # get epsilon, the smallest element where 1+eps>1
                    eps = np.finfo(cdf[index + 1].dtype).eps
                    if cdf[index + 2] - cdf[index + 1] > eps:
                        cdf[index + 1] += eps
                    else:
                        cdf = np.delete(cdf, index + 1)
                        xarray = np.delete(xarray, index + 1)
                else:
                    cdf = cdf[:-1]
                    xarray = xarray[:-1]
            dcdf = np.diff(cdf)
            if not (np.all(dcdf > 0)):
                raise RuntimeError(
                    'Cumulative probability in DistDeviate is too flat for program to fix'
                )

        self._inverseprobabilitytable = galsim.LookupTable(
            cdf, xarray, interpolant='linear')
        self.x_min = x_min
        self.x_max = x_max
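        # Draws from this deviate then amount to inverse-transform sampling: generate a uniform
        # deviate u in [0, 1) and evaluate this inverse-CDF lookup table at u (roughly,
        # self._inverseprobabilitytable(self._ud())).  For example, for function(x) = 2*x on
        # [0, 1] the CDF is x**2, so a uniform draw u maps back to x = sqrt(u).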
Code example #8
File: scene.py  Project: AriannaLanz/GalSim
    def makeGalaxy(self, index=None, gal_type=None, chromatic=False, noise_pad_size=5,
                   deep=False, sersic_prec=0.05, rng=None, gsparams=None):
        """
        Routine to construct GSObjects corresponding to the catalog entry with a particular index 
        or indices.

        The flux of the galaxy corresponds to a 1 second exposure time with the Hubble Space
        Telescope.  Users who wish to simulate F814W images with a different telescope and an
        exposure time longer than 1 second should multiply by that exposure time, and by the square
        of the ratio of the effective diameter of their telescope compared to that of HST.
        (Effective diameter may differ from the actual diameter if there is significant
        obscuration.)  See demo11.py for an example that explicitly takes this normalization into
        account.
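
        For instance, a rough sketch of that rescaling (`cosmos_cat`, `my_diam`, `my_obscuration`,
        and `my_exptime` are hypothetical names; cf. demo13.py, which does this for WFIRST):

            >>> hst_eff_area = 2.4**2 * (1. - 0.33**2)               # HST diameter^2 x (1 - obsc^2)
            >>> my_eff_area = my_diam**2 * (1. - my_obscuration**2)
            >>> gal = cosmos_cat.makeGalaxy(index)
            >>> gal *= (my_eff_area / hst_eff_area) * my_exptime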

        Due to the adopted flux normalization, drawing into an image with the COSMOS bandpass,
        zeropoint of 25.94, and pixel scale should give the right pixel values to mimic the actual
        COSMOS science images.  The COSMOS science images that we use are normalized to a count rate
        of 1 second, which is why there is no need to rescale to account for the COSMOS exposure
        time.

        There is an option to make chromatic objects (`chromatic=True`); however, it is important
        to bear in mind that we do not actually have spatially-resolved color information for these
        galaxies, so this keyword can only be True if we are using parametric galaxies.  Even then,
        we simply do the most arbitrary thing possible, which is to assign bulges an elliptical
        SED, disks a disk-like SED, and Sersic galaxies with intermediate values of n some
        intermediate SED.  We assume that the photometric redshift is the correct redshift for
        these galaxies (which is a good assumption for COSMOS 30-band photo-z for these bright
        galaxies).  For the given SED and redshift, we then normalize to give the right (observed)
        flux in F814W.  Note that for a mock "deep" sample, the redshift distributions of the
        galaxies would be modified, which is not included here.

        For this chromatic option, it is still the case that the output flux normalization is
        appropriate for the HST effective telescope diameter and a 1 second exposure time, so users
        who are simulating another scenario should account for this.

        Note that the returned objects use arcsec for the units of their linear dimension.  If you
        are using a different unit for other things (the PSF, WCS, etc.), then you should dilate
        the resulting object with `gal.dilate(galsim.arcsec / scale_unit)`.

        @param index            Index of the desired galaxy in the catalog for which a GSObject
                                should be constructed.  You may also provide a list or array of
                                indices, in which case a list of objects is returned. If None,
                                then a single galaxy is chosen at random.  [default: None]
        @param gal_type         Either 'real' or 'parametric'.  This determines which kind of 
                                galaxy model is made. [If catalog was loaded with `use_real=False`,
                                then this defaults to 'parametric', and in fact 'real' is 
                                not allowed.]
        @param chromatic        Make this a chromatic object, or not?  [default: False]
        @param noise_pad_size   For realistic galaxies, the size of region to pad with noise,
                                in arcsec.  [default: 5, an arbitrary, but not completely
                                ridiculous choice.]
        @param deep             Modify fluxes and sizes of galaxies from the F814W<23.5 sample in
                                order to roughly simulate an F814W<25 sample but with higher S/N, as
                                in GREAT3? [default: False]  Note that this keyword will be ignored
                                (except for issuing a warning) if the input catalog already
                                represents the F814W<25.2 sample.
        @param sersic_prec      The desired precision on the Sersic index n in parametric galaxies.
                                GalSim is significantly faster if it gets a smallish number of
                                Sersic values, so it can cache some of the calculations and use
                                them again the next time it gets a galaxy with the same index.
                                If `sersic_prec` is 0.0, then use the exact value of index n from
                                the catalog.  But if it is >0, then round the index to that
                                precision.  [default: 0.05]
        @param rng              A random number generator to use for selecting a random galaxy
                                (may be any kind of BaseDeviate or None) and to use in generating
                                any noise field when padding.  [default: None]
        @param gsparams         An optional GSParams argument.  See the docstring for GSParams for
                                details. [default: None]

        @returns    Either a GSObject or a ChromaticObject depending on the value of `chromatic`,
                    or a list of them if `index` is an iterable.
        """
        if not self.use_real:
            if gal_type is None:
                gal_type = 'parametric'
            elif gal_type != 'parametric':
                raise ValueError("Only 'parametric' galaxy type is allowed when use_real == False")

        if gal_type not in ['real', 'parametric']:
            raise ValueError("Invalid galaxy type %r"%gal_type)

        # We'll set these up if and when we need them.
        self._bandpass = None
        self._sed = None

        # Make rng if we will need it.
        if index is None or gal_type == 'real':
            if rng is None:
                rng = galsim.BaseDeviate()
            elif not isinstance(rng, galsim.BaseDeviate):
                raise TypeError("The rng provided to makeGalaxy is not a BaseDeviate")

        if index is None:
            ud = galsim.UniformDeviate(rng)
            index = int(self.nobjects * ud())

        if hasattr(index, '__iter__'):
            indices = index
        else:
            indices = [index]

        # Check whether this is a COSMOSCatalog meant to represent real or parametric objects, then
        # call the appropriate helper routine for that case.
        if gal_type == 'real':
            if chromatic:
                raise RuntimeError("Cannot yet make real chromatic galaxies!")
            gal_list = self._makeReal(indices, noise_pad_size, rng, gsparams)
        else:
            # If no pre-selection was done based on radius or flux, then we won't have checked
            # whether we're using the old or new catalog (the latter of which has a lot of
            # precomputations done).  Just in case, let's check here, though it does seem like a bit
            # of overkill to emit this warning each time.
            if 'hlr' not in self.param_cat.dtype.names:
                import warnings
                warnings.warn(
                    'You seem to have an old version of the COSMOS parameter file. '+
                    'Please run `galsim_download_cosmos` to re-download the COSMOS catalog ' +
                    'and take advantage of pre-computation of many quantities.')

            gal_list = self._makeParametric(indices, chromatic, sersic_prec, gsparams)

        # If trying to use the 23.5 sample and "fake" a deep sample, rescale the size and flux as
        # suggested in the GREAT3 handbook.
        if deep:
            if self.use_sample == '23.5':
                # Rescale the flux to get a limiting mag of 25 in F814W when starting with a
                # limiting mag of 23.5.  Make the galaxies a factor of 0.6 smaller and appropriately
                # fainter.
                flux_factor = 10.**(-0.4*1.5)
                size_factor = 0.6
                gal_list = [ gal.dilate(size_factor) * flux_factor for gal in gal_list ]
            elif self.use_sample == '25.2':
                import warnings
                warnings.warn(
                    'Ignoring `deep` argument, because the sample being used already '+
                    'corresponds to a flux limit of F814W<25.2')
            else:
                import warnings
                warnings.warn(
                    'Ignoring `deep` argument, because the sample being used does not '+
                    'correspond to a flux limit of F814W<23.5')

        # Store the orig_index as gal.index regardless of whether we have a RealGalaxy or not.
        # It gets set by _makeReal, but not by _makeParametric.
        # And if we are doing the deep scaling, then it gets messed up by that.
        # So just put it in here at the end to be sure.
        for gal, idx in zip(gal_list, indices):
            gal.index = self.orig_index[idx]
            if hasattr(gal, 'original'): gal.original.index = self.orig_index[idx]

        if hasattr(index, '__iter__'):
            return gal_list
        else:
            return gal_list[0]
Code example #9
File: demo7.py  Project: mardom/GalSim
def main(argv):
    """
    Make a fits image cube where each frame has two images of the same galaxy drawn 
    with regular FFT convolution and with photon shooting.

    We do this for 5 different PSFs and 5 different galaxies, each with 4 different (random)
    fluxes, sizes, and shapes.
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo7")

    # To turn off logging:
    #logger.propagate = False

    # Define some parameters we'll use below.

    # Make output directory if not already present.
    if not os.path.isdir('output'):
        os.mkdir('output')

    file_name = os.path.join('output', 'cube_phot.fits.gz')

    random_seed = 553728
    sky_level = 1.e4  # ADU / arcsec^2
    pixel_scale = 0.28  # arcsec
    nx = 64
    ny = 64

    gal_flux_min = 1.e4  # Range for galaxy flux
    gal_flux_max = 1.e5
    gal_hlr_min = 0.3  # arcsec
    gal_hlr_max = 1.3  # arcsec
    gal_e_min = 0.  # Range for ellipticity
    gal_e_max = 0.8

    psf_fwhm = 0.65  # arcsec

    # This script is set up as a comparison between using FFTs for doing the convolutions and
    # shooting photons.  The two methods have trade-offs in speed and accuracy which vary
    # with the kind of profile being drawn and the S/N of the object, among other factors.
    # In addition, for each method, there are a number of parameters GalSim uses that control
    # aspects of the calculation that further affect the speed and accuracy.
    #
    # We encapsulate these parameters with an object called GSParams.  The default values
    # are intended to be accurate enough for normal precision shear tests, without sacrificing
    # too much speed.
    #
    # Any PSF or galaxy object can be given a gsparams argument on construction that can
    # have different values to make the calculation more or less accurate (typically trading
    # off for speed or memory).
    #
    # In this script, we adjust some of the values slightly, just to show you how it works.
    # You could play around with these values and see what effect they have on the drawn images.
    # Usually, it requires a pretty drastic change in these parameters for you to be able to
    # notice the difference by eye.  But subtle effects that may impact the shapes of galaxies
    # can happen well before then.

    # Type help(galsim.GSParams) for the complete list of parameters and more detailed
    # documentation, including the default values for each parameter.
    gsparams = galsim.GSParams(
        alias_threshold=1.e-2,   # maximum fractional flux that may be aliased around edge of FFT
        maxk_threshold=2.e-3,    # k-values less than this may be excluded off edge of FFT
        xvalue_accuracy=1.e-4,   # approximations in real space aim to be this accurate
        kvalue_accuracy=1.e-4,   # approximations in fourier space aim to be this accurate
        shoot_accuracy=1.e-4,    # approximations in photon shooting aim to be this accurate
        minimum_fft_size=64)     # minimum size of ffts

    logger.info('Starting demo script 7')

    # Make the pixel:
    pix = galsim.Pixel(xw=pixel_scale)

    # Make the PSF profiles:
    psf1 = galsim.Gaussian(fwhm=psf_fwhm, gsparams=gsparams)
    psf2 = galsim.Moffat(fwhm=psf_fwhm, beta=2.4, gsparams=gsparams)
    psf3_inner = galsim.Gaussian(fwhm=psf_fwhm, flux=0.8, gsparams=gsparams)
    psf3_outer = galsim.Gaussian(fwhm=2 * psf_fwhm,
                                 flux=0.2,
                                 gsparams=gsparams)
    psf3 = psf3_inner + psf3_outer
    atmos = galsim.Gaussian(fwhm=psf_fwhm, gsparams=gsparams)
    # The OpticalPSF and set of Zernike values chosen below correspond to a reasonably well aligned,
    # smallish ~0.3m / 12 inch diameter telescope with a central obscuration of ~0.12m or 5 inches
    # diameter, being used in optical wavebands.
    # In the Noll convention, the value of the Zernike coefficient also gives the RMS optical path
    # difference across a circular pupil.  An RMS difference of ~0.5 or larger indicates that parts
    # of the wavefront are in fully destructive interference, and so we might expect aberrations to
    # become strong when Zernike aberrations summed in quadrature approach 0.5 wave.
    # The aberrations chosen in this case correspond to operating close to a 0.25 wave RMS optical
    # path difference:
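    # (For the coefficients used below, the quadrature sum is
    #  sqrt(0.06**2 + 0.12**2 + 0.08**2 + 0.07**2 + 0.04**2 + 0.13**2) ~= 0.22 waves RMS.)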
    optics = galsim.OpticalPSF(lam_over_diam=0.6 * psf_fwhm,
                               obscuration=0.4,
                               defocus=0.06,
                               astig1=0.12,
                               astig2=-0.08,
                               coma1=0.07,
                               coma2=0.04,
                               spher=-0.13,
                               gsparams=gsparams)
    # Convolve inherits the gsparams from the first item in the list.  (Or you can supply a
    # gsparams argument explicitly if you want to override this.)
    psf4 = galsim.Convolve([atmos, optics])
    atmos = galsim.Kolmogorov(fwhm=psf_fwhm, gsparams=gsparams)
    optics = galsim.Airy(lam_over_diam=0.3 * psf_fwhm, gsparams=gsparams)
    psf5 = galsim.Convolve([atmos, optics])
    psfs = [psf1, psf2, psf3, psf4, psf5]
    psf_names = [
        "Gaussian", "Moffat", "Double Gaussian", "OpticalPSF",
        "Kolmogorov * Airy"
    ]
    psf_times = [0, 0, 0, 0, 0]
    psf_fft_times = [0, 0, 0, 0, 0]
    psf_phot_times = [0, 0, 0, 0, 0]

    # Make the galaxy profiles:
    gal1 = galsim.Gaussian(half_light_radius=1, gsparams=gsparams)
    gal2 = galsim.Exponential(half_light_radius=1, gsparams=gsparams)
    gal3 = galsim.DeVaucouleurs(half_light_radius=1, gsparams=gsparams)
    gal4 = galsim.Sersic(half_light_radius=1, n=2.5, gsparams=gsparams)
    # A Sersic profile may be truncated if desired.
    # The units for this are expected to be arcsec (or specifically -- whatever units
    # you are using for all the size values as defined by the pixel_scale).
    bulge = galsim.Sersic(half_light_radius=0.7,
                          n=3.2,
                          trunc=8.5,
                          gsparams=gsparams)
    disk = galsim.Sersic(half_light_radius=1.2, n=1.5, gsparams=gsparams)
    gal5 = 0.4 * bulge + 0.6 * disk  # Net half-light radius is only approximate for this one.
    gals = [gal1, gal2, gal3, gal4, gal5]
    gal_names = [
        "Gaussian", "Exponential", "Devaucouleurs", "n=2.5 Sersic",
        "Bulge + Disk"
    ]
    gal_times = [0, 0, 0, 0, 0]
    gal_fft_times = [0, 0, 0, 0, 0]
    gal_phot_times = [0, 0, 0, 0, 0]

    # Other times to keep track of:
    setup_times = 0
    fft_times = 0
    phot_times = 0
    noise_times = 0

    # Loop over combinations of psf, gal, and make 4 random choices for flux, size, shape.
    all_images = []
    k = 0
    for ipsf in range(len(psfs)):
        psf = psfs[ipsf]
        psf_name = psf_names[ipsf]
        for igal in range(len(gals)):
            gal = gals[igal]
            gal_name = gal_names[igal]
            for i in range(4):
                logger.debug('Start work on image %d', i)
                t1 = time.time()

                # Initialize the random number generator we will be using.
                rng = galsim.UniformDeviate(random_seed + k)

                # Get a new copy, we'll want to keep the original unmodified.
                gal1 = gal.copy()

                # Generate random variates:
                flux = rng() * (gal_flux_max - gal_flux_min) + gal_flux_min
                gal1.setFlux(flux)

                hlr = rng() * (gal_hlr_max - gal_hlr_min) + gal_hlr_min
                gal1.applyDilation(hlr)

                beta_ellip = rng() * 2 * math.pi * galsim.radians
                ellip = rng() * (gal_e_max - gal_e_min) + gal_e_min
                gal_shape = galsim.Shear(e=ellip, beta=beta_ellip)
                gal1.applyShear(gal_shape)

                # Build the final object by convolving the galaxy, PSF and pixel response.
                final = galsim.Convolve([psf, pix, gal1])
                # For photon shooting, need a version without the pixel (see below).
                final_nopix = galsim.Convolve([psf, gal1])

                # Create the large, double width output image
                # Rather than provide a dx= argument to the draw commands, we can also
                # set the pixel scale in the image constructor.
                # Note: You can also change it after the construction with im.scale=pixel_scale
                image = galsim.ImageF(2 * nx + 2, ny, scale=pixel_scale)

                # Assign the following two "ImageViews", fft_image and phot_image.
                # Using the syntax below, these are views into the larger image.
                # Changes/additions to the sub-images referenced by the views are automatically
                # reflected in the original image.
                fft_image = image[galsim.BoundsI(1, nx, 1, ny)]
                phot_image = image[galsim.BoundsI(nx + 3, 2 * nx + 2, 1, ny)]

                logger.debug(
                    '   Read in training sample galaxy and PSF from file')
                t2 = time.time()

                # Draw the profile
                final.draw(fft_image)

                logger.debug(
                    '   Drew fft image.  Total drawn flux = %f.  .flux = %f',
                    fft_image.array.sum(), final.getFlux())
                t3 = time.time()

                # Add Poisson noise
                sky_level_pixel = sky_level * pixel_scale**2
                fft_image.addNoise(
                    galsim.PoissonNoise(rng, sky_level=sky_level_pixel))

                t4 = time.time()

                # The next two lines are just to get the output from this demo script
                # to match the output from the parsing of demo7.yaml.
                rng = galsim.UniformDeviate(random_seed + k)
                rng()
                rng()
                rng()
                rng()

                # Repeat for photon shooting image.
                # Photon shooting automatically convolves by the pixel, so we've made sure not
                # to include it in the profile!
                final_nopix.drawShoot(phot_image,
                                      max_extra_noise=sky_level_pixel / 100,
                                      rng=rng)
                t5 = time.time()

                # For photon shooting, galaxy already has Poisson noise, so we want to make
                # sure not to add that noise again!  Thus, we just add sky noise, which
                # is Poisson with the mean = sky_level_pixel
                pd = galsim.PoissonDeviate(rng, mean=sky_level_pixel)
                # DeviateNoise just adds the action of the given deviate to every pixel.
                phot_image.addNoise(galsim.DeviateNoise(pd))
                # For PoissonDeviate, the mean is not zero, so for a background-subtracted
                # image, we need to subtract the mean back off when we are done.
                phot_image -= sky_level_pixel

                logger.debug(
                    '   Added Poisson noise.  Image fluxes are now %f and %f',
                    fft_image.array.sum(), phot_image.array.sum())
                t6 = time.time()

                # Store that into the list of all images
                all_images += [image]

                k = k + 1
                logger.info(
                    '%d: %s * %s, flux = %.2e, hlr = %.2f, ellip = (%.2f,%.2f)',
                    k, gal_name, psf_name, flux, hlr, gal_shape.getE1(),
                    gal_shape.getE2())
                logger.debug('   Times: %f, %f, %f, %f, %f', t2 - t1, t3 - t2,
                             t4 - t3, t5 - t4, t6 - t5)

                psf_times[ipsf] += t6 - t1
                psf_fft_times[ipsf] += t3 - t2
                psf_phot_times[ipsf] += t5 - t4
                gal_times[igal] += t6 - t1
                gal_fft_times[igal] += t3 - t2
                gal_phot_times[igal] += t5 - t4
                setup_times += t2 - t1
                fft_times += t3 - t2
                phot_times += t5 - t4
                noise_times += t4 - t3 + t6 - t5

    logger.info('Done making images of galaxies')
    logger.info('')
    logger.info('Some timing statistics:')
    logger.info('   Total time for setup steps = %f', setup_times)
    logger.info('   Total time for regular fft drawing = %f', fft_times)
    logger.info('   Total time for photon shooting = %f', phot_times)
    logger.info('   Total time for adding noise = %f', noise_times)
    logger.info('')
    logger.info('Breakdown by PSF type:')
    for ipsf in range(len(psfs)):
        logger.info('   %s: Total time = %f  (fft: %f, phot: %f)',
                    psf_names[ipsf], psf_times[ipsf], psf_fft_times[ipsf],
                    psf_phot_times[ipsf])
    logger.info('')
    logger.info('Breakdown by Galaxy type:')
    for igal in range(len(gals)):
        logger.info('   %s: Total time = %f  (fft: %f, phot: %f)',
                    gal_names[igal], gal_times[igal], gal_fft_times[igal],
                    gal_phot_times[igal])
    logger.info('')

    # Now write the image to disk.
    # With any write command, you can optionally compress the file using several compression
    # schemes:
    #   'gzip' uses gzip on the full output file.
    #   'bzip2' uses bzip2 on the full output file.
    #   'rice' uses rice compression on the image, leaving the fits headers readable.
    #   'gzip_tile' uses gzip in tiles on the output image, leaving the fits headers readable.
    #   'hcompress' uses hcompress on the image, but it is only valid for 2-d data, so it
    #               doesn't work for writeCube.
    #   'plio' uses plio on the image, but it is only valid for positive integer data.
    # Furthermore, the first three have standard filename extensions associated with them,
    # so if you don't specify a compression, but the filename ends with '.gz', '.bz2' or '.fz',
    # the corresponding compression will be selected automatically.
    # In other words, the `compression='gzip'` specification is actually optional here:
    galsim.fits.writeCube(all_images, file_name, compression='gzip')
    logger.info('Wrote fft image to fits data cube %r', file_name)
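    # Hedged illustrations of the compression options described above.  The file names here are
    # hypothetical and these lines are not executed as part of the demo:
    #     galsim.fits.writeCube(all_images, 'output/cube.fits.gz')   # '.gz' extension implies gzip
    #     galsim.fits.writeCube(all_images, 'output/cube.fits.fz')   # '.fz' extension implies rice
    #     galsim.fits.writeCube(all_images, file_name, compression='bzip2')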
Code example #10
def run_tests(random_seed, outfile, config=None, gsparams=None, wmult=None, logger=None,
              fail_value=-666.):
    """Run a full set of tests, writing pickled tuple output to outfile.
    """
    import cPickle
    import numpy as np
    import galsim
    import galaxy_sample
    
    if config is None:
        use_config = False
        if gsparams is None:
            import warnings
            warnings.warn("No gsparams provided to run_tests?")
        if wmult is None:
            raise ValueError("wmult must be set if config=None.")
    else:
        use_config = True
        if gsparams is not None:
            import warnings
            warnings.warn(
                "gsparams is provided as a kwarg but the config['image']['gsparams'] will take "+
                "precedence.")
        if wmult is not None:
            import warnings
            warnings.warn(
                "wmult is provided as a kwarg but the config['image']['wmult'] will take "+
                "precedence.")
    # Get galaxy sample
    n_cosmos, hlr_cosmos, gabs_cosmos = galaxy_sample.get()
    # Only take the first NOBS objects
    n_cosmos = n_cosmos[0: NOBS]
    hlr_cosmos = hlr_cosmos[0: NOBS]
    gabs_cosmos = gabs_cosmos[0: NOBS]
    # Setup a UniformDeviate
    ud = galsim.UniformDeviate(random_seed)
    # Open the output file and write a header:
    fout = open(outfile, 'wb')
    fout.write(
        '#  g1obs_draw g2obs_draw sigma_draw delta_g1obs delta_g2obs delta_sigma '+
        'err_g1obs err_g2obs err_sigma hlr g1 g2\n')
    # Start looping through the sample objects and collect the results
    for i, hlr, gabs in zip(range(NOBS), hlr_cosmos, gabs_cosmos):
        print "Testing galaxy #"+str(i+1)+"/"+str(NOBS)+\
              " with (hlr, |g|) = "+str(hlr)+", "+str(gabs)
        random_theta = 2. * np.pi * ud()
        g1 = gabs * np.cos(2. * random_theta)
        g2 = gabs * np.sin(2. * random_theta)
        if use_config:
            # Increment the random seed so that each test gets a unique one
            config['image']['random_seed'] = random_seed + i * NOBS + 1
            config['gal'] = {
                "type" : "Gaussian" , "half_light_radius" : hlr ,
                "ellip" : {
                    "type" : "G1G2" , "g1" : g1 , "g2" : g2
                }
            }
            config['psf'] = {"type" : "Airy" , "lam_over_diam" : PSF_LAM_OVER_DIAM }
            try:
                results = galsim.utilities.compare_dft_vs_photon_config(
                    config, abs_tol_ellip=TOL_ELLIP, abs_tol_size=TOL_SIZE, logger=logger)
                test_ran = True
            except RuntimeError as err:
                test_ran = False
                pass
            # Uncomment lines below to output a check image
            #import copy
            #checkimage = galsim.config.BuildImage(copy.deepcopy(config))[0] #im = first element
            #checkimage.write('junk_'+str(i + 1)+'_'+str(j + 1)+'.fits')
        else:
            test_gsparams = galsim.GSParams(maximum_fft_size=MAX_FFT_SIZE)
            galaxy = galsim.Gaussian(half_light_radius=hlr, gsparams=test_gsparams)
            galaxy.applyShear(g1=g1, g2=g2)
            psf = galsim.Airy(lam_over_diam=PSF_LAM_OVER_DIAM, gsparams=test_gsparams)
            try:
                results = galsim.utilities.compare_dft_vs_photon_object(
                    galaxy, psf_object=psf, rng=ud, pixel_scale=PIXEL_SCALE, size=IMAGE_SIZE,
                    abs_tol_ellip=TOL_ELLIP, abs_tol_size=TOL_SIZE,
                    n_photons_per_trial=NPHOTONS, wmult=wmult)
                test_ran = True
            except RuntimeError as err:
                test_ran = False
                pass

        if not test_ran:
            import warnings
            warnings.warn(
                'RuntimeError encountered for galaxy '+str(i + 1)+'/'+str(NOBS)+': '+str(err))
            fout.write(
                '%e %e %e %e %e %e %e %e %e %e %e %e\n' % (
                    fail_value, fail_value, fail_value, fail_value, fail_value, fail_value,
                    fail_value, fail_value, fail_value, fail_value, fail_value, fail_value,
                )
            )
            fout.flush()
        else:
            fout.write(
                '%e %e %e %e %e %e %e %e %e %e %e %e\n' % (
                    results.g1obs_draw, results.g2obs_draw, results.sigma_draw,
                    results.delta_g1obs, results.delta_g2obs, results.delta_sigma,
                    results.err_g1obs, results.err_g2obs, results.err_sigma, hlr, g1, g2
                )
            )
            fout.flush()
Code example #11

# imports, etc.
import galsim
import galsim.roman
import datetime
import numpy as np
import matplotlib.pyplot as plt
from radec_to_chip import *

# Make a list of RA/dec central values and nearby values
n_vals = 100
seed = 314159
ud = galsim.UniformDeviate(seed=seed)
min_ra = 0.0
max_ra = 360.0
min_cos_dec = -0.95
max_cos_dec = 0.3
ra_cen_vals = np.zeros(n_vals)
dec_cen_vals = np.zeros(n_vals)
ra_vals = np.zeros(n_vals)
dec_vals = np.zeros(n_vals)
delta_dist = 0.5  # degrees offset allowed for (ra, dec) compared to center of focal plane
chris_sca = np.zeros(n_vals).astype(int)
pa_arr = np.zeros(n_vals)
date = datetime.datetime(2025, 1, 12)
for i in range(n_vals):
    # Keep choosing random FPA center positions until we get one that can be observed on the chosen
    # date.
Code example #12
def main(argv):
    """
    Make 4 directories, each with 5 files, each of which has 20 galaxies.

    Also, each directory corresponds to a different mass halo.
    The files in each direction are just different noise realizations and galaxy locations.

    The images also all have a second HDU with a weight image.

    And we build the multiple files in parallel.
    """
    from multiprocessing import current_process, cpu_count
    if sys.version_info < (3, 0):
        from multiprocessing import Process, Queue
    else:
        # Python 3 has different contexts for doing multiprocessing.
        # The "spawn" context is supposedly the safest context, since it pickles pretty much
        # everything to communicate across processes.  However, this can be highly inefficient
        # in some cases, and some things aren't picklable, so they can break when using
        # the spawn context.
        # The "fork" context doesn't pickle as much (especially some of those unpicklable things),
        # so it is generally more efficient and works in a wider variety of cases.
        # Starting in Python 3.8, some systems (e.g. MacOS) started making the spawn context the
        # default, so things can break using the default context.  This bit forces Python
        # to use the fork context here.
        from multiprocessing import get_context
        ctx = get_context('fork')
        Process = ctx.Process
        Queue = ctx.Queue

    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo9")

    # Define some parameters we'll use below.

    mass_list = [7.e14, 4.e14, 2.e14, 1.e14]  # mass in Msun/h
    nfiles = 5  # number of files per item in mass list

    image_size = 512  # pixels
    sky_level = 1.e2  # ADU / arcsec^2

    psf_D = 2.4  # meters
    psf_lam = 900.0  # nanometers; note that OpticalPSF will automatically convert units to
    # get lam/diam in units of arcsec, unless told otherwise.  In this case,
    # that is (900e-9m / 2.4m) * 206265 arcsec/rad = 0.077 arcsec.
    psf_obsc = 0.125  # (0.3m / 2.4m) = 0.125
    psf_nstruts = 4
    psf_strut_thick = 0.07
    psf_strut_angle = 15 * galsim.degrees

    psf_defocus = 0.04  # The aberrations are all taken to be quite modest here.
    psf_astig1 = 0.03  # (I don't actually know what are appropriate for HST...)
    psf_astig2 = -0.01
    psf_coma1 = 0.02
    psf_coma2 = 0.04
    psf_trefoil1 = -0.02
    psf_trefoil2 = 0.04

    gal_r_min = 0.05  # arcsec
    gal_r_max = 0.20  # arcsec
    gal_h_over_r_min = 0.1  #
    gal_h_over_r_max = 0.2  #
    gal_flux_min = 1.e4  # ADU
    gal_flux_max = 1.e6  # ADU

    field_g1 = 0.03  # The field shear is some cosmic shear applied to the whole field,
    field_g2 = 0.01  # taken to be behind the foreground NFW halo.
    nfw_conc = 4  # concentration parameter = virial radius / NFW scale radius
    nfw_z_halo = 0.3  # redshift of the halo
    nfw_z_source = 0.6  # redshift of the lensed sources
    omega_m = 0.3  # Omega matter for the background cosmology.
    omega_lam = 0.7  # Omega lambda for the background cosmology.

    field_shear = galsim.Shear(g1=field_g1, g2=field_g2)

    random_seed = 8383721

    logger.info('Starting demo script 9')

    def build_file(file_name, mass, nobj, ud, truth_file_name, halo_id,
                   first_obj_id):
        """A function that does all the work to build a single file.
           Returns the total time taken.
        """
        t1 = time.time()

        # Build the image onto which we will draw the galaxies.
        full_image = galsim.ImageF(image_size, image_size)

        # The "true" center of the image is allowed to be halfway between two pixels, as is the
        # case for even-sized images.  full_image.center is an integer position,
        # which would be 1/2 pixel up and to the right of the true center in this case.
        im_center = full_image.true_center

        # For the WCS, this time we use UVFunction, which lets you define arbitrary u(x,y)
        # and v(x,y) functions.  We use a simple cubic radial function to create a
        # pincushion distortion.  This is a typical kind of telescope distortion, although
        # we exaggerate the magnitude of the effect to make it more apparent.
        # The pixel size in the center of the image is 0.05, but near the corners (r=362),
        # the pixel size is approximately 0.075, which is much more distortion than is
        # normally present in typical telescopes.  But it makes the effect of the variable
        # pixel area obvious when you look at the weight image in the output files.
        ufunc1 = lambda x, y: 0.05 * x * (1. + 2.e-6 * (x**2 + y**2))
        vfunc1 = lambda x, y: 0.05 * y * (1. + 2.e-6 * (x**2 + y**2))

        # It's not required to provide the inverse functions.  However, if we don't, then
        # you will only be able to do toWorld operations, not the inverse toImage.
        # The inverse function does not have to be exact either.  For example, you could provide
        # a function that does some kind of iterative solution to whatever accuracy you care
        # about.  But in this case, we can do the exact inverse.
        #
        # Let w = sqrt(u**2 + v**2) and r = sqrt(x**2 + y**2).  Then the solutions are:
        # x = (u/w) r and y = (v/w) r, and we use Cardano's method to solve for r given w:
        # See http://en.wikipedia.org/wiki/Cubic_function#Cardano.27s_method
        #
        # w = 0.05 r + 2.e-6 * 0.05 * r**3
        # r = 100 * ( ( 5 sqrt(w**2 + 5.e3/27) + 5 w )**(1./3.) -
        #             ( 5 sqrt(w**2 + 5.e3/27) - 5 w )**(1./3.) )

        def xfunc1(u, v):
            import math
            wsq = u * u + v * v
            if wsq == 0.:
                return 0.
            else:
                w = math.sqrt(wsq)
                temp = 5. * math.sqrt(wsq + 5.e3 / 27)
                r = 100. * ((temp + 5 * w)**(1. / 3.) -
                            (temp - 5 * w)**(1. / 3))
                return u * r / w

        def yfunc1(u, v):
            import math
            wsq = u * u + v * v
            if wsq == 0.:
                return 0.
            else:
                w = math.sqrt(wsq)
                temp = 5. * math.sqrt(wsq + 5.e3 / 27)
                r = 100. * ((temp + 5 * w)**(1. / 3.) -
                            (temp - 5 * w)**(1. / 3))
                return v * r / w
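        # Illustrative sanity check (an addition, not part of the original demo): the Cardano-based
        # inverse above should round-trip the forward functions to within floating-point error, e.g.
        #     x0, y0 = 100., -50.
        #     u0, v0 = ufunc1(x0, y0), vfunc1(x0, y0)
        #     assert abs(xfunc1(u0, v0) - x0) < 1.e-6 and abs(yfunc1(u0, v0) - y0) < 1.e-6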

        # You could pass the above functions to UVFunction, and normally we would do that.
        # The only down side to doing so is that the specification of the WCS in the FITS
        # file is rather ugly.  GalSim is able to turn the python byte code into strings,
        # but they are basically a really ugly mess of random-looking characters.  GalSim
        # will be able to read it back in, but human readers will have no idea what WCS
        # function was used.  To see what they look like, uncomment this line and comment
        # out the later wcs line.
        #wcs = galsim.UVFunction(ufunc1, vfunc1, xfunc1, yfunc1, origin=im_center)

        # If you provide the functions as strings, then those strings will be preserved
        # in the FITS header in a form that is more legible to human readers.
        # It also has the extra benefit of matching the output from demo9.yaml, which we
        # always try to do.  The config file has no choice but to specify the functions
        # as strings.

        ufunc = '0.05 * x * (1. + 2.e-6 * (x**2 + y**2))'
        vfunc = '0.05 * y * (1. + 2.e-6 * (x**2 + y**2))'
        xfunc = (
            '( lambda w: ( 0 if w==0 else ' +
            '100.*u/w*(( 5*(w**2 + 5.e3/27.)**0.5 + 5*w )**(1./3.) - ' +
            '( 5*(w**2 + 5.e3/27.)**0.5 - 5*w )**(1./3.))))( (u**2+v**2)**0.5 )'
        )
        yfunc = (
            '( lambda w: ( 0 if w==0 else ' +
            '100.*v/w*(( 5*(w**2 + 5.e3/27.)**0.5 + 5*w )**(1./3.) - ' +
            '( 5*(w**2 + 5.e3/27.)**0.5 - 5*w )**(1./3.))))( (u**2+v**2)**0.5 )'
        )

        # The origin parameter defines where on the image should be considered (x,y) = (0,0)
        # in the WCS functions.
        wcs = galsim.UVFunction(ufunc, vfunc, xfunc, yfunc, origin=im_center)

        # Assign this wcs to full_image
        full_image.wcs = wcs

        # The weight image will hold the inverse variance for each pixel.
        # We can set the wcs directly on construction with the wcs parameter.
        weight_image = galsim.ImageF(image_size, image_size, wcs=wcs)

        # It is common for astrometric images to also have a bad pixel mask.  We don't have any
        # defect simulation currently, so our bad pixel masks are currently all zeros.
        # But someday, we plan to add defect functionality to GalSim, at which point, we'll
        # be able to mark those defects on a bad pixel mask.
        # Note: the S in ImageS means to use "short int" for the data type.
        # This is a typical choice for a bad pixel image.
        badpix_image = galsim.ImageS(image_size, image_size, wcs=wcs)

        # We also draw a PSF image at the location of every galaxy.  This isn't normally done,
        # and since some of the PSFs overlap, it's not necessarily so useful to have this kind
        # of image.  But in this case, it's fun to look at the psf image, especially with
        # something like log scaling in ds9 to see how crazy an aberrated OpticalPSF with
        # struts can look when there is no atmospheric component to blur it out.
        psf_image = galsim.ImageF(image_size, image_size, wcs=wcs)

        # We will also write some truth information to an output catalog.
        # In real simulations, it is often useful to have a catalog of the truth values
        # to compare to measurements either directly or as cuts on the galaxy sample to
        # find where systematic errors are largest.
        # For now, we just make an empty OutputCatalog object with the names and types of the
        # columns.
        names = [
            'object_id', 'halo_id', 'flux', 'radius', 'h_over_r',
            'inclination.rad', 'theta.rad', 'mu', 'redshift', 'shear.g1',
            'shear.g2', 'pos.x', 'pos.y', 'image_pos.x', 'image_pos.y',
            'halo_mass', 'halo_conc', 'halo_redshift'
        ]
        types = [
            int, int, float, float, float, float, float, float, float, float,
            float, float, float, float, float, float, float, float
        ]
        truth_cat = galsim.OutputCatalog(names, types)

        # Setup the NFWHalo stuff:
        nfw = galsim.NFWHalo(mass=mass,
                             conc=nfw_conc,
                             redshift=nfw_z_halo,
                             omega_m=omega_m,
                             omega_lam=omega_lam)
        # Note: the last two are optional.  If they are omitted, then (omega_m=0.3, omega_lam=0.7)
        # are actually the defaults.  If you only specify one of them, the other is set so that
        # the total is 1.  But you can define both values so that the total is not 1 if you want.
        # Radiation is assumed to be zero and dark energy equation of state w = -1.
        # If you want to include either radiation or more complicated dark energy models,
        # you can define your own cosmology class that defines the functions a(z), E(a), and
        # Da(z_source, z_lens).  Then you can pass this to NFWHalo as a `cosmo` parameter.
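        # A minimal sketch of such a cosmology class (hypothetical -- the method names follow the
        # comment above; the bodies are flat-LCDM placeholders, and Da is left unimplemented):
        #     class MyCosmology:
        #         def a(self, z): return 1. / (1. + z)
        #         def E(self, a): return (omega_m / a**3 + omega_lam)**0.5
        #         def Da(self, z_source, z_lens=0.): ...  # angular diameter distance
        #     nfw = galsim.NFWHalo(mass=mass, conc=nfw_conc, redshift=nfw_z_halo,
        #                          cosmo=MyCosmology())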

        # Make the PSF profile outside the loop to minimize the (significant) OpticalPSF
        # construction overhead.
        psf = galsim.OpticalPSF(lam=psf_lam,
                                diam=psf_D,
                                obscuration=psf_obsc,
                                nstruts=psf_nstruts,
                                strut_thick=psf_strut_thick,
                                strut_angle=psf_strut_angle,
                                defocus=psf_defocus,
                                astig1=psf_astig1,
                                astig2=psf_astig2,
                                coma1=psf_coma1,
                                coma2=psf_coma2,
                                trefoil1=psf_trefoil1,
                                trefoil2=psf_trefoil2)

        for k in range(nobj):

            # Determine where this object is going to go.
            # We choose points randomly within a donut centered at the center of the main image
            # in order to avoid placing galaxies too close to the halo center where the lensing
            # is not weak.  We use an inner radius of 3 arcsec and an outer radius of 21 arcsec,
            # which is large enough to cover all the way to the corners, although we'll need
            # to watch out for galaxies that are fully off the edge of the image.
            radius = 21
            inner_radius = 3
            max_rsq = radius**2
            min_rsq = inner_radius**2
            while True:  # (This is essentially a do..while loop.)
                x = (2. * ud() - 1) * radius
                y = (2. * ud() - 1) * radius
                rsq = x**2 + y**2
                if rsq >= min_rsq and rsq <= max_rsq: break
            pos = galsim.PositionD(x, y)

            # We also need the position in pixels to determine where to place the postage
            # stamp on the full image.
            image_pos = wcs.toImage(pos)

            # Draw the flux from a power law distribution: N(f) ~ f^-1.5
            # For this, we use the class DistDeviate which can draw deviates from an arbitrary
            # probability distribution.  This distribution can be defined either as a functional
            # form as we do here, or as tabulated lists of x and p values, from which the
            # function is interpolated.
            flux_dist = galsim.DistDeviate(ud,
                                           function=lambda x: x**-1.5,
                                           x_min=gal_flux_min,
                                           x_max=gal_flux_max)
            flux = flux_dist()
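            # Hedged alternative (not executed here): the same power law could instead be given as
            # tabulated (x, p) values via a LookupTable; the 100-point grid below is made up.
            #     xs = [gal_flux_min + i * (gal_flux_max - gal_flux_min) / 99. for i in range(100)]
            #     flux_dist = galsim.DistDeviate(ud, function=galsim.LookupTable(xs, [x**-1.5 for x in xs]))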

            # We introduce here another surface brightness profile, called InclinedExponential.
            # It represents a typical 3D galaxy disk profile inclined at an arbitrary angle
            # relative to face on.
            #
            #     inclination =  0 degrees corresponds to a face-on disk, which is equivalent to
            #                             the regular Exponential profile.
            #     inclination = 90 degrees corresponds to an edge-on disk.
            #
            # A random orientation corresponds to the inclination angle taking the probability
            # distribution:
            #
            #     P(inc) = 0.5 sin(inc)
            #
            # so we again use a DistDeviate to generate these values.
            inc_dist = galsim.DistDeviate(ud,
                                          function=lambda x: 0.5 * math.sin(x),
                                          x_min=0,
                                          x_max=math.pi)
            inclination = inc_dist() * galsim.radians

            # The parameters scale_radius and scale_height give the scale distances in the
            # 3D distribution:
            #
            #     I(R,z) = I_0 / (2 scale_height) * sech^2(z/scale_height) * exp(-r/scale_radius)
            #
            # These values can be given separately if desired.  However, it is often easier to
            # give the ratio scale_h_over_r as an independent value, since the radius and height
            # values are correlated, while h/r is approximately independent of h or r.
            h_over_r = ud() * (gal_h_over_r_max -
                               gal_h_over_r_min) + gal_h_over_r_min

            radius = ud() * (gal_r_max - gal_r_min) + gal_r_min

            # The inclination is around the x-axis, so we want to rotate the galaxy by a
            # random angle.
            theta = ud() * math.pi * 2. * galsim.radians

            # Make the galaxy profile with these values:
            gal = galsim.InclinedExponential(scale_radius=radius,
                                             scale_h_over_r=h_over_r,
                                             inclination=inclination,
                                             flux=flux)
            gal = gal.rotate(theta)

            # Now apply the appropriate lensing effects for this position from
            # the NFW halo mass.
            try:
                g1, g2 = nfw.getShear(pos, nfw_z_source)
                nfw_shear = galsim.Shear(g1=g1, g2=g2)
            except:
                # This shouldn't happen, since we exclude the inner 3 arcsec, but it's a
                # good idea to use the try/except block here anyway.
                import warnings
                warnings.warn(
                    "Warning: NFWHalo shear is invalid -- probably strong lensing!  "
                    + "Using shear = 0.")
                nfw_shear = galsim.Shear(g1=0, g2=0)

            nfw_mu = nfw.getMagnification(pos, nfw_z_source)
            if nfw_mu < 0:
                import warnings
                warnings.warn(
                    "Warning: mu < 0 means strong lensing!  Using mu=25.")
                nfw_mu = 25
            elif nfw_mu > 25:
                import warnings
                warnings.warn(
                    "Warning: mu > 25 means strong lensing!  Using mu=25.")
                nfw_mu = 25

            # Calculate the total shear to apply
            # Since shear addition is not commutative, it is worth pointing out that
            # the order is in the sense that the second shear is applied first, and then
            # the first shear.  i.e. The field shear is taken to be behind the cluster.
            # Kind of a cosmic shear contribution between the source and the cluster.
            # However, this is not quite the same thing as doing:
            #     gal.shear(field_shear).shear(nfw_shear)
            # since the shear addition ignores the rotation that would occur when doing the
            # above lines.  This is normally ok, because the rotation is not observable, but
            # it is worth keeping in mind.
            total_shear = nfw_shear + field_shear

            # Apply the magnification and shear to the galaxy
            gal = gal.magnify(nfw_mu)
            gal = gal.shear(total_shear)

            # Build the final object
            final = galsim.Convolve([gal, psf])

            # Draw the stamp image
            # This will construct an appropriately sized postage-stamp image with the galaxy
            # drawn near the center of the image.  The bounds of the postage stamp will be set
            # such that the given center is close to the stamp center.  And the galaxy will be drawn
            # centered at that sub-pixel location on the image.
            # We also need to provide the local wcs at the current position.
            local_wcs = wcs.local(image_pos)
            stamp = final.drawImage(wcs=local_wcs, center=image_pos)

            # Find overlapping bounds
            bounds = stamp.bounds & full_image.bounds
            # If there is no overlap, then the intersection comes out as not defined, which we
            # can check with bounds.isDefined().
            if not bounds.isDefined():
                logger.info(
                    "object %d is fully off the edge of the image.  Skipping this one.",
                    k)
                continue
            full_image[bounds] += stamp[bounds]

            # Also draw the PSF
            psf_stamp = galsim.ImageF(
                stamp.bounds)  # Use same bounds as galaxy stamp
            psf.drawImage(psf_stamp, wcs=local_wcs, center=image_pos)
            psf_image[bounds] += psf_stamp[bounds]

            # Add the truth information for this object to the truth catalog
            row = ((first_obj_id + k), halo_id, flux, radius, h_over_r,
                   inclination.rad, theta.rad, nfw_mu, nfw_z_source,
                   total_shear.g1, total_shear.g2, pos.x, pos.y, image_pos.x,
                   image_pos.y, mass, nfw_conc, nfw_z_halo)
            truth_cat.addRow(row)

        # Add Poisson noise to the full image
        # Note: The normal calculation of Poisson noise isn't quite correct right now.
        # The pixel area is variable, which means the amount of sky flux that enters each
        # pixel is also variable.  The wcs classes have a function `makeSkyImage` which
        # will fill an image with the correct amount of sky flux given the sky level
        # in units of ADU/arcsec^2.  We use the weight image as our work space for this.
        wcs.makeSkyImage(weight_image, sky_level)

        # Add this to the current full_image (temporarily).
        full_image += weight_image

        # Add Poisson noise, given the current full_image.
        full_image.addNoise(galsim.PoissonNoise(ud))

        # Subtract the sky back off.
        full_image -= weight_image

        # The weight image is nominally the inverse variance of the pixel noise.  However, it is
        # common to exclude the Poisson noise from the objects themselves and only include the
        # noise from the sky photons.  The variance of the noise is just the sky level, which is
        # what is currently in the weight_image.  (If we wanted to include the variance from the
        # objects too, then we could use the full_image before we added the PoissonNoise to it.)
        # So all we need to do now is to invert the values in weight_image.
        weight_image.invertSelf()

        # Write the file to disk:
        galsim.fits.writeMulti(
            [full_image, badpix_image, weight_image, psf_image], file_name)

        # And write the truth catalog file
        truth_cat.write(truth_file_name)

        t2 = time.time()
        return t2 - t1

    def worker(input, output):
        """input is a queue with (args, info) tuples:
               args are the arguments to pass to build_file
               info is passed along to the output queue.
           output is a queue storing (result, info, proc) tuples:
               result is the return value from build_file
               info is passed through from the input queue.
               proc is the process name.
        """
        for (args, info) in iter(input.get, 'STOP'):
            result = build_file(*args)
            output.put((result, info, current_process().name))

    t1 = time.time()

    ntot = nfiles * len(mass_list)

    try:
        from multiprocessing import cpu_count
        ncpu = cpu_count()
        if ncpu > ntot:
            nproc = ntot
        else:
            nproc = ncpu
        logger.info("ncpu = %d.  Using %d processes", ncpu, nproc)
    except:
        nproc = 2
        logger.info("Unable to determine ncpu.  Using %d processes", nproc)

    # Make output directory if not already present.
    if not os.path.isdir('output'):
        os.mkdir('output')

    # Set up the task list
    task_queue = Queue()
    seed = random_seed
    halo_id = 0
    first_obj_id = 0
    for i in range(len(mass_list)):
        mass = mass_list[i]
        dir_name = "nfw%d" % (i + 1)
        dir = os.path.join('output', dir_name)
        if not os.path.isdir(dir): os.mkdir(dir)
        for j in range(nfiles):
            file_name = "cluster%04d.fits" % j
            file_name = os.path.join(dir, file_name)
            truth_file_name = "truth%04d.dat" % j
            truth_file_name = os.path.join(dir, truth_file_name)

            # Each image has a different number of objects.
            # We use a random number from 15 to 30.
            ud = galsim.UniformDeviate(seed)
            min_nobj = 15
            max_nobj = 30
            nobj = int(math.floor(ud() * (max_nobj - min_nobj + 1))) + min_nobj
            logger.info('Number of objects for %s = %d', file_name, nobj)

            # We put on the task queue the args to the build_file function and
            # some extra info to pass through to the output queue.
            # Our extra info is just the file name that we use to write out which file finished.
            args = (file_name, mass, nobj, ud, truth_file_name, halo_id,
                    first_obj_id)
            task_queue.put((args, file_name))
            # Need to step by the number of galaxies in each file to match the behavior
            # of the config parser.
            seed += nobj
            halo_id += 1
            first_obj_id += nobj

    # Run the tasks
    # Each Process command starts up a parallel process that will keep checking the queue
    # for a new task. If there is one there, it grabs it and does it. If not, it waits
    # until there is one to grab. When it finds a 'STOP', it shuts down.
    done_queue = Queue()
    for k in range(nproc):
        Process(target=worker, args=(task_queue, done_queue)).start()

    # In the meanwhile, the main process keeps going.  We pull each image off of the
    # done_queue and put it in the appropriate place on the main image.
    # This loop is happening while the other processes are still working on their tasks.
    # You'll see that these logging statements get printed out as the stamp images are still
    # being drawn.
    for i in range(ntot):
        result, info, proc = done_queue.get()
        file_name = info
        t = result
        logger.info('%s: Time for file %s was %f', proc, file_name, t)

    # Stop the processes
    # The 'STOP's could have been put on the task list before starting the processes, or you
    # can wait.  In some cases it can be useful to clear out the done_queue (as we just did)
    # and then add on some more tasks.  We don't need that here, but it's perfectly fine to do.
    # Once you are done with the processes, putting nproc 'STOP's will stop them all.
    # This is important, because the program will keep running as long as there are running
    # processes, even if the main process gets to the end.  So you do want to make sure to
    # add those 'STOP's at some point!
    for k in range(nproc):
        task_queue.put('STOP')

    t2 = time.time()

    logger.info('Total time taken using %d processes = %f', nproc, t2 - t1)
Code example #13
File: make_default_input.py  Project: mjuric/GalSim
def make_default_input():

    # Set the PSF catalogue values
    moffat_beta = np.zeros(NOBJECTS) + MOFFAT_BETA
    moffat_fwhm = np.zeros(NOBJECTS) + MOFFAT_FWHM
    moffat_e1 = np.zeros(NOBJECTS) + MOFFAT_E1
    moffat_e2 = np.zeros(NOBJECTS) + MOFFAT_E2
    moffat_trunc = np.zeros(NOBJECTS) + MOFFAT_TRUNCATIONFWHM * MOFFAT_FWHM
    # Then set the exponential disc catalogue fixed values
    exponential_hlr = np.zeros(NOBJECTS) + EXPONENTIAL_HLR
    # Then set the dVc bulge catalogue fixed values
    devaucouleurs_hlr = np.zeros(NOBJECTS) + DEVAUCOULEURS_HLR
    # Then set up the Gaussian RNG for making the ellipticity values
    urng = galsim.UniformDeviate(RNG_SEED)
    edist = galsim.GaussianDeviate(urng, sigma=EXPONENTIAL_DEVAUCOULEURS_SIGMA_E)
    # Slightly hokey way of making vectors of Gaussian deviates, using images... No direct NumPy
    # array-filling with galsim RNGs at the moment.
    #
    # In GREAT08 these galaxy ellipticities were made in rotated pairs to reduce shape noise, but for
    # this illustrative default file we do not do this.
    ime1 = galsim.ImageD(NOBJECTS, 1)
    ime1.addNoise(edist)
    exponential_e1 = ime1.array.flatten()
    ime2 = galsim.ImageD(NOBJECTS, 1)
    ime2.addNoise(edist)
    exponential_e2 = ime2.array.flatten()
    # Make galaxies co-elliptical
    devaucouleurs_e1 = exponential_e1
    devaucouleurs_e2 = exponential_e2
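    # Hedged note: with GalSim versions whose deviates can fill NumPy arrays directly (as done
    # elsewhere in these examples with UniformDeviate.generate), the image workaround above could
    # be replaced by something like:
    #     exponential_e1 = np.empty(NOBJECTS)
    #     edist.generate(exponential_e1)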

    # Add a centroid shift drawn uniformly from the disc of radius GAL_CENTROID_SHIFT_RADIUS around (0., 0.)
    dx = np.empty(NOBJECTS)
    dy = np.empty(NOBJECTS)
    for i in range(NOBJECTS):
        # Apply a random centroid shift:
        rsq = 2 * GAL_CENTROID_SHIFT_RADIUS_SQUARED
        while (rsq > GAL_CENTROID_SHIFT_RADIUS_SQUARED):
            dx[i] = (2. * urng() - 1.) * GAL_CENTROID_SHIFT_RADIUS
            dy[i] = (2. * urng() - 1.) * GAL_CENTROID_SHIFT_RADIUS
            rsq = dx[i]**2 + dy[i]**2

    # Then write this to file
    path, modfile = os.path.split(__file__)
    outfile = os.path.join(path, "galsim_default_input.asc")
    # Make a nice header with the default fields described
    header = ("# psf.beta  psf.fwhm  psf.e1  psf.e2  psf.trunc"+
              "  disk.hlr  disk.e1  disk.e2"+
              "  bulge.hlr  bulge.e1  bulge.e2"+
              "  gal.shift.dx  gal.shift.dy \n")
    # Open the file and output the columns in the correct order, row-by-row
    output = open(outfile, "w")
    output.write("#  galsim_default_input.asc : illustrative default input catalog for GalSim\n")
    output.write("#\n")
    output.write(header)
    for i in range(NOBJECTS):
        outline = (" %6.2f  %6.2f  %7.3f  %7.3f  %6.2f  %6.2f  %14.7f  %14.7f "+
                   "%6.2f  %14.7f  %14.7f  %14.7f  %14.7f\n") % \
            (moffat_beta[i], moffat_fwhm[i], moffat_e1[i], moffat_e2[i], moffat_trunc[i],
             exponential_hlr[i], exponential_e1[i], exponential_e2[i],
             devaucouleurs_hlr[i], devaucouleurs_e1[i], devaucouleurs_e2[i], dx[i], dy[i])
        output.write(outline)
    output.close()
Code example #14
def main(argv):
    """
    Make images to be used for characterizing the brighter-fatter effect
      - Each fits file is 5 x 5 postage stamps.
      - Each postage stamp is 40 x 40 pixels.
      - There are 3 sets of 5 images each.  The 5 images are at 5 different flux levels
      - The three sets are (bf_1) B-F off, (bf_2) B-F on, diffusion off, (bf_3) B-F and diffusion on
      - Each image is in output/bf_set/bf_nfile.fits, where set ranges from 1-3 and nfile ranges from 1-5.
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("bf_plots")

    # Add the wavelength info
    bppath = "../../share/bandpasses/"
    sedpath = "../../share/"
    sed = galsim.SED(os.path.join(sedpath, 'CWW_E_ext.sed'), 'nm',
                     'flambda').thin()

    # Add the directions (seems to work - CL)
    fratio = 1.2
    obscuration = 0.2
    seed = 12345
    assigner = galsim.FRatioAngles(fratio, obscuration, seed)
    bandpass = galsim.Bandpass(os.path.join(bppath, 'LSST_r.dat'), 'nm').thin()
    rng3 = galsim.BaseDeviate(1234)
    sampler = galsim.WavelengthSampler(sed, bandpass, rng3)

    # Define some parameters we'll use below.
    # Normally these would be read in from some parameter file.

    nx_tiles = 10  #
    ny_tiles = 10  #
    stamp_xsize = 40  #
    stamp_ysize = 40  #

    random_seed = 6424512  #

    pixel_scale = 0.2  # arcsec / pixel
    sky_level = 0.01  # ADU / arcsec^2

    # Make output directory if not already present.
    if not os.path.isdir('output'):
        os.mkdir('output')

    gal_sigma = 0.2  # arcsec
    psf_sigma = 0.01  # arcsec
    pixel_scale = 0.2  # arcsec / pixel
    noise = 0.01  # standard deviation of the counts in each pixel

    shift_radius = 0.2  # arcsec (=pixels)

    logger.info('Starting bf_plots using:')
    logger.info('    - image with %d x %d postage stamps', nx_tiles, ny_tiles)
    logger.info('    - postage stamps of size %d x %d pixels', stamp_xsize,
                stamp_ysize)
    logger.info('    - Centroid shifts up to = %.2f pixels', shift_radius)

    rng = galsim.BaseDeviate(5678)
    sensor1 = galsim.Sensor()
    sensor2 = galsim.SiliconSensor(rng=rng, diffusion_factor=0.0)
    sensor3 = galsim.SiliconSensor(rng=rng)

    for set in range(1, 4):
        starttime = time.time()
        exec("sensor = sensor%d" % set)
        for nfile in range(1, 6):
            # Make bf_x directory if not already present.
            if not os.path.isdir('output/bf_%d' % set):
                os.mkdir('output/bf_%d' % set)

            gal_file_name = os.path.join('output',
                                         'bf_%d/bf_%d.fits' % (set, nfile))
            sex_file_name = os.path.join(
                'output', 'bf_%d/bf_%d_SEX.fits.cat.reg' % (set, nfile))
            sexfile = open(sex_file_name, 'w')
            gal_flux = 2.0e5 * nfile  # total counts on the image
            # Define the galaxy profile
            gal = galsim.Gaussian(flux=gal_flux, sigma=gal_sigma)
            logger.debug('Made galaxy profile')

            # Define the PSF profile
            psf = galsim.Gaussian(
                flux=1., sigma=psf_sigma)  # PSF flux should always = 1
            logger.debug('Made PSF profile')

            # This profile is placed with different orientations and noise realizations
            # at each postage stamp in the gal image.
            gal_image = galsim.ImageF(stamp_xsize * nx_tiles - 1,
                                      stamp_ysize * ny_tiles - 1,
                                      scale=pixel_scale)
            psf_image = galsim.ImageF(stamp_xsize * nx_tiles - 1,
                                      stamp_ysize * ny_tiles - 1,
                                      scale=pixel_scale)

            shift_radius_sq = shift_radius**2

            first_in_pair = True  # Make pairs that are rotated by 90 degrees

            k = 0
            for iy in range(ny_tiles):
                for ix in range(nx_tiles):
                    # The normal procedure for setting random numbers in GalSim is to start a new
                    # random number generator for each object using sequential seed values.
                    # This sounds weird at first (especially if you were indoctrinated by Numerical
                    # Recipes), but for the boost random number generator we use, the "random"
                    # number sequences produced from sequential initial seeds are highly uncorrelated.
                    #
                    # The reason for this procedure is that when we use multiple processes to build
                    # our images, we want to make sure that the results are deterministic regardless
                    # of the way the objects get parcelled out to the different processes.
                    #
                    # Of course, this script isn't using multiple processes, so it isn't required here.
                    # However, we do it nonetheless in order to get the same results as the config
                    # version of this demo script (demo5.yaml).
                    ud = galsim.UniformDeviate(random_seed + k)

                    # Any kind of random number generator can take another RNG as its first
                    # argument rather than a seed value.  This makes both objects use the same
                    # underlying generator for their pseudo-random values.
                    #gd = galsim.GaussianDeviate(ud, sigma=gal_ellip_rms)

                    # The -1's in the next line are to provide a border of
                    # 1 pixel between postage stamps
                    b = galsim.BoundsI(ix * stamp_xsize + 1,
                                       (ix + 1) * stamp_xsize - 1,
                                       iy * stamp_ysize + 1,
                                       (iy + 1) * stamp_ysize - 1)
                    sub_gal_image = gal_image[b]
                    sub_psf_image = psf_image[b]

                    # Great08 randomized the locations of the two galaxies in each pair,
                    # but for simplicity, we just do them in sequential postage stamps.

                    if first_in_pair:
                        # Use a random orientation:
                        beta = ud() * 2. * math.pi * galsim.radians

                        # Determine the ellipticity to use for this galaxy.
                        ellip = 0.0
                        first_in_pair = False
                    else:
                        # Use the previous ellip and beta + 90 degrees
                        beta += 90 * galsim.degrees
                        first_in_pair = True

                    # Make a new copy of the galaxy with an applied e1/e2-type distortion
                    # by specifying the ellipticity and a real-space position angle
                    this_gal = gal  #gal.shear(e=ellip, beta=beta)

                    # Apply a random shift_radius:
                    rsq = 2 * shift_radius_sq
                    while (rsq > shift_radius_sq):
                        dx = (2 * ud() - 1) * shift_radius
                        dy = (2 * ud() - 1) * shift_radius
                        rsq = dx**2 + dy**2

                    this_gal = this_gal.shift(dx, dy)
                    # Note that the shifted psf that we create here is purely for the purpose of being able
                    # to draw a separate, shifted psf image.  We do not use it when convolving the galaxy
                    # with the psf.
                    this_psf = psf.shift(dx, dy)

                    # Make the final image, convolving with the (unshifted) psf
                    final_gal = galsim.Convolve([psf, this_gal])

                    # Draw the image

                    if ix == 0 and iy == 0:
                        final_gal.drawImage(sub_gal_image,
                                            method='phot',
                                            sensor=sensor,
                                            surface_ops=[sampler, assigner],
                                            rng=rng,
                                            save_photons=True)
                        photon_file = os.path.join(
                            'output',
                            'bf_%d/bf_%d_nx_%d_ny_%d_photon_file.fits' %
                            (set, nfile, ix, iy))
                        sub_gal_image.photons.write(photon_file)
                    else:
                        final_gal.drawImage(sub_gal_image,
                                            method='phot',
                                            sensor=sensor,
                                            surface_ops=[sampler, assigner],
                                            rng=rng)

                    # Now add an appropriate amount of noise to get our desired S/N
                    # There are lots of definitions of S/N, but here is the one used by Great08
                    # We use a weighted integral of the flux:
                    #   S = sum W(x,y) I(x,y) / sum W(x,y)
                    #   N^2 = Var(S) = sum W(x,y)^2 Var(I(x,y)) / (sum W(x,y))^2
                    # Now we assume that Var(I(x,y)) is constant so
                    #   Var(I(x,y)) = noise_var
                    # We also assume that we are using a matched filter for W, so W(x,y) = I(x,y).
                    # Then a few things cancel and we find that
                    # S/N = sqrt( sum I(x,y)^2 / noise_var )
                    #
                    # The above procedure is encapsulated in the function image.addNoiseSNR which
                    # sets the flux appropriately given the variance of the noise model.
                    # In our case, noise_var = sky_level_pixel
                    sky_level_pixel = sky_level * pixel_scale**2
                    noise = galsim.PoissonNoise(ud, sky_level=sky_level_pixel)
                    #sub_gal_image.addNoiseSNR(noise, gal_signal_to_noise)
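                    # Illustrative check of the matched-filter S/N formula above (a sketch only;
                    # it assumes numpy is imported as np, which this script may not do):
                    #     snr = np.sqrt((sub_gal_image.array**2).sum() / sky_level_pixel)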

                    # Draw the PSF image
                    # No noise on PSF images.  Just draw it as is.
                    this_psf.drawImage(sub_psf_image)

                    # For first instance, measure moments
                    """
                    if ix==0 and iy==0:
                        psf_shape = sub_psf_image.FindAdaptiveMom()
                        temp_e = psf_shape.observed_shape.e
                        if temp_e > 0.0:
                            g_to_e = psf_shape.observed_shape.g / temp_e
                        else:
                            g_to_e = 0.0
                        logger.info('Measured best-fit elliptical Gaussian for first PSF image: ')
                        logger.info('  g1, g2, sigma = %7.4f, %7.4f, %7.4f (pixels)',
                                    g_to_e*psf_shape.observed_shape.e1,
                                    g_to_e*psf_shape.observed_shape.e2, psf_shape.moments_sigma)
                    """
                    x = b.center().x
                    y = b.center().y
                    logger.info(
                        'Galaxy (%d,%d): center = (%.0f,%.0f)  (e,beta) = (%.4f,%.3f)',
                        ix, iy, x, y, ellip, beta / galsim.radians)
                    k = k + 1
                    sexline = 'circle %f %f %f\n' % (x + dx / pixel_scale,
                                                     y + dy / pixel_scale,
                                                     gal_sigma / pixel_scale)
                    sexfile.write(sexline)

            sexfile.close()
            logger.info('Done making images of postage stamps')

            # Now write the images to disk.
            #psf_image.write(psf_file_name)
            #logger.info('Wrote PSF file %s',psf_file_name)

            gal_image.write(gal_file_name)
            logger.info(
                'Wrote image to %r',
                gal_file_name)  # using %r adds quotes around filename for us

        finishtime = time.time()
        print("Time to complete set %d = %.2f seconds\n" %
              (set, finishtime - starttime))
Code example #15
def test_fit():
    """Test fitting values to a Zernike series, using the ZernikeBasis function"""
    u = galsim.UniformDeviate(161803)
    for i in range(10):
        x = np.empty((1000, ), dtype=float)
        y = np.empty((1000, ), dtype=float)
        u.generate(x)
        u.generate(y)
        x -= 0.5
        y -= 0.5
        R_outer = (i % 5 / 5.0) + 1
        R_inner = ((i % 3 / 6.0) + 0.1) * (R_outer)
        x *= R_outer
        y *= R_outer

        # Should be able to fit quintic polynomial by including Zernikes up to Z_21
        cartesian_coefs = [
            [u() - 0.5, u() - 0.5, u() - 0.5, u() - 0.5, u() - 0.5],
            [u() - 0.5, u() - 0.5, u() - 0.5, u() - 0.5, 0],
            [u() - 0.5, u() - 0.5, u() - 0.5, 0, 0],
            [u() - 0.5, u() - 0.5, 0, 0, 0],
            [u() - 0.5, 0, 0, 0, 0],
        ]
        z = galsim.utilities.horner2d(x, y, cartesian_coefs)
        z2 = galsim.utilities.horner2d(x, y, cartesian_coefs, triangle=True)
        np.testing.assert_equal(z, z2)

        basis = galsim.zernike.zernikeBasis(21,
                                            x,
                                            y,
                                            R_outer=R_outer,
                                            R_inner=R_inner)
        coefs, _, _, _ = np.linalg.lstsq(basis.T, z, rcond=-1.)
        resids = (galsim.zernike.Zernike(
            coefs, R_outer=R_outer, R_inner=R_inner).evalCartesian(x, y) - z)
        resids2 = np.dot(basis.T, coefs).T - z
        assert resids.shape == x.shape
        assert resids2.shape == x.shape

        np.testing.assert_allclose(resids, 0, atol=1e-14)
        np.testing.assert_allclose(resids2, 0, atol=1e-14)

        # import matplotlib.pyplot as plt
        # fig, axes = plt.subplots(ncols=2, figsize=(8, 4))
        # scat1 = axes[0].scatter(x, y, c=z)
        # plt.colorbar(scat1, ax=axes[0])
        # scat2 = axes[1].scatter(x, y, c=resids)
        # plt.colorbar(scat2, ax=axes[1])
        # plt.show()
        # print(np.mean(resids), np.std(resids))

    # Should also work, and make congruent output, if the shapes of x and y are multi-dimensional
    for i in range(10):
        x = np.empty((1000, ), dtype=float)
        y = np.empty((1000, ), dtype=float)
        u.generate(x)
        u.generate(y)
        x -= 0.5
        y -= 0.5
        R_outer = (i % 5 / 5.0) + 1
        R_inner = ((i % 3 / 6.0) + 0.1) * (R_outer)
        x *= R_outer
        y *= R_outer
        x = x.reshape(25, 40)
        y = y.reshape(25, 40)

        # Should be able to fit quintic polynomial by including Zernikes up to Z_21
        cartesian_coefs = [
            [u() - 0.5, u() - 0.5, u() - 0.5, u() - 0.5, u() - 0.5],
            [u() - 0.5, u() - 0.5, u() - 0.5, u() - 0.5, 0],
            [u() - 0.5, u() - 0.5, u() - 0.5, 0, 0],
            [u() - 0.5, u() - 0.5, 0, 0, 0],
            [u() - 0.5, 0, 0, 0, 0],
        ]
        z = galsim.utilities.horner2d(x, y, cartesian_coefs)
        assert z.shape == (25, 40)
        z2 = galsim.utilities.horner2d(x, y, cartesian_coefs, triangle=True)
        np.testing.assert_equal(z, z2)

        basis = galsim.zernike.zernikeBasis(21,
                                            x,
                                            y,
                                            R_outer=R_outer,
                                            R_inner=R_inner)
        assert basis.shape == (22, 25, 40)
        # lstsq doesn't handle the extra dimension though...
        coefs, _, _, _ = np.linalg.lstsq(basis.reshape(21 + 1, 1000).T,
                                         z.ravel(),
                                         rcond=-1.)
        resids = (galsim.zernike.Zernike(
            coefs, R_outer=R_outer, R_inner=R_inner).evalCartesian(x, y) - z)
        resids2 = np.dot(basis.T, coefs).T - z
        assert resids.shape == resids2.shape == x.shape

        np.testing.assert_allclose(resids, 0, atol=1e-14)
        np.testing.assert_allclose(resids2, 0, atol=1e-14)
Code example #16
#

import cPickle
import numpy as np
import galsim
import matplotlib.pyplot as plt

# For information on where to download the .pkl file below, see the python script
# `devel/external/hst/make_cosmos_cfimage.py`
NOISEIMFILE = "acs_I_unrot_sci_20_noisearrays.pkl"  # Input pickled list filename
CFIMFILE_SUB = "acs_I_unrot_sci_20_cf_subtracted.fits" # Output image of correlation function (sub)
CFIMFILE_UNS = "acs_I_unrot_sci_20_cf_unsubtracted.fits" # Output image of correlation function

RSEED = 12334566

ud = galsim.UniformDeviate(RSEED)

# Case 1: subtract_mean=True; Case 2: subtract_mean=False
cn1 = galsim.getCOSMOSNoise(ud, CFIMFILE_SUB, dx_cosmos=1.)
cn2 = galsim.getCOSMOSNoise(ud, CFIMFILE_UNS, dx_cosmos=1.)

testim1 = galsim.ImageD(7, 7)
testim2 = galsim.ImageD(7, 7)
var1 = 0.
var2 = 0.

noisearrays = cPickle.load(open(NOISEIMFILE, 'rb'))
for i, noisearray in enumerate(noisearrays):
    noise1 = galsim.ImageViewD((noisearray.copy()).astype(np.float64), scale=1.)
    noise2 = galsim.ImageViewD((noisearray.copy()).astype(np.float64), scale=1.)
    cn1.applyWhiteningTo(noise1)
Code example #17
def test_gradient():
    # Start with a few that just quote the literature, e.g., Stephenson (2014).

    Z11 = galsim.zernike.Zernike([0] * 11 + [1])

    x = np.linspace(-1, 1, 100)
    x, y = np.meshgrid(x, x)

    def Z11_grad(x, y):
        # Z11 = sqrt(5) (6(x^2+y^2)^2 - 6(x^2+y^2)+1)
        r2 = x**2 + y**2
        gradx = 12 * np.sqrt(5) * x * (2 * r2 - 1)
        grady = 12 * np.sqrt(5) * y * (2 * r2 - 1)
        return gradx, grady

    # import matplotlib.pyplot as plt
    # fig, axes = plt.subplots(ncols=3, figsize=(12, 3))
    # scat0 = axes[0].scatter(x, y, c=Z11.evalCartesianGrad(x, y)[0])
    # fig.colorbar(scat0, ax=axes[0])
    # scat1 = axes[1].scatter(x, y, c=Z11_grad(x, y)[0])
    # fig.colorbar(scat1, ax=axes[1])
    # scat2 = axes[2].scatter(x, y, c=Z11.evalCartesianGrad(x, y)[0] - Z11_grad(x, y)[0])
    # fig.colorbar(scat2, ax=axes[2])
    # plt.show()

    np.testing.assert_allclose(Z11.evalCartesianGrad(x, y),
                               Z11_grad(x, y),
                               rtol=0,
                               atol=1e-12)

    Z28 = galsim.zernike.Zernike([0] * 28 + [1])

    def Z28_grad(x, y):
        # Z28 = sqrt(14) (x^6 - 15 x^4 y^2 + 15 x^2 y^4 - y^6)
        gradx = 6 * np.sqrt(14) * x * (x**4 - 10 * x**2 * y**2 + 5 * y**4)
        grady = -6 * np.sqrt(14) * y * (5 * x**4 - 10 * x**2 * y**2 + y**4)
        return gradx, grady

    np.testing.assert_allclose(Z28.evalCartesianGrad(x, y),
                               Z28_grad(x, y),
                               rtol=0,
                               atol=1e-12)

    # Now try some finite differences on a broader set of input

    def finite_difference_gradient(Z, x, y):
        dh = 1e-5
        return ((Z.evalCartesian(x + dh, y) - Z.evalCartesian(x - dh, y)) /
                (2 * dh),
                (Z.evalCartesian(x, y + dh) - Z.evalCartesian(x, y - dh)) /
                (2 * dh))

    u = galsim.UniformDeviate(1234)

    # Test finite differences against the analytic result for 25 different Zernikes, each with
    # a random number of random coefficients and random inner/outer radii.
    for j in range(25):
        nj = 1 + int(u() * 55)
        R_inner = 0.2 + 0.6 * u()
        R_outer = R_inner + 0.2 + 0.6 * u()
        Z = galsim.zernike.Zernike([0] + [u() for _ in range(nj)],
                                   R_inner=R_inner,
                                   R_outer=R_outer)

        np.testing.assert_allclose(finite_difference_gradient(Z, x, y),
                                   Z.evalCartesianGrad(x, y),
                                   rtol=1e-5,
                                   atol=1e-5)

    # Make sure the gradient of the zero-Zernike works
    Z = galsim.zernike.Zernike([0, 0])
    assert Z == Z.gradX == Z.gradX.gradX == Z.gradY == Z.gradY.gradY
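
If you want to verify the quoted Z11 gradient formula independently, a quick symbolic differentiation reproduces it. This sketch assumes SymPy is available; it is not otherwise used on this page.

import sympy as sp

x, y = sp.symbols('x y')
r2 = x**2 + y**2
Z11 = sp.sqrt(5) * (6*r2**2 - 6*r2 + 1)
# Both partial derivatives factor as 12*sqrt(5)*{x,y}*(2*(x**2 + y**2) - 1),
# matching Z11_grad above.
print(sp.factor(sp.diff(Z11, x)))
print(sp.factor(sp.diff(Z11, y)))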
Code example #18
0
File: test_config_value.py Project: maxmen/GalSim
def test_int_value():
    """Test various ways to generate an int value
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' } },

        'val1' : 9,
        'val2' : float(8.7),  # Reading as int will drop the fraction.
        'val3' : -400.8,      # Not floor - negatives will round up.
        'str1' : '8',
        'str2' : '-2',
        'cat1' : { 'type' : 'InputCatalog' , 'col' : 2 },
        'cat2' : { 'type' : 'InputCatalog' , 'col' : 3 },
        'ran1' : { 'type' : 'Random', 'min' : 0, 'max' : 3 },
        'ran2' : { 'type' : 'Random', 'min' : -5, 'max' : 10 },
        'seq1' : { 'type' : 'Sequence' },
        'seq2' : { 'type' : 'Sequence', 'step' : 3 },
        'seq3' : { 'type' : 'Sequence', 'first' : 1, 'step' : 5 },
        'seq4' : { 'type' : 'Sequence', 'first' : 10, 'step' : -2 },
        'seq5' : { 'type' : 'Sequence', 'first' : 1, 'last' : 2, 'repeat' : 2 },
        'list1' : { 'type' : 'List', 'items' : [ 73, 8, 3 ] },
        'list2' : { 'type' : 'List',
                    'items' : [ 6, 8, 1, 7, 3, 5, 1, 0, 6, 3, 8, 2 ],
                    'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } }
    }

    galsim.config.ProcessInput(config)

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, int)[0]
    np.testing.assert_equal(val1, 9)

    val2 = galsim.config.ParseValue(config,'val2',config, int)[0]
    np.testing.assert_equal(val2, 8)

    val3 = galsim.config.ParseValue(config,'val3',config, int)[0]
    np.testing.assert_equal(val3, -400)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, int)[0]
    np.testing.assert_equal(str1, 8)

    str2 = galsim.config.ParseValue(config,'str2',config, int)[0]
    np.testing.assert_equal(str2, -2)

    # Test values read from an InputCatalog
    input_cat = galsim.InputCatalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    for k in range(5):
        config['seq_index'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, int)[0])
        cat2.append(galsim.config.ParseValue(config,'cat2',config, int)[0])

    np.testing.assert_array_equal(cat1, [ 9, 0, -4, 9, 0 ])
    np.testing.assert_array_equal(cat2, [ -3, 8, 17, -3, 8 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234) # A second copy starting with the same seed.
    for k in range(6):
        ran1 = galsim.config.ParseValue(config,'ran1',config, int)[0]
        np.testing.assert_equal(ran1, int(math.floor(rng() * 4)))

        ran2 = galsim.config.ParseValue(config,'ran2',config, int)[0]
        np.testing.assert_equal(ran2, int(math.floor(rng() * 16))-5)

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    seq3 = []
    seq4 = []
    seq5 = []
    for k in range(6):
        config['seq_index'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, int)[0])
        seq2.append(galsim.config.ParseValue(config,'seq2',config, int)[0])
        seq3.append(galsim.config.ParseValue(config,'seq3',config, int)[0])
        seq4.append(galsim.config.ParseValue(config,'seq4',config, int)[0])
        seq5.append(galsim.config.ParseValue(config,'seq5',config, int)[0])

    np.testing.assert_array_equal(seq1, [ 0, 1, 2, 3, 4, 5 ])
    np.testing.assert_array_equal(seq2, [ 0, 3, 6, 9, 12, 15 ])
    np.testing.assert_array_equal(seq3, [ 1, 6, 11, 16, 21, 26 ])
    np.testing.assert_array_equal(seq4, [ 10, 8, 6, 4, 2, 0 ])
    np.testing.assert_array_equal(seq5, [ 1, 1, 2, 2, 1, 1 ])

    # Test values taken from a List
    list1 = []
    list2 = []
    for k in range(5):
        config['seq_index'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, int)[0])
        list2.append(galsim.config.ParseValue(config,'list2',config, int)[0])

    np.testing.assert_array_equal(list1, [ 73, 8, 3, 73, 8 ])
    np.testing.assert_array_equal(list2, [ 8, 0, 3, 8, 8 ])

    t2 = time.time()
    print('time for %s = %.2f'%(funcname(),t2-t1))
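
The assertions against the second UniformDeviate above imply how a 'Random' integer in [min, max] is produced from a uniform deviate, namely floor(u * (max - min + 1)) + min. Below is a tiny standalone sketch of that mapping; the helper name is purely illustrative.

import math
import galsim

rng = galsim.UniformDeviate(1234)

def random_int(rng, lo, hi):
    # Map a uniform deviate in [0, 1) onto the integers lo..hi inclusive.
    return int(math.floor(rng() * (hi - lo + 1))) + lo

print([random_int(rng, 0, 3) for _ in range(5)])    # values in 0..3
print([random_int(rng, -5, 10) for _ in range(5)])  # values in -5..10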
Code example #19
0
File: real.py Project: kuonanhong/GalSim
def simReal(real_galaxy, target_PSF, target_pixel_scale, g1=0.0, g2=0.0, rotation_angle=None,
            rand_rotate=True, rng=None, target_flux=1000.0, image=None): # pragma: no cover
    """Deprecated method to simulate images (no added noise) from real galaxy training data.

    This function takes a RealGalaxy from some training set, and manipulates it as needed to
    simulate a (no-noise-added) image from some lower-resolution telescope.  It thus requires a
    target PSF (which could be an image, or one of our base classes) that represents all PSF
    components including the pixel response, and a target pixel scale.

    The default rotation option is to impose a random rotation to make irrelevant any real shears
    in the galaxy training data (optionally, the RNG can be supplied).  This default can be turned
    off by setting `rand_rotate = False` or by requesting a specific rotation angle using the
    `rotation_angle` keyword, in which case `rand_rotate` is ignored.

    Optionally, the user can specify a shear (default 0).  Finally, they can specify a flux
    normalization for the final image, default 1000.

    @param real_galaxy      The RealGalaxy object to use, not modified in generating the
                            simulated image.
    @param target_PSF       The target PSF, either one of our base classes or an Image.
    @param target_pixel_scale  The pixel scale for the final image, in arcsec.
    @param g1               First component of shear to impose (components defined with respect
                            to pixel coordinates), [default: 0]
    @param g2               Second component of shear to impose, [default: 0]
    @param rotation_angle   Angle by which to rotate the galaxy (must be an Angle
                            instance). [default: None]
    @param rand_rotate      Should the galaxy be rotated by some random angle?  [default: True;
                            unless `rotation_angle` is set, then False]
    @param rng              A BaseDeviate instance to use for the random selection or rotation
                            angle. [default: None]
    @param target_flux      The target flux in the output galaxy image, [default: 1000.]
    @param image            As with the GSObject.drawImage() function, if an image is provided,
                            then it will be used and returned.  [default: None, which means an
                            appropriately-sized image will be created.]

    @return a simulated galaxy image.
    """
    from .deprecated import depr
    depr('simReal', 1.5, '',
         'This method has been deprecated due to lack of widespread use.  If you '+
         'have a need for it, please open an issue requesting that it be reinstated.')
    # do some checking of arguments
    if not isinstance(real_galaxy, galsim.RealGalaxy):
        raise RuntimeError("Error: simReal requires a RealGalaxy!")
    if isinstance(target_PSF, galsim.Image):
        target_PSF = galsim.InterpolatedImage(target_PSF, scale=target_pixel_scale)
    if not isinstance(target_PSF, galsim.GSObject):
        raise RuntimeError("Error: target PSF is not an Image or GSObject!")
    if rotation_angle is not None and not isinstance(rotation_angle, galsim.Angle):
        raise RuntimeError("Error: specified rotation angle is not an Angle instance!")
    if (target_pixel_scale < real_galaxy.pixel_scale):
        import warnings
        message = "Warning: requested pixel scale is higher resolution than original!"
        warnings.warn(message)
    import math # needed for pi, sqrt below
    g = math.sqrt(g1**2 + g2**2)
    if g > 1:
        raise RuntimeError("Error: requested shear is >1!")

    # make sure target PSF is normalized
    target_PSF = target_PSF.withFlux(1.0)

    # rotate
    if rotation_angle is not None:
        real_galaxy = real_galaxy.rotate(rotation_angle)
    elif rand_rotate:
        if rng is None:
            ud = galsim.UniformDeviate()
        elif isinstance(rng,galsim.BaseDeviate):
            ud = galsim.UniformDeviate(rng)
        else:
            raise TypeError("The rng provided is not a BaseDeviate")
        rand_angle = galsim.Angle(math.pi*ud(), galsim.radians)
        real_galaxy = real_galaxy.rotate(rand_angle)

    # set fluxes
    real_galaxy = real_galaxy.withFlux(target_flux)

    # shear
    if (g1 != 0.0 or g2 != 0.0):
        real_galaxy = real_galaxy.shear(g1=g1, g2=g2)

    # convolve, resample
    out_gal = galsim.Convolve([real_galaxy, target_PSF])
    image = out_gal.drawImage(image=image, scale=target_pixel_scale, method='no_pixel')

    # return simulated image
    return image
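
For reference, here is a hypothetical call to the simReal function defined above. This is only a sketch: it assumes the example COSMOS catalog used elsewhere on this page is available under ./data, and it builds a target PSF that already includes the pixel response, as the docstring requires.

import galsim

rgc = galsim.RealGalaxyCatalog('real_galaxy_catalog_23.5_example.fits', dir='data')
real_gal = galsim.RealGalaxy(rgc, index=0)

target_pixel_scale = 0.2  # arcsec
# Include the pixel response in the target PSF, per the docstring.
target_psf = galsim.Convolve(galsim.Moffat(beta=3, fwhm=0.9),
                             galsim.Pixel(target_pixel_scale))

sim_image = simReal(real_gal, target_psf, target_pixel_scale, g1=0.01, g2=0.0,
                    target_flux=1000.0)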
Code example #20
0
File: test_config_value.py Project: maxmen/GalSim
def test_bool_value():
    """Test various ways to generate a bool value
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' } },

        'val1' : True,
        'val2' : 1,
        'val3' : 0.0,
        'str1' : 'true',
        'str2' : '0',
        'str3' : 'yes',
        'str4' : 'No',
        'cat1' : { 'type' : 'InputCatalog' , 'col' : 4 },
        'cat2' : { 'type' : 'InputCatalog' , 'col' : 5 },
        'ran1' : { 'type' : 'Random' },
        'seq1' : { 'type' : 'Sequence' },
        'seq2' : { 'type' : 'Sequence', 'first' : True, 'repeat' : 2 },
        'list1' : { 'type' : 'List', 'items' : [ 'yes', 'no', 'no' ] },
        'list2' : { 'type' : 'List',
                    'items' : [ 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0 ],
                    'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } }
    }

    galsim.config.ProcessInput(config)

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, bool)[0]
    np.testing.assert_equal(val1, True)

    val2 = galsim.config.ParseValue(config,'val2',config, bool)[0]
    np.testing.assert_equal(val2, True)

    val3 = galsim.config.ParseValue(config,'val3',config, bool)[0]
    np.testing.assert_equal(val3, False)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, bool)[0]
    np.testing.assert_equal(str1, True)

    str2 = galsim.config.ParseValue(config,'str2',config, bool)[0]
    np.testing.assert_equal(str2, False)

    str3 = galsim.config.ParseValue(config,'str3',config, bool)[0]
    np.testing.assert_equal(str3, True)

    str4 = galsim.config.ParseValue(config,'str4',config, bool)[0]
    np.testing.assert_equal(str4, False)

    # Test values read from an InputCatalog
    input_cat = galsim.InputCatalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    for k in range(5):
        config['seq_index'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, bool)[0])
        cat2.append(galsim.config.ParseValue(config,'cat2',config, bool)[0])

    np.testing.assert_array_equal(cat1, [ 1, 0, 1, 1, 0 ])
    np.testing.assert_array_equal(cat2, [ 1, 0, 0, 1, 0 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234) # A second copy starting with the same seed.
    for k in range(6):
        ran1 = galsim.config.ParseValue(config,'ran1',config, bool)[0]
        np.testing.assert_equal(ran1, rng() < 0.5)

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    for k in range(6):
        config['seq_index'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, bool)[0])
        seq2.append(galsim.config.ParseValue(config,'seq2',config, bool)[0])

    np.testing.assert_array_equal(seq1, [ 0, 1, 0, 1, 0, 1 ])
    np.testing.assert_array_equal(seq2, [ 1, 1, 0, 0, 1, 1 ])

    # Test values taken from a List
    list1 = []
    list2 = []
    for k in range(5):
        config['seq_index'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, bool)[0])
        list2.append(galsim.config.ParseValue(config,'list2',config, bool)[0])

    np.testing.assert_array_equal(list1, [ 1, 0, 0, 1, 0 ])
    np.testing.assert_array_equal(list2, [ 0, 1, 1, 1, 0 ])

    t2 = time.time()
    print('time for %s = %.2f'%(funcname(),t2-t1))
Code example #21
0
gal_e_max = 0.8
psf_fwhm = [1.5, 0.7, 0.6, 0.5]
psf_beta = [2.4, 2.5, 2.3, 1.5]
gsparams=galsim.GSParams(folding_threshold=1.e-2,maxk_threshold=2.e-3,\
                         xvalue_accuracy=1.e-4,kvalue_accuracy=1.e-4,\
                         shoot_accuracy=1.e-4,minimum_fft_size=64)
psf1 = galsim.Moffat(fwhm=psf_fwhm[0], beta=psf_beta[0],
                     gsparams=gsparams)  # to be continued
gal1 = galsim.Gaussian(half_light_radius=1, gsparams=gsparams)  # to be continued
psf = psf1
k = 0
galarray = []
galarray0 = []
for i in range(100):
    k += 1
    rng = galsim.UniformDeviate(random_seed + k + 1)
    flux = rng() * (gal_flux_max - gal_flux_min) + gal_flux_min
    this_gal = gal1.withFlux(flux)
    hlr = rng() * (gal_hlr_max - gal_hlr_min) + gal_hlr_min
    this_gal = this_gal.dilate(hlr)
    this_gal = this_gal.shear(g1=0.5, g2=0.5)
    final = galsim.Convolve([this_gal, psf])

    print(rng())

    image0 = galsim.ImageF(2 * nx + 2, ny, scale=pixel_scale)
    fft_image0 = image0[galsim.BoundsI(1, nx, 1, ny)]
    phot_image0 = image0[galsim.BoundsI(nx + 3, 2 * nx + 2, 1, ny)]
    final.drawImage(fft_image0, method='fft')
    final.drawImage(phot_image0, method='phot')
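
The fragment above depends on names defined earlier in its source file (random_seed, nx, ny, pixel_scale, and the gal_flux/gal_hlr bounds). Here is a self-contained sketch of the same side-by-side FFT versus photon-shooting layout, with placeholder values standing in for those missing definitions.

import galsim

nx, ny, pixel_scale = 64, 64, 0.2  # placeholder values
psf = galsim.Moffat(fwhm=0.7, beta=2.5)
gal = galsim.Gaussian(half_light_radius=1.0, flux=1.e4)
final = galsim.Convolve([gal, psf])

# One wide image, split into two bounds: FFT rendering on the left, photon shooting on the right.
image = galsim.ImageF(2 * nx + 2, ny, scale=pixel_scale)
fft_image = image[galsim.BoundsI(1, nx, 1, ny)]
phot_image = image[galsim.BoundsI(nx + 3, 2 * nx + 2, 1, ny)]
final.drawImage(fft_image, method='fft')
final.drawImage(phot_image, method='phot', rng=galsim.UniformDeviate(1234))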
Code example #22
0
File: test_config_value.py Project: maxmen/GalSim
def test_float_value():
    """Test various ways to generate a float value
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' } },

        'val1' : 9.9,
        'val2' : int(400),
        'str1' : '8.73',
        'str2' : '2.33e-9',
        'str3' : '6.e-9', 
        'cat1' : { 'type' : 'InputCatalog' , 'col' : 0 },
        'cat2' : { 'type' : 'InputCatalog' , 'col' : 1 },
        'ran1' : { 'type' : 'Random', 'min' : 0.5, 'max' : 3 },
        'ran2' : { 'type' : 'Random', 'min' : -5, 'max' : 0 },
        'gauss1' : { 'type' : 'RandomGaussian', 'sigma' : 1 },
        'gauss2' : { 'type' : 'RandomGaussian', 'sigma' : 3, 'mean' : 4 },
        'gauss3' : { 'type' : 'RandomGaussian', 'sigma' : 1.5, 'min' : -2, 'max' : 2 },
        'gauss4' : { 'type' : 'RandomGaussian', 'sigma' : 0.5, 'min' : 0, 'max' : 0.8 },
        'gauss5' : { 'type' : 'RandomGaussian',
                     'sigma' : 0.3, 'mean' : 0.5, 'min' : 0, 'max' : 0.5 },
        'dist1' : { 'type' : 'RandomDistribution', 'function' : 'config_input/distribution.txt', 
                    'interpolant' : 'linear' },
        'dist2' : { 'type' : 'RandomDistribution', 'function' : 'config_input/distribution2.txt', 
                    'interpolant' : 'linear' },
        'dist3' : { 'type' : 'RandomDistribution', 'function' : 'x*x', 
                    'x_min' : 0., 'x_max' : 2.0 },
        'seq1' : { 'type' : 'Sequence' },
        'seq2' : { 'type' : 'Sequence', 'step' : 0.1 },
        'seq3' : { 'type' : 'Sequence', 'first' : 1.5, 'step' : 0.5 },
        'seq4' : { 'type' : 'Sequence', 'first' : 10, 'step' : -2 },
        'seq5' : { 'type' : 'Sequence', 'first' : 1, 'last' : 2.1, 'repeat' : 2 },
        'list1' : { 'type' : 'List', 'items' : [ 73, 8.9, 3.14 ] },
        'list2' : { 'type' : 'List',
                    'items' : [ 0.6, 1.8, 2.1, 3.7, 4.3, 5.5, 6.1, 7.0, 8.6, 9.3, 10.8, 11.2 ],
                    'index' : { 'type' : 'Sequence', 'first' : 10, 'step' : -3 } }
    }

    galsim.config.ProcessInput(config)

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, float)[0]
    np.testing.assert_almost_equal(val1, 9.9)

    val2 = galsim.config.ParseValue(config,'val2',config, float)[0]
    np.testing.assert_almost_equal(val2, 400)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, float)[0]
    np.testing.assert_almost_equal(str1, 8.73)

    str2 = galsim.config.ParseValue(config,'str2',config, float)[0]
    np.testing.assert_almost_equal(str2, 2.33e-9)

    str3 = galsim.config.ParseValue(config,'str3',config, float)[0]
    np.testing.assert_almost_equal(str3, 6.0e-9)

    # Test values read from an InputCatalog
    input_cat = galsim.InputCatalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    for k in range(5):
        config['seq_index'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, float)[0])
        cat2.append(galsim.config.ParseValue(config,'cat2',config, float)[0])

    np.testing.assert_array_almost_equal(cat1, [ 1.234, 2.345, 3.456, 1.234, 2.345 ])
    np.testing.assert_array_almost_equal(cat2, [ 4.131, -900, 8000, 4.131, -900 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234) # A second copy starting with the same seed.
    for k in range(6):
        ran1 = galsim.config.ParseValue(config,'ran1',config, float)[0]
        np.testing.assert_almost_equal(ran1, rng() * 2.5 + 0.5)

        ran2 = galsim.config.ParseValue(config,'ran2',config, float)[0]
        np.testing.assert_almost_equal(ran2, rng() * 5 - 5)

    # Test values generated from a Gaussian deviate
    gd = galsim.GaussianDeviate(rng)
    for k in range(6):
        gauss1 = galsim.config.ParseValue(config,'gauss1',config, float)[0]
        gd.setMean(0)
        gd.setSigma(1)
        np.testing.assert_almost_equal(gauss1, gd())

        gauss2 = galsim.config.ParseValue(config,'gauss2',config, float)[0]
        gd.setMean(4)
        gd.setSigma(3)
        np.testing.assert_almost_equal(gauss2, gd())

        gauss3 = galsim.config.ParseValue(config,'gauss3',config, float)[0]
        gd.setMean(0)
        gd.setSigma(1.5)
        gd_val = gd()
        while math.fabs(gd_val) > 2:
            gd_val = gd()
        np.testing.assert_almost_equal(gauss3, gd_val)

        gauss4 = galsim.config.ParseValue(config,'gauss4',config, float)[0]
        gd.setMean(0)
        gd.setSigma(0.5)
        gd_val = math.fabs(gd())
        while gd_val > 0.8:
            gd_val = math.fabs(gd())
        np.testing.assert_almost_equal(gauss4, gd_val)

        gauss5 = galsim.config.ParseValue(config,'gauss5',config, float)[0]
        gd.setMean(0.5)
        gd.setSigma(0.3)
        gd_val = gd()
        if gd_val > 0.5: 
            gd_val = 1-gd_val
        while gd_val < 0:
            gd_val = gd()
            if gd_val > 0.5: 
                gd_val = 1-gd_val
        np.testing.assert_almost_equal(gauss5, gd_val)

    # Test values generated from a distribution in a file
    dd=galsim.DistDeviate(rng,function='config_input/distribution.txt',interpolant='linear')
    for k in range(6):
        dist1 = galsim.config.ParseValue(config,'dist1',config, float)[0]
        np.testing.assert_almost_equal(dist1, dd())
    dd=galsim.DistDeviate(rng,function='config_input/distribution2.txt',interpolant='linear')
    for k in range(6):
        dist2 = galsim.config.ParseValue(config,'dist2',config, float)[0]
        np.testing.assert_almost_equal(dist2, dd())
    dd=galsim.DistDeviate(rng,function=lambda x: x*x,x_min=0.,x_max=2.)
    for k in range(6):
        dist3 = galsim.config.ParseValue(config,'dist3',config, float)[0]
        np.testing.assert_almost_equal(dist3, dd())

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    seq3 = []
    seq4 = []
    seq5 = []
    for k in range(6):
        config['seq_index'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, float)[0])
        seq2.append(galsim.config.ParseValue(config,'seq2',config, float)[0])
        seq3.append(galsim.config.ParseValue(config,'seq3',config, float)[0])
        seq4.append(galsim.config.ParseValue(config,'seq4',config, float)[0])
        seq5.append(galsim.config.ParseValue(config,'seq5',config, float)[0])

    np.testing.assert_array_almost_equal(seq1, [ 0, 1, 2, 3, 4, 5 ])
    np.testing.assert_array_almost_equal(seq2, [ 0, 0.1, 0.2, 0.3, 0.4, 0.5 ])
    np.testing.assert_array_almost_equal(seq3, [ 1.5, 2, 2.5, 3, 3.5, 4 ])
    np.testing.assert_array_almost_equal(seq4, [ 10, 8, 6, 4, 2, 0 ])
    np.testing.assert_array_almost_equal(seq5, [ 1, 1, 2, 2, 1, 1 ])

    # Test values taken from a List
    list1 = []
    list2 = []
    for k in range(5):
        config['seq_index'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, float)[0])
        list2.append(galsim.config.ParseValue(config,'list2',config, float)[0])

    np.testing.assert_array_almost_equal(list1, [ 73, 8.9, 3.14, 73, 8.9 ])
    np.testing.assert_array_almost_equal(list2, [ 10.8, 7.0, 4.3, 1.8, 10.8 ])

    t2 = time.time()
    print('time for %s = %.2f'%(funcname(),t2-t1))
Code example #23
0
def make_plot(args):
    # Initiate some GalSim random number generators.
    rng = galsim.BaseDeviate(args.seed)
    u = galsim.UniformDeviate(rng)

    # The GalSim atmospheric simulation code describes turbulence in the 3D atmosphere as a series
    # of 2D turbulent screens.  The galsim.Atmosphere() helper function is useful for constructing
    # this screen list.

    # First, we estimate a weight for each screen, so that the turbulence is dominated by the lower
    # layers consistent with direct measurements.  The specific values we use are from SCIDAR
    # measurements on Cerro Pachon as part of the 1998 Gemini site selection process
    # (Ellerbroek 2002, JOSA Vol 19 No 9).

    Ellerbroek_alts = [0.0, 2.58, 5.16, 7.73, 12.89, 15.46]  # km
    Ellerbroek_weights = [0.652, 0.172, 0.055, 0.025, 0.074, 0.022]
    Ellerbroek_interp = galsim.LookupTable(Ellerbroek_alts,
                                           Ellerbroek_weights,
                                           interpolant='linear')

    # Use given number of uniformly spaced altitudes
    alts = np.max(Ellerbroek_alts) * np.arange(
        args.nlayers) / (args.nlayers - 1)
    weights = Ellerbroek_interp(alts)  # interpolate the weights
    weights /= sum(weights)  # and renormalize

    # Each layer can have its own turbulence strength (roughly inversely proportional to the Fried
    # parameter r0), wind speed, wind direction, altitude, and even size and scale (though note that
    # the size of each screen is actually made infinite by "wrapping" the edges of the screen.)  The
    # galsim.Atmosphere helper function is useful for constructing this list, and requires lists of
    # parameters for the different layers.

    spd = []  # Wind speed in m/s
    dirn = []  # Wind direction in radians
    r0_500 = []  # Fried parameter in m at a wavelength of 500 nm.
    for i in range(args.nlayers):
        spd.append(
            u() * args.max_speed)  # Use a random speed between 0 and max_speed
        dirn.append(
            u() * 360 *
            galsim.degrees)  # And an isotropically distributed wind direction.
        # The turbulence strength of each layer is specified by through its Fried parameter r0_500,
        # which can be thought of as the diameter of a telescope for which atmospheric turbulence
        # and unaberrated diffraction contribute equally to image resolution (at a wavelength of
        # 500nm).  The weights above are for the refractive index structure function (similar to a
        # variance or covariance), however, so we need to use an appropriate scaling relation to
        # distribute the input "net" Fried parameter into a Fried parameter for each layer.  For
        # Kolmogorov turbulence, this is r0_500 ~ (structure function)**(-3/5):
        r0_500.append(args.r0_500 * weights[i]**(-3. / 5))
        print(
            "Adding layer at altitude {:5.2f} km with velocity ({:5.2f}, {:5.2f}) m/s, "
            "and r0_500 {:5.3f} m.".format(alts[i], spd[i] * dirn[i].cos(),
                                           spd[i] * dirn[i].sin(), r0_500[i]))

    # Generate atmosphere, set the initial screen size and scale.
    atmRng = galsim.BaseDeviate(args.seed + 1)
    fineAtm = galsim.Atmosphere(r0_500=r0_500,
                                L0=args.L0,
                                speed=spd,
                                direction=dirn,
                                altitude=alts,
                                rng=atmRng,
                                screen_size=args.screen_size,
                                screen_scale=args.screen_scale)
    r0 = args.r0_500 * (args.lam / 500.0)**(6. / 5)
    with ProgressBar(args.nlayers) as bar:
        fineAtm.instantiate(kmax=args.kcrit / r0, _bar=bar)
    # `fineAtm` is now an instance of a galsim.PhaseScreenList object.

    # Construct an Aperture object for computing the PSF.  The Aperture object describes the
    # illumination pattern of the telescope pupil, and chooses good sampling size and resolution
    # for representing this pattern as an array.
    aper = galsim.Aperture(diam=args.diam,
                           lam=args.lam,
                           obscuration=args.obscuration,
                           screen_list=fineAtm,
                           pad_factor=args.pad_factor,
                           oversampling=args.oversampling)
    print(repr(aper))

    # Start output
    fig, axes = plt.subplots(nrows=2, ncols=7, figsize=(12, 5))
    FigureCanvasAgg(fig)
    for ax in axes.ravel():
        ax.set_xticks([])
        ax.set_yticks([])

    for icol, shrinkFactor in enumerate([1, 2, 4, 8, 16, 32, 64]):

        if shrinkFactor == 1:
            shrunkenAtm = fineAtm
        else:
            shrunkenAtm = shrink_atm(fineAtm, shrinkFactor)
        print("Drawing with Fourier optics")
        with ProgressBar(args.exptime / args.time_step) as bar:
            psf = shrunkenAtm.makePSF(lam=args.lam,
                                      aper=aper,
                                      exptime=args.exptime,
                                      time_step=args.time_step,
                                      second_kick=False,
                                      _bar=bar)
            img = psf.drawImage(nx=args.nx, ny=args.nx, scale=args.scale)

        try:
            mom = galsim.hsm.FindAdaptiveMom(img)
        except RuntimeError:
            mom = None

        axes[0, icol].imshow(img.array)
        axes[0, icol].set_title("scale = {}".format(shrunkenAtm[0].screen_scale))
        if mom is not None:
            axes[0, icol].text(0.5,
                               0.9,
                               "{:6.3f}".format(mom.moments_sigma),
                               transform=axes[0, icol].transAxes,
                               color='w')

        airy = galsim.Airy(lam=args.lam,
                           diam=args.diam,
                           obscuration=args.obscuration)
        firstKick = galsim.Convolve(psf, airy)
        firstKickImg = firstKick.drawImage(nx=args.nx,
                                           ny=args.nx,
                                           scale=args.scale,
                                           method='phot',
                                           n_photons=args.nphot)
        try:
            firstKickMom = galsim.hsm.FindAdaptiveMom(firstKickImg)
        except RuntimeError:
            firstKickMom = None

        axes[1, icol].imshow(firstKickImg.array)
        if firstKickMom is not None:
            axes[1, icol].text(0.5,
                               0.9,
                               "{:6.3f}".format(firstKickMom.moments_sigma),
                               transform=axes[1, icol].transAxes,
                               color='w')

    axes[0, 0].set_ylabel("FFT")
    axes[1, 0].set_ylabel("1st Kick")

    fig.tight_layout()

    dirname, filename = os.path.split(args.outfile)
    if dirname and not os.path.exists(dirname):
        os.mkdir(dirname)
    fig.savefig(args.outfile)
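
The comments in make_plot above describe splitting a net Fried parameter across layers using the Kolmogorov scaling r0_500 ~ (structure-function weight)^(-3/5). A quick standalone check that the per-layer values combine back to the net value (for Kolmogorov turbulence the r0_i^(-5/3) contributions add linearly):

import numpy as np

weights = np.array([0.652, 0.172, 0.055, 0.025, 0.074, 0.022])
weights /= weights.sum()

r0_net = 0.2  # example net Fried parameter in m at 500 nm
r0_layers = r0_net * weights**(-3. / 5)

# Layers combine as r0_net = (sum_i r0_i^(-5/3))^(-3/5), which recovers the input value.
recovered = np.sum(r0_layers**(-5. / 3))**(-3. / 5)
print(recovered)  # ~0.2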
Code example #24
0
File: test_config_value.py Project: maxmen/GalSim
def test_angle_value():
    """Test various ways to generate an Angle value
    """
    import time
    t1 = time.time()

    config = {
        'input' : { 'catalog' : { 'dir' : 'config_input', 'file_name' : 'catalog.txt' } },

        'val1' : 1.9 * galsim.radians,
        'val2' : -41 * galsim.degrees,
        'str1' : '0.73 radians',
        'str2' : '240 degrees',
        'str3' : '1.2 rad',
        'str4' : '45 deg',
        'str5' : '6 hrs',
        'str6' : '21 hour',
        'str7' : '-240 arcmin',
        'str8' : '1800 arcsec',
        'cat1' : { 'type' : 'Radians' , 
                   'theta' : { 'type' : 'InputCatalog' , 'col' : 10 } },
        'cat2' : { 'type' : 'Degrees' , 
                   'theta' : { 'type' : 'InputCatalog' , 'col' : 11 } },
        'ran1' : { 'type' : 'Random' },
        'seq1' : { 'type' : 'Rad', 'theta' : { 'type' : 'Sequence' } },
        'seq2' : { 'type' : 'Deg', 'theta' : { 'type' : 'Sequence', 'first' : 45, 'step' : 80 } },
        'list1' : { 'type' : 'List',
                    'items' : [ 73 * galsim.arcmin,
                                8.9 * galsim.arcmin,
                                3.14 * galsim.arcmin ] },
    }

    galsim.config.ProcessInput(config)

    # Test direct values
    val1 = galsim.config.ParseValue(config,'val1',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(val1.rad(), 1.9)

    val2 = galsim.config.ParseValue(config,'val2',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(val2.rad(), -41 * math.pi/180)

    # Test conversions from strings
    str1 = galsim.config.ParseValue(config,'str1',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str1.rad(), 0.73)

    str2 = galsim.config.ParseValue(config,'str2',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str2 / galsim.degrees, 240)

    str3 = galsim.config.ParseValue(config,'str3',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str3.rad(), 1.2)

    str4 = galsim.config.ParseValue(config,'str4',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str4.rad(), math.pi/4)

    str5 = galsim.config.ParseValue(config,'str5',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str5.rad(), math.pi/2)

    str6 = galsim.config.ParseValue(config,'str6',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str6.rad(), 7*math.pi/4)

    str7 = galsim.config.ParseValue(config,'str7',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str7 / galsim.degrees, -4)

    str8 = galsim.config.ParseValue(config,'str8',config, galsim.Angle)[0]
    np.testing.assert_almost_equal(str8 / galsim.degrees, 0.5)

    # Test values read from an InputCatalog
    input_cat = galsim.InputCatalog(dir='config_input', file_name='catalog.txt')
    cat1 = []
    cat2 = []
    for k in range(5):
        config['seq_index'] = k
        cat1.append(galsim.config.ParseValue(config,'cat1',config, galsim.Angle)[0].rad())
        cat2.append(galsim.config.ParseValue(config,'cat2',config, galsim.Angle)[0]/galsim.degrees)

    np.testing.assert_array_almost_equal(cat1, [ 1.2, 0.1, -0.9, 1.2, 0.1 ])
    np.testing.assert_array_almost_equal(cat2, [ 23, 15, 82, 23, 15 ])

    # Test values generated from a uniform deviate
    rng = galsim.UniformDeviate(1234)
    config['rng'] = galsim.UniformDeviate(1234) # A second copy starting with the same seed.
    for k in range(6):
        ran1 = galsim.config.ParseValue(config,'ran1',config, galsim.Angle)[0]
        theta = rng() * 2 * math.pi
        np.testing.assert_almost_equal(ran1.rad(), theta)

    # Test values generated from a Sequence
    seq1 = []
    seq2 = []
    for k in range(6):
        config['seq_index'] = k
        seq1.append(galsim.config.ParseValue(config,'seq1',config, galsim.Angle)[0].rad())
        seq2.append(galsim.config.ParseValue(config,'seq2',config, galsim.Angle)[0]/galsim.degrees)

    np.testing.assert_array_almost_equal(seq1, [ 0, 1, 2, 3, 4, 5 ])
    np.testing.assert_array_almost_equal(seq2, [ 45, 125, 205, 285, 365, 445 ])

    # Test values taken from a List
    list1 = []
    for k in range(5):
        config['seq_index'] = k
        list1.append(galsim.config.ParseValue(config,'list1',config, galsim.Angle)[0]/galsim.arcmin)

    np.testing.assert_array_almost_equal(list1, [ 73, 8.9, 3.14, 73, 8.9 ])

    t2 = time.time()
    print('time for %s = %.2f'%(funcname(),t2-t1))
Code example #25
0
File: galsim_simulation.py Project: ofersp/wlenet
    def init(self, verbose=True):

        # load the catalogs

        warnings.filterwarnings("ignore")
        rgc_dir, rgc_file = os.path.split(
            os.path.expanduser(self.args.rgc_path))
        fits_file = os.path.splitext(rgc_file)[0] + '_fits.fits'
        mask_info_file = 'real_galaxy_mask_info.fits'

        if verbose:
            print('Loading the RealGalaxyCatalog %s (preload=%d)' %
                  (rgc_file, self.args.preload))

        self.rgc = galsim.RealGalaxyCatalog(rgc_file,
                                            dir=rgc_dir,
                                            preload=self.args.preload)
        self.rgc_catalog = pyfits.getdata(os.path.join(rgc_dir, rgc_file))
        self.fit_catalog = pyfits.getdata(os.path.join(rgc_dir, fits_file))

        if (self.args.mask_dist_min != -1.0) or (self.args.mask_dist_max !=
                                                 -1.0):
            self.mask_info_catalog = pyfits.getdata(
                os.path.join(rgc_dir, mask_info_file))
        else:
            self.mask_info_catalog = None

        # possibly preload the stamps themselves
        if self.args.preload:
            for f in self.rgc.loaded_files:
                for h in self.rgc.loaded_files[f]:
                    h.data

        # possibly use python's initial seed to initialize seed_dynamic
        if self.args.seed_dynamic == -1:
            self.args.seed_dynamic = random.randint(0, 10**8 - 1)

        # possibly load valid_indices from file and then apply extra filter criteria
        no_valid_index_file = self.args.valid_index_path == ''
        self.valid_indices = np.arange(self.rgc.nobjects) if no_valid_index_file else \
                             np.load(os.path.expanduser(self.args.valid_index_path))
        if not no_valid_index_file and verbose:
            print('Using valid-index file %s' % (self.args.valid_index_path))
        self.filter_valid_indices()

        # possibly load weight_indices from file
        no_weight_index_file = self.args.weight_index_path == ''
        if not no_weight_index_file:
            if verbose:
                print('Using weight-index file %s' % (self.args.weight_index_path))
            self.weight_indices = np.load(
                os.path.expanduser(self.args.weight_index_path))
        else:
            self.weight_indices = np.ones(
                (self.rgc.nobjects, )) / self.rgc.nobjects
        assert (self.weight_indices.shape[0] == self.rgc.nobjects)
        assert (np.all(self.weight_indices >= 0))
        assert (abs(np.sum(self.weight_indices) - 1.0) < 1e-6)

        # use seed_static to perform the train/test split
        seed_static = 12345
        if verbose:
            print('Splitting catalog samples into train and test sets using the static seed')
        np.random.seed(seed_static)
        num_indices = self.valid_indices.shape[0]
        train_test_rv = np.random.rand(self.rgc.nobjects)
        train_mask = train_test_rv < self.args.train_fraction
        test_mask = train_test_rv >= self.args.train_fraction
        valid_mask = np.zeros((self.rgc.nobjects, 1), dtype=bool).flatten()
        valid_mask[self.valid_indices] = True
        self.train_indices = np.where(train_mask * valid_mask)[0]
        self.test_indices = np.where(test_mask * valid_mask)[0]
        self.used_indices = self.train_indices if self.args.test_train == 'train' else self.test_indices

        # renormalize the weights
        self.used_weights = self.weight_indices[self.used_indices]
        self.used_weights = self.used_weights / np.sum(self.used_weights)

        # print catalog summary
        if verbose:
            print('Total catalog galaxies: %d' % (self.rgc.nobjects))
            print('Valid catalog galaxies: %d' % (self.valid_indices.shape[0]))
            print('Train catalog galaxies: %d' % (self.train_indices.shape[0]))
            print('Test catalog galaxies : %d' % (self.test_indices.shape[0]))

        # now use the dynamic seed
        seed_delta_numpy = 50000
        seed_delta_galsim = 55000
        seed_delta_test_train = 60000
        if verbose:
            print('Using the dynamic seed for the rest of this simulation')
        if self.args.test_train == 'test':
            np.random.seed(self.args.seed_dynamic + seed_delta_numpy)
            self.rng = galsim.UniformDeviate(self.args.seed_dynamic +
                                             seed_delta_galsim)
        else:
            np.random.seed(self.args.seed_dynamic + seed_delta_numpy +
                           seed_delta_test_train)
            self.rng = galsim.UniformDeviate(self.args.seed_dynamic +
                                             seed_delta_galsim +
                                             seed_delta_test_train)
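
The init method above partitions the catalog with a fixed static seed so the train/test split is identical across runs, while everything downstream draws from the dynamic seed. A minimal standalone sketch of that partition step (names here are illustrative, not taken from the class):

import numpy as np

def split_train_test(n_objects, train_fraction, static_seed=12345):
    # The same static seed always produces the same partition of 0..n_objects-1.
    rv = np.random.RandomState(static_seed).rand(n_objects)
    train = np.where(rv < train_fraction)[0]
    test = np.where(rv >= train_fraction)[0]
    return train, test

train_idx, test_idx = split_train_test(100, 0.8)
print(len(train_idx), len(test_idx))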
Code example #26
0
gal_stamp = final.drawImage(scale=pixel_scale, nx=32, ny=32)

###
### Do fits with the real sky sigma and this pixel scale
###

image_obslist_realsigma = ngmix.observation.ObsList()

for n in range(n_obs):
    ##
    ## Define noise, add it to stamps
    ##

    # limit size for over-resolved PSF
    this_gal_stamp = final.drawImage(scale=pixel_scale, nx=32, ny=32)
    ud = galsim.UniformDeviate(seed + n + 1)
    noise = galsim.CCDNoise(rng=ud,
                            sky_level=sky_level,
                            gain=gain,
                            read_noise=read_noise)
    this_gal_stamp.addNoise(noise)

    ##
    ## Set up for ngmix fitting
    ##

    psf_cutout = psf_stamp.array
    image_cutout = this_gal_stamp.array

    jj_psf = ngmix.jacobian.DiagonalJacobian(scale=psf_pixel_scale,
                                             x=psf_cutout.shape[0] / 2,
Code example #27
0
def do_shoot(prof, img, name):
    # For photon shooting, we calculate the number of photons to use based on the target
    # accuracy we are shooting for.  (Pun intended.)
    # For each pixel,
    # uncertainty = sqrt(N_pix) * flux_photon = sqrt(N_tot * flux_pix / flux_tot) * flux_tot / N_tot
    #             = sqrt(flux_pix) * sqrt(flux_tot) / sqrt(N_tot)
    # This is largest for the brightest pixel.  So we use:
    # N = flux_max * flux_tot / photon_shoot_accuracy^2
    photon_shoot_accuracy = 2.e-3
    # The number of decimal places at which to test the photon shooting
    photon_decimal_test = 2

    test_flux = 1.8

    print('Start do_shoot')
    # Test photon shooting for a particular profile (given as prof).
    prof.drawImage(img)
    flux_max = img.array.max()
    print('prof.getFlux = ', prof.getFlux())
    print('flux_max = ', flux_max)
    flux_tot = img.array.sum()
    print('flux_tot = ', flux_tot)
    if flux_max > 1.:
        # Since the number of photons required for a given accuracy level (in terms of
        # number of decimal places), we rescale the comparison by the flux of the
        # brightest pixel.
        prof /= flux_max
        img /= flux_max
        # The formula for number of photons needed is:
        # nphot = flux_max * flux_tot / photon_shoot_accuracy**2
        # But since we rescaled the image by 1/flux_max, it becomes
        nphot = flux_tot / flux_max / photon_shoot_accuracy**2
    elif flux_max < 0.1:
        # If the max is very small, at least bring it up to 0.1, so we are testing something.
        scale = 0.1 / flux_max
        print('scale = ', scale)
        prof *= scale
        img *= scale
        nphot = flux_max * flux_tot * scale * scale / photon_shoot_accuracy**2
    else:
        nphot = flux_max * flux_tot / photon_shoot_accuracy**2
    print('prof.getFlux => ', prof.getFlux())
    print('img.sum => ', img.array.sum())
    print('img.max => ', img.array.max())
    print('nphot = ', nphot)
    img2 = img.copy()

    # Use a deterministic random number generator so we don't fail tests because of rare flukes
    # in the random numbers.
    rng = galsim.UniformDeviate(12345)

    prof.drawImage(img2,
                   n_photons=nphot,
                   poisson_flux=False,
                   rng=rng,
                   method='phot')
    print('img2.sum => ', img2.array.sum())
    #printval(img2,img)
    np.testing.assert_array_almost_equal(
        img2.array,
        img.array,
        photon_decimal_test,
        err_msg="Photon shooting for %s disagrees with expected result" % name)

    # Test normalization
    dx = img.scale
    # Test with a large image to make sure we capture enough of the flux
    # even for slow convergers like Airy (which needs a _very_ large image) or Sersic.
    if 'Airy' in name:
        img = galsim.ImageD(2048, 2048, scale=dx)
    elif 'Sersic' in name or 'DeVauc' in name or 'Spergel' in name:
        img = galsim.ImageD(512, 512, scale=dx)
    else:
        img = galsim.ImageD(128, 128, scale=dx)
    prof = prof.withFlux(test_flux)
    prof.drawImage(img)
    print('img.sum = ', img.array.sum(), '  cf. ', test_flux)
    np.testing.assert_almost_equal(
        img.array.sum(),
        test_flux,
        4,
        err_msg="Flux normalization for %s disagrees with expected result" %
        name)

    scale = test_flux / flux_tot  # from above
    nphot *= scale * scale
    print('nphot -> ', nphot)
    if 'InterpolatedImage' in name:
        nphot *= 10
        print('nphot -> ', nphot)
    prof.drawImage(img,
                   n_photons=nphot,
                   poisson_flux=False,
                   rng=rng,
                   method='phot')
    print('img.sum = ', img.array.sum(), '  cf. ', test_flux)
    np.testing.assert_almost_equal(
        img.array.sum(),
        test_flux,
        photon_decimal_test,
        err_msg=
        "Photon shooting normalization for %s disagrees with expected result" %
        name)
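
To make the photon-count heuristic in the comments above concrete: for a unit-flux profile whose brightest pixel holds 4% of the flux, the rule N = flux_max * flux_tot / accuracy^2 yields 10,000 photons, as the short worked example below shows (the numbers are illustrative only).

photon_shoot_accuracy = 2.e-3
flux_max, flux_tot = 0.04, 1.0  # illustrative values for a unit-flux profile
nphot = flux_max * flux_tot / photon_shoot_accuracy**2
# sqrt(flux_max * flux_tot / nphot) = 2e-3, i.e. the shot noise in the peak pixel
# matches the requested accuracy.
print(nphot)  # 10000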
Code example #28
0
File: demo6.py Project: philastrophist/GalSim
def main(argv):
    """
    Make a fits image cube using real COSMOS galaxies from a catalog describing the training
    sample.

      - The number of images in the cube matches the number of rows in the catalog.
      - Each image size is computed automatically by GalSim based on the Nyquist size.
      - Both galaxies and stars.
      - PSF is a double Gaussian, the same for each galaxy.
      - Galaxies are randomly rotated to remove the imprint of any lensing shears in the COSMOS
        data.
      - The same shear is applied to each galaxy.
      - Noise is Poisson using a nominal sky value of 1.e6 ADU/arcsec^2,
        the noise in the original COSMOS data.
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo6")

    # Define some parameters we'll use below.

    cat_file_name = 'real_galaxy_catalog_23.5_example.fits'
    dir = 'data'
    # Make output directory if not already present.
    if not os.path.isdir('output'):
        os.mkdir('output')
    cube_file_name = os.path.join('output', 'cube_real.fits')
    psf_file_name = os.path.join('output', 'psf_real.fits')

    random_seed = 1512413
    sky_level = 1.e6  # ADU / arcsec^2
    pixel_scale = 0.16  # arcsec
    gal_flux = 1.e5  # arbitrary choice, makes nice (not too) noisy images
    gal_g1 = -0.027  #
    gal_g2 = 0.031  #
    gal_mu = 1.082  # mu = ( (1-kappa)^2 - g1^2 - g2^2 )^-1
    psf_inner_fwhm = 0.6  # arcsec
    psf_outer_fwhm = 2.3  # arcsec
    psf_inner_fraction = 0.8  # fraction of total PSF flux in the inner Gaussian
    psf_outer_fraction = 0.2  # fraction of total PSF flux in the outer Gaussian
    ngal = 100

    logger.info('Starting demo script 6 using:')
    logger.info('    - real galaxies from catalog %r', cat_file_name)
    logger.info('    - double Gaussian PSF')
    logger.info('    - pixel scale = %.2f', pixel_scale)
    logger.info('    - Applied gravitational shear = (%.3f,%.3f)', gal_g1,
                gal_g2)
    logger.info('    - Poisson noise (sky level = %.1e).', sky_level)

    # Read in galaxy catalog
    # Note: dir is the directory both for the catalog itself and also the directory prefix
    # for the image files listed in the catalog.
    # If the images are in a different directory, you may also specify image_dir, which gives
    # the relative path from dir to wherever the images are located.
    real_galaxy_catalog = galsim.RealGalaxyCatalog(cat_file_name, dir=dir)
    logger.info('Read in %d real galaxies from catalog',
                real_galaxy_catalog.nobjects)

    # Make the double Gaussian PSF
    psf1 = galsim.Gaussian(fwhm=psf_inner_fwhm, flux=psf_inner_fraction)
    psf2 = galsim.Gaussian(fwhm=psf_outer_fwhm, flux=psf_outer_fraction)
    psf = psf1 + psf2
    # Draw the PSF with no noise.
    psf_image = psf.drawImage(scale=pixel_scale)
    # write to file
    psf_image.write(psf_file_name)
    logger.info('Created PSF and wrote to file %r', psf_file_name)

    # Build the images
    all_images = []
    for k in range(ngal):
        logger.debug('Start work on image %d', k)
        t1 = time.time()

        # Initialize the random number generator we will be using.
        rng = galsim.UniformDeviate(random_seed + k + 1)

        gal = galsim.RealGalaxy(real_galaxy_catalog, index=k, flux=gal_flux)
        logger.debug('   Read in training sample galaxy and PSF from file')
        t2 = time.time()

        # Rotate by a random angle
        theta = 2. * math.pi * rng() * galsim.radians
        gal = gal.rotate(theta)

        # Apply the desired shear
        gal = gal.shear(g1=gal_g1, g2=gal_g2)

        # Also apply a magnification mu = ( (1-kappa)^2 - |gamma|^2 )^-1
        # This conserves surface brightness, so it scales both the area and flux.
        gal = gal.magnify(gal_mu)

        # Make the combined profile
        final = galsim.Convolve([psf, gal])

        # Offset by up to 1/2 pixel in each direction
        # We had previously (in demo4 and demo5) used shift(dx,dy) as a way to shift the center of
        # the image.  Since that is applied to the galaxy, the units are arcsec (since the galaxy
        # profile itself doesn't know about the pixel scale).  Here, the offset applies to the
        # drawn image, which does know about the pixel scale, so the units of offset are pixels,
        # not arcsec.  Here, we apply an offset of up to half a pixel in each direction.
        dx = rng() - 0.5
        dy = rng() - 0.5

        # Draw the profile
        if k == 0:
            # Note that the offset argument may be a galsim.PositionD object or a tuple (dx,dy).
            im = final.drawImage(scale=pixel_scale, offset=(dx, dy))
            xsize, ysize = im.array.shape
        else:
            im = galsim.ImageF(xsize, ysize)
            final.drawImage(im, scale=pixel_scale, offset=(dx, dy))

        logger.debug('   Drew image')
        t3 = time.time()

        # Add a constant background level
        background = sky_level * pixel_scale**2
        im += background

        # Add Poisson noise.  This time, we don't give a sky_level, since we have already
        # added it to the image, so we don't want any more added.  The sky_level parameter
        # really defines how much _extra_ sky should be added above what is already in the image.
        im.addNoise(galsim.PoissonNoise(rng))

        logger.debug('   Added Poisson noise')
        t4 = time.time()

        # Store that into the list of all images
        all_images += [im]
        t5 = time.time()

        logger.debug('   Times: %f, %f, %f, %f', t2 - t1, t3 - t2, t4 - t3,
                     t5 - t4)
        logger.info('Image %d: size = %d x %d, total time = %f sec', k, xsize,
                    ysize, t5 - t1)

    logger.info('Done making images of galaxies')

    # Now write the image to disk.
    # We write the images to a fits data cube.
    galsim.fits.writeCube(all_images, cube_file_name)
    logger.info('Wrote image to fits data cube %r', cube_file_name)
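
As a quick illustration of the magnification comment in the loop above (surface brightness is preserved, so area and flux both scale by mu), here is a minimal sketch using the .flux property, as in the profiling example later on this page:

import galsim

gal = galsim.Gaussian(sigma=1.0, flux=100.0)
mu = 1.082
magnified = gal.magnify(mu)
print(magnified.flux / gal.flux)  # ~ mu: flux scales along with the magnified area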
Code example #29
0
def main():
    pr = cProfile.Profile()
    pr.enable()

    rng = galsim.UniformDeviate(8675309)

    wcs = galsim.FitsWCS('../tests/des_data/DECam_00154912_12_header.fits')
    image = galsim.Image(xsize, ysize, wcs=wcs)
    bandpass = galsim.Bandpass('LSST_r.dat', wave_type='nm').thin(0.1)
    base_wavelength = bandpass.effective_wavelength

    angles = galsim.FRatioAngles(fratio, obscuration, rng)
    sensor = galsim.SiliconSensor(rng=rng, nrecalc=nrecalc)

    # Figure out the local_sidereal time from the observation location and time.
    lsst_lat = '-30:14:23.76'
    lsst_long = '-70:44:34.67'
    obs_time = '2012-11-24 03:37:25.023964'  # From the header of the wcs file

    obs = astropy.time.Time(obs_time, scale='utc', location=(lsst_long + 'd', lsst_lat + 'd'))
    local_sidereal_time = obs.sidereal_time('apparent').value

    # Convert the ones we need below to galsim Angles.
    local_sidereal_time *= galsim.hours
    lsst_lat = galsim.Angle.from_dms(lsst_lat)

    times = []
    mem = []
    phot = []

    t0 = time.time()
    for iobj in range(nobjects):
        sys.stderr.write('.')
        psf = make_psf(rng)
        gal = make_gal(rng)
        obj = galsim.Convolve(psf, gal)

        sed = get_sed(rng)
        waves = galsim.WavelengthSampler(sed=sed, bandpass=bandpass, rng=rng)

        image_pos = get_pos(rng)
        sky_coord = wcs.toWorld(image_pos)
        bounds, offset = calculate_bounds(obj, image_pos, image)

        ha = local_sidereal_time - sky_coord.ra
        dcr = galsim.PhotonDCR(base_wavelength=base_wavelength,
                               obj_coord=sky_coord, HA=ha, latitude=lsst_lat)

        surface_ops = (waves, angles, dcr)

        obj.drawImage(method='phot', image=image[bounds], offset=offset,
                      rng=rng, sensor=sensor,
                      surface_ops=surface_ops)

        times.append(time.time() - t0)
        mem.append(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
        phot.append(obj.flux)

    image.write('phot.fits')
    phot = np.cumsum(phot)
    make_plots(times, mem, phot)

    pr.disable()
    ps = pstats.Stats(pr).sort_stats('time')
    ps.print_stats(20)
Code example #30
0
# accuracy we are shooting for.  (Pun intended.)
# For each pixel,
# uncertainty = sqrt(N_pix) * flux_photon = sqrt(N_tot * flux_pix / flux_tot) * flux_tot / N_tot
#             = sqrt(flux_pix) * sqrt(flux_tot) / sqrt(N_tot)
# This is largest for the brightest pixel.  So we use:
# N = flux_max * flux_tot / photon_shoot_accuracy^2
photon_shoot_accuracy = 2.e-3
# The number of decimal places at which to test the photon shooting
photon_decimal_test = 2
# for flux normalization tests
test_flux = 0.7
# for dx tests - avoid 1.0 because factors of dx^2 won't show up!
test_dx = 2.0
# Use a deterministic random number generator so we don't fail tests because of rare flukes
# in the random numbers.
glob_ud = galsim.UniformDeviate(12345)


# do_shoot utility taken from test_SBProfile.py
def do_shoot(prof, img, name):
    print('Start do_shoot')
    # Test photon shooting for a particular profile (given as prof).
    # Since shooting implicitly convolves with the pixel, we need to compare it to
    # the given profile convolved with a pixel.
    pix = galsim.Pixel(xw=img.getScale())
    compar = galsim.Convolve(prof, pix)
    compar.draw(img)
    flux_max = img.array.max()
    print('prof.getFlux = ', prof.getFlux())
    print('compar.getFlux = ', compar.getFlux())
    print('flux_max = ', flux_max)