Example #1
def test_noise():
    real_gal_dir = os.path.join('..', 'examples', 'data')
    real_gal_cat = 'real_galaxy_catalog_23.5_example.fits'
    real_cat = galsim.RealGalaxyCatalog(dir=real_gal_dir,
                                        file_name=real_gal_cat,
                                        preload=True)

    test_seed = 987654
    test_index = 17
    cf_1 = real_cat.getNoise(test_index, rng=galsim.BaseDeviate(test_seed))
    im_2, pix_scale_2, var_2 = real_cat.getNoiseProperties(test_index)
    # Check the variance:
    var_1 = cf_1.getVariance()
    assert var_1 == var_2, 'Inconsistent noise variance from getNoise and getNoiseProperties'
    # Check the image:
    ii = galsim.InterpolatedImage(im_2,
                                  normalization='sb',
                                  calculate_stepk=False,
                                  calculate_maxk=False,
                                  x_interpolant='linear')
    cf_2 = check_dep(galsim.correlatednoise._BaseCorrelatedNoise,
                     galsim.BaseDeviate(test_seed), ii, im_2.wcs)
    cf_2 = cf_2.withVariance(var_2)
    assert cf_1 == cf_2, 'Inconsistent noise properties from getNoise and getNoiseProperties'
Example #2
def test_uncorr_padding():
    """Test for uncorrelated noise padding of InterpolatedImage."""
    import time
    t1 = time.time()

    # Set up some defaults: use weird image sizes / shapes and noise variances.
    decimal_precise=5
    decimal_coarse=2
    orig_nx = 147
    orig_ny = 174
    noise_var = 1.73
    big_nx = 519
    big_ny = 482
    orig_seed = 151241

    # first, make a noise image
    orig_img = galsim.ImageF(orig_nx, orig_ny, scale=1.)
    gd = galsim.GaussianDeviate(orig_seed, mean=0., sigma=np.sqrt(noise_var))
    orig_img.addNoise(galsim.DeviateNoise(gd))

    # make it into an InterpolatedImage with some zero-padding
    # (note that default is zero-padding, by factors of several)
    int_im = galsim.InterpolatedImage(orig_img)
    # draw into a larger image
    big_img = galsim.ImageF(big_nx, big_ny)
    int_im.draw(big_img, scale=1.)
    # check that variance is diluted by expected amount - should be exact, so check precisely!
    # Note that this only works if the big image has the same even/odd-ness in the two sizes.
    # Otherwise the center of the original image will fall between pixels in the big image.
    # Then the variance will be smoothed somewhat by the interpolant.
    big_var_expected = np.var(orig_img.array)*float(orig_nx*orig_ny)/(big_nx*big_ny)
    np.testing.assert_almost_equal(
        np.var(big_img.array), big_var_expected, decimal=decimal_precise,
        err_msg='Variance not diluted by expected amount when zero-padding')

    # make it into an InterpolatedImage with noise-padding
    int_im = galsim.InterpolatedImage(orig_img, noise_pad=noise_var,
                                      noise_pad_size=max(big_nx,big_ny),
                                      rng = galsim.GaussianDeviate(orig_seed))
    # draw into a larger image
    big_img = galsim.ImageF(big_nx, big_ny)
    int_im.draw(big_img, scale=1.)
    # check that variance is same as original - here, we cannot be too precise because the padded
    # region is not huge and the comparison will be, well, noisy.
    np.testing.assert_almost_equal(
        np.var(big_img.array), noise_var, decimal=decimal_coarse,
        err_msg='Variance not correct after padding image with noise')

    # check that if we pass in a RNG, it is actually used to pad with the same noise field
    # basically, redo all of the above steps and draw into a new image, make sure it's the same as
    # previous.
    int_im = galsim.InterpolatedImage(orig_img, noise_pad=noise_var,
                                      noise_pad_size=max(big_nx,big_ny),
                                      rng = galsim.GaussianDeviate(orig_seed))
    big_img_2 = galsim.ImageF(big_nx, big_ny)
    int_im.draw(big_img_2, scale=1.)
    np.testing.assert_array_almost_equal(
        big_img_2.array, big_img.array, decimal=decimal_precise,
        err_msg='Cannot reproduce noise-padded image with same choice of seed')

    # Finally check inputs: what if we give it an input variance that is neg?  A list?
    try:
        np.testing.assert_raises(ValueError,galsim.InterpolatedImage,orig_img,noise_pad=-1.)
    except ImportError:
        print('The assert_raises tests require nose')

    t2 = time.time()
    print('time for %s = %.2f' % (funcname(), t2 - t1))
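The variance-dilution check above is just bookkeeping: zero-padding keeps all of the noise power in the original orig_nx x orig_ny pixels, so the variance measured over the bigger frame scales by the ratio of pixel counts. A minimal numpy-only sketch of that argument (reusing the sizes and seed from the test, but independent of GalSim):

import numpy as np

rng = np.random.RandomState(151241)
orig = rng.normal(0.0, np.sqrt(1.73), size=(174, 147))  # noise-only stamp (ny, nx)
big = np.zeros((482, 519))
big[:174, :147] = orig                                   # embed in a zero-padded frame

# Variance is diluted by the ratio of pixel counts (up to the small shift in the mean).
expected = np.var(orig) * orig.size / big.size
np.testing.assert_allclose(np.var(big), expected, rtol=1e-3)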
Example #3
def test_HSC_huygensPSF():
    fn = os.path.join(directory, "testdata", "HSC_huygensPSF.txt")
    with open(fn) as f:
        Zarr = np.loadtxt(f, skiprows=21)
    Zarr = Zarr[::-1]  # Need to invert, probably just a Zemax convention...

    telescope = batoid.Optic.fromYaml("HSC_no_obsc.yaml")

    thx = np.deg2rad(0.0)
    thy = np.deg2rad(0.75)
    wavelength = 750e-9
    nx = 512
    dx = 0.25e-6
    print("computing Huygens PSF")
    hPSF = batoid.huygensPSF(telescope,
                             thx,
                             thy,
                             wavelength,
                             nx=nx,
                             projection='zemax',
                             dx=dx,
                             nxOut=256)
    print("Done")

    # Normalize images
    Zarr /= np.sum(Zarr)
    hPSF.array /= np.sum(hPSF.array)
    Zmax = np.max(Zarr)
    Zarr /= Zmax
    hPSF.array /= Zmax

    # Use GalSim InterpolatedImage to align and subtract
    ii = galsim.InterpolatedImage(galsim.Image(hPSF.array, scale=0.25),
                                  normalization='sb')

    # Now set up an optimizer to fit for the x/y shift
    def resid(params):
        p = params.valuesdict()
        model = ii.shift(p['dx'], p['dy']) * np.exp(p['dlogflux'])
        img = model.drawImage(method='sb', scale=0.25, nx=256, ny=256)
        r = (img.array - Zarr).ravel()
        return r

    params = lmfit.Parameters()
    params.add('dx', value=0.0)
    params.add('dy', value=0.0)
    params.add('dlogflux', value=0.0)
    print("Aligning")
    opt = lmfit.minimize(resid, params)
    print("Done")

    p = opt.params.valuesdict()
    model = ii.shift(p['dx'], p['dy']) * np.exp(p['dlogflux'])
    optImg = model.drawImage(method='sb', scale=0.25, nx=256, ny=256)

    np.testing.assert_allclose(Zarr, optImg.array, rtol=0, atol=3e-2)
    Zmom = galsim.hsm.FindAdaptiveMom(galsim.Image(Zarr, scale=0.25))
    bmom = galsim.hsm.FindAdaptiveMom(optImg)
    np.testing.assert_allclose(Zmom.observed_shape.g1,
                               bmom.observed_shape.g1,
                               rtol=0,
                               atol=0.01)
    np.testing.assert_allclose(Zmom.observed_shape.g2,
                               bmom.observed_shape.g2,
                               rtol=0,
                               atol=1e-7)
    np.testing.assert_allclose(Zmom.moments_sigma,
                               bmom.moments_sigma,
                               rtol=0,
                               atol=0.1)
Example #4
def make_iilist(obs, **kw):
    """
    make a multi-band interpolated image list, as well as the maximum of
    getGoodImageSize from each psf, and corresponding dk

    parameters
    ----------
    obs: real space obs list
        Either Observation, ObsList or MultiBandObsList
    interp: string, optional
        The x interpolant, default 'lanczos15'
    """
    import galsim

    interp = kw.get('interp', DEFAULT_XINTERP)
    mb_obs = get_mb_obs(obs)

    dimlist = []
    dklist = []

    mb_iilist = []
    for band, obs_list in enumerate(mb_obs):
        iilist = []
        for obs in obs_list:

            jac = obs.jacobian
            gsimage = galsim.Image(
                obs.image,
                wcs=jac.get_galsim_wcs(),
            )
            ii = galsim.InterpolatedImage(
                gsimage,
                x_interpolant=interp,
            )
            if hasattr(ii, 'SBProfile'):
                gsvers = 1
            else:
                gsvers = 2

            if obs.has_psf():
                psf_weight = obs.psf.weight

                # normalized
                psf_gsimage = galsim.Image(
                    obs.psf.image / obs.psf.image.sum(),
                    wcs=obs.psf.jacobian.get_galsim_wcs(),
                )

                psf_ii = galsim.InterpolatedImage(
                    psf_gsimage,
                    x_interpolant=interp,
                )
                # make dimensions odd
                if gsvers == 1:
                    dim = 1 + psf_ii.SBProfile.getGoodImageSize(
                        psf_ii.nyquistScale(), )
                else:
                    dim = 1 + psf_ii.getGoodImageSize(psf_ii.nyquist_scale)

            else:
                # make dimensions odd
                if hasattr(ii, 'SBProfile'):
                    dim = 1 + ii.SBProfile.getGoodImageSize(
                        ii.nyquistScale(), )
                else:
                    dim = 1 + ii.getGoodImageSize(ii.nyquist_scale, )
                psf_ii = None
                psf_weight = None

            if gsvers == 1:
                dk = ii.stepK()
            else:
                dk = ii.stepk

            dimlist.append(dim)
            dklist.append(dk)

            iilist.append({
                'wcs': jac.get_galsim_wcs(),
                'scale': jac.scale,
                'ii': ii,
                'weight': obs.weight,
                'meta': obs.meta,
                'psf_ii': psf_ii,
                'psf_weight': psf_weight,
                'psf_meta': obs.psf.meta,
                'realspace_gsimage': gsimage,
            })

        mb_iilist.append(iilist)

    dimarr = numpy.array(dimlist)
    dkarr = numpy.array(dklist)

    imax = dimarr.argmax()

    dim = dimarr[imax]
    dk = dkarr[imax]

    return mb_iilist, dim, dk
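The dim/dk bookkeeping above only differs between GalSim 1.x (SBProfile attribute, stepK()/nyquistScale() methods) and 2.x (stepk/nyquist_scale properties). A minimal sketch of the 2.x branch on a self-contained image, assuming GalSim >= 2.0; 'lanczos15' stands in for DEFAULT_XINTERP, which is defined elsewhere in this module:

import galsim

img = galsim.Gaussian(fwhm=0.9).drawImage(scale=0.2)
ii = galsim.InterpolatedImage(img, x_interpolant='lanczos15')

dim = 1 + ii.getGoodImageSize(ii.nyquist_scale)  # odd image dimension, as in make_iilist
dk = ii.stepk                                    # corresponding k-space step
print(dim, dk)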
Example #5
    def mkFakeGalsimGalaxies(self, fakeCat, band, photoCalib, pixelScale, psf,
                             image):
        """Make images of fake galaxies using GalSim.

        Parameters
        ----------
        band : `str`
        pixelScale : `float`
        psf : `lsst.meas.extensions.psfex.psfexPsf.PsfexPsf`
                    The PSF information to use to make the PSF images
        fakeCat : `pandas.core.frame.DataFrame`
                    The catalog of fake sources to be input
        photoCalib : `lsst.afw.image.photoCalib.PhotoCalib`
                    Photometric calibration to be used to calibrate the fake sources

        Yields
        -------
        galImages : `generator`
                    A generator of tuples of `lsst.afw.image.exposure.exposure.ExposureF` and
                    `lsst.geom.Point2D` of their locations.

        Notes
        -----

        Fake galaxies are made by combining two sersic profiles, one for the bulge and one for the disk. Each
        component has an individual sersic index (n), a, b and position angle (PA). The combined profile is
        then convolved with the PSF at the specified x, y position on the image.

        The names of the columns in the ``fakeCat`` are configurable and are the column names from the
        University of Washington simulations database as default. For more information see the doc strings
        attached to the config options.

        See mkFakeStars doc string for an explanation of calibration to instrumental flux.
        """

        self.log.info("Making %d fake galaxy images" % len(fakeCat))

        for (index, row) in fakeCat.iterrows():
            xy = geom.Point2D(row["x"], row["y"])

            # We put these two PSF calculations within this same try block so that we catch cases
            # where the object's position is outside of the image.
            try:
                correctedFlux = psf.computeApertureFlux(
                    self.config.calibFluxRadius, xy)
                psfKernel = psf.computeKernelImage(xy).getArray()
                psfKernel /= correctedFlux

            except InvalidParameterError:
                self.log.info("Galaxy at %0.4f, %0.4f outside of image" %
                              (row["x"], row["y"]))
                continue

            try:
                flux = photoCalib.magnitudeToInstFlux(
                    row[self.config.magVar % band], xy)
            except LogicError:
                flux = 0

            bulge = galsim.Sersic(row[self.config.nBulge],
                                  half_light_radius=row[self.config.bulgeHLR])
            axisRatioBulge = row[self.config.bBulge] / row[self.config.aBulge]
            bulge = bulge.shear(q=axisRatioBulge,
                                beta=((90 - row[self.config.paBulge]) *
                                      galsim.degrees))

            disk = galsim.Sersic(row[self.config.nDisk],
                                 half_light_radius=row[self.config.diskHLR])
            axisRatioDisk = row[self.config.bDisk] / row[self.config.aDisk]
            disk = disk.shear(q=axisRatioDisk,
                              beta=((90 - row[self.config.paDisk]) *
                                    galsim.degrees))

            gal = disk + bulge
            gal = gal.withFlux(flux)

            psfIm = galsim.InterpolatedImage(galsim.Image(psfKernel),
                                             scale=pixelScale)
            gal = galsim.Convolve([gal, psfIm])
            try:
                galIm = gal.drawImage(scale=pixelScale,
                                      method="real_space").array
            except (galsim.errors.GalSimFFTSizeError, MemoryError):
                continue

            yield (afwImage.ImageF(galIm), xy)
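The Notes in the docstring describe the bulge+disk construction. A standalone GalSim sketch of the same pattern, with hypothetical values standing in for the configurable fakeCat columns and a Gaussian standing in for the measured PSF kernel:

import galsim

# Hypothetical catalog values (these stand in for the configurable fakeCat columns).
nBulge, bulgeHLR, aBulge, bBulge, paBulge = 4.0, 0.6, 1.0, 0.7, 30.0
nDisk, diskHLR, aDisk, bDisk, paDisk = 1.0, 1.2, 1.0, 0.4, 30.0
flux, pixelScale = 1.0e4, 0.2

bulge = galsim.Sersic(nBulge, half_light_radius=bulgeHLR)
bulge = bulge.shear(q=bBulge / aBulge, beta=(90 - paBulge) * galsim.degrees)

disk = galsim.Sersic(nDisk, half_light_radius=diskHLR)
disk = disk.shear(q=bDisk / aDisk, beta=(90 - paDisk) * galsim.degrees)

gal = (disk + bulge).withFlux(flux)
psf = galsim.Gaussian(fwhm=0.7)          # stand-in for the InterpolatedImage PSF above
galIm = galsim.Convolve([gal, psf]).drawImage(scale=pixelScale).array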
Example #6
def test_IPC_basic():
    import time
    t1 = time.time()

    # Make an image with non-trivially interesting scale.
    g = galsim.Gaussian(sigma=3.7)
    im = g.drawImage(scale=0.25)
    im_save = im.copy()

    # Check for no IPC
    ipc_kernel = galsim.Image(3, 3)
    ipc_kernel.setValue(2, 2, 1.0)
    im_new = im.copy()

    im_new.applyIPC(IPC_kernel=ipc_kernel, edge_treatment='extend')
    np.testing.assert_array_equal(
        im_new.array,
        im.array,
        err_msg="Image is altered for no IPC with edge_treatment = 'extend'")

    im_new.applyIPC(IPC_kernel=ipc_kernel, edge_treatment='wrap')
    np.testing.assert_array_equal(
        im_new.array,
        im.array,
        err_msg="Image is altered for no IPC with edge_treatment = 'wrap'")

    im_new.applyIPC(IPC_kernel=ipc_kernel, edge_treatment='crop')
    np.testing.assert_array_equal(
        im_new.array,
        im.array,
        err_msg="Image is altered for no IPC with edge_treatment = 'crop'")

    # Test with a scalar fill_value
    fill_value = np.pi  # a non-trivial one
    im_new.applyIPC(IPC_kernel=ipc_kernel,
                    edge_treatment='crop',
                    fill_value=fill_value)

    # Input and output arrays will differ at the edges for this option.
    np.testing.assert_array_equal(
        im_new.array[1:-1, 1:-1],
        im.array[1:-1, 1:-1],
        err_msg=
        "Image is altered for no IPC with edge_treatment = 'crop' and with a fill_value"
    )
    # Check if the edges are filled with fill_value
    np.testing.assert_array_equal(
        im_new.array[0, :],
        fill_value,
        err_msg="Top edge is not filled with the correct value by applyIPC")
    np.testing.assert_array_equal(
        im_new.array[-1, :],
        fill_value,
        err_msg="Bottom edge is not filled with the correct value by applyIPC")
    np.testing.assert_array_equal(
        im_new.array[:, 0],
        fill_value,
        err_msg="Left edge is not filled with the correct value by applyIPC")
    np.testing.assert_array_equal(
        im_new.array[:, -1],
        fill_value,
        err_msg="Left edge is not filled with the correct value by applyIPC")

    # Testing for flux conservation
    np.random.seed(1234)
    ipc_kernel = galsim.Image(abs(np.random.randn(3, 3)))  # a random kernel
    ipc_kernel /= ipc_kernel.array.sum()  # but make it normalized so we do not get warnings
    im_new = im.copy()
    # Set edges to zero since flux is not conserved at the edges otherwise
    im_new.array[0, :] = 0.0
    im_new.array[-1, :] = 0.0
    im_new.array[:, 0] = 0.0
    im_new.array[:, -1] = 0.0
    im_new.applyIPC(IPC_kernel=ipc_kernel,
                    edge_treatment='extend',
                    kernel_normalization=True)
    np.testing.assert_almost_equal(
        im_new.array.sum(),
        im.array[1:-1, 1:-1].sum(),
        4,
        err_msg=
        "Normalized IPC kernel does not conserve the total flux for 'extend' option."
    )

    im_new = im.copy()
    im_new.applyIPC(IPC_kernel=ipc_kernel,
                    edge_treatment='wrap',
                    kernel_normalization=True)
    np.testing.assert_almost_equal(
        im_new.array.sum(),
        im.array.sum(),
        4,
        err_msg=
        "Normalized IPC kernel does not conserve the total flux for 'wrap' option."
    )

    # Checking directionality
    ipc_kernel = galsim.Image(3, 3)
    ipc_kernel.setValue(2, 2, 0.875)
    ipc_kernel.setValue(2, 3, 0.125)
    # This kernel should correspond to each pixel getting contribution from the pixel above it.
    im1 = im.copy()
    im1.applyIPC(IPC_kernel=ipc_kernel,
                 edge_treatment='crop',
                 kernel_normalization=False)
    np.testing.assert_array_almost_equal(
        0.875 * im.array[1:-1, 1:-1] + 0.125 * im.array[2:, 1:-1],
        im1.array[1:-1, 1:-1],
        7,
        err_msg="Difference in directionality for up kernel in applyIPC")
    # Checking for one pixel in the central bulk
    np.testing.assert_almost_equal(
        im1(2, 2),
        0.875 * im(2, 2) + 0.125 * im(2, 3),
        7,
        err_msg="Direction is not as intended for up kernel in applyIPC")

    ipc_kernel = galsim.Image(3, 3)
    ipc_kernel.setValue(2, 2, 0.875)
    ipc_kernel.setValue(1, 2, 0.125)
    # This kernel should correspond to each pixel getting contribution from the pixel to its left.
    im1 = im.copy()
    im1.applyIPC(IPC_kernel=ipc_kernel,
                 edge_treatment='crop',
                 kernel_normalization=False)
    np.testing.assert_array_almost_equal(
        0.875 * im.array[1:-1, 1:-1] + 0.125 * im.array[1:-1, :-2],
        im1.array[1:-1, 1:-1],
        7,
        err_msg="Difference in directionality for left kernel in applyIPC")
    # Checking for one pixel in the central bulk
    np.testing.assert_almost_equal(
        im1(2, 3),
        0.875 * im(2, 3) + 0.125 * im(1, 3),
        7,
        err_msg="Direction is not as intended for left kernel in applyIPC")

    # Check using GalSim's native Convolve routine for GSObjects with a realistic kernel
    ipc_kernel = galsim.Image(
        np.array([[0.01, 0.1, 0.01], [0.1, 1.0, 0.1], [0.01, 0.1, 0.01]]))
    ipc_kernel /= ipc_kernel.array.sum()
    im1 = im.copy()
    im1.applyIPC(IPC_kernel=ipc_kernel,
                 edge_treatment='crop',
                 kernel_normalization=False)
    im2 = im.copy()
    im2_int = galsim.InterpolatedImage(im2, x_interpolant='nearest')
    ipc_kernel_int = galsim.InterpolatedImage(ipc_kernel,
                                              x_interpolant='nearest',
                                              scale=im.scale)
    im_int = galsim.Convolve(ipc_kernel_int, im2_int, real_space=False)
    im_int.drawImage(im2, method='no_pixel', scale=im.scale)
    np.testing.assert_array_almost_equal(
        im1.array,
        im2.array,
        6,
        err_msg="Output of applyIPC does not match the output from Convolve")

    try:
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            from scipy import signal
        print "SciPy found installed. Checking IPC kernel convolution against SciPy's `convolve2d`"
        # SciPy is going to emit a warning that we don't want to worry about, so let's deliberately
        # ignore it by going into a `catch_warnings` context.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            # Generate an arbitrary kernel
            np.random.seed(2345)
            ipc_kernel = galsim.Image(abs(np.random.randn(3, 3)))
            ipc_kernel /= ipc_kernel.array.sum()
            # Convolution requires the kernel to be flipped up-down and left-right.
            im_new = im.copy()
            im_new.applyIPC(IPC_kernel=ipc_kernel,
                            edge_treatment='extend',
                            kernel_normalization=False)
            np.testing.assert_array_almost_equal(
                im_new.array,
                signal.convolve2d(im.array,
                                  np.flipud(np.fliplr(ipc_kernel.array)),
                                  mode='same',
                                  boundary='fill'),
                7,
                err_msg=
                "Image differs from SciPy's result using `mode='same'` and "
                "`boundary='fill`.")

            im_new = im.copy()
            im_new.applyIPC(IPC_kernel=ipc_kernel,
                            edge_treatment='crop',
                            kernel_normalization=False)
            np.testing.assert_array_almost_equal(
                im_new.array[1:-1, 1:-1],
                signal.convolve2d(im.array,
                                  np.fliplr(np.flipud(ipc_kernel.array)),
                                  mode='valid',
                                  boundary='fill'),
                7,
                err_msg=
                "Image differs from SciPy's result using `mode=valid'` and "
                "`boundary='fill'`.")

            im_new = im.copy()
            im_new.applyIPC(IPC_kernel=ipc_kernel,
                            edge_treatment='wrap',
                            kernel_normalization=False)
            np.testing.assert_array_almost_equal(
                im_new.array,
                signal.convolve2d(im.array,
                                  np.fliplr(np.flipud(ipc_kernel.array)),
                                  mode='same',
                                  boundary='wrap'),
                7,
                err_msg=
                "Image differs from SciPy's result using `mode=same'` and "
                "boundary='wrap'`.")

    except ImportError:
        # Skip without any warning if SciPy is not installed
        pass

    t2 = time.time()
    print('time for %s = %.2f' % (funcname(), t2 - t1))
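The SciPy comparisons above hinge on the kernel-flip convention: the IPC kernel says how much each pixel receives from each neighbour, while convolve2d flips its kernel before sliding it, so the kernel has to be flipped up-down and left-right to reproduce the same weighting. A small numpy/scipy sketch of that equivalence, independent of GalSim:

import numpy as np
from scipy import signal

rng = np.random.RandomState(2345)
img = rng.randn(5, 5)
kernel = np.abs(rng.randn(3, 3))
kernel /= kernel.sum()

# Direct sum: output pixel (i, j) receives kernel[di+1, dj+1] of input pixel (i+di, j+dj).
direct = np.zeros_like(img)
for di in (-1, 0, 1):
    for dj in (-1, 0, 1):
        shifted = np.roll(np.roll(img, -di, axis=0), -dj, axis=1)  # periodic edges
        direct += kernel[di + 1, dj + 1] * shifted

# convolve2d with the flipped kernel and periodic boundary gives the same answer.
conv = signal.convolve2d(img, np.flipud(np.fliplr(kernel)), mode='same', boundary='wrap')
np.testing.assert_allclose(direct, conv)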
Example #7
def test_HSC_huygensPSF():
    fn = os.path.join(directory, "testdata", "HSC_huygensPSF.txt")
    with open(fn) as f:
        Zarr = np.loadtxt(f, skiprows=21)
    Zarr = Zarr[::-1]  # Need to invert, probably just a Zemax convention...

    telescope = batoid.Optic.fromYaml("HSC_no_obsc.yaml")

    thx = np.deg2rad(0.0)
    thy = np.deg2rad(0.75)
    wavelength = 750e-9
    nx = 128
    dx = 0.25e-6
    print("computing Huygens PSF")
    hPSF = batoid.huygensPSF(
        telescope,
        thx, thy, projection='zemax',
        wavelength=wavelength,
        nx=nx, dx=dx, nxOut=256,
        reference='mean'
    )
    print("Done")

    # Normalize images
    Zarr /= np.sum(Zarr)
    hPSF.array /= np.sum(hPSF.array)
    Zmax = np.max(Zarr)
    Zarr /= Zmax
    hPSF.array /= Zmax

    # Use GalSim InterpolatedImage to align and subtract
    ii = galsim.InterpolatedImage(
        galsim.Image(hPSF.array, scale=0.25),
        normalization='sb'
    )

    # Now set up an optimizer to fit for the x/y shift
    def modelimg(params, ii=ii):
        dx, dy, dlogflux = params
        model = ii.shift(dx, dy)*np.exp(dlogflux)
        return model.drawImage(method='sb', scale=0.25, nx=256, ny=256)

    def resid(params, ii=ii, Zarr=Zarr):
        img = modelimg(params, ii=ii)
        r = (img.array - Zarr).ravel()
        return r

    kwargs = dict(ii=ii, Zarr=Zarr)
    print("Aligning")
    result = least_squares(resid, np.array([0.0, 0.0, 0.0]), kwargs=kwargs)
    optImg = modelimg(result.x, ii=ii)
    print("Done")

    np.testing.assert_allclose(Zarr, optImg.array, rtol=0, atol=3e-2)
    Zmom = galsim.hsm.FindAdaptiveMom(galsim.Image(Zarr, scale=0.25))
    bmom = galsim.hsm.FindAdaptiveMom(optImg)
    np.testing.assert_allclose(
        Zmom.observed_shape.g1,
        bmom.observed_shape.g1,
        rtol=0, atol=0.01
    )
    np.testing.assert_allclose(
        Zmom.observed_shape.g2,
        bmom.observed_shape.g2,
        rtol=0, atol=1e-7
    )
    np.testing.assert_allclose(
        Zmom.moments_sigma,
        bmom.moments_sigma,
        rtol=0, atol=0.1
    )
Example #8
File: test.py Project: rearmstr/desc_bfd
        if not exposure.getBBox().contains(box):
            box_size = min(w, h)
            if box_size % 2 == 1:
                box_size += 1
            box = afwGeom.Box2I(
                afwGeom.Point2I(center.getX() - box_size / 2,
                                center.getY() - box_size / 2),
                afwGeom.Extent2I(box_size, box_size))

        if not exposure.getBBox().contains(box):
            continue
        image = exposure.image[box].array
        # PSF image must also be the same size
        psf_image_base = galsim.ImageF(
            exposure.getPsf().computeKernelImage(center).array)
        psf_image_interp = galsim.InterpolatedImage(psf_image_base, scale=1)
        psf_image = galsim.ImageF(box_size, box_size)
        psf_image_interp.drawImage(psf_image, method='no_pixel')

        ra = measRecord.get('coord_ra').asArcseconds()
        dec = measRecord.get('coord_dec').asArcseconds()

        local_lin_wcs = exposure.getWcs().linearizePixelToSky(
            center, geom.arcseconds)
        jacobian = local_lin_wcs.getLinear().getMatrix()
        jacobian2 = jacobian

        xref = ra - (jacobian[0, 0] *
                     (measRecord.getX() - box.getMinX()) + jacobian[0, 1] *
                     (measRecord.getY() - box.getMinY()))
        yref = dec - (jacobian[1, 0] *
Example #9
def test_OpticalPSF_pupil_plane():
    """Test the ability to generate a PSF using an image of the pupil plane.
    """
    # Test case: lam/diam=0.12, obscuration=0.18, 4 struts of the default width and with rotation
    # from the vertical of -15 degrees.  There are two versions of these tests at different
    # oversampling levels.
    #
    # To (re-)generate the pupil plane images for this test, simply delete
    # tests/Optics_comparison_images/sample_pupil_rolled.fits and
    # tests/Optics_comparison_images/sample_pupil_rolled_oversample.fits.gz,
    # and then rerun this function.  Note that these images are also used in test_ne(), so there
    # may be some racing if this script is tested in parallel before the fits files are regenerated.

    # First test: should get excellent agreement between that particular OpticalPSF with specified
    # options and one from loading the pupil plane image.  Note that this won't work if you change
    # the optical PSF parameters, unless you also regenerate the test image.
    lam_over_diam = 0.12
    obscuration = 0.18
    nstruts = 4
    strut_angle = -15.*galsim.degrees
    scale = 0.055
    ref_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, nstruts=nstruts,
                                oversampling=pp_oversampling, strut_angle=strut_angle,
                                pad_factor=pp_pad_factor)
    if os.path.isfile(os.path.join(imgdir, pp_file)):
        im = galsim.fits.read(os.path.join(imgdir, pp_file))
    else:
        import warnings
        warnings.warn("Could not find file {0}, so generating it from scratch.  This should only "
                      "happen if you intentionally deleted the file in order to regenerate it!"
                      .format(pp_file))
        im = galsim.Image(ref_psf._psf.aper.illuminated.astype(float))
        im.scale = ref_psf._psf.aper.pupil_plane_scale
        print('pupil_plane image has scale = ',im.scale)
        im.write(os.path.join(imgdir, pp_file))
    pp_scale = im.scale
    print('pupil_plane image has scale = ',pp_scale)

    # For most of the tests, we remove this scale, since for achromatic tests, you don't really
    # need it, and it is invalid to give lam_over_diam (rather than lam, diam separately) when
    # there is a specific scale for the pupil plane image.  But see the last test below where
    # we do use lam, diam separately with the input image.
    im.wcs = None
    # This implies that the lam_over_diam value is valid.
    test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration,
                                 oversampling=pp_oversampling, pupil_plane_im=im,
                                 pad_factor=pp_pad_factor)
    im_ref_psf = ref_psf.drawImage(scale=scale)
    im_test_psf = galsim.ImageD(im_ref_psf.array.shape[0], im_ref_psf.array.shape[1])
    im_test_psf = test_psf.drawImage(image=im_test_psf, scale=scale)

    if pp_test_type == 'image':
        np.testing.assert_array_almost_equal(
            im_test_psf.array, im_ref_psf.array, decimal=pp_decimal,
            err_msg="Inconsistent OpticalPSF image for basic model after loading pupil plane.")
    else:
        test_moments = im_test_psf.FindAdaptiveMom()
        ref_moments = im_ref_psf.FindAdaptiveMom()
        np.testing.assert_almost_equal(
            test_moments.moments_sigma, ref_moments.moments_sigma, decimal=pp_decimal,
            err_msg="Inconsistent OpticalPSF image for basic model after loading pupil plane.")

    if do_slow_tests:
        do_pickle(test_psf, lambda x: x.drawImage(nx=20, ny=20, scale=0.07, method='no_pixel'))
        do_pickle(test_psf)

    # Make a smaller pupil plane image to test the pickling of this, even without slow tests.
    factor = 4 if not do_slow_tests else 16
    with assert_warns(galsim.GalSimWarning):
        alt_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration,
                                    oversampling=1., pupil_plane_im=im.bin(factor,factor),
                                    pad_factor=1.)
        do_pickle(alt_psf)

    assert_raises(ValueError, galsim.OpticalPSF, lam_over_diam, pupil_plane_im='pp_file')
    assert_raises(ValueError, galsim.OpticalPSF, lam_over_diam, pupil_plane_im=im,
                  pupil_plane_scale=pp_scale)
    assert_raises(ValueError, galsim.OpticalPSF, lam_over_diam,
                  pupil_plane_im=im.view(scale=pp_scale))
    # These aren't raised until the image is actually used
    with assert_raises(ValueError):
        # not square
        op = galsim.OpticalPSF(lam_over_diam, pupil_plane_im=galsim.Image(im.array[:-2,:]))
        op.drawImage()
    with assert_raises(ValueError):
        # not even sides
        op = galsim.OpticalPSF(lam_over_diam, pupil_plane_im=galsim.Image(im.array[:-1,:-1]))
        op.drawImage()

    # It is supposed to be able to figure this out even if we *don't* tell it the pad factor. So
    # make sure that it still works even if we don't tell it that value.
    test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, pupil_plane_im=im,
                                 oversampling=pp_oversampling)
    im_test_psf = galsim.ImageD(im_ref_psf.array.shape[0], im_ref_psf.array.shape[1])
    im_test_psf = test_psf.drawImage(image=im_test_psf, scale=scale)

    if pp_test_type == 'image':
        np.testing.assert_array_almost_equal(
            im_test_psf.array, im_ref_psf.array, decimal=pp_decimal,
            err_msg="Inconsistent OpticalPSF image for basic model after loading pupil plane without "
            "specifying parameters.")
    else:
        test_moments = im_test_psf.FindAdaptiveMom()
        ref_moments = im_ref_psf.FindAdaptiveMom()
        np.testing.assert_almost_equal(
            test_moments.moments_sigma, ref_moments.moments_sigma, decimal=pp_decimal,
            err_msg="Inconsistent OpticalPSF image for basic model after loading pupil plane without "
            "specifying parameters.")

    # Next test (less trivial): Rotate the struts by +27 degrees, and check that agreement is
    # good. This is making sure that the linear interpolation that is done when rotating does not
    # result in significant loss of accuracy.
    rot_angle = 27.*galsim.degrees
    ref_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, nstruts=nstruts,
                                strut_angle=strut_angle+rot_angle, oversampling=pp_oversampling,
                                pad_factor=pp_pad_factor)
    test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, pupil_plane_im=im,
                                 pupil_angle=rot_angle, oversampling=pp_oversampling,
                                 pad_factor=pp_pad_factor)
    im_ref_psf = ref_psf.drawImage(scale=scale)
    im_test_psf = galsim.ImageD(im_ref_psf.array.shape[0], im_ref_psf.array.shape[1])
    im_test_psf = test_psf.drawImage(image=im_test_psf, scale=scale)
    # We are slightly less stringent here since it should not be exact.
    if pp_test_type == 'image':
        np.testing.assert_array_almost_equal(
            im_test_psf.array, im_ref_psf.array, decimal=pp_decimal-1,
            err_msg="Inconsistent OpticalPSF image for rotated model after loading pupil plane.")
    else:
        test_moments = im_test_psf.FindAdaptiveMom()
        ref_moments = im_ref_psf.FindAdaptiveMom()
        np.testing.assert_almost_equal(
            test_moments.moments_sigma, ref_moments.moments_sigma, decimal=pp_decimal-1,
            err_msg="Inconsistent OpticalPSF image for rotated model after loading pupil plane.")

    # Now include aberrations.  Here we are testing the ability to figure out the pupil plane extent
    # and sampling appropriately.  Those get fed into the routine for making the aberrations.
    defocus = -0.03
    coma1 = 0.03
    spher = -0.02
    ref_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, nstruts=nstruts,
                                strut_angle=strut_angle, defocus=defocus, coma1=coma1, spher=spher,
                                oversampling=pp_oversampling, pad_factor=pp_pad_factor)
    test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, pupil_plane_im=im,
                                 defocus=defocus, coma1=coma1, spher=spher,
                                 oversampling=pp_oversampling, pad_factor=pp_pad_factor)
    im_ref_psf = ref_psf.drawImage(scale=scale)
    im_test_psf = galsim.ImageD(im_ref_psf.array.shape[0], im_ref_psf.array.shape[1])
    im_test_psf = test_psf.drawImage(image=im_test_psf, scale=scale)
    if pp_test_type == 'image':
        np.testing.assert_array_almost_equal(
            im_test_psf.array, im_ref_psf.array, decimal=pp_decimal,
            err_msg="Inconsistent OpticalPSF image for aberrated model after loading pupil plane.")
    else:
        test_moments = im_test_psf.FindAdaptiveMom()
        ref_moments = im_ref_psf.FindAdaptiveMom()
        np.testing.assert_almost_equal(
            test_moments.moments_sigma, ref_moments.moments_sigma, decimal=pp_decimal,
            err_msg="Inconsistent OpticalPSF image for aberrated model after loading pupil plane.")

    # Test for preservation of symmetries: the result should be the same if the pupil plane is
    # rotated by integer multiples of 2pi/(nstruts).
    ref_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, nstruts=nstruts,
                                strut_angle=strut_angle, oversampling=pp_oversampling,
                                pad_factor=pp_pad_factor)
    im_ref_psf = ref_psf.drawImage(scale=scale)
    for ind in range(1,nstruts):
        rot_angle = ind*2.*np.pi/nstruts
        test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, pupil_plane_im=im,
                                     pupil_angle=rot_angle*galsim.radians,
                                     oversampling=pp_oversampling, pad_factor=pp_pad_factor)
        im_test_psf = galsim.ImageD(im_ref_psf.array.shape[0], im_ref_psf.array.shape[1])
        im_test_psf = test_psf.drawImage(image=im_test_psf, scale=scale)
        if pp_test_type == 'image':
            np.testing.assert_array_almost_equal(
                im_test_psf.array, im_ref_psf.array, decimal=pp_decimal,
                err_msg="Inconsistent OpticalPSF image after rotating pupil plane by invariant "
                "angle.")
        else:
            test_moments = im_test_psf.FindAdaptiveMom()
            ref_moments = im_ref_psf.FindAdaptiveMom()
            np.testing.assert_almost_equal(
                test_moments.moments_sigma, ref_moments.moments_sigma, decimal=pp_decimal,
                err_msg="Inconsistent OpticalPSF image after rotating pupil plane by invariant "
                "angle.")

    # Test that if we rotate pupil plane with no aberrations, that's equivalent to rotating the PSF
    # itself.  Use rotation angle of 90 degrees so numerical issues due to the interpolation should
    # be minimal.
    rot_angle = 90.*galsim.degrees
    psf_1 = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, pupil_plane_im=im,
                              oversampling=pp_oversampling, pad_factor=pp_pad_factor)
    rot_psf_1 = psf_1.rotate(rot_angle)
    psf_2 = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, pupil_plane_im=im,
                              pupil_angle=rot_angle, oversampling=pp_oversampling,
                              pad_factor=pp_pad_factor)
    im_1 = psf_1.drawImage(scale=scale)
    im_2 = galsim.ImageD(im_1.array.shape[0], im_1.array.shape[1])
    im_2 = psf_2.drawImage(image=im_2, scale=scale)
    if pp_test_type == 'image':
        np.testing.assert_array_almost_equal(
            im_1.array, im_2.array, decimal=pp_decimal,
            err_msg="Inconsistent OpticalPSF image after rotating pupil plane vs. rotating PSF.")
    else:
        test_moments = im_1.FindAdaptiveMom()
        ref_moments = im_2.FindAdaptiveMom()
        np.testing.assert_almost_equal(
            test_moments.moments_sigma, ref_moments.moments_sigma, decimal=pp_decimal,
            err_msg="Inconsistent OpticalPSF image after rotating pupil plane vs. rotating PSF.")

    # Supply the pupil plane at higher resolution, and make sure that the routine figures out the
    # sampling and gets the right image scale etc.
    rescale_fac = 0.77
    ref_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, nstruts=nstruts,
                                strut_angle=strut_angle, oversampling=pp_oversampling,
                                pad_factor=pp_pad_factor/rescale_fac)
    # Make higher resolution pupil plane image via interpolation
    int_im = galsim.InterpolatedImage(galsim.Image(im, scale=1.0, dtype=np.float32),
                                      calculate_maxk=False, calculate_stepk=False,
                                      x_interpolant='linear')
    new_im = int_im.drawImage(scale=rescale_fac, method='no_pixel')
    new_im.wcs = None  # Let OpticalPSF figure out the scale automatically.
    test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration,
                                 pupil_plane_im=new_im, oversampling=pp_oversampling)
    im_ref_psf = ref_psf.drawImage(scale=scale)
    im_test_psf = galsim.ImageD(im_ref_psf.array.shape[0], im_ref_psf.array.shape[1])
    im_test_psf = test_psf.drawImage(image=im_test_psf, scale=scale)
    test_moments = im_test_psf.FindAdaptiveMom()
    ref_moments = im_ref_psf.FindAdaptiveMom()
    if pp_test_type == 'image':
        np.testing.assert_almost_equal(
            test_moments.moments_sigma/ref_moments.moments_sigma-1., 0, decimal=2,
            err_msg="Inconsistent OpticalPSF image for basic model after loading high-res pupil plane.")
    else:
        np.testing.assert_almost_equal(
            test_moments.moments_sigma/ref_moments.moments_sigma-1., 0, decimal=1,
            err_msg="Inconsistent OpticalPSF image for basic model after loading high-res pupil plane.")

    # Now supply the pupil plane at the original resolution, but remove some of the padding.  We
    # want it to properly recognize that it needs more padding, and include it.
    remove_pad = -23
    sub_im = im[im.bounds.withBorder(remove_pad)]
    test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration,
                                 pupil_plane_im=sub_im, oversampling=pp_oversampling,
                                 pad_factor=pp_pad_factor)
    im_test_psf = galsim.ImageD(im_ref_psf.array.shape[0], im_ref_psf.array.shape[1])
    im_test_psf = test_psf.drawImage(image=im_test_psf, scale=scale)
    test_moments = im_test_psf.FindAdaptiveMom()
    ref_moments = im_ref_psf.FindAdaptiveMom()
    np.testing.assert_almost_equal(
        test_moments.moments_sigma/ref_moments.moments_sigma-1., 0, decimal=pp_decimal-3,
        err_msg="Inconsistent OpticalPSF image for basic model after loading less padded pupil plane.")

    # Now supply the pupil plane at the original resolution, with extra padding.
    new_pad = 76
    big_im = galsim.Image(im.bounds.withBorder(new_pad))
    big_im[im.bounds] = im
    test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration,
                                 pupil_plane_im=big_im, oversampling=pp_oversampling,
                                 pad_factor=pp_pad_factor)
    im_test_psf = galsim.ImageD(im_ref_psf.array.shape[0], im_ref_psf.array.shape[1])
    im_test_psf = test_psf.drawImage(image=im_test_psf, scale=scale)
    test_moments = im_test_psf.FindAdaptiveMom()
    ref_moments = im_ref_psf.FindAdaptiveMom()
    np.testing.assert_almost_equal(
        test_moments.moments_sigma, ref_moments.moments_sigma, decimal=pp_decimal-2,
        err_msg="Inconsistent OpticalPSF image size for basic model "
        "after loading more padded pupil plane.")

    # Check for same answer if we use image, array, or filename for reading in array.
    test_psf = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, pupil_plane_im=im,
                                 oversampling=pp_oversampling, pad_factor=pp_pad_factor)
    im_test_psf = test_psf.drawImage(scale=scale)
    test_psf_2 = galsim.OpticalPSF(lam_over_diam, obscuration=obscuration, pupil_plane_im=im.array,
                                   oversampling=pp_oversampling, pad_factor=pp_pad_factor)
    im_test_psf_2 = test_psf_2.drawImage(scale=scale)
    np.testing.assert_almost_equal(
        im_test_psf.array, im_test_psf_2.array, decimal=pp_decimal,
        err_msg="Inconsistent OpticalPSF image from Image vs. array.")

    # The following had used lam_over_diam, but that is now invalid because the fits file
    # has a specific pixel scale.  So we need to provide lam and diam separately so that the
    # units are consistent.
    diam = 500.e-9 / lam_over_diam * galsim.radians / galsim.arcsec
    test_psf_3 = galsim.OpticalPSF(
        lam=500, diam=diam, obscuration=obscuration, oversampling=pp_oversampling,
        pupil_plane_im=os.path.join(imgdir, pp_file),
        pad_factor=pp_pad_factor)
    im_test_psf_3 = test_psf_3.drawImage(scale=scale)
    np.testing.assert_almost_equal(
        im_test_psf.array, im_test_psf_3.array, decimal=pp_decimal,
        err_msg="Inconsistent OpticalPSF image from Image vs. file read-in.")
Example #10
File: real.py Project: mardom/GalSim
def simReal(real_galaxy, target_PSF, target_pixel_scale, g1=0.0, g2=0.0, rotation_angle=None, 
            rand_rotate=True, rng=None, target_flux=1000.0, image=None):
    """Function to simulate images (no added noise) from real galaxy training data.

    This function takes a RealGalaxy from some training set, and manipulates it as needed to 
    simulate a (no-noise-added) image from some lower-resolution telescope.  It thus requires a
    target PSF (which could be an image, or one of our base classes) that represents all PSF 
    components including the pixel response, and a target pixel scale.  

    The default rotation option is to impose a random rotation to make irrelevant any real shears 
    in the galaxy training data (optionally, the RNG can be supplied).  This default can be turned 
    off by setting `rand_rotate = False` or by requesting a specific rotation angle using the
    `rotation_angle` keyword, in which case `rand_rotate` is ignored.

    Optionally, the user can specify a shear (default 0).  Finally, they can specify a flux 
    normalization for the final image, default 1000.

    @param real_galaxy         The RealGalaxy object to use, not modified in generating the
                               simulated image.
    @param target_PSF          The target PSF, either one of our base classes or an ImageView/Image.
    @param target_pixel_scale  The pixel scale for the final image, in arcsec.
    @param g1                  First component of shear to impose (components defined with respect
                               to pixel coordinates), [Default `g1 = 0.`]
    @param g2                  Second component of shear to impose, [Default `g2 = 0.`]
    @param rotation_angle      Angle by which to rotate the galaxy (must be a galsim.Angle 
                               instance).
    @param rand_rotate         If `rand_rotate = True` (default) then impose a random rotation on 
                               the training galaxy; this is ignored if `rotation_angle` is set.
    @param rng                 A random number generator to use for selection of the random 
                               rotation angle. (optional, may be any kind of galsim.BaseDeviate 
                               or None)
    @param target_flux         The target flux in the output galaxy image, [Default 
                               `target_flux = 1000.`]
    @param image               As with the GSObject.draw() function, if an image is provided,
                               then it will be used and returned.
                               If `image=None`, then an appropriately sized image will be created.
    @return A simulated galaxy image.  The input RealGalaxy is unmodified. 
    """
    # do some checking of arguments
    if not isinstance(real_galaxy, galsim.RealGalaxy):
        raise RuntimeError("Error: simReal requires a RealGalaxy!")
    for Class in galsim.Image.values() + galsim.ImageView.values():
        if isinstance(target_PSF, Class):
            target_PSF = galsim.InterpolatedImage(target_PSF.view(), dx=target_pixel_scale)
            break
    if not isinstance(target_PSF, galsim.GSObject):
        raise RuntimeError("Error: target PSF is not an Image, ImageView, or GSObject!")
    if rotation_angle != None and not isinstance(rotation_angle, galsim.Angle):
        raise RuntimeError("Error: specified rotation angle is not an Angle instance!")
    if (target_pixel_scale < real_galaxy.pixel_scale):
        import warnings
        message = "Warning: requested pixel scale is higher resolution than original!"
        warnings.warn(message)
    import math # needed for pi, sqrt below
    g = math.sqrt(g1**2 + g2**2)
    if g > 1:
        raise RuntimeError("Error: requested shear is >1!")

    # make sure target PSF is normalized
    target_PSF.setFlux(1.0)

    real_galaxy_copy = real_galaxy.copy()

    # rotate
    if rotation_angle is not None:
        real_galaxy_copy.applyRotation(rotation_angle)
    elif rand_rotate:
        if rng is None:
            uniform_deviate = galsim.UniformDeviate()
        elif isinstance(rng, galsim.BaseDeviate):
            uniform_deviate = galsim.UniformDeviate(rng)
        else:
            raise TypeError("The rng provided to simReal is not a BaseDeviate")
        rand_angle = galsim.Angle(math.pi*uniform_deviate(), galsim.radians)
        real_galaxy_copy.applyRotation(rand_angle)

    # set fluxes
    real_galaxy_copy.setFlux(target_flux)

    # shear
    if (g1 != 0.0 or g2 != 0.0):
        real_galaxy_copy.applyShear(g1=g1, g2=g2)

    # convolve, resample
    out_gal = galsim.Convolve([real_galaxy_copy, target_PSF])
    image = out_gal.draw(image=image, dx = target_pixel_scale)

    # return simulated image
    return image
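The docstring describes the generic resimulation recipe: (optionally) rotate and shear the training galaxy, set the target flux, convolve with the target PSF (which already includes the pixel response), and draw at the target pixel scale. A minimal sketch of the same steps in the modern (2.x) GalSim API, with a parametric galaxy standing in for the RealGalaxy:

import galsim

rng = galsim.UniformDeviate(1234)

gal = galsim.Exponential(half_light_radius=0.8)       # stand-in for the RealGalaxy copy
gal = gal.rotate(rng() * 180. * galsim.degrees)       # random rotation
gal = gal.shear(g1=0.02, g2=0.0).withFlux(1000.)      # optional shear, target flux

target_psf = galsim.Moffat(beta=3.0, fwhm=0.9)        # assumed to include the pixel response
final = galsim.Convolve([gal, target_psf])
image = final.drawImage(scale=0.2, method='no_pixel')  # no extra pixel convolution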
Example #11
File: real.py Project: mardom/GalSim
    def __init__(self, real_galaxy_catalog, index=None, id=None, random=False,
                 rng=None, x_interpolant=None, k_interpolant=None, flux=None, pad_factor=0,
                 noise_pad=False, pad_image=None, use_cache=True, gsparams=None):

        import pyfits
        import numpy as np

        if rng is None:
            rng = galsim.BaseDeviate()
        elif not isinstance(rng, galsim.BaseDeviate):
            raise TypeError("The rng provided to RealGalaxy constructor is not a BaseDeviate")
 
        # Code block below will be for galaxy selection; not all are currently implemented.  Each
        # option must return an index within the real_galaxy_catalog.        
        if index is not None:
            if id is not None or random is True:
                raise AttributeError('Too many methods for selecting a galaxy!')
            use_index = index
        elif id is not None:
            if random is True:
                raise AttributeError('Too many methods for selecting a galaxy!')
            use_index = real_galaxy_catalog._get_index_for_id(id)
        elif random is True:
            uniform_deviate = galsim.UniformDeviate(rng)
            use_index = int(real_galaxy_catalog.nobjects * uniform_deviate()) 
        else:
            raise AttributeError('No method specified for selecting a galaxy!')

        # read in the galaxy, PSF images; for now, rely on pyfits to make I/O errors.
        gal_image = real_galaxy_catalog.getGal(use_index)
        PSF_image = real_galaxy_catalog.getPSF(use_index)
        noise = real_galaxy_catalog.getNoise(use_index, rng, gsparams)

        # save any other relevant information as instance attributes
        self.catalog_file = real_galaxy_catalog.file_name
        self.index = use_index
        self.pixel_scale = float(real_galaxy_catalog.pixel_scale[use_index])

        # handle noise-padding options
        try:
            noise_pad = galsim.config.value._GetBoolValue(noise_pad,'')
            # If it's a bool and True, use the correlated noise specified in the catalog.
            if noise_pad:
                noise_pad = noise
            else:
                noise_pad = 0.
        except:
            # If it's not a bool, or convertible to a bool, leave it alone.
            pass

        self.original_image = galsim.InterpolatedImage(
                gal_image, x_interpolant=x_interpolant, k_interpolant=k_interpolant,
                dx=self.pixel_scale, pad_factor=pad_factor, noise_pad=noise_pad, rng=rng,
                pad_image=pad_image, use_cache=use_cache, gsparams=gsparams)
        # If flux is None, leave flux as given by original image
        if flux != None:
            self.original_image.setFlux(flux)

        # also make the original PSF image, with far less fanfare: we don't need to pad with
        # anything interesting.
        self.original_PSF = galsim.InterpolatedImage(
            PSF_image, x_interpolant=x_interpolant, k_interpolant=k_interpolant, 
            flux=1.0, dx=self.pixel_scale, gsparams=gsparams)
        #self.original_PSF.setFlux(1.0)

        # Calculate the PSF "deconvolution" kernel
        psf_inv = galsim.Deconvolve(self.original_PSF, gsparams=gsparams)
        # Initialize the SBProfile attribute
        GSObject.__init__(
            self, galsim.Convolve([self.original_image, psf_inv], gsparams=gsparams))

        # Save the noise in the image as an accessible attribute
        noise.convolveWith(psf_inv, gsparams)
        self.noise = noise
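The heart of the constructor above is the Deconvolve/Convolve pattern: divide out the original PSF in k-space, then reconvolve with whatever target PSF the user supplies. A minimal sketch of that pattern with the current GalSim API, using analytic profiles in place of the catalog's galaxy and PSF stamps:

import galsim

# Stand-ins for the HST galaxy and PSF stamps read from the catalog.
orig_psf = galsim.Gaussian(fwhm=0.12)
observed = galsim.Convolve([galsim.Exponential(half_light_radius=0.3), orig_psf])

psf_inv = galsim.Deconvolve(orig_psf)               # the PSF "deconvolution" kernel
deconvolved = galsim.Convolve([observed, psf_inv])  # PSF-free profile, as in __init__

# Reconvolving with a broader target PSF is then well defined and safe to draw.
target = galsim.Convolve([deconvolved, galsim.Gaussian(fwhm=0.7)])
img = target.drawImage(scale=0.2)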
Example #12
def test_conserve_dc():
    """Test that the conserve_dc option for Lanczos does so.
    Note: the idea of conserving flux is a bit of a misnomer.  No interpolant does so
    precisely in general.  What we are really testing is that a flat background input
    image has a relatively flat output image.
    """
    import time
    t1 = time.time()
    import numpy

    im1_size = 40
    scale1 = 0.23
    init_val = 1.

    im2_size = 100
    scale2 = 0.011  

    im1 = galsim.ImageF(im1_size, im1_size, scale=scale1, init_value=init_val)

    # im2 has a much smaller scale and covers a region well inside im1, so this is
    # effectively interpolating an "infinite" constant field.
    im2 = galsim.ImageF(im2_size, im2_size, scale=scale2)

    for interp in ['linear', 'cubic', 'quintic']:
        print('Testing interpolant ', interp)
        obj = galsim.InterpolatedImage(im1, x_interpolant=interp, normalization='sb')
        obj.draw(im2, normalization='sb')
        print('The maximum error is ', numpy.max(abs(im2.array - init_val)))
        numpy.testing.assert_array_almost_equal(
                im2.array,init_val,5,
                '%s did not preserve a flat input flux using xvals.'%interp)

        # Convolve with a delta function to force FFT drawing.
        delta = galsim.Gaussian(sigma=1.e-8)
        obj2 = galsim.Convolve([obj,delta])
        obj2.draw(im2, normalization='sb')
        print('The maximum error is ', numpy.max(abs(im2.array - init_val)))
        numpy.testing.assert_array_almost_equal(
                im2.array,init_val,5,
                '%s did not preserve a flat input flux using uvals.'%interp)

    for n in [3,4,5,6,7,8]:  # 8 tests the generic formulae, since not specialized.
        print('Testing Lanczos interpolant with n = ', n)
        lan = galsim.Lanczos(n, conserve_dc=True)
        obj = galsim.InterpolatedImage(im1, x_interpolant=lan, normalization='sb')
        obj.draw(im2, normalization='sb')
        print('The maximum error is ', numpy.max(abs(im2.array - init_val)))
        numpy.testing.assert_array_almost_equal(
                im2.array,init_val,5,
                'Lanczos %d did not preserve a flat input flux using xvals.'%n)
    
        # Convolve with a delta function to force FFT drawing.
        delta = galsim.Gaussian(sigma=1.e-8)
        obj2 = galsim.Convolve([obj,delta])
        obj2.draw(im2, normalization='sb')
        print('The maximum error is ', numpy.max(abs(im2.array - init_val)))
        numpy.testing.assert_array_almost_equal(
                im2.array,init_val,5,
                'Lanczos %d did not preserve a flat input flux using uvals.'%n)
 
    t2 = time.time()
    print('time for %s = %.2f' % (funcname(), t2 - t1))
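A minimal sketch of the same conserve_dc check with the current GalSim API (Lanczos object passed as the interpolant, drawImage instead of draw), for a single Lanczos order:

import numpy as np
import galsim

im1 = galsim.ImageF(40, 40, scale=0.23, init_value=1.)
im2 = galsim.ImageF(100, 100, scale=0.011)

lan = galsim.Lanczos(5, conserve_dc=True)
obj = galsim.InterpolatedImage(im1, x_interpolant=lan, normalization='sb')
obj.drawImage(im2, method='sb')          # surface-brightness values at pixel centers
print(np.max(np.abs(im2.array - 1.)))    # stays close to zero when conserve_dc=True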
Example #13
def test_realspace_conv():
    """Test that real-space convolution of an InterpolatedImage matches the FFT result
    """
    import time
    t1 = time.time()

    # Note: It is not usually a good idea to use real-space convolution with an InterpolatedImage.
    # It will almost always be much slower than the FFT convolution.  So it's probably only
    # a good idea if the image is very small and/or you absolutely need to avoid the ringing
    # that can show up in FFT convolutions.
    # That said, we still need to make sure the code is correct.  Especially since it 
    # didn't used to be, as reported in issue #432.  So, here goes.

    # set up image scale and size
    raw_scale = 0.23
    raw_size = 15

    # We draw onto a smaller image so the unit test doesn't take forever!
    target_scale = 0.7
    target_size = 3 

    gal = galsim.Exponential(flux=1.7, half_light_radius=1.2)
    gal_im = gal.draw(scale=raw_scale, image=galsim.ImageD(raw_size,raw_size))

    psf1 = galsim.Gaussian(flux=1, half_light_radius=0.77)
    psf_im = psf1.draw(scale=raw_scale, image=galsim.ImageD(raw_size,raw_size))

    if __name__ == "__main__":
        interp_list = ['linear', 'cubic', 'quintic', 'lanczos3', 'lanczos5', 'lanczos7']
    else:
        interp_list = ['linear', 'cubic', 'quintic']
    for interp in interp_list:
        # Note 1: The Lanczos interpolants pass these tests just fine.  They just take a long 
        # time to run, even with the small images we are working with.  So skip them for regular 
        # unit testing.  Developers working on this should re-enable those while testing.

        # Note 2: I couldn't get 'nearest' to pass the tests.  Specifically the im3 == im4 test.
        # I don't know whether there is a bug in the Nearest class functions (seems unlikely since
        # they are so simple) or in the real-space convolver or if the nature of the Nearest
        # interpolation (with its very large extent in k-space and hard edges in real space) is 
        # such that we don't actually expect the test to pass.  Anyway, it also takes a very long 
        # time to run (before failing), so it's probably not a good idea to use it for 
        # real-space convolution anyway.

        print('interp = ', interp)

        gal = galsim.InterpolatedImage(gal_im, x_interpolant=interp)

        # First convolve with a Gaussian:
        psf = psf1
        c1 = galsim.Convolve([gal,psf], real_space=True)
        c2 = galsim.Convolve([gal,psf], real_space=False)

        im1 = c1.draw(scale=target_scale, image=galsim.ImageD(target_size,target_size))
        im2 = c2.draw(scale=target_scale, image=galsim.ImageD(target_size,target_size))
        np.testing.assert_array_almost_equal(im1.array, im2.array, 5)

        # Now make the psf also an InterpolatedImage:
        psf=galsim.InterpolatedImage(psf_im, x_interpolant=interp, flux=1)
        c3 = galsim.Convolve([gal,psf], real_space=True)
        c4 = galsim.Convolve([gal,psf], real_space=False)

        im3 = c3.draw(scale=target_scale, image=galsim.ImageD(target_size,target_size))
        im4 = c4.draw(scale=target_scale, image=galsim.ImageD(target_size,target_size), wmult=5)
        np.testing.assert_array_almost_equal(im1.array, im3.array, 2)
        # Note: only 2 d.p. since the interpolated image version of the psf is really a different
        # profile from the original.  Especially for the lower order interpolants.  So we don't
        # expect these images to be equal to many decimal places.
        np.testing.assert_array_almost_equal(im3.array, im4.array, 5)

    t2 = time.time()
    print('time for %s = %.2f'%(funcname(),t2-t1))
Example #14
def test_corr_padding():
    """Test for correlated noise padding of InterpolatedImage."""
    import time
    t1 = time.time()

    # Set up some defaults for tests.
    decimal_precise=4
    decimal_coarse=2
    imgfile = 'fits_files/blankimg.fits'
    orig_nx = 187
    orig_ny = 164
    big_nx = 319
    big_ny = 322
    orig_seed = 151241

    # Read in some small image of a noise field from HST.
    # Rescale it to have a decently large amplitude for the purpose of doing these tests.
    im = 1.e2*galsim.fits.read(imgfile)
    # Make a CorrelatedNoise out of it.
    cn = galsim.CorrelatedNoise(im, galsim.BaseDeviate(orig_seed))

    # first, make a noise image
    orig_img = galsim.ImageF(orig_nx, orig_ny, scale=1.)
    orig_img.addNoise(cn)

    # make it into an InterpolatedImage with some zero-padding
    # (note that default is zero-padding, by factors of several)
    int_im = galsim.InterpolatedImage(orig_img)
    # draw into a larger image
    big_img = galsim.ImageF(big_nx, big_ny)
    int_im.draw(big_img, scale=1.)
    # check that variance is diluted by expected amount - should be exact, so check precisely!
    big_var_expected = np.var(orig_img.array)*float(orig_nx*orig_ny)/(big_nx*big_ny)
    np.testing.assert_almost_equal(np.var(big_img.array), big_var_expected, decimal=decimal_precise,
        err_msg='Variance not diluted by expected amount when zero-padding')

    # make it into an InterpolatedImage with noise-padding
    int_im = galsim.InterpolatedImage(orig_img, rng = galsim.GaussianDeviate(orig_seed),
                                      noise_pad = im, noise_pad_size = max(big_nx,big_ny))

    # draw into a larger image
    big_img = galsim.ImageF(big_nx, big_ny)
    int_im.draw(big_img, scale=1.)
    # check that variance is same as original - here, we cannot be too precise because the padded
    # region is not huge and the comparison will be, well, noisy.
    np.testing.assert_almost_equal(np.var(big_img.array), np.var(orig_img.array),
        decimal=decimal_coarse,
        err_msg='Variance not correct after padding image with correlated noise')

    # check that if we pass in a RNG, it is actually used to pad with the same noise field
    # basically, redo all of the above steps and draw into a new image, make sure it's the same as
    # previous.
    int_im = galsim.InterpolatedImage(
        orig_img, rng=galsim.GaussianDeviate(orig_seed), noise_pad=cn,
        noise_pad_size = max(big_nx,big_ny))
    big_img_2 = galsim.ImageF(big_nx, big_ny)
    int_im.draw(big_img_2, scale=1.)
    np.testing.assert_array_almost_equal(big_img_2.array, big_img.array, decimal=decimal_precise,
        err_msg='Cannot reproduce correlated noise-padded image with same choice of seed')

    # Finally, check inputs:
    # what if we give it a screwy way of defining the image padding?
    try:
        np.testing.assert_raises(ValueError,galsim.InterpolatedImage,orig_img,noise_pad=-1.)
    except ImportError:
        print('The assert_raises tests require nose')
    # also, check that whether we give it a string, image, or cn, it gives the same noise field
    # (given the same random seed)
    infile = 'fits_files/blankimg.fits'
    inimg = galsim.fits.read(infile)
    incf = galsim.CorrelatedNoise(inimg, galsim.GaussianDeviate()) # input RNG will be ignored below
    int_im2 = galsim.InterpolatedImage(orig_img, rng=galsim.GaussianDeviate(orig_seed),
                                       noise_pad=inimg, noise_pad_size = max(big_nx,big_ny))
    int_im3 = galsim.InterpolatedImage(orig_img, rng=galsim.GaussianDeviate(orig_seed),
                                       noise_pad=incf, noise_pad_size = max(big_nx,big_ny))
    big_img2 = galsim.ImageF(big_nx, big_ny)
    big_img3 = galsim.ImageF(big_nx, big_ny)
    int_im2.draw(big_img2, scale=1.)
    int_im3.draw(big_img3, scale=1.)
    np.testing.assert_equal(big_img2.array, big_img3.array,
                            err_msg='Diff ways of specifying correlated noise give diff answers')

    t2 = time.time()
    print('time for %s = %.2f'%(funcname(),t2-t1))
Example #15
def test_pad_image():
    """Test padding an InterpolatedImage with a pad_image."""
    import time
    t1 = time.time()

    decimal=2  # all are coarse, since there are slight changes from odd/even centering issues.
    noise_sigma = 1.73
    noise_var = noise_sigma**2
    orig_seed = 12345
    rng = galsim.BaseDeviate(orig_seed)
    noise = galsim.GaussianNoise(rng, sigma=noise_sigma)

    # make the original image 
    orig_nx = 64
    orig_ny = 64
    orig_img = galsim.ImageF(orig_nx, orig_ny, scale=1.)
    galsim.Exponential(scale_radius=1.7,flux=1000).draw(orig_img)
    orig_img.addNoise(noise)
    orig_img.setCenter(0,0)

    # We'll draw into a larger image for the tests
    pad_factor = 4
    big_nx = pad_factor*orig_nx
    big_ny = pad_factor*orig_ny
    big_img = galsim.ImageF(big_nx, big_ny, scale=1.)
    big_img.setCenter(0,0)

    # Use a few different kinds of shapes for that padding. 
    for (pad_nx, pad_ny) in [ (160,160), (179,191), (256,256), (305, 307) ]:
        #print 'pad size = ',pad_nx, pad_ny

        # make the pad_image 
        pad_img = galsim.ImageF(pad_nx, pad_ny, scale=1.)
        pad_img.addNoise(noise)
        pad_img.setCenter(0,0)

        # make an interpolated image padded with the pad_image, and outside of that
        int_im = galsim.InterpolatedImage(orig_img, pad_image=pad_img, use_true_center=False)

        # draw into the larger image
        int_im.draw(big_img, use_true_center=False)

        # check that variance is diluted by expected amount 
        # Note -- we don't use np.var, since that computes the variance relative to the 
        # actual mean value.  We just want sum(I^2)/Npix relative to the nominal I=0 value.
        var1 = np.sum(orig_img.array**2)
        if pad_nx > big_nx and pad_ny > big_ny:
            var2 = np.sum(pad_img[big_img.bounds].array**2)
        else:
            var2 = np.sum(pad_img.array**2) 
        var2 -= np.sum(pad_img[orig_img.bounds].array**2)
        var_expected = (var1 + var2) / (big_nx*big_ny)
        big_img.setCenter(0,0)
        np.testing.assert_almost_equal(
            np.mean(big_img.array**2), var_expected, decimal=decimal,
            err_msg='Variance not correct when padding with image')

        if pad_nx < big_nx and pad_ny < big_ny:
            # now also pad with noise_pad outside of the pad_image
            int_im = galsim.InterpolatedImage(orig_img, pad_image=pad_img, noise_pad=noise_var/2,
                                              noise_pad_size=max(big_nx,big_ny),
                                              rng=rng, use_true_center=False)
            int_im.draw(big_img, use_true_center=False)
    
            var3 = (noise_var/2) * float(big_nx*big_ny - pad_nx*pad_ny)
            var_expected = (var1 + var2 + var3) / (big_nx*big_ny)
            np.testing.assert_almost_equal(
                np.mean(big_img.array**2), var_expected, decimal=decimal,
                err_msg='Variance not correct after padding with image and extra noise')

    t2 = time.time()
    print('time for %s = %.2f'%(funcname(),t2-t1))
Example #16
File: instcat.py  Project: LSSTDESC/imSim
    def getObj(self,
               index,
               gsparams=None,
               rng=None,
               bandpass=None,
               chromatic=False,
               exp_time=30):
        params = self.objinfo[index]

        magnorm = self.getMagNorm(index)
        if magnorm >= 50:
            # Apparently this marks an invalid object.
            return None

        if gsparams is not None:
            gsparams = galsim.GSParams(**gsparams)

        # Make the object according to the values in the objinfo

        # Note: params here starts at 12, so all indices are 12 less than in previous code.
        if params[0].lower() == 'point':
            obj = galsim.DeltaFunction(gsparams=gsparams)

        elif params[0].lower() == 'sersic2d':
            a = float(params[1])
            b = float(params[2])
            if b > a:
                # Invalid, but existing code just lets it pass.
                return None
            pa = float(params[3])
            if self.flip_g2:
                # Previous code first did PA = 360 - params[3]
                # Then beta = 90 + PA
                beta = float(90 - pa) * galsim.degrees
            else:
                beta = float(90 + pa) * galsim.degrees

            n = float(params[4])
            # GalSim can amortize some calculations for Sersics, but only if n is the same
            # as a previous galaxy.  So quantize the n values at 0.05.  There's no way anyone
            # cares about this at higher resolution than that.
            # For now, this is not actually helpful, since n is always either 1 or 4, but if
            # we ever start having more variable n, this will prevent it from redoing Hankel
            # integrals for every galaxy.
            n = round(n * 20.) / 20.

            hlr = (a * b)**0.5  # geometric mean of a and b is close to right.
            # XXX: Note: Previous code had hlr = a, which is wrong. (?)  Galaxies were too large.
            #      Especially when they were more elliptical.  Oops.
            # TODO: Maybe not?  Check if this should be a.
            obj = galsim.Sersic(n=n, half_light_radius=hlr, gsparams=gsparams)
            shear = galsim.Shear(q=b / a, beta=beta)
            obj = obj._shear(shear)
            g1, g2, mu = self.getLens(index)
            obj = obj._lens(g1, g2, mu)

        elif params[0].lower() == 'knots':
            a = float(params[1])
            b = float(params[2])
            if b > a:
                return None
            pa = float(params[3])
            if self.flip_g2:
                beta = float(90 - pa) * galsim.degrees
            else:
                beta = float(90 + pa) * galsim.degrees
            npoints = int(params[4])
            if npoints <= 0:
                # Again, weird, but previous code just lets this pass without comment.
                return None
            hlr = (a * b)**0.5
            obj = galsim.RandomKnots(npoints=npoints,
                                     half_light_radius=hlr,
                                     rng=rng,
                                     gsparams=gsparams)
            shear = galsim.Shear(q=b / a, beta=beta)
            obj = obj._shear(shear)
            # TODO: These look bad in space images (cf. Troxel's talks about Roman sims.)
            #       Should convolve this by a smallish Gaussian *here*:
            #       I'd guess 0.3 arcsec is a good choice for the fwhm of this Gaussian.
            # obj = galsim.Convolve(obj, galsim.Gaussian(fwhm=0.3))
            g1, g2, mu = self.getLens(index)
            obj = obj._lens(g1, g2, mu)

        elif (params[0].endswith('.fits') or params[0].endswith('.fits.gz')):
            fits_file = find_file_path(params[0], get_image_dirs())
            pixel_scale = float(params[1])
            theta = float(params[2])
            obj = galsim.InterpolatedImage(fits_file,
                                           scale=pixel_scale,
                                           gsparams=gsparams)
            if theta != 0.:
                obj = obj.rotate(-theta * galsim.degrees)
            g1, g2, mu = self.getLens(index)
            obj = obj._lens(g1, g2, mu)

        else:
            raise RuntimeError("Do not know how to handle object type: %s" %
                               params[0])

        # The seds are normalized to correspond to magnorm=0.
        # The flux for the given magnorm is 10**(-0.4*magnorm)
        # The constant here, 0.9210340371976184 = 0.4 * log(10)
        flux = math.exp(-0.9210340371976184 * magnorm)

        # This gives the normalization in photons/cm^2/sec.
        # Multiply by area and exptime to get photons.
        fAt = flux * self._rubin_area * exp_time

        sed = self.getSED(index)
        if chromatic:
            return obj.withFlux(fAt) * sed
        else:
            flux = sed.calculateFlux(bandpass) * fAt
            return obj.withFlux(flux)
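
A quick standalone check of the magnorm-to-flux arithmetic used in getObj above (a sketch, not part of instcat.py; the collecting-area and exposure-time values are illustrative placeholders):

import math

magnorm = 21.0
# 0.9210340371976184 == 0.4 * ln(10), so this is the same as 10**(-0.4 * magnorm)
flux = math.exp(-0.9210340371976184 * magnorm)
assert abs(flux - 10**(-0.4 * magnorm)) < 1e-15
# Multiply by collecting area (cm^2) and exposure time (s) to get photon counts
area = 3.0e5      # cm^2, illustrative value only (not the real _rubin_area)
exp_time = 30.0   # seconds
photons = flux * area * exp_time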
Example #17
def loadPSFImages(filename):
    """
    Get an achromatic representation of the WFIRST PSF in each passband (originally generated for an
    object with a flat SED).

    If the user has generated WFIRST PSFs and stored their images in each passband using getPSF()
    followed by storePSFImages(), then loadPSFImages() can read in those stored images and
    associated data, and return an InterpolatedImage for each one.  These are intrinsically not
    chromatic objects themselves, so they can be used only if the user does not care about the
    variation of the PSF with wavelength within each passband.  In that case, use of loadPSFImages()
    can represent significant time savings compared to doing the full PSF calculation each time.

    @param filename    Name of file containing the PSF images and metadata from storePSFImages().

    @returns A nested dict containing the GSObject representing the PSF, where the keys are the
    bandpasses, and the values are dicts containing the PSF for each SCA for which results were
    tabulated.
    """
    # Get the image data and metadata.
    hdu, hdu_list, fin = galsim.fits.readFile(filename)
    metadata_hdu = hdu_list.pop()
    im_list = galsim.fits.readMulti(hdu_list=hdu_list)
    bp_list = list(metadata_hdu.data.bandpass)
    # In python3, convert from bytes to str
    bp_list = [str(bp.decode()) for bp in bp_list]
    SCA_list = list(metadata_hdu.data.SCA)
    galsim.fits.closeHDUList(hdu_list, fin)

    # Set up the dict of PSF objects, indexed by bandpass (and then SCA).
    full_PSF_dict = {}
    for band_name in set(bp_list):
        band_PSF_dict = {}

        # Find all indices in `bp_list` that correspond to this bandpass.
        bp_indices = []
        if band_name in bp_list:
            idx = -1
            while True:
                try:
                    idx = bp_list.index(band_name, idx + 1)
                    bp_indices.append(idx)
                except ValueError:
                    break

        for SCA in SCA_list:
            # Now find which element has both the right band_name and is for this SCA.  There might
            # not be any, depending on what exactly was stored.
            use_idx = -1
            for index in bp_indices:
                if SCA_list[index] == SCA:
                    use_idx = index
                    break

            # Now we know which PSF image is the right one.  So we should just make an
            # InterpolatedImage out of it.
            PSF = galsim.InterpolatedImage(im_list[use_idx])
            band_PSF_dict[SCA] = PSF

        full_PSF_dict[band_name] = band_PSF_dict

    return full_PSF_dict
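
A minimal usage sketch (not from the source): assuming the PSF images were previously written with storePSFImages(), the returned dict is indexed first by bandpass name and then by SCA number. The file name and keys below are illustrative only.

import galsim

psf_dict = loadPSFImages('wfirst_psf_images.fits')  # hypothetical file name
psf = psf_dict['Y106'][1]                           # InterpolatedImage for SCA 1 (illustrative keys)
im = psf.drawImage(scale=0.11)                      # 0.11 arcsec/pix is roughly the WFIRST pixel scale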
Example #18
    def _process_obs(self):
        """
        add observations as interpolated images

        also keep track of psfs, variances, and noise realizations
        """
        self.images = []
        self.psfs = []
        self.weights = np.zeros(len(self.observations))
        self.noise_images = []

        self._set_coadd_obs()

        for i, obs in enumerate(self.observations):

            offset = self._get_offsets(obs.meta['offset_pixels'])
            #print("offset:",offset)
            psf_offset = self._get_offsets(obs.psf.meta['offset_pixels'])
            #print("psf offset:",psf_offset)
            image_center = self.canonical_center + offset
            psf_image_center = self.psf_canonical_center + psf_offset

            # interpolated image, shifted to the center of the postage stamp
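            # The TAN WCS built below maps pixel offsets about the shifted stamp
            # center to tangent-plane (u, v) coordinates via the observation's
            # jacobian; the common world_origin puts every observation at the same
            # sky point so they can be coadded consistently.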
            jac = obs.jacobian

            wcs = galsim.TanWCS(
                affine=galsim.AffineTransform(
                    jac.dudcol,
                    jac.dudrow,
                    jac.dvdcol,
                    jac.dvdrow,
                    origin=image_center,
                ),
                world_origin=self.sky_center,
            )
            pjac = obs.psf.jacobian
            psf_wcs = galsim.TanWCS(
                affine=galsim.AffineTransform(
                    pjac.dudcol,
                    pjac.dudrow,
                    pjac.dvdcol,
                    pjac.dvdrow,
                    origin=psf_image_center,
                ),
                world_origin=self.sky_center,
            )

            image = galsim.InterpolatedImage(
                galsim.Image(obs.image, wcs=wcs),
                offset=offset,
                x_interpolant=self.interp,
            )

            # always normalizing psf
            psf_image = obs.psf.image.copy()
            psf_image /= psf_image.sum()

            psf = galsim.InterpolatedImage(
                galsim.Image(psf_image, wcs=psf_wcs),
                offset=psf_offset,
                x_interpolant=self.interp,
            )

            self.images.append(image)

            self.psfs.append(psf)

            # assume variance is constant
            wt = obs.weight.max()
            if self.weight_type == 'noise-fwhm':
                fwhm = measure_fwhm(psf_image)
                wt /= fwhm**4
            self.weights[i] = wt

            # use input noise image
            noise_image = galsim.InterpolatedImage(
                galsim.Image(obs.noise, wcs=wcs),
                offset=offset,
                x_interpolant=self.interp,
            )

            self.noise_images.append(noise_image)

        self.weights /= self.weights.sum()
Example #19
def main(argv):
    """
    Make images using constant PSF and variable shear:
      - The main image is 2048 x 2048 pixels.
      - Pixel scale is 0.2 arcsec/pixel, hence the image is about 0.11 degrees on a side.
      - Applied shear is from a cosmological power spectrum read in from file.
      - The PSF is a real one from SDSS, and corresponds to a convolution of atmospheric PSF,
        optical PSF, and pixel response, which has been sampled at pixel centers.  We used a PSF
        from SDSS in order to have a PSF profile that could correspond to what you see with a real
        telescope. However, in order that the galaxy resolution not be too poor, we tell GalSim that
        the pixel scale for that PSF image is 0.2" rather than 0.396".  We are simultaneously lying
        about the intrinsic size of the PSF and about the pixel scale when we do this.
      - The galaxies come from COSMOSCatalog, which can produce either RealGalaxy profiles
        (like in demo10) or parametric fits to those profiles.  We choose 30% of the galaxies
        to use the images, and the other 70% to use the parametric fits.
      - The real galaxy images include some initial correlated noise from the original HST
        observation.  However, we whiten the noise of the final image so the final image has
        stationary Gaussian noise, rather than correlated noise.
    """
    logging.basicConfig(format="%(message)s",
                        level=logging.INFO,
                        stream=sys.stdout)
    logger = logging.getLogger("demo11")

    # Define some parameters we'll use below.
    # Normally these would be read in from some parameter file.

    pixel_scale = 0.2  # arcsec/pixel
    image_size = 2048  # size of image in pixels
    image_size_arcsec = image_size * pixel_scale  # size of big image in each dimension (arcsec)
    noise_variance = 5.e4  # ADU^2  (Just use simple Gaussian noise here.)
    nobj = 288  # number of galaxies in entire field
    # (This corresponds to 8 galaxies / arcmin^2)
    grid_spacing = 90.0  # The spacing between the samples for the power spectrum
    # realization (arcsec)
    tel_diam = 4  # Let's figure out the flux for a 4 m class telescope
    exp_time = 300  # exposing for 300 seconds.
    center_ra = 19.3 * galsim.hours  # The RA, Dec of the center of the image on the sky
    center_dec = -33.1 * galsim.degrees

    # The catalog returns objects that are appropriate for HST in 1 second exposures.  So for our
    # telescope we scale up by the relative area and exposure time.  Note that what is important is
    # the *effective* area after taking into account obscuration.  For HST, the telescope diameter
    # is 2.4 but there is obscuration (a linear factor of 0.33).  Here, we assume that the telescope
    # we're simulating effectively has no obscuration factor.  We're also ignoring the pi/4 factor
    # since it appears in the numerator and denominator, so we use area = diam^2.
    hst_eff_area = 2.4**2 * (1. - 0.33**2)
    flux_scaling = (tel_diam**2 / hst_eff_area) * exp_time

    # random_seed is used for both the power spectrum realization and the random properties
    # of the galaxies.
    random_seed = 24783923

    file_name = os.path.join('output', 'tabulated_power_spectrum.fits.fz')

    logger.info('Starting demo script 11')

    # Read in galaxy catalog
    # The COSMOSCatalog uses the same input file as we have been using for RealGalaxyCatalogs
    # along with a second file called real_galaxy_catalog_23.5_example_fits.fits, which stores
    # the information about the parametric fits.  There is no need to specify the second file
    # name, since the name is derivable from the name of the main catalog.
    if True:
        # The catalog we distribute with the GalSim code only has 100 galaxies.
        # The galaxies will typically be reused several times here.
        cat_file_name = 'real_galaxy_catalog_23.5_example.fits'
        dir = 'data'
        cosmos_cat = galsim.COSMOSCatalog(cat_file_name, dir=dir)
    else:
        # If you've run galsim_download_cosmos, you can leave out the cat_file_name and dir
        # to use the full COSMOS catalog with 56,000 galaxies in it.
        cosmos_cat = galsim.COSMOSCatalog()
    logger.info('Read in %d galaxies from catalog', cosmos_cat.nobjects)

    # Setup the PowerSpectrum object we'll be using:
    # To do this, we first have to read in the tabulated shear power spectrum, often denoted
    # C_ell(ell), where ell has units of inverse angle and C_ell has units of angle^2.  However,
    # GalSim works in the flat-sky approximation, so we use this notation interchangeably with
    # P(k).  GalSim does not calculate shear power spectra for users, who must be able to provide
    # their own (or use the examples in the repository).
    #
    # Here we use a tabulated power spectrum from iCosmo (http://icosmo.org), with the following
    # cosmological parameters and survey design:
    # H_0 = 70 km/s/Mpc
    # Omega_m = 0.25
    # Omega_Lambda = 0.75
    # w_0 = -1.0
    # w_a = 0.0
    # n_s = 0.96
    # sigma_8 = 0.8
    # Smith et al. prescription for the non-linear power spectrum.
    # Eisenstein & Hu transfer function with wiggles.
    # Default dN/dz with z_med = 1.0
    # The file has, as required, just two columns which are k and P(k).  However, iCosmo works in
    # terms of ell and C_ell; ell is inverse radians and C_ell in radians^2.  Since GalSim tends to
    # work in terms of arcsec, we have to tell it that the inputs are radians^-1 so it can convert
    # to store in terms of arcsec^-1.
    pk_file = os.path.join('data', 'cosmo-fid.zmed1.00.out')
    ps = galsim.PowerSpectrum(pk_file, units=galsim.radians)
    # The argument here is "e_power_function" which defines the E-mode power to use.
    logger.info('Set up power spectrum from tabulated P(k)')

    # Now let's read in the PSF.  It's a real SDSS PSF, which means pixel scale of 0.396".  However,
    # the typical seeing is 1.2" and we want to simulate better seeing, so we will just tell GalSim
    # that the pixel scale is 0.2".  We have to be careful with SDSS PSF images, as they have an
    # added 'soft bias' of 1000 which has been removed before creation of this file, so that the sky
    # level is properly zero.  Also, the file is bzipped, to demonstrate the ability of GalSim
    # to handle this kind of compressed file (among others).  We read the image directly into an
    # InterpolatedImage GSObject, so we can manipulate it as needed (here, the only manipulation
    # needed is convolution).  The flux is 1 as needed for a PSF.
    psf_file = os.path.join('data', 'example_sdss_psf_sky0.fits.bz2')
    psf = galsim.InterpolatedImage(psf_file, scale=pixel_scale, flux=1.)
    logger.info('Read in PSF image from bzipped FITS file')

    # Setup the image:
    full_image = galsim.ImageF(image_size, image_size)

    # The default convention for indexing an image is to follow the FITS standard where the
    # lower-left pixel is called (1,1).  However, this can be counter-intuitive to people more
    # used to C or python indexing, where indices start at 0.  It is possible to change the
    # coordinates of the lower-left pixel with the methods `setOrigin`.  For this demo, we
    # switch to 0-based indexing, so the lower-left pixel will be called (0,0).
    full_image.setOrigin(0, 0)

    # As for demo10, we use random_seed for the random numbers required for the
    # whole image.  In this case, both the power spectrum realization and the noise on the
    # full image we apply later.
    rng = galsim.BaseDeviate(random_seed)

    # We want to make random positions within our image.  However, currently for shears from a power
    # spectrum we first have to get shears on a grid of positions, and then we can choose random
    # positions within that.  So, let's make the grid.  We're going to make it as large as the
    # image, with grid points spaced by 90 arcsec (hence interpolation only happens below 90"
    # scales, below the interesting scales on which we want the shear power spectrum to be
    # represented exactly).  The lensing engine wants positions in arcsec, so calculate that:
    ps.buildGrid(grid_spacing=grid_spacing,
                 ngrid=int(math.ceil(image_size_arcsec / grid_spacing)),
                 rng=rng)
    logger.info('Made gridded shears')

    # We keep track of how much noise is already in the image from the RealGalaxies.
    # The default initial value is all pixels = 0.
    noise_image = galsim.ImageF(image_size, image_size)
    noise_image.setOrigin(0, 0)

    # Make a slightly non-trivial WCS.  We'll use a slightly rotated coordinate system
    # and center it at the image center.
    theta = 0.17 * galsim.degrees
    # ( dudx  dudy ) = ( cos(theta)  -sin(theta) ) * pixel_scale
    # ( dvdx  dvdy )   ( sin(theta)   cos(theta) )
    # Aside: You can call numpy trig functions on Angle objects directly, rather than getting
    #        their values in radians first.  Or, if you prefer, you can write things like
    #        theta.sin() or theta.cos(), which are equivalent.
    dudx = numpy.cos(theta) * pixel_scale
    dudy = -numpy.sin(theta) * pixel_scale
    dvdx = numpy.sin(theta) * pixel_scale
    dvdy = numpy.cos(theta) * pixel_scale
    image_center = full_image.true_center
    affine = galsim.AffineTransform(dudx,
                                    dudy,
                                    dvdx,
                                    dvdy,
                                    origin=full_image.true_center)

    # We can also put it on the celestial sphere to give it a bit more realism.
    # The TAN projection takes a (u,v) coordinate system on a tangent plane and projects
    # that plane onto the sky using a given point as the tangent point.  The tangent
    # point should be given as a CelestialCoord.
    sky_center = galsim.CelestialCoord(ra=center_ra, dec=center_dec)

    # The third parameter, units, defaults to arcsec, but we make it explicit here.
    # It sets the angular units of the (u,v) intermediate coordinate system.
    wcs = galsim.TanWCS(affine, sky_center, units=galsim.arcsec)
    full_image.wcs = wcs

    # Now we need to loop over our objects:
    for k in range(nobj):
        time1 = time.time()
        # The usual random number generator using a different seed for each galaxy.
        ud = galsim.UniformDeviate(random_seed + k + 1)

        # Choose a random RA, Dec around the sky_center.
        # Note that for this to come out close to a square shape, we need to account for the
        # cos(dec) part of the metric: ds^2 = dr^2 + r^2 d(dec)^2 + r^2 cos^2(dec) d(ra)^2
        # So need to calculate dec first.
        dec = center_dec + (ud() - 0.5) * image_size_arcsec * galsim.arcsec
        ra = center_ra + (
            ud() - 0.5) * image_size_arcsec / numpy.cos(dec) * galsim.arcsec
        world_pos = galsim.CelestialCoord(ra, dec)

        # We will need the image position as well, so use the wcs to get that
        image_pos = wcs.toImage(world_pos)

        # We also need this in the tangent plane, which we call "world coordinates" here,
        # since the PowerSpectrum class is really defined on that plane, not in (ra,dec).
        uv_pos = affine.toWorld(image_pos)

        # Get the reduced shears and magnification at this point
        g1, g2, mu = ps.getLensing(pos=uv_pos)

        # Now we will have the COSMOSCatalog make a galaxy profile for us.  It can make either
        # a RealGalaxy using the original HST image and PSF, or a parametric model based on
        # parametric fits to the light distribution of the HST observation.  The parametric
        # models are either a Sersic fit to the data or a bulge + disk fit according to which
        # one gave the better chisq value.  We will select a galaxy at random from the catalog.
        # One could easily do this by choosing an index = int(ud() * cosmos_cat.nobjects), but
        # we will instead allow the catalog to choose a random galaxy for us.  It will remove any
        # selection effects involved in postage stamp creation using weights that are stored in
        # the catalog.  (If for some reason you prefer not to do that, you can always choose a
        # purely random index yourself using int(ud() * cosmos_cat.nobjects).)  We employ this
        # random selection by simply failing to specify an index or identifier for a galaxy, in
        # which case it chooses a random one.

        # First determine whether we will make a real galaxy (`gal_type = 'real'`) or a parametric
        # galaxy (`gal_type = 'parametric'`).  The real galaxies take longer to render, so for this
        # script, we just use them 30% of the time and use parametric galaxies the other 70%.

        # We could just use `ud()<0.3` for this, but instead we introduce another Deviate type
        # available in GalSim that we haven't used yet: BinomialDeviate.
        # It takes an N and p value and returns integers according to a binomial distribution.
        # i.e. How many heads you get after N flips if each flip has a chance, p, of being heads.
        binom = galsim.BinomialDeviate(ud, N=1, p=0.3)
        real = binom()

        if real:
            # For real galaxies, we will want to whiten the noise in the image (below).
            # When whitening the image, we need to make sure the original correlated noise is
            # present throughout the whole image, otherwise the whitening will do the wrong thing
            # to the parts of the image that don't include the original image.  The RealGalaxy
            # stores the correct noise profile to use as the gal.noise attribute.  This noise
            # profile is automatically updated as we shear, dilate, convolve, etc.  But we need to
            # tell it how large to pad with this noise by hand.  This is a bit complicated for the
            # code to figure out on its own, so we have to supply the size for noise padding
            # with the noise_pad_size parameter.

            # The large galaxies will render fine without any noise padding, but the postage stamp
            # for the smaller galaxies will be sized appropriately for the PSF, which may make the
            # stamp larger than the original galaxy image.  The psf image is 40 x 40, although
            # the bright part is much more concentrated than that.  If we pad out the galaxy image
            # to at least 40 x sqrt(2), we should be safe even if the galaxy image is rotated
            # with respect to the psf image.
            #     noise_pad_size = 40 * sqrt(2) * 0.2 arcsec/pixel = 11.3 arcsec
            gal = cosmos_cat.makeGalaxy(gal_type='real',
                                        rng=ud,
                                        noise_pad_size=11.3)
        else:
            gal = cosmos_cat.makeGalaxy(gal_type='parametric', rng=ud)

        # Apply a random rotation
        theta = ud() * 2.0 * numpy.pi * galsim.radians
        gal = gal.rotate(theta)

        # Rescale the flux to match our telescope configuration.
        # This automatically scales up the noise variance by flux_scaling**2.
        gal *= flux_scaling

        # Apply the cosmological (reduced) shear and magnification at this position using a single
        # GSObject method.
        gal = gal.lens(g1, g2, mu)

        # Convolve with the PSF.
        final = galsim.Convolve(psf, gal)

        # Account for the fractional part of the position
        # cf. demo9.py for an explanation of this nominal position stuff.
        x_nominal = image_pos.x + 0.5
        y_nominal = image_pos.y + 0.5
        ix_nominal = int(math.floor(x_nominal + 0.5))
        iy_nominal = int(math.floor(y_nominal + 0.5))
        dx = x_nominal - ix_nominal
        dy = y_nominal - iy_nominal
        offset = galsim.PositionD(dx, dy)

        # We use method='no_pixel' here because the SDSS PSF image that we are using includes the
        # pixel response already.
        stamp = final.drawImage(wcs=wcs.local(image_pos),
                                offset=offset,
                                method='no_pixel')

        # Recenter the stamp at the desired position:
        stamp.setCenter(ix_nominal, iy_nominal)

        # Find the overlapping bounds:
        bounds = stamp.bounds & full_image.bounds

        # Now, if we are using a real galaxy, we want to either whiten or at least symmetrize the
        # noise on the postage stamp to avoid having to deal with correlated noise in any kind of
        # image processing you would want to do on the final image.  (Like measure galaxy shapes.)

        # Galsim automatically propagates the noise correctly from the initial RealGalaxy object
        # through the applied shear, distortion, rotation, and convolution into the final object's
        # noise attribute.  To make the noise fully white, use the image.whitenNoise() method.
        # The returned value is the variance of the Gaussian noise that is present after the
        # whitening process.

        # However, this is often overkill for many applications.  If it is acceptable to merely end
        # up with noise with some degree of symmetry (say 4-fold or 8-fold symmetry), then you can
        # instead have GalSim just add enough noise to make the resulting noise have this kind of
        # symmetry.  Usually this requires adding significantly less additional noise, which means
        # you can have the resulting total variance be somewhat smaller.  The returned variance
        # corresponds to the zero-lag value of the noise correlation function, which will still have
        # off-diagonal elements.  We can do this step using the image.symmetrizeNoise() method.
        if real:
            if True:
                # We use the symmetrizing option here.
                new_variance = stamp.symmetrizeNoise(final.noise, 8)
            else:
                # Here is how you would do it if you wanted to fully whiten the image.
                new_variance = stamp.whitenNoise(final.noise)

            # We need to keep track of how much variance we have currently in the image, so when
            # we add more noise, we can omit what is already there.
            noise_image[bounds] += new_variance

        # Finally, add the stamp to the full image.
        full_image[bounds] += stamp[bounds]

        time2 = time.time()
        tot_time = time2 - time1
        logger.info('Galaxy %d: position relative to center = %s, t=%f s', k,
                    str(uv_pos), tot_time)

    # We already have some noise in the image, but it isn't uniform.  So the first thing to do is
    # to make the Gaussian noise uniform across the whole image.  We have a special noise class
    # that can do this.  VariableGaussianNoise takes an image of variance values and applies
    # Gaussian noise with the corresponding variance to each pixel.
    # So all we need to do is build an image with how much noise to add to each pixel to get us
    # up to the maximum value that we already have in the image.
    max_current_variance = numpy.max(noise_image.array)
    noise_image = max_current_variance - noise_image
    vn = galsim.VariableGaussianNoise(rng, noise_image)
    full_image.addNoise(vn)

    # Now max_current_variance is the noise level across the full image.  We don't want to add that
    # twice, so subtract off this much from the intended noise that we want to end up in the image.
    noise_variance -= max_current_variance

    # Now add Gaussian noise with this variance to the final image.  We have to do this step
    # at the end, rather than adding to individual postage stamps, in order to get the noise
    # level right in the overlap regions between postage stamps.
    noise = galsim.GaussianNoise(rng, sigma=math.sqrt(noise_variance))
    full_image.addNoise(noise)
    logger.info('Added noise to final large image')

    # Now write the image to disk.  It is automatically compressed with Rice compression,
    # since the filename we provide ends in .fz.
    full_image.write(file_name)
    logger.info('Wrote image to %r', file_name)

    # Compute some sky positions of some of the pixels to compare with the values of RA, Dec
    # that ds9 reports.  ds9 always uses (1,1) for the lower left pixel, so the pixel coordinates
    # of these pixels are different by 1, but you can check that the RA and Dec values are
    # the same as what GalSim calculates.
    ra_str = center_ra.hms()
    dec_str = center_dec.dms()
    logger.info('Center of image    is at RA %sh %sm %ss, DEC %sd %sm %ss',
                ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3],
                dec_str[3:5], dec_str[5:])
    for (x, y) in [(0, 0), (0, image_size - 1), (image_size - 1, 0),
                   (image_size - 1, image_size - 1)]:
        world_pos = wcs.toWorld(galsim.PositionD(x, y))
        ra_str = world_pos.ra.hms()
        dec_str = world_pos.dec.dms()
        logger.info('Pixel (%4d, %4d) is at RA %sh %sm %ss, DEC %sd %sm %ss',
                    x, y, ra_str[0:3], ra_str[3:5], ra_str[5:], dec_str[0:3],
                    dec_str[3:5], dec_str[5:])
    logger.info(
        'ds9 reports these pixels as (1,1), (1,2048), etc. with the same RA, Dec.'
    )
Example #20
    def sample(self,
               cat,
               noise=None,
               rng=None,
               x_interpolant=None,
               k_interpolant=None,
               pad_factor=4,
               noise_pad_size=0,
               gsparams=None,
               session_config=None):
        """
        Samples galaxy images from the model
        """
        # If we are sampling for the first time
        if self.module is None:
            self.module = hub.Module(self.file_name)

            self.sess = tf.Session(session_config)
            self.sess.run(tf.global_variables_initializer())

            self.inputs = {}
            for k in self.quantities + self.random_variables:
                tensor_info = self.module.get_input_info_dict()[k]
                self.inputs[k] = tf.placeholder(tensor_info.dtype,
                                                shape=tensor_info.get_shape(),
                                                name=k)

            self.generated_images = self.module(self.inputs)

        # Populate feed dictionary with input data
        feed_dict = {self.inputs[k]: cat[k] for k in self.quantities}

        # If not provided, create a RNG
        if rng is None:
            rng = galsim.BaseDeviate(rng)
            orig_rng = rng.duplicate()

        # Look for requested random_variables
        if 'random_normal' in self.random_variables:
            # Draw a random normal from the galsim RNG
            noise_shape = self.module.get_input_info_dict(
            )['random_normal'].get_shape()
            noise_shape = [len(cat)] + [
                noise_shape[i + 1].value for i in range(len(noise_shape) - 1)
            ]
            noise_array = np.empty(np.prod(noise_shape), dtype=float)
            gd = galsim.random.GaussianDeviate(rng, sigma=1)
            gd.generate(noise_array)
            feed_dict[self.inputs['random_normal']] = noise_array.reshape(
                noise_shape).astype('float32')

        # Run the graph
        x = self.sess.run(self.generated_images, feed_dict=feed_dict)

        # Now, we build an InterpolatedImage for each of these
        ims = []
        for i in range(len(x)):
            im = galsim.Image(np.ascontiguousarray(x[i].reshape(
                (self.stamp_size, self.stamp_size)).astype(np.float64)),
                              scale=self.pixel_size)

            ims.append(
                galsim.InterpolatedImage(im,
                                         x_interpolant=x_interpolant,
                                         k_interpolant=k_interpolant,
                                         pad_factor=pad_factor,
                                         noise_pad_size=noise_pad_size,
                                         noise_pad=noise,
                                         rng=rng,
                                         gsparams=gsparams))
        if len(ims) == 1:
            ims = ims[0]

        return ims
Example #21
def get_CRG(cat, rng, row):
    """Create CRG for a given input parametrs form catsim.
    Bulge + Disk galaxy is created, convolved with HST PSF, drawn in HST V and
    I bands for 1 second exposure. Correlated noise (from AEGIS images)
    is added to each image. SNR in a gaussian elliptical aperture is computed.
    cgr1: The galaxy +psf images + noise correlation function is provided as
    input to CRG with default polynomial SEDs.
    crg2: same as crg1 except, the input galaxy images are padded with noise.
    This enables us to to draw the CRG image larger than the input image,
    and not have boundary edges.
    crg3: same as crg2 except the SEDS of bulge and disk are provided as input
    to CRG.
    @cat    catsim row containig catsim galaxy parametrs.
    @rng    random number generator.
    @row    astropy table to save measurents.
    """
    #  HST scale
    scale = 0.03
    area = 4.437 * 10000  # np.pi * (2.4 * 100 / 2.)**2
    v_exptime = 1  # 2260
    i_exptime = 1  # 2100
    psf_sig = 0.06
    nx, ny = get_npix(cat, scale, psf_sig)
    print "Number of HST pixels:", nx, ny
    b_r, b_g = a_b2re_e(cat['a_b'], cat['b_b'])
    d_r, d_g = a_b2re_e(cat['a_d'], cat['b_d'])
    b_s = galsim.Shear(g=b_g, beta=cat['pa_bulge'] * galsim.degrees)
    d_s = galsim.Shear(g=d_g, beta=cat['pa_disk'] * galsim.degrees)
    input_p = cg_fn.Eu_Args(scale=scale,
                            redshift=cat['redshift'],
                            disk_n=cat['bulge_n'],
                            bulge_n=cat['disk_n'],
                            disk_HLR=d_r,
                            bulge_HLR=b_r,
                            bulge_e=[b_s.e1, b_s.e2],
                            disk_e=[d_s.e1, d_s.e2],
                            psf_sig_o=0.071,
                            psf_w_o=806,
                            bulge_frac=0.5)
    input_p.T_flux = 2
    gal, PSF, con, seds = get_gal(input_p, cat, get_seds=True)
    # get bandpass
    V = cg_fn.get_HST_Bandpass('F606W')
    I = cg_fn.get_HST_Bandpass('F814W')
    c_sed = seds[0] + seds[1]
    temp_d = seds[1] * cat['fluxnorm_disk']
    temp_b = seds[0] * cat['fluxnorm_bulge']
    b_sed_mag = [temp_b.calculateMagnitude(V), temp_b.calculateMagnitude(I)]
    d_sed_mag = [temp_d.calculateMagnitude(V), temp_d.calculateMagnitude(I)]
    row['b_mag'] = b_sed_mag
    row['d_mag'] = d_sed_mag
    gal_im_v = con.drawImage(V,
                             nx=nx,
                             ny=ny,
                             scale=scale,
                             area=area,
                             exptime=v_exptime)
    gal_im_i = con.drawImage(I,
                             nx=nx,
                             ny=ny,
                             scale=scale,
                             area=area,
                             exptime=i_exptime)
    flux = np.array([gal_im_v.array.sum(), gal_im_i.array.sum()])
    snr_num = np.array([np.sum(gal_im_v.array**2), np.sum(gal_im_i.array**2)])
    # correlated noise to add to image
    noise_v = galsim.getCOSMOSNoise(file_name='data/acs_V_unrot_sci_cf.fits',
                                    rng=rng)
    noise_i = galsim.getCOSMOSNoise(file_name='data/acs_I_unrot_sci_cf.fits',
                                    rng=rng)
    # Add noise
    gal_im_v.addNoise(noise_v)
    gal_im_i.addNoise(noise_i)
    var_v = noise_v.getVariance()
    var_i = noise_i.getVariance()
    var = np.array([var_v, var_i])
    # Compute sn_ellip_gauss
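    # (The aperture noise below treats the adaptive-moments Gaussian as an
    #  effective aperture of area 2*pi*sigma^2, so the S/N is moments_amp over
    #  sqrt(pixel variance * 2*pi*moments_sigma^2) in each band.)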
    try:
        res_v = galsim.hsm.FindAdaptiveMom(gal_im_v)
        res_i = galsim.hsm.FindAdaptiveMom(gal_im_i)
        aperture_noise_v = np.sqrt(var_v * 2 * np.pi *
                                   (res_v.moments_sigma**2))
        aperture_noise_i = np.sqrt(var_i * 2 * np.pi *
                                   (res_i.moments_sigma**2))
        sn_ellip_gauss_v = res_v.moments_amp / aperture_noise_v
        sn_ellip_gauss_i = res_i.moments_amp / aperture_noise_i
    except:
        sn_ellip_gauss_v = -10
        sn_ellip_gauss_i = -10
    row['HST_sn_ellip_gauss'] = np.array([sn_ellip_gauss_v, sn_ellip_gauss_i])
    row['HST_noise_var'] = var
    row['HST_flux'] = flux
    row['HST_SNR'] = np.sqrt(snr_num / var)
    # covariance function for CRG input
    xi_v = galsim.getCOSMOSNoise(file_name='data/acs_V_unrot_sci_cf.fits',
                                 variance=var_v,
                                 rng=rng)
    xi_i = galsim.getCOSMOSNoise(file_name='data/acs_I_unrot_sci_cf.fits',
                                 variance=var_i,
                                 rng=rng)
    psf_v = cg_fn.get_eff_psf(PSF, c_sed, V)
    psf_i = cg_fn.get_eff_psf(PSF, c_sed, I)
    eff_PSFs = [psf_v, psf_i]
    print "Creating CRG with noise padding"
    cg_size = int(
        max(cat['BulgeHalfLightRadius'], cat['DiskHalfLightRadius'],
            2 * psf_sig) * 12)
    print "pad size", cg_size
    intp_gal_v = galsim.InterpolatedImage(gal_im_v,
                                          noise_pad=noise_v,
                                          noise_pad_size=cg_size)
    gal_im_v_pad = intp_gal_v._pad_image
    intp_gal_i = galsim.InterpolatedImage(gal_im_i,
                                          noise_pad=noise_i,
                                          noise_pad_size=cg_size)
    gal_im_i_pad = intp_gal_i._pad_image
    print "CRG input im shape ", gal_im_v_pad.array.shape[0] * scale
    #  Polynomial SEDs
    images = [gal_im_v_pad, gal_im_i_pad]
    crg1 = galsim.ChromaticRealGalaxy.makeFromImages(images=images,
                                                     bands=[V, I],
                                                     xis=[xi_v, xi_i],
                                                     PSFs=eff_PSFs)
    return crg1
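
A follow-on sketch (not from the source) of how the returned ChromaticRealGalaxy might be used: convolve it with a target PSF and draw it through a bandpass. The PSF, bandpass choice, and image size here are illustrative assumptions.

crg = get_CRG(cat, rng, row)                        # cat, rng, row as in the function above
target_psf = galsim.Kolmogorov(fwhm=0.7)            # illustrative ground-based PSF
final = galsim.Convolve(crg, target_psf)
bp = cg_fn.get_HST_Bandpass('F814W')                # any galsim.Bandpass would do here
img = final.drawImage(bp, nx=64, ny=64, scale=0.2)  # drawn larger than the HST input stamp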
Example #22
def fitcosmosgalaxy(idcosmosgs, srcs, modelspecs, results=None, plot=False, redo=True,
                    modellib="scipy", modellibopts=None, hst2hscmodel=None,
                    resetimages=False, resetfitlogs=False):
    if results is None:
        results = {}
    np.random.seed(idcosmosgs)
    radec = rgcfits[idcosmosgs][1:3]
    imghst = rgcat.getGalImage(idcosmosgs)
    scalehst = rgcfits[idcosmosgs]['PIXEL_SCALE']
    bandhst = rgcat.band[idcosmosgs]
    psfhst = rgcat.getPSF(idcosmosgs)
    if "hsc" in srcs or "hst2hsc" in srcs:
        # Get the HSC dataRef
        spherePoint = geom.SpherePoint(radec[0], radec[1], geom.degrees)
        patch = skymap[tract].findPatch(spherePoint).getIndex()
        patch = ",".join([str(x) for x in patch])
        dataId2 = {"tract": 9813, "patch": patch, "filter": "HSC-I"}
        dataRef = butler.dataRef("deepCoadd", dataId=dataId2)
        # Get the coadd
        exposure = dataRef.get("deepCoadd_calexp")
        scalehsc = exposure.getWcs().getPixelScale().asArcseconds()
        # Get the measurements
        measCat = dataRef.get("deepCoadd_meas")
        # Get and verify match
        distsq = ((radec[0] - np.degrees(measCat["coord_ra"])) ** 2 +
                  (radec[1] - np.degrees(measCat["coord_dec"])) ** 2)
        row = np.int(np.argmin(distsq))
        idHsc = measCat["id"][row]
        dist = np.sqrt(distsq[row]) * 3600
        print('Source distance={:.2e}"'.format(dist))
        # TODO: Threshold distance?
        if dist > 1:
            raise RuntimeError("Nearest HSC source at distance {:.3e}>1; aborting".format(dist))
        # Determine the HSC cutout size (larger than HST due to bigger PSF)
        sizeCutout = np.int(4 + np.ceil(np.max(imghst.array.shape) * scalehst / scalehsc))
        sizeCutout += np.int(sizeCutout % 2)

    # This does all of the necessary setup for each src. It should persist somehow
    for src in srcs:
        srcname = src
        # TODO: None of this actually works in multiband, which it should for HSC one day
        band = rgcat.band[idcosmosgs] if src == "hst" else "HSC-I"
        metadata = {"band": band}
        mask = None
        if src == "hst":
            img = imghst
            psf = psfhst
            sigmainverse = np.power(rgcat.getNoiseProperties(idcosmosgs)[2], -0.5)
        else:
            psf = exposure.getPsf()
            scalehscpsf = psf.getWcs(0).getPixelScale().asArcseconds()
            imgpsf = psf.computeImage().array
            imgpsfgs = gs.InterpolatedImage(gs.Image(imgpsf, scale=scalehscpsf))
            useNoiseReplacer = True
            if useNoiseReplacer:
                noiseReplacer = rebuildNoiseReplacer(exposure, measCat)
                noiseReplacer.insertSource(idHsc)
            cutouthsc = make_cutout.make_cutout_lsst(
                spherePoint, exposure, size=np.floor_divide(sizeCutout, 2))
            idshsc = cutouthsc[4]
            var = exposure.getMaskedImage().getVariance().array[
                  idshsc[3]: idshsc[2], idshsc[1]: idshsc[0]]
            if src == "hst2hsc":
                # The COSMOS GalSim catalog is in the original HST frame, which is rotated by
                # 10-12 degrees from RA/Dec axes; fit for this
                result, anglehst, offsetxhst, offsetyhst = fitcosmosgalaxytransform(
                    radec[0], radec[1], imghst, imgpsfgs, sizeCutout, cutouthsc[0], var, scalehsc, plot=plot
                )
                fluxscale = (10 ** result.x[0])
                metadata["lenhst2hsc"] = scalehst/scalehsc
                metadata["fluxscalehst2hsc"] = fluxscale
                metadata["anglehst2hsc"] = anglehst
                metadata["offsetxhst2hsc"] = offsetxhst
                metadata["offsetyhst2hsc"] = offsetyhst

                realgalaxy = ccat.makeGalaxy(index=idcosmosgs, gal_type="real")

                # Assuming that these images match, add HSC noise back in
                if hst2hscmodel is None:
                    # TODO: Fix this as it's not working by default
                    img = imgoffsetchisq(result.x, returnimg=True, imgref=cutouthsc[0],
                                         psf=imgpsfgs, nx=sizeCutout, ny=sizeCutout, scale=scalehsc)
                    img = gs.Convolve(img, imgpsfgs).drawImage(nx=sizeCutout, ny=sizeCutout,
                                                               scale=scalehsc) * fluxscale
                    # The PSF is now HSTPSF*HSCPSF, and "truth" is the deconvolved HST image/model
                    psf = gs.Convolve(imgpsfgs, psfhst.rotate(anglehst * gs.degrees)).drawImage(
                        nx=imgpsf.shape[1], ny=imgpsf.shape[0], scale=scalehscpsf
                    )
                    psf /= np.sum(psf.array)
                    psf = gs.InterpolatedImage(psf)
                else:
                    fits = results['hst']['fits']['galsim']
                    if hst2hscmodel == 'best':
                        chisqredsmodel = {model: fit['fits'][-1]['chisqred'] for model, fit in fits.items()}
                        modeltouse = min(chisqredsmodel, key=chisqredsmodel.get)
                    else:
                        modeltouse = hst2hscmodel
                    modeltype = fits[modeltouse]['modeltype']
                    srcname = "_".join([src, hst2hscmodel])
                    # TODO: Store the name of the PyProFit model somewhere
                    # In fact there wasn't really any need to store a model since we
                    # can reconstruct it, but let's go ahead and use the unpickled one
                    paramsbest = fits[modeltouse]['fits'][-1]['paramsbestalltransformed']
                    # Apply all of the same rotations and shifts directly to the model
                    modeltouse = results['hst']['models'][modeltype]
                    metadata["hst2hscmodel"] = modeltouse
                    scalefactor = scalehst / scalehsc
                    imghstshape = imghst.array.shape
                    # I'm pretty sure that these should all be converted to arcsec units
                    for param, value in zip(modeltouse.getparameters(fixed=True), paramsbest):
                        param.setvalue(value, transformed=True)
                        valueset = param.getvalue(transformed=False)
                        if param.name == "cenx":
                            valueset = (scalehst * (valueset - imghstshape[1] / 2) + result.x[1]
                                        + sizeCutout / 2)
                        elif param.name == "ceny":
                            valueset = (scalehst * (valueset - imghstshape[0] / 2) + result.x[2]
                                        + sizeCutout / 2)
                        elif param.name == "ang":
                            valueset += np.degrees(anglehst)
                        elif param.name == "re":
                            valueset *= scalehst
                        param.setvalue(valueset, transformed=False)
                    exposuremodel = modeltouse.data.exposures[bandhst][0]
                    exposuremodel.image = proutil.ImageEmpty((sizeCutout, sizeCutout))
                    # Save the GalSim model object
                    modeltouse.evaluate(keepmodels=True, getlikelihood=False, drawimage=False)
                    img = gs.Convolve(exposuremodel.meta['model'], imgpsfgs).drawImage(
                        nx=sizeCutout, ny=sizeCutout, scale=scalehsc) * fluxscale
                    psf = imgpsfgs

                noisetoadd = np.random.normal(scale=np.sqrt(var))
                img += noisetoadd

                if plot:
                    fig2, ax2 = plt.subplots(nrows=2, ncols=3)
                    ax2[0, 0].imshow(np.log10(cutouthsc[0]))
                    ax2[0, 0].set_title("HSC {}".format(band))
                    imghst2hsc = gs.Convolve(
                        realgalaxy.rotate(anglehst * gs.radians).shift(
                            result.x[1], result.x[2]
                        ), imgpsfgs).drawImage(
                        nx=sizeCutout, ny=sizeCutout, scale=scalehsc)
                    imghst2hsc += noisetoadd
                    imgs = (img.array, "my naive"), (imghst2hsc.array, "GS RealGal")
                    descpre = "HST {} - {}"
                    for imgidx, (imgit, desc) in enumerate(imgs):
                        ax2[1, 1 + imgidx].imshow(np.log10(imgit))
                        ax2[1, 1 + imgidx].set_title(descpre.format(bandhst, desc))
                        ax2[0, 1 + imgidx].imshow(np.log10(imgit))
                        ax2[0, 1 + imgidx].set_title((descpre + " + noise").format(
                            bandhst, desc))
            else:
                # TODO: Fix this if desired
                mask = exposure.getMaskedImage().getMask()
                mask = mask.array[ids[3]: ids[2], ids[1]: ids[0]]
                var = var.array[ids[3]: ids[2], ids[1]: ids[0]]
                img = copy.deepcopy(exposure.maskedImage.image)
                if not useNoiseReplacer:
                    footprint = measCat[row].getFootprint()
                    img *= 0
                    footprint.getSpans().copyImage(exposure.maskedImage.image, img)
                psf = imgpsfgs
                mask = np.logical_and(mask, img.array != 0)
                img = gs.Image(img[idshsc[3]: idshsc[2], idshsc[1]: idshsc[0]],
                               scale=scalehsc)
            sigmainverse = 1.0 / np.sqrt(var)

        # Having worked out what the image, psf and variance map are, fit PSFs and images
        if srcname not in results:
            results[srcname] = {}
        psfs = {}
        specs = {name: idx for idx, name in enumerate(modelspecs[1])}
        psfmodels = set([(x[specs["psfmodel"]], proutil.str2bool(x[specs["psfpixel"]])) for x in
                          modelspecs[0]])
        engineopts = {
            "gsparams": gs.GSParams(kvalue_accuracy=1e-3, integration_relerr=1e-3, integration_abserr=1e-5)
        }
        fitname = "COSMOS #{}".format(idcosmosgs)
        for psfmodelname, ispsfpixelated in psfmodels:
            psfname = psfmodelname + ("_pixelated" if ispsfpixelated else "")
            if psfmodelname == "empirical":
                psfmodel = psf
                psfexposure = proobj.PSF(band=band, engine="galsim", image=psf.image.array)
            else:
                engineopts["drawmethod"] = "no_pixel" if ispsfpixelated else None
                if redo or psfname not in results[srcname]['psfs']:
                    psfmodel = fitpsf(psf.image.array, psfmodelname, {"galsim": engineopts}, band=band,
                                      plot=plot, modelname=psfmodelname, title=fitname)["galsim"]
                    psfexposure = proobj.PSF(band=band, engine="galsim", model=psfmodel["model"].sources[0],
                                             modelpixelated=ispsfpixelated)
                else:
                    psfmodel = results[srcname]['psfs'][psfname]['model']
                    psfexposure = results[srcname]['psfs'][psfname]['object']
            psfs[psfname] = {"model": psfmodel, "object": psfexposure}
        fitsbyengine = None if redo else results[srcname]['fits']
        models = None if redo else results[srcname]['models']
        fits, models = fitgalaxy(
            img=img, psfs=psfs, sigmainverse=sigmainverse, mask=mask, band=band, modelspecs=modelspecs,
            name=fitname, modellib=modellib, plot=plot, models=models, fitsbyengine=fitsbyengine,
            redoall=redo)
        if resetimages:
            for psfmodelname, psf in psfs.items():
                if "model" in psf:
                    proutil.setexposure(psf["model"]["model"], band, "empty")
            for modelname, model in models.items():
                proutil.setexposure(model, band, "empty")
            for engine, modelfitinfo in fits.items():
                for modelname, modelfits in modelfitinfo.items():
                    if 'fits' in modelfits:
                        for fit in modelfits["fits"]:
                            fit["fitinfo"]["log"] = None
                            # Don't try to pickle pygmo problems; they cause issues for some reason I forget
                            if hasattr(fit["result"], "problem"):
                                fit["result"]["problem"] = None
        if not redo:
            results[srcname] = {'fits': fits, 'models': models, 'psfs': psfs, 'metadata': metadata}

    return results
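
# A minimal stand-alone sketch of the degradation idea used above: treat the
# deconvolved HST image as "truth", then reconvolve it with the HSC PSF. The
# function name, arguments and default pixel scales are illustrative
# placeholders, not values taken from this script.
import galsim


def degrade_hst_to_hsc(img_hst, psf_hst, psf_hsc, scale_hst=0.03, scale_hsc=0.168, size_out=64):
    """Return an HSC-like postage stamp from HST image/PSF arrays and an HSC PSF array."""
    gal = galsim.InterpolatedImage(galsim.Image(img_hst, scale=scale_hst))
    psf_h = galsim.InterpolatedImage(galsim.Image(psf_hst, scale=scale_hst))
    psf_c = galsim.InterpolatedImage(galsim.Image(psf_hsc, scale=scale_hsc))
    truth = galsim.Convolve(gal, galsim.Deconvolve(psf_h))  # deconvolved HST image
    return galsim.Convolve(truth, psf_c).drawImage(nx=size_out, ny=size_out, scale=scale_hsc)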
예제 #23
0
    def processImagesForInsertion(self, fakeCat, wcs, psf, photoCalib, band,
                                  pixelScale):
        """Process images from files into the format needed for insertion.

        Parameters
        ----------
        fakeCat : `pandas.core.frame.DataFrame`
                    The catalog of fake sources to be input
        wcs : `lsst.afw.geom.skyWcs.skyWcs.SkyWcs`
                    WCS to use to add fake sources
        psf : `lsst.meas.algorithms.coaddPsf.coaddPsf.CoaddPsf` or
              `lsst.meas.extensions.psfex.psfexPsf.PsfexPsf`
                    The PSF information to use to make the PSF images
        photoCalib : `lsst.afw.image.photoCalib.PhotoCalib`
                    Photometric calibration to be used to calibrate the fake sources
        band : `str`
                    The filter band that the observation was taken in.
        pixelScale : `float`
                    The pixel scale of the image the sources are to be added to.

        Returns
        -------
        galImages : `list`
                    A list of tuples of `lsst.afw.image.exposure.exposure.ExposureF` and
                    `lsst.geom.Point2D` of their locations.
                    For sources labelled as galaxy.
        starImages : `list`
                    A list of tuples of `lsst.afw.image.exposure.exposure.ExposureF` and
                    `lsst.geom.Point2D` of their locations.
                    For sources labelled as star.

        Notes
        -----
        The input fakes catalog needs to contain the absolute path to the image in the
        band that is being added to. It also needs the R.A. and declination of each fake
        source in radians and the sourceType of the object.
        """
        galImages = []
        starImages = []

        self.log.info("Processing %d fake images" % len(fakeCat))

        for (imFile, sourceType, mag, x,
             y) in zip(fakeCat[band + "imFilename"].array,
                       fakeCat["sourceType"].array,
                       fakeCat[self.config.magVar % band].array,
                       fakeCat["x"].array, fakeCat["y"].array):

            im = afwImage.ImageF.readFits(imFile)

            xy = geom.Point2D(x, y)

            # We put these two PSF calculations within this same try block so that we catch cases
            # where the object's position is outside of the image.
            try:
                correctedFlux = psf.computeApertureFlux(
                    self.config.calibFluxRadius, xy)
                psfKernel = psf.computeKernelImage(xy).getArray()
                psfKernel /= correctedFlux

            except InvalidParameterError:
                self.log.info("%s at %0.4f, %0.4f outside of image" %
                              (sourceType, x, y))
                continue

            psfIm = galsim.InterpolatedImage(galsim.Image(psfKernel),
                                             scale=pixelScale)
            galsimIm = galsim.InterpolatedImage(galsim.Image(im.array),
                                                scale=pixelScale)
            convIm = galsim.Convolve([galsimIm, psfIm])

            try:
                outIm = convIm.drawImage(scale=pixelScale,
                                         method="real_space").array
            except (galsim.errors.GalSimFFTSizeError, MemoryError):
                continue

            imSum = np.sum(outIm)
            divIm = outIm / imSum

            try:
                flux = photoCalib.magnitudeToInstFlux(mag, xy)
            except LogicError:
                flux = 0

            imWithFlux = flux * divIm

            if sourceType == b"galaxy":
                galImages.append((afwImage.ImageF(imWithFlux), xy))
            if sourceType == b"star":
                starImages.append((afwImage.ImageF(imWithFlux), xy))

        return galImages, starImages
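
# A compact sketch of the per-source processing above with the LSST stack
# machinery stripped out: convolve a galaxy postage stamp with a PSF kernel and
# scale the result to the requested instrumental flux. The helper below is a
# hypothetical stand-in working on plain numpy arrays, not stack API.
import numpy as np
import galsim


def make_fake_stamp(gal_array, psf_kernel, pixel_scale, flux):
    gal = galsim.InterpolatedImage(galsim.Image(gal_array), scale=pixel_scale)
    psf = galsim.InterpolatedImage(galsim.Image(psf_kernel), scale=pixel_scale)
    out = galsim.Convolve([gal, psf]).drawImage(scale=pixel_scale).array
    return flux * out / np.sum(out)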
예제 #24
0
def test_table2d_GSInterp():
    def f(x_, y_):
        return 2 * y_ * y_ + 3 * x_ * x_ + 4 * x_ * y_ - np.cos(x_)

    x = np.linspace(0.1, 3.3, 25)
    y = np.linspace(0.2, 10.4, 75)
    yy, xx = np.meshgrid(y,
                         x)  # Note the ordering of both input and output here!

    interpolants = ['lanczos3', 'lanczos3F', 'lanczos7', 'sinc', 'quintic']

    for interpolant in interpolants:
        z = f(xx, yy)
        tab2d = galsim.LookupTable2D(x, y, z, interpolant=interpolant)
        do_pickle(tab2d)
        # Make sure precomputed-hash gets covered
        hash(tab2d)

        # Use InterpolatedImage to validate
        wcs = galsim.JacobianWCS(
            (max(x) - min(x)) / (len(x) - 1),
            0,
            0,
            (max(y) - min(y)) / (len(y) - 1),
        )
        img = galsim.Image(z.T, wcs=wcs)
        ii = galsim.InterpolatedImage(
            img,
            use_true_center=True,
            offset=(galsim.PositionD(img.xmin, img.ymin) - img.true_center),
            x_interpolant=interpolant,
            normalization='sb',
            calculate_maxk=False,
            calculate_stepk=False).shift(min(x), min(y))

        # Check single value functionality.
        x1, y1 = 2.3, 3.2
        np.testing.assert_allclose(tab2d(x1, y1),
                                   ii.xValue(x1, y1),
                                   atol=1e-10,
                                   rtol=0)

        # Check vectorized output
        newx = np.linspace(0.2, 3.1, 15)
        newy = np.linspace(0.3, 10.1, 25)
        newyy, newxx = np.meshgrid(newy, newx)
        np.testing.assert_allclose(
            tab2d(newxx, newyy).ravel(),
            np.array([
                ii.xValue(x_, y_)
                for x_, y_ in zip(newxx.ravel(), newyy.ravel())
            ]).ravel(),
            atol=1e-10,
            rtol=0)
        np.testing.assert_array_almost_equal(tab2d(newxx, newyy),
                                             tab2d(newx, newy, grid=True))

        # Check that edge_mode='wrap' works
        tab2d = galsim.LookupTable2D(x, y, z, edge_mode='wrap')

        ref_dfdx, ref_dfdy = tab2d.gradient(newxx, newyy)
        test_dfdx, test_dfdy = tab2d.gradient(newxx + 3 * tab2d.xperiod, newyy)
        np.testing.assert_array_almost_equal(ref_dfdx, test_dfdx)
        np.testing.assert_array_almost_equal(ref_dfdy, test_dfdy)

        test_dfdx, test_dfdy = tab2d.gradient(newxx,
                                              newyy + 13 * tab2d.yperiod)
        np.testing.assert_array_almost_equal(ref_dfdx, test_dfdx)
        np.testing.assert_array_almost_equal(ref_dfdy, test_dfdy)

        test_dfdx, test_dfdy = tab2d.gradient(newx,
                                              newy + 13 * tab2d.yperiod,
                                              grid=True)
        np.testing.assert_array_almost_equal(ref_dfdx, test_dfdx)
        np.testing.assert_array_almost_equal(ref_dfdy, test_dfdy)

        # Test mix of inside and outside original boundary
        test_dfdx, test_dfdy = tab2d.gradient(
            np.dstack([newxx, newxx + 3 * tab2d.xperiod]),
            np.dstack([newyy, newyy]))

        np.testing.assert_array_almost_equal(ref_dfdx, test_dfdx[:, :, 0])
        np.testing.assert_array_almost_equal(ref_dfdy, test_dfdy[:, :, 0])
        np.testing.assert_array_almost_equal(ref_dfdx, test_dfdx[:, :, 1])
        np.testing.assert_array_almost_equal(ref_dfdy, test_dfdy[:, :, 1])
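
# A minimal usage sketch of LookupTable2D on a regular grid, mirroring the test
# above; the grid and the tabulated function here are arbitrary.
import numpy as np
import galsim

x = np.linspace(0.0, 1.0, 11)
y = np.linspace(0.0, 2.0, 21)
z = np.sin(x)[:, None] * np.cos(y)[None, :]  # shape (len(x), len(y))
tab = galsim.LookupTable2D(x, y, z, interpolant='linear')
print(tab(0.3, 1.1))                 # single value
print(tab(x[:5], y[:5]))             # element-wise over paired points
dfdx, dfdy = tab.gradient(0.3, 1.1)  # gradient at a point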
예제 #25
0
def test_real_galaxy_saved():
    """Test accuracy of various calculations with real RealGalaxy vs. stored SHERA result"""
    ind_real = 0  # index of real galaxy in catalog
    shera_file = 'real_comparison_images/shera_result.fits'
    shera_target_PSF_file = 'real_comparison_images/shera_target_PSF.fits'
    shera_target_pixel_scale = 0.24
    shera_target_flux = 1000.0

    # read in real RealGalaxy from file
    # rgc = galsim.RealGalaxyCatalog(catalog_file, dir=image_dir)
    # This is an alternate way to give the directory -- as part of the catalog file name.
    full_catalog_file = os.path.join(image_dir, catalog_file)
    rgc = galsim.RealGalaxyCatalog(full_catalog_file)
    rg = galsim.RealGalaxy(rgc, index=ind_real)

    # read in expected result for some shear
    shera_image = galsim.fits.read(shera_file)
    shera_target_PSF_image = galsim.fits.read(shera_target_PSF_file)
    shera_target_PSF_image.scale = shera_target_pixel_scale

    # simulate the same galaxy with GalSim
    targ_applied_shear1 = 0.06
    targ_applied_shear2 = -0.04
    tmp_gal = rg.withFlux(shera_target_flux).shear(g1=targ_applied_shear1,
                                                   g2=targ_applied_shear2)
    tmp_psf = galsim.InterpolatedImage(shera_target_PSF_image)
    tmp_gal = galsim.Convolve(tmp_gal, tmp_psf)
    sim_image = tmp_gal.drawImage(scale=shera_target_pixel_scale,
                                  method='no_pixel')

    # there are centroid issues when comparing Shera vs. SBProfile outputs, so compare 2nd moments
    # instead of images
    sbp_res = sim_image.FindAdaptiveMom()
    shera_res = shera_image.FindAdaptiveMom()

    np.testing.assert_almost_equal(
        sbp_res.observed_shape.e1,
        shera_res.observed_shape.e1,
        2,
        err_msg="Error in comparison with SHERA result: e1")
    np.testing.assert_almost_equal(
        sbp_res.observed_shape.e2,
        shera_res.observed_shape.e2,
        2,
        err_msg="Error in comparison with SHERA result: e2")
    np.testing.assert_almost_equal(
        sbp_res.moments_sigma,
        shera_res.moments_sigma,
        2,
        err_msg="Error in comparison with SHERA result: sigma")

    check_basic(rg, "RealGalaxy", approx_maxsb=True)

    # Check picklability
    do_pickle(
        rgc, lambda x: [
            x.getGalImage(ind_real),
            x.getPSFImage(ind_real),
            x.getNoiseProperties(ind_real)
        ])
    do_pickle(
        rgc,
        lambda x: drawNoise(x.getNoise(ind_real, rng=galsim.BaseDeviate(123))))
    do_pickle(
        rg, lambda x: galsim.Convolve([x, galsim.Gaussian(sigma=1.7)]).
        drawImage(nx=20, ny=20, scale=0.7))
    do_pickle(rgc)
    do_pickle(rg)
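
# The same shear -> convolve -> draw -> adaptive-moments pattern as above, with
# an analytic Sersic profile standing in for the catalog-backed RealGalaxy so
# that no data files are needed; all numbers are illustrative.
import galsim

gal = galsim.Sersic(n=1.5, half_light_radius=0.5, flux=1000.0).shear(g1=0.06, g2=-0.04)
psf = galsim.Gaussian(fwhm=0.8)
image = galsim.Convolve(gal, psf).drawImage(scale=0.24, method='no_pixel')
mom = image.FindAdaptiveMom()
print(mom.observed_shape.e1, mom.observed_shape.e2, mom.moments_sigma)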
예제 #26
0
def main(argv=sys.argv[1:]):
    # In non-script code, use getLogger(__name__) at module scope instead.
    logging.basicConfig(format='%(asctime)s %(message)s',
                        level=logging.INFO,
                        handlers=[
                            logging.StreamHandler(sys.stdout),
                            logging.FileHandler('DF_compsub.log', mode='w')
                        ])
    logger = logging.getLogger("DF_compsub.log")
    # Parse the input
    parser = argparse.ArgumentParser(
        description='This script subtracts compact objects from a Dragonfly image.'
    )
    parser.add_argument('--config',
                        '-c',
                        required=True,
                        help='configuration file')
    args = parser.parse_args()
    #############################################################
    #############################################################
    logger.info('Open configuration file {}'.format(args.config))
    # Open configuration file
    with open(args.config, 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)
        config = Config(cfg)

    # 1. subtract background of DF, if desired
    df_image = config.file.df_image
    hi_res_image_blue = config.file.hi_res_image_blue
    hi_res_image_red = config.file.hi_res_image_red

    hdu = fits.open(config.file.df_image)
    df = Celestial(hdu[0].data, header=hdu[0].header)
    if config.DF.sub_bkgval:
        logger.info('Subtract BACKVAL=%.1f from the Dragonfly image',
                    float(df.header['BACKVAL']))
        df.image -= float(df.header['BACKVAL'])
    hdu.close()

    # 2. Create magnified DF image, and register high-res images with subsampled DF ones
    f_magnify = config.DF.magnify_factor
    resize_method = config.DF.resize_method
    logger.info('Magnify Dragonfly image by a factor of %.1f:', f_magnify)
    df.resize_image(f_magnify, method=resize_method)
    df.save_to_fits('_df_{}.fits'.format(int(f_magnify)))
    logger.info('Register high resolution image "{0}" with "{1}"'.format(
        hi_res_image_blue, df_image))
    hdu = fits.open(hi_res_image_blue)
    if 'hsc' in hi_res_image_blue:
        array, _ = reproject_interp(hdu[1], df.header)
    else:
        array, _ = reproject_interp(hdu[0], df.header)
    hires_b = Celestial(array, header=df.header)
    hdu.close()
    logger.info('Register high resolution image "{0}" with "{1}"'.format(
        hi_res_image_red, df_image))
    hdu = fits.open(hi_res_image_red)
    if 'hsc' in hi_res_image_red:
        array, _ = reproject_interp(hdu[1], df.header)
    else:
        array, _ = reproject_interp(hdu[0], df.header)
    hires_r = Celestial(array, header=df.header)
    hdu.close()

    # 3. Extract sources on hires images using SEP
    sigma = config.sex.sigma
    minarea = config.sex.minarea
    b = config.sex.b
    f = config.sex.f
    deblend_cont = config.sex.deblend_cont
    deblend_nthresh = config.sex.deblend_nthresh
    sky_subtract = config.sex.sky_subtract
    flux_aper = config.sex.flux_aper
    show_fig = config.sex.show_fig

    logger.info('Build flux models on high-resolution images: Blue band')
    logger.info('    - sigma = %.1f, minarea = %d', sigma, minarea)
    logger.info('    - deblend_cont = %.5f, deblend_nthres = %.1f',
                deblend_cont, deblend_nthresh)
    _, _, b_imflux = Flux_Model(hires_b.image,
                                hires_b.header,
                                sigma=sigma,
                                minarea=minarea,
                                deblend_cont=deblend_cont,
                                deblend_nthresh=deblend_nthresh,
                                save=True)

    logger.info('Build flux models on high-resolution images: Red band')
    logger.info('    - sigma = %.1f, minarea = %d', sigma, minarea)
    logger.info('    - deblend_cont = %.5f, deblend_nthres = %.1f',
                deblend_cont, deblend_nthresh)
    _, _, r_imflux = Flux_Model(hires_r.image,
                                hires_b.header,
                                sigma=sigma,
                                minarea=minarea,
                                deblend_cont=deblend_cont,
                                deblend_nthresh=deblend_nthresh,
                                save=True)

    # 4. Make color correction, remove artifacts as well
    logger.info('Make color correction to blue band, remove artifacts as well')
    col_ratio = (b_imflux / r_imflux)
    col_ratio[np.isnan(col_ratio)
              | np.isinf(col_ratio)] = 0  # remove artifacts
    save_to_fits(col_ratio, '_colratio.fits', header=hires_b.header)

    color_term = config.DF.color_term
    logger.info('### color_term = {}'.format(color_term))
    median_col = np.nanmedian(col_ratio[col_ratio != 0])
    logger.info('### median_color (blue/red) = {:.5f}'.format(median_col))

    fluxratio = col_ratio / median_col
    fluxratio[(fluxratio < 0.1) |
              (fluxratio > 10)] = 1  # remove extreme values
    col_correct = np.power(fluxratio,
                           color_term)  # how to improve this correction?
    save_to_fits(col_correct, '_colcorrect.fits', header=hires_b.header)

    if config.DF.band == 'r':
        hires_3 = Celestial(hires_r.image * col_correct, header=hires_r.header)
    elif config.DF.band == 'g':
        hires_3 = Celestial(hires_b.image * col_correct, header=hires_b.header)
    else:
        raise ValueError('config.DF.band must be "g" or "r"!')

    _ = hires_3.save_to_fits('_hires_{}.fits'.format(int(f_magnify)))

    # 5. Extract sources on hires corrected image
    logger.info(
        'Extracting objects from color-corrected high resolution image with:')
    logger.info('    - sigma = %.1f, minarea = %d', sigma, minarea)
    logger.info('    - deblend_cont = %.5f, deblend_nthres = %.1f',
                deblend_cont, deblend_nthresh)
    objects, segmap = extract_obj(hires_3.image,
                                  b=b,
                                  f=f,
                                  sigma=sigma,
                                  minarea=minarea,
                                  show_fig=False,
                                  flux_aper=flux_aper,
                                  deblend_nthresh=deblend_nthresh,
                                  deblend_cont=deblend_cont)
    objects.write('_hires_obj_cat.fits', format='fits', overwrite=True)

    # 6. Remove bright stars (and certain galaxies)
    logger.info(
        'Remove bright stars from this segmentation map, using SEP results. ')
    logger.info('Bright star limit = {}'.format(config.star.bright_lim))
    seg = copy.deepcopy(segmap)
    mag = config.file.hi_res_zp - 2.5 * np.log10(abs(objects['flux']))
    flag = np.where(mag < config.star.bright_lim)
    for obj in objects[flag]:
        seg = seg_remove_obj(seg, obj['x'], obj['y'])

    objects[flag].write('_bright_stars_3.fits', format='fits', overwrite=True)

    # You can mask out certain galaxies here.
    logger.info('Remove objects from catalog {}'.format(
        config.file.certain_gal_cat))
    gal_cat = Table.read(config.file.certain_gal_cat, format='ascii')
    seg = mask_out_certain_galaxy(seg, hires_3.header, gal_cat=gal_cat)
    save_to_fits(seg, '_seg_3.fits', header=hires_3.header)

    # 7. Remove artifacts from `hires_3` by color ratio and then smooth it
    # multiply by mask created from ratio of images - this removes all objects that are
    # only in g or r but not in both (artifacts, transients, etc)
    mask = seg * (col_ratio != 0)
    mask[mask != 0] = 1

    # Then blow mask up
    from astropy.convolution import Gaussian2DKernel, Box2DKernel, convolve
    smooth_radius = config.fluxmodel.gaussian_radius
    mask_conv = copy.deepcopy(mask)
    mask_conv[mask_conv > 0] = 1
    mask_conv = convolve(mask_conv.astype(float),
                         Gaussian2DKernel(smooth_radius))
    seg_mask = (mask_conv >= config.fluxmodel.gaussian_threshold)

    hires_fluxmod = Celestial(seg_mask * hires_3.image, header=hires_3.header)
    _ = hires_fluxmod.save_to_fits('_hires_fluxmod.fits')
    logger.info('Flux model from high resolution image has been built!')

    # 8. Build kernel based on some stars
    img_hires = Celestial(hires_3.image.byteswap().newbyteorder(),
                          header=hires_3.header,
                          dataset='cfht_3')
    img_lowres = Celestial(df.image.byteswap().newbyteorder(),
                           header=df.header,
                           dataset='df_3')
    cval = config.kernel.cval

    if isinstance(cval, str) and 'nan' in cval.lower():
        cval = np.nan
    else:
        cval = float(cval)

    logger.info('Build convolving kernel to degrade high resolution image.')
    kernel_med, good_cat = Autokernel(
        img_hires,
        img_lowres,
        int(f_magnify * config.kernel.kernel_size),
        int(f_magnify *
            (config.kernel.kernel_size - config.kernel.kernel_edge)),
        frac_maxflux=config.kernel.frac_maxflux,
        show_figure=config.kernel.show_fig,
        cval=cval,
        nkernels=config.kernel.nkernel)
    # You can also circularize the kernel
    if config.kernel.circularize:
        logger.info('Circularize the kernel.')
        from compsub.utils import circularize
        kernel_med = circularize(kernel_med, n=14)
    save_to_fits(kernel_med, '_kernel_median.fits')

    # 9. Convolve this kernel to high-res image
    # Two options: if you have `galsim` installed, use galsim, it's much faster.
    # Otherwise, use `fconvolve` from iraf.
    # Galsim solution:
    import galsim
    psf = galsim.InterpolatedImage(galsim.Image(kernel_med),
                                   scale=config.DF.pixel_scale / f_magnify)
    gal = galsim.InterpolatedImage(galsim.Image(hires_fluxmod.image),
                                   scale=config.DF.pixel_scale / f_magnify)
    logger.info('Convolving image, this will be a bit slow @_@ ###')
    final = galsim.Convolve([gal, psf])
    image = final.drawImage(scale=config.DF.pixel_scale / f_magnify,
                            nx=hires_3.shape[1],
                            ny=hires_3.shape[0])
    save_to_fits(image.array,
                 '_df_model_{}.fits'.format(int(f_magnify)),
                 header=hires_3.header)
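
    # An FFT-based alternative to the GalSim convolution above, in place of the
    # iraf `fconvolve` route mentioned in the comment; this sketch uses scipy
    # instead and is kept commented out so the pipeline behaviour is unchanged.
    # from scipy.signal import fftconvolve
    # model = fftconvolve(hires_fluxmod.image, kernel_med / np.sum(kernel_med), mode='same')
    # save_to_fits(model, '_df_model_{}.fits'.format(int(f_magnify)), header=hires_3.header)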

    # Optionally remove low-surface-brightness objects from the model:
    if config.fluxmodel.unmask_lowsb:
        E = hires_fluxmod.image / image.array
        E[np.isinf(E) | np.isnan(E)] = 0.0

        kernel_flux = np.sum(kernel_med)
        print("# Kernel flux = {}".format(kernel_flux))
        E *= kernel_flux
        print('# Maximum of E = {}'.format(np.nanmax(E)))

        im_seg = copy.deepcopy(seg)
        im_highres = copy.deepcopy(hires_fluxmod.image)
        im_ratio = E
        im_highres_new = np.zeros_like(hires_fluxmod.image)
        objects = Table.read('_hires_obj_cat.fits', format='fits')

        # calculate SB limit in counts per pixel
        sb_lim_cpp = 10**(
            (config.fluxmodel.sb_lim - config.file.hi_res_zp) /
            (-2.5)) * (config.file.hi_res_pixelsize / f_magnify)**2
        print('# SB limit in counts / pixel = {}'.format(sb_lim_cpp))

        im_seg_ind = np.where(im_seg > 0)
        im_seg_slice = im_seg[im_seg_ind]
        im_highres_slice = im_highres[im_seg_ind]
        im_highres_new_slice = im_highres_new[im_seg_ind]
        im_ratio_slice = im_ratio[im_seg_ind]

        # loop over objects
        for obj in objects[:1000]:
            ind = np.where(np.isin(im_seg_slice, obj['index']))
            flux_hires = im_highres_slice[ind]
            flux_ratio = im_ratio_slice[ind]
            if ((np.mean(flux_hires) < sb_lim_cpp) and
                (np.mean(flux_ratio) < config.fluxmodel.unmask_ratio)) and (
                    np.mean(flux_ratio) != 0):
                im_highres_new_slice[ind] = 1
                print('# removed object {}'.format(obj['index']))
        im_highres_new[im_seg_ind] = im_highres_new_slice
        save_to_fits(im_highres_new, '_hires_fluxmode_clean_mask.fits')
        # Blow up the mask
        smooth_radius = config.fluxmodel.gaussian_radius
        mask_conv = copy.deepcopy(im_highres_new)
        mask_conv[mask_conv > 0] = 1
        mask_conv = convolve(mask_conv.astype(float),
                             Gaussian2DKernel(smooth_radius))
        seg_mask = (mask_conv >= config.fluxmodel.gaussian_threshold)
        im_highres[seg_mask] = 0

        psf = galsim.InterpolatedImage(galsim.Image(kernel_med),
                                       scale=config.DF.pixel_scale / f_magnify)
        gal = galsim.InterpolatedImage(galsim.Image(im_highres),
                                       scale=config.DF.pixel_scale / f_magnify)
        logger.info('Convolving image, this will be a bit slow @_@ ###')
        final = galsim.Convolve([gal, psf])
        image = final.drawImage(scale=config.DF.pixel_scale / f_magnify,
                                nx=hires_3.shape[1],
                                ny=hires_3.shape[0])
        save_to_fits(image.array,
                     '_df_model_clean_{}.fits'.format(f_magnify),
                     header=hires_3.header)

    df_model = Celestial(image.array, header=hires_3.header)
    res = Celestial(df.image - df_model.image, header=df.header)
    res.save_to_fits('_res_{}.fits'.format(f_magnify))

    df_model.resize_image(1 / f_magnify, method=resize_method)
    df_model.save_to_fits('_df_model.fits')

    res.resize_image(1 / f_magnify, method=resize_method)
    res.save_to_fits('res.fits')
    logger.info(
        'Compact objects have been subtracted from the Dragonfly image! Saved as "res.fits".'
    )

    # 10. Subtract bright star halos! This only works with stars left out of the flux model!
    star_cat = Table.read('_bright_stars_3.fits', format='fits')
    star_cat['x'] /= f_magnify
    star_cat['y'] /= f_magnify
    ra, dec = res.wcs.wcs_pix2world(star_cat['x'], star_cat['y'], 0)
    star_cat.add_columns(
        [Column(data=ra, name='ra'),
         Column(data=dec, name='dec')])

    sigma = config.starhalo.sigma
    minarea = config.starhalo.minarea
    deblend_cont = config.starhalo.deblend_cont
    deblend_nthresh = config.starhalo.deblend_nthresh
    sky_subtract = config.starhalo.sky_subtract
    flux_aper = config.starhalo.flux_aper
    show_fig = config.starhalo.show_fig
    logger.info(
        'Extracting objects from compact-object-corrected Dragonfly image with:'
    )
    logger.info('    - sigma = %.1f, minarea = %d', sigma, minarea)
    logger.info('    - deblend_cont = %.5f, deblend_nthres = %.1f',
                deblend_cont, deblend_nthresh)
    objects, segmap = extract_obj(res.image.byteswap().newbyteorder(),
                                  b=64,
                                  f=3,
                                  sigma=sigma,
                                  minarea=minarea,
                                  deblend_nthresh=deblend_nthresh,
                                  deblend_cont=deblend_cont,
                                  sky_subtract=sky_subtract,
                                  show_fig=show_fig,
                                  flux_aper=flux_aper)
    ra, dec = res.wcs.wcs_pix2world(objects['x'], objects['y'], 0)
    objects.add_columns(
        [Column(data=ra, name='ra'),
         Column(data=dec, name='dec')])
    # Match two catalogs
    logger.info(
        'Match detected objects with {} catalog to ensure they are stars.'.
        format(config.starhalo.method))
    temp = match_coordinates_sky(
        SkyCoord(ra=star_cat['ra'], dec=star_cat['dec'], unit='deg'),
        SkyCoord(ra=objects['ra'], dec=objects['dec'], unit='deg'))[0]
    bright_star_cat = objects[np.unique(temp)]
    mag = float(
        df.header['MEDIANZP']) - 2.5 * np.log10(bright_star_cat['flux'])
    bright_star_cat.add_column(Column(data=mag, name='mag'))
    bright_star_cat.write('_bright_star_cat.fits',
                          format='fits',
                          overwrite=True)

    # Extract stars from image
    psf_cat = bright_star_cat[bright_star_cat['fwhm_custom'] <
                              config.starhalo.fwhm_lim]  # FWHM selection
    psf_cat = psf_cat[psf_cat['mag'] < config.starhalo.bright_lim]
    psf_cat.sort('flux')
    psf_cat.reverse()
    psf_cat = psf_cat[:int(config.starhalo.n_stack)]
    logger.info('You get {} stars to be stacked!'.format(len(psf_cat)))

    # Construct and stack `Star` objects.
    halosize = config.starhalo.halosize
    padsize = config.starhalo.padsize
    size = 2 * halosize + 1
    stack_set = np.zeros((len(psf_cat), size, size))
    bad_indices = []
    logger.info('Stacking stars!')
    for i, obj in enumerate(psf_cat):
        try:
            sstar = Star(res.image,
                         header=res.header,
                         starobj=obj,
                         halosize=halosize,
                         padsize=padsize)
            if config.starhalo.mask_contam:
                sstar.mask_out_contam(show_fig=False, verbose=False)
            sstar.centralize(method='iraf')
            #sstar.sub_bkg(verbose=False)
            cval = config.starhalo.cval
            if isinstance(cval, str) and 'nan' in cval.lower():
                cval = np.nan
            else:
                cval = float(cval)

            if config.starhalo.norm == 'flux_ann':
                stack_set[i, :, :] = sstar.get_masked_image(
                    cval=cval) / sstar.fluxann
            else:
                stack_set[i, :, :] = sstar.get_masked_image(
                    cval=cval) / sstar.flux

        except Exception as e:
            stack_set[i, :, :] = np.ones((size, size)) * 1e9
            bad_indices.append(i)
            logger.info(e)

    stack_set = np.delete(stack_set, bad_indices, axis=0)
    median_psf = np.nanmedian(stack_set, axis=0)
    median_psf = psf_bkgsub(median_psf, int(config.starhalo.edgesize))
    from astropy.convolution import convolve, Box2DKernel
    median_psf = convolve(median_psf, Box2DKernel(3))
    save_to_fits(median_psf, 'median_psf.fits')
    logger.info(
        'Stars are stacked into a PSF and saved as "median_psf.fits"!')
    save_to_fits(stack_set, '_stack_bright_stars.fits')

    ## Build starhalo models and then subtract
    logger.info('Draw star halo models onto the image, and subtract them!')
    zp = df.header['MEDIANZP']

    # Pad the image with an extra edge; star positions shift accordingly
    ny, nx = res.image.shape
    im_padded = np.zeros((ny + 2 * halosize, nx + 2 * halosize))
    # Making the left edge empty
    im_padded[halosize:ny + halosize, halosize:nx + halosize] = res.image
    im_halos_padded = np.zeros_like(im_padded)

    for i, obj in enumerate(bright_star_cat):
        spsf = Celestial(median_psf, header=df_model.header)
        x = obj['x']
        y = obj['y']
        x_int = int(x)
        y_int = int(y)
        dx = -1.0 * (x - x_int)
        dy = -1.0 * (y - y_int)
        spsf.shift_image(-dx, -dy, method='iraf')
        x_int, y_int = x_int + halosize, y_int + halosize
        if config.starhalo.norm == 'flux_ann':
            im_halos_padded[y_int - halosize:y_int + halosize + 1,
                            x_int - halosize:x_int + halosize +
                            1] += spsf.image * obj['flux_ann']
        else:
            im_halos_padded[y_int - halosize:y_int + halosize + 1,
                            x_int - halosize:x_int + halosize +
                            1] += spsf.image * obj['flux']

    im_halos = im_halos_padded[halosize:ny + halosize, halosize:nx + halosize]
    img_sub = res.image - im_halos
    df_model.image += im_halos

    save_to_fits(im_halos, '_df_halos.fits', header=df_model.header)
    save_to_fits(img_sub, '_df_halosub.fits', header=df_model.header)
    save_to_fits(df_model.image, 'df_model_halos.fits', header=df_model.header)

    logger.info(
        'Bright star halos are subtracted! Saved as "df_halosub.fits".')

    # Mask out dirty things!
    if config.clean.clean_img:
        logger.info('Clean the image! Replace relics with noise.')
        model_mask = convolve(df_model.image,
                              Gaussian2DKernel(config.clean.gaussian_radius))
        model_mask[model_mask < config.clean.gaussian_threshold] = 0
        model_mask[model_mask != 0] = 1
        totmask = bright_star_mask(model_mask.astype(bool),
                                   bright_star_cat,
                                   bright_lim=config.clean.bright_lim,
                                   r=config.clean.r)

        # Total mask with noise
        totmask = convolve(totmask.astype(float), Box2DKernel(2))
        totmask[totmask > 0] = 1
        if config.clean.replace_with_noise:
            from compsub.utils import img_replace_with_noise
            final_image = img_replace_with_noise(
                res.image.byteswap().newbyteorder(), totmask)
        else:
            final_image = res.image * (~totmask.astype(bool))
        save_to_fits(final_image, 'final_image.fits', header=res.header)
        logger.info('The final result is saved as "final_image.fits"!')
    # Delete temp files
    if config.clean.clean_file:
        logger.info('Delete all temporary files!')
        os.system('rm -rf _*.fits')

    logger.info('Mission finished!')
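
# A compact numpy-only sketch of the star-halo insertion step in the script
# above: paste a normalized PSF stamp at (rounded) star positions onto a padded
# canvas, scaled by each star's flux, then crop back to the image size.
# Sub-pixel shifts and the Celestial/Star helpers are omitted; the function is
# illustrative, not part of the pipeline.
import numpy as np


def make_halo_model(image_shape, psf_stamp, xs, ys, fluxes):
    halosize = psf_stamp.shape[0] // 2  # stamp is (2 * halosize + 1) on a side
    ny, nx = image_shape
    halos = np.zeros((ny + 2 * halosize, nx + 2 * halosize))
    for x, y, flux in zip(xs, ys, fluxes):
        xi, yi = int(x) + halosize, int(y) + halosize
        halos[yi - halosize:yi + halosize + 1,
              xi - halosize:xi + halosize + 1] += psf_stamp * flux
    return halos[halosize:ny + halosize, halosize:nx + halosize]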
예제 #27
0
def test_LSST_huygensPSF(plot=False):
    thxs = [0.0, 0.0, 0.0, 1.176]
    thys = [0.0, 1.225, 1.75, 1.176]
    fns = [
        "LSST_hpsf_0.0_0.0.txt", "LSST_hpsf_0.0_1.225.txt",
        "LSST_hpsf_0.0_1.75.txt", "LSST_hpsf_1.176_1.176.txt"
    ]
    if __name__ != "__main__":
        thxs = thxs[2:3]
        thys = thys[2:3]
        fns = fns[2:3]
    for thx, thy, fn in zip(thxs, thys, fns):
        fn = os.path.join(directory, "testdata", fn)
        with open(fn, encoding='utf-16-le') as f:
            Zpsf = np.loadtxt(f, skiprows=21)
        Zpsf = Zpsf[::-1]  # Need to invert, probably just a Zemax convention...
        Zpsf /= np.max(Zpsf)

        telescope = batoid.Optic.fromYaml("LSST_g_500.yaml")

        thx = np.deg2rad(thx)
        thy = np.deg2rad(thy)
        wavelength = 500e-9

        bpsf = batoid.analysis.huygensPSF(telescope,
                                          thx,
                                          thy,
                                          wavelength,
                                          nx=1024,
                                          reference='chief',
                                          projection='zemax',
                                          dx=0.289e-6,
                                          nxOut=64)
        bpsf.array /= np.max(bpsf.array)

        # Use GalSim InterpolatedImage to align and subtract
        ii = galsim.InterpolatedImage(galsim.Image(bpsf.array, scale=1.0),
                                      normalization='sb')

        # Now setup an optimizer to fit for x/y shift
        def resid(params):
            p = params.valuesdict()
            model = ii.shift(p['dx'], p['dy']) * np.exp(p['dlogflux'])
            img = model.drawImage(method='sb', scale=1.0, nx=64, ny=64)
            r = (img.array - Zpsf).ravel()
            return r

        params = lmfit.Parameters()
        params.add('dx', value=0.0)
        params.add('dy', value=0.0)
        params.add('dlogflux', value=0.0)
        print("Aligning")
        opt = lmfit.minimize(resid, params)
        print("Done")
        print(opt.params)

        p = opt.params.valuesdict()
        model = ii.shift(p['dx'], p['dy']) * np.exp(p['dlogflux'])
        optImg = model.drawImage(method='sb', scale=1.0, nx=64, ny=64)

        if plot:
            import matplotlib.pyplot as plt
            fig, axes = plt.subplots(ncols=3, figsize=(10, 3))
            i0 = axes[0].imshow(optImg.array)
            i1 = axes[1].imshow(Zpsf)
            i2 = axes[2].imshow(optImg.array - Zpsf)
            plt.colorbar(i0, ax=axes[0])
            plt.colorbar(i1, ax=axes[1])
            plt.colorbar(i2, ax=axes[2])
            plt.tight_layout()
            plt.show()

            if thy not in [0.0, 1.176]:
                fig, ax = plt.subplots(figsize=(6, 4))
                ax.plot(optImg.array[:, 32], c='g')
                ax.plot(Zpsf[:, 32], c='b')
                ax.plot((optImg.array - Zpsf)[:, 32], c='r')
                plt.show()
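
# The alignment model from the fit above, in isolation: an InterpolatedImage
# shifted by (dx, dy) and rescaled in flux, drawn in surface-brightness mode.
# The Gaussian test array and the shift/flux values are placeholders.
import numpy as np
import galsim

yy, xx = np.mgrid[-32:32, -32:32]
arr = np.exp(-0.5 * (xx**2 + yy**2) / 3.0**2)
ii = galsim.InterpolatedImage(galsim.Image(arr, scale=1.0), normalization='sb')
model = ii.shift(0.3, -0.2) * np.exp(0.05)  # dx, dy, dlogflux
img = model.drawImage(method='sb', scale=1.0, nx=64, ny=64)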
예제 #28
0
    def measure(self, measRecord, exposure):
        center = measRecord.getCentroid()

        # currently box must be square for python based measurements
        orig_box = measRecord.getFootprint().getBBox()
        w, h = orig_box.getWidth(), orig_box.getHeight()
        box_size = max(w, h)
        if box_size % 2 == 1:
            box_size += 1
        box = afwGeom.Box2I(
            afwGeom.Point2I(center.getX() - box_size / 2,
                            center.getY() - box_size / 2),
            afwGeom.Extent2I(box_size, box_size))

        if not exposure.getBBox().contains(box):
            box_size = min(w, h)
            if box_size % 2 == 1:
                box_size += 1
            box = afwGeom.Box2I(
                afwGeom.Point2I(center.getX() - box_size / 2,
                                center.getY() - box_size / 2),
                afwGeom.Extent2I(box_size, box_size))

        if not exposure.getBBox().contains(box):
            measRecord.set(self.flag, 1)
            return

        image = exposure.image[box].array

        # PSF image must also be the same size
        psf_image_base = galsim.ImageF(
            exposure.getPsf().computeKernelImage(center).array)
        psf_image_interp = galsim.InterpolatedImage(psf_image_base, scale=1)
        psf_image = galsim.ImageF(box_size, box_size)
        psf_image_interp.drawImage(psf_image, method='no_pixel')

        ra = measRecord.get('coord_ra').asArcseconds()
        dec = measRecord.get('coord_dec').asArcseconds()
        local_lin_wcs = exposure.getWcs().linearizePixelToSky(
            center, geom.arcseconds)

        jacobian = local_lin_wcs.getLinear().getMatrix()
        sky_pos = exposure.getWcs().pixelToSky(center)
        uvref = (sky_pos.getRa().asArcseconds(),
                 sky_pos.getDec().asArcseconds())

        xy_pos = (center.getX() - box.getMinX(), center.getY() - box.getMinY())
        bfd_wcs = bfd.WCS(jacobian, xyref=xy_pos, uvref=uvref)

        noise = measRecord.get("base_Variance_value")

        kdata = bfd.simpleImage(image,
                                uvref,
                                psf_image.array,
                                wcs=bfd_wcs,
                                pixel_noise=noise)
        moment_calc = bfd.MomentCalculator(kdata,
                                           self.weight,
                                           id=measRecord.getId())

        xyshift, error, msg = moment_calc.recenter()
        if error:
            measRecord.set(self.flag, 1)
            measRecord.set(self.centroid_flag, 1)
            return
        else:
            #cov_even, cov_odd = moment_calc.get_covariance()
            covgal = moment_calc.get_covariance()
            moment = moment_calc.get_moment(0, 0)
            measRecord.set(self.moment, np.array(moment.even,
                                                 dtype=np.float32))

            cov_even_save = []
            cov_odd_save = []
            for ii in range(covgal[0].shape[0]):
                cov_even_save.extend(covgal[0][ii][ii:])
            for ii in range(covgal[1].shape[0]):
                cov_odd_save.extend(covgal[1][ii][ii:])
            measRecord.set(self.cov_even,
                           np.array(cov_even_save, dtype=np.float32))
            measRecord.set(self.cov_odd,
                           np.array(cov_odd_save, dtype=np.float32))
            measRecord.set(self.xy, np.array(xyshift, dtype=np.float32))
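
# The PSF resizing step above, in isolation: redraw a PSF kernel into a box of
# a chosen size without applying an extra pixel response. The helper and its
# inputs are illustrative, not part of the measurement plugin.
import numpy as np
import galsim


def resize_psf(psf_kernel, box_size):
    base = galsim.ImageF(np.asarray(psf_kernel, dtype=np.float32))
    interp = galsim.InterpolatedImage(base, scale=1)
    out = galsim.ImageF(box_size, box_size)
    interp.drawImage(out, method='no_pixel')
    return out.array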
예제 #29
0
    cosimage_padded = galsim.ImageD(
        int(size_factor * largeim_size * 6) + 256,  # Note 6 here since 0.18 = 6 * 0.03
        int(size_factor * largeim_size * 6) + 256)  # large image to beat down noise + padding
    print("Padded underlying COSMOS noise image bounds = " + str(cosimage_padded.bounds))

    cosimage.scale = dx_cosmos  # Use COSMOS pixel scale
    cosimage_padded.scale = dx_cosmos  # Use COSMOS pixel scale
    cosimage.addNoise(cn)
    cosimage_padded.addNoise(cn)

    imobj = galsim.InterpolatedImage(cosimage,
                                     calculate_stepk=False,
                                     calculate_maxk=False,
                                     normalization='sb',
                                     dx=dx_cosmos,
                                     x_interpolant=interp)
    cimobj = galsim.Convolve(imobj, psf_shera)

    imobj_padded = galsim.InterpolatedImage(cosimage_padded,
                                            calculate_stepk=False,
                                            calculate_maxk=False,
                                            normalization='sb',
                                            dx=dx_cosmos,
                                            x_interpolant=interp)
    cimobj_padded = galsim.Convolve(imobj_padded, psf_shera)

    convimage1 = galsim.ImageD(int(largeim_size * size_factor),
                               int(largeim_size * size_factor))
    convimage2 = galsim.ImageD(int(largeim_size * size_factor),
예제 #30
0
def main_main(x, gal, psf):
    print(x)
    a = x // (
        inp.NR *
        len(np.arange(inp.mock_reff_min, inp.mock_reff_max,
                      inp.mock_reff_step)) *
        len(np.arange(inp.mock_n_min, inp.mock_n_max, inp.mock_n_step))
    )  # integer quotient of NR * the number of created mocks (length of the hlr grid * length of the n grid)
    mock_image = gal[0]
    mock_reff = float(gal[1])
    mock_n = float(gal[2])

    #Set shear parameters
    lens_g1 = np.arange(inp.lens_g1_min, inp.lens_g1_max, inp.lens_g1_step)[a]
    lens_g2 = 0.0

    #Shape cancellation noise
    mock_g1_rd, mock_g2_rd = mock_g()  #generate the values
    mock_g1 = [-1 * mock_g1_rd,
               mock_g1_rd]  # list of values for shape cancellation noise
    mock_g2 = [-1 * mock_g2_rd,
               mock_g2_rd]  #list of values for shape cancellation noise
    for ellip in range(
            2):  #for the shape cancellation noise (gal and gal 90deg rotated)
        #ID to identify the fits files corresponding to each galaxy
        ID = x + (ellip * (
            inp.NR * len(
                np.arange(inp.mock_reff_min, inp.mock_reff_max,
                          inp.mock_reff_step)) *
            len(np.arange(inp.mock_n_min, inp.mock_n_max, inp.mock_n_step)) *
            len(np.arange(inp.lens_g1_min, inp.lens_g1_max, inp.lens_g1_step)))
                  )
        #Positional shift
        dx, dy = (np.random.uniform(-1 * config.dx_shift, config.dx_shift),
                  np.random.uniform(-1 * config.dy_shift, config.dy_shift))
        mock_im = galsim.ImageF(config.truth_size, config.truth_size)
        mock_image = mock_image.shear(g1=mock_g1[ellip], g2=mock_g2[ellip])
        mock_image = mock_image.shift(dx, dy)
        mock_image.drawImage(image=mock_im,
                             scale=config.truth_pxscale,
                             method='no_pixel')

        #In order to fix the SNR or the magnitude
        if inp.idx == 0:
            mag_image = inp.mock_mag
        elif inp.idx == 1:
            y = symbols('y')
            Eqflux = solveset(
                Eq(
                    y * config.Eu_gain /
                    (sqrt(y * config.Eu_gain + math.pi * 9 *
                          ((mock_reff / config.truth_pxscale)**2) *
                          (config.Eu_var * config.Eu_gain + config.Eu_rn**2))),
                    inp.Eu_snr), y)
            flux_Eu = float(Eqflux.args[0])
            mag_image = (config.Eu_zp - 2.5 * np.log10(
                (flux_Eu) * config.Eu_gain / config.Eu_expt))
        else:
            print('CHOOSE THE RIGHT INDEX!')

        flux_Eu = (config.Eu_expt / config.Eu_gain) * 10**(
            -0.4 * (mag_image - config.Eu_zp))  #ADU
        flux_HST = (config.HST_expt / config.HST_gain) * 10**(
            -0.4 * (mag_image - config.HST_zp))  #ADU

        #Interpolate the mock image
        mock_image = galsim.InterpolatedImage(mock_im,
                                              scale=config.truth_pxscale)

        # Convolve the mock image with the Euclid PSF and fit a model to the convolved image. Cosmic shear is added before the convolution and CCD noise after it; the KSB method is then used to measure the shear.
        mock_lens = mock_image.lens(
            lens_g1, lens_g2, config.lens_mu)  # apply lensing; mu is either 1 or 1.5
        Eu_conv_image = conv_with_PSF(mock_lens, psf['Euclid_psf'])
        Eu_conv_image = Eu_conv_image * (flux_Eu / (Eu_conv_image.flux))
        Eu_conv_im, Eu_conv_noise = draw_image_conv_noise(Eu_conv_image,
                                                          aim='Eu_conv',
                                                          size_image=mock_im)
        #fit_mod_Eu, fitter_Eu, chi2_Eu, red_chi2_Eu, SNR_Eu, init_flux_Eu = make_a_fit(Eu_conv_im.array, mock_reff, aim='Eu_conv')
        init_flux_Eu = np.sum(Eu_conv_im.array)
        KSB_Eu = galsim.hsm.EstimateShear(Eu_conv_im,
                                          psf['im_Euclid_psf'],
                                          sky_var=config.Eu_var,
                                          shear_est='KSB',
                                          strict=False)
        summary_result(
            aim='Eu_conv',
            mock_n=mock_n,
            reff=mock_reff,
            mock_g1=mock_g1[ellip],
            mock_g2=mock_g2[ellip],
            dx=dx,
            dy=dy,
            lens_g1=lens_g1,
            lens_g2=lens_g2,
            init_flux=init_flux_Eu,
            #fit_mod=fit_mod_Eu,
            #fitter=fitter_Eu,
            KSB=KSB_Eu,
            #chi2=chi2_Eu,
            #red_chi2=red_chi2_Eu,
            #SNR=SNR_Eu,
            ID=ID)

        # Convolve the mock image with the HST PSF and fit a model to the convolved image. CCD noise is added after the convolution; the KSB method is then used to measure the shear.
        HST_conv_image = conv_with_PSF(mock_image, psf['Hubble_psf'])
        HST_conv_image = HST_conv_image * (flux_HST / (HST_conv_image.flux))
        HST_conv_im, HST_conv_noise = draw_image_conv_noise(HST_conv_image,
                                                            aim='HST_conv',
                                                            size_image=mock_im)
        #fit_mod_HST, fitter_HST, chi2_HST, red_chi2_HST, SNR_HST, init_flux_HST = make_a_fit(HST_conv_im.array, mock_reff, aim='HST_conv')
        #KSB_HST = galsim.hsm.EstimateShear(HST_conv_im, psf['im_Hubble_psf'], sky_var=config.HST_var, shear_est='KSB', strict='False')
        #summary_result(aim='HST_conv',
        #mock_n=mock_n,
        #reff=mock_reff,
        #mock_g1=mock_g1[ellip],
        #mock_g2=mock_g2[ellip],
        #dx=dx,
        #dy=dy,
        #lens_g1=lens_g1,
        #lens_g2=lens_g2,
        #init_flux=init_flux_HST,
        #fit_mod=fit_mod_HST,
        #fitter=fitter_HST,
        #KSB=KSB_HST,
        #chi2=chi2_HST,
        #red_chi2=red_chi2_HST,
        #SNR=SNR_HST,
        #ID=ID)

        # Deconvolve the convolved image with the HST PSF
        HST_image = galsim.InterpolatedImage(HST_conv_im,
                                             scale=HST_conv_im.scale)
        HST_deconv_image = deconv_with_PSF(HST_image, psf['Hubble_psf'])

        #Convolution of deconvoluted (HST IMAGE * HST PSF) image with Euclid PSF
        HST_deconv_lens = HST_deconv_image.lens(
            lens_g1, lens_g2, config.lens_mu)  # apply lensing; mu is either 1.5 or 1
        Euclidiz_image = conv_with_PSF(HST_deconv_lens, psf['Euclid_psf'])

        # Symmetrize the Euclidized image after convolution with the Euclid PSF and rescale its flux; some extra Gaussian noise is also added. Fitting and the KSB shear measurement are then applied.
        Euclidiz_sym_im = symmetrize(Euclidiz_image,
                                     HST_conv_noise,
                                     size_image=HST_conv_im)
        Euclidiz = galsim.InterpolatedImage(Euclidiz_sym_im,
                                            scale=Euclidiz_sym_im.scale)
        Euclidiz = Euclidiz * (flux_Eu / flux_HST)
        Euclidiz_im = draw_image_extranoise(Euclidiz,
                                            size_image=Euclidiz_sym_im)
        #fit_mod_Euz, fitter_Euz, chi2_Euz, red_chi2_Euz, SNR_Euz, init_flux_Euz = make_a_fit(Euclidiz_im.array, mock_reff, aim='Euclidiz')
        init_flux_Euz = np.sum(Euclidiz_im.array)
        KSB_Euz = galsim.hsm.EstimateShear(Euclidiz_im,
                                           psf['im_Euclid_psf'],
                                           sky_var=config.Eu_var,
                                           shear_est='KSB',
                                           strict=False)
        summary_result(
            aim='Euclidiz',
            mock_n=mock_n,
            reff=mock_reff,
            mock_g1=mock_g1[ellip],
            mock_g2=mock_g2[ellip],
            dx=dx,
            dy=dy,
            lens_g1=lens_g1,
            lens_g2=lens_g2,
            init_flux=init_flux_Euz,
            #fit_mod=fit_mod_Euz,
            #fitter=fitter_Euz,
            KSB=KSB_Euz,
            #chi2=chi2_Euz,
            #red_chi2=red_chi2_Euz,
            #SNR=SNR_Euz,
            ID=ID)
    return
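
# The core of the "Euclidization" above as a stand-alone sketch: deconvolve an
# HST-like observation by its PSF, apply a lensing shear, and reconvolve with a
# Euclid-like PSF before measuring KSB shear. All profiles, pixel scales and
# shear values are illustrative.
import galsim

hst_psf = galsim.Gaussian(fwhm=0.1)
euclid_psf = galsim.Gaussian(fwhm=0.17)
observed = galsim.Convolve(galsim.Sersic(n=1.0, half_light_radius=0.3, flux=1e4), hst_psf)
truth = galsim.Convolve(observed, galsim.Deconvolve(hst_psf))
euclidized = galsim.Convolve(truth.lens(0.02, 0.0, 1.0), euclid_psf)
image = euclidized.drawImage(scale=0.1)
psf_image = euclid_psf.drawImage(scale=0.1)
result = galsim.hsm.EstimateShear(image, psf_image, shear_est='KSB', strict=False)
print(result.corrected_g1, result.corrected_g2)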