Example #1
    def make_background_noise_image(self,
                                    noise_type='poisson',
                                    noisy=True,
                                    rstate=None):
        if rstate is not None:
            np.random.seed(rstate)
        if noisy:
            image_zodi = make_noise_image(self.image_shape_pix,
                                          type=noise_type,
                                          mean=self.zodi_ct_per_pix.value *
                                          self.texp.value * self.ncoadds,
                                          random_state=rstate)
            image_black = make_noise_image(self.image_shape_pix,
                                           type=noise_type,
                                           mean=self.black_ct_per_pix *
                                           self.texp.value * self.ncoadds,
                                           random_state=rstate)
        else:
            _ = make_noise_image(self.image_shape_pix,
                                 type=noise_type,
                                 mean=self.zodi_ct_per_pix.value *
                                 self.texp.value * self.ncoadds,
                                 random_state=rstate)
            _ = make_noise_image(self.image_shape_pix,
                                 type=noise_type,
                                 mean=self.black_ct_per_pix * self.texp.value *
                                 self.ncoadds,
                                 random_state=rstate)
            image_zodi = (
                np.zeros(self.image_shape_pix) +
                self.zodi_ct_per_pix.value * self.texp.value * self.ncoadds)
            image_black = (
                np.zeros(self.image_shape_pix) +
                self.black_ct_per_pix * self.texp.value * self.ncoadds)
        return image_zodi + image_black
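Note: this example uses the older photutils keywords type and random_state. A minimal standalone sketch of the same kind of two-component Poisson background with the current make_noise_image signature (assuming photutils >= 1.0, where the keywords are distribution and seed; the count rates and exposure time below are illustrative values, not taken from the class above):

import numpy as np
from photutils.datasets import make_noise_image

shape = (256, 256)   # image shape in pixels (illustrative)
zodi_rate = 0.8      # zodiacal counts per pixel per second (illustrative)
black_rate = 0.05    # dark/"black" counts per pixel per second (illustrative)
texp = 100.0         # exposure time in seconds (illustrative)

# Poisson realisations of the two background components
image_zodi = make_noise_image(shape, distribution='poisson',
                              mean=zodi_rate * texp, seed=1)
image_black = make_noise_image(shape, distribution='poisson',
                               mean=black_rate * texp, seed=2)

background = image_zodi + image_black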
Example #2
def drawpic(filename, data, ra, dec, radius, number):
    sigma_psf = 2.5
    sources = Table()
    size = np.size(data[:,0])
    x = ra
    y = dec
    x1 = x - radius
    x2 = x + radius
    y1 = y - radius
    y2 = y + radius
    sources['x_mean'] = (data[:,0] - x1) / radius / 2. * 256.
    sources['y_mean'] = (data[:,1] - y1) / radius / 2. * 256.
    sources['x_stddev'] = sigma_psf*np.ones(size)
    sources['y_stddev'] = sources['x_stddev']
    sources['theta'] = np.zeros(size)
    sources['flux'] = data[:,2] / np.min(data[:,2]) * 5000
    tshape = (256, 256)
    image = (make_gaussian_sources_image(tshape, sources) +
             make_noise_image(tshape, distribution='poisson', mean=10.,
                              random_state=12) +
             make_noise_image(tshape, distribution='gaussian', mean=0.,
                              stddev=10., random_state=12))

    plt.imshow(image, cmap='gray', extent=[x1, x2, y1, y2],
               interpolation='nearest', origin='lower')
    plt.xlabel('RA(°)')
    plt.ylabel('DEC(°)')
    for i in range(size):
        plt.text(data[i, 0], data[i, 1], str(i), fontsize=12, color='w')
    plt.savefig('output/fig/' + str(number) + '/' + str(number) + '.jpg')
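A minimal usage sketch for drawpic, assuming numpy, matplotlib.pyplot, astropy's Table and the photutils dataset helpers are already imported in the module, that data is an (N, 3) array of RA, Dec and flux, and that the output directory exists (the coordinates and fluxes below are made up for illustration):

import os
import numpy as np

# three fake sources near RA = 150 deg, Dec = 2 deg (illustrative values)
data = np.array([[150.01, 2.01, 1200.0],
                 [149.99, 2.02, 850.0],
                 [150.02, 1.98, 430.0]])

number = 1
os.makedirs('output/fig/' + str(number), exist_ok=True)  # drawpic saves into this folder
drawpic('example.jpg', data, ra=150.0, dec=2.0, radius=0.05, number=number)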
Example #3
    def get_image(self, exp_time: float, open_shutter: bool) -> Image:
        """Simulate an image.

        Args:
            exp_time: Exposure time in seconds.
            open_shutter: Whether the shutter is opened.

        Returns:
            numpy array with image.
        """

        # get now
        now = Time.now()

        # get shape for image
        shape = (int(self.window[3]), int(self.window[2]))

        # create image with Gaussian noise for BIAS
        image = make_noise_image(shape,
                                 distribution='gaussian',
                                 mean=1e3,
                                 stddev=100.)

        # add DARK
        if exp_time > 0:
            image += make_noise_image(shape,
                                      distribution='gaussian',
                                      mean=exp_time / 1e4,
                                      stddev=exp_time / 1e5)

            # add stars and stuff
            if open_shutter:
                # get solar altitude
                sun_alt = self.world.sun_alt

                # get mean flatfield counts
                flat_counts = 30000 / np.exp(-1.28 *
                                             (4.209 + sun_alt)) * exp_time

                # create flat
                image += make_noise_image(shape,
                                          distribution='gaussian',
                                          mean=flat_counts,
                                          stddev=flat_counts / 10.)

                # get catalog with sources
                sources = self._get_sources_table(exp_time)

                # create image
                image += make_gaussian_prf_sources_image(shape, sources)

        # saturate
        image[image > 65535] = 65535

        # create header
        hdr = self._create_header(exp_time, open_shutter, now, image)

        # return it
        return Image(image.astype(np.uint16), header=hdr)
Example #4
    def _simulate_image(self, exp_time: float, open_shutter: bool) -> NDArray[Any]:
        """Simulate an image.

        Args:
            exp_time: Exposure time in seconds.
            open_shutter: Whether the shutter is opened.

        Returns:
            numpy array with image.
        """

        # get shape for image
        shape = (int(self.window[3]), int(self.window[2]))

        # create image with Gaussian noise for BIAS
        data = make_noise_image(shape, distribution="gaussian", mean=10, stddev=1.0)

        # non-zero exposure time?
        if exp_time > 0:
            # add DARK
            data += make_noise_image(shape, distribution="gaussian", mean=exp_time / 1e4, stddev=exp_time / 1e5)

            # add stars and stuff
            if open_shutter:
                # get solar altitude
                sun_alt = self.world.sun_alt

                # get mean flatfield counts
                flat_counts = 30000 / np.exp(-1.28 * (4.209 + sun_alt)) * exp_time

                # create flat
                data += make_noise_image(shape, distribution="gaussian", mean=flat_counts, stddev=flat_counts / 10.0)

                # get catalog with sources
                sources = self._get_sources_table(exp_time)

                # filter out all sources outside FoV
                sources = sources[
                    (sources["x_mean"] > 0)
                    & (sources["x_mean"] < shape[1])
                    & (sources["y_mean"] > 0)
                    & (sources["y_mean"] < shape[0])
                ]

                # create image
                data += make_gaussian_sources_image(shape, sources)

        # saturate
        data[data > 65535] = 65535

        # finished
        return cast(NDArray[Any], data).astype(np.uint16)
Example #5
def test_find_center_noise_bad_guess():
    image = make_gaussian_sources_image(SHAPE, STARS)
    noise = make_noise_image(SHAPE, distribution='gaussian', mean=0, stddev=5)
    cen2 = spf.find_center(image + noise, [40, 50], max_iters=1)
    # Bad initial guess, noise, should take more than one try...
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(cen2, [30, 40])
Example #6
def test_find_center_noise_good_guess():
    image = make_gaussian_sources_image(SHAPE, STARS)
    noise = make_noise_image(SHAPE, distribution='gaussian', mean=0, stddev=5)
    # Trying again with several iterations should work
    cen3 = spf.find_center(image + noise, [31, 41], max_iters=10)
    # Tolerance chosen based on some trial and error
    np.testing.assert_allclose(cen3, [30, 40], atol=0.02)
Example #7
def test_find_center_no_star():
    # No star anywhere near the original guess
    image = make_gaussian_sources_image(SHAPE, STARS)
    # Offset the mean from zero to avoid nan center
    noise = make_noise_image(SHAPE, distribution='gaussian',
                             mean=1000, stddev=5, random_state=RANDOM_SEED)
    cen = spf.find_center(image + noise, [50, 200], max_iters=10)
    assert (np.abs(cen[0] - 50) > 1) and (np.abs(cen[1] - 200) > 1)
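The three tests above rely on module-level SHAPE, STARS and RANDOM_SEED fixtures that are not shown. A sketch of what such fixtures might look like, assuming a single Gaussian star near (30, 40) and that spf.find_center returns (x, y) coordinates (all values here are assumptions for illustration):

import numpy as np
from astropy.table import Table

SHAPE = (100, 100)
RANDOM_SEED = 1234

# one bright, well-sampled star for the centering tests
STARS = Table()
STARS['amplitude'] = [1000.]
STARS['x_mean'] = [30.]
STARS['y_mean'] = [40.]
STARS['x_stddev'] = [4.]
STARS['y_stddev'] = [4.]
STARS['theta'] = [0.]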
Example #8
    def __init__(self, noise_dev=1.0):
        self.image_shape = [400, 500]
        data_file = get_pkg_data_filename('data/test_sources.csv')
        self._sources = Table.read(data_file)
        self.mean_noise = self.sources['amplitude'].max() / 100
        self.noise_dev = noise_dev
        self._stars = make_gaussian_sources_image(self.image_shape,
                                                  self.sources)
        self._noise = make_noise_image(self._stars.shape,
                                       mean=self.mean_noise,
                                       stddev=noise_dev)
Example #9
    def _make_noise_image(self, noisy=True, coadd=False):

        if coadd:
            texp = self.texp.value * self.ncoadds
        else:
            texp = self.texp.value

        if noisy:
            image_zodi = make_noise_image(self.image_shape_pix,
                                          type='poisson',
                                          mean=self.zodi_ct_per_pix.value *
                                          texp)
            image_black = make_noise_image(self.image_shape_pix,
                                           type='poisson',
                                           mean=self.black_ct_per_pix * texp)
        else:
            image_zodi = (np.zeros(self.image_shape_pix) +
                          self.zodi_ct_per_pix.value * texp)
            image_black = (np.zeros(self.image_shape_pix) +
                           self.black_ct_per_pix * texp)
        return image_zodi + image_black
Example #10
File: test.py Project: SKIRT/PTS
    def make_noise(self):

        """
        This function ...
        :return:
        """

        # Inform the user
        log.info("Making noise map ...")

        # Make noise
        data = make_noise_image(self.config.shape, type='gaussian', mean=0., stddev=self.config.noise_stddev, random_state=12345)
        self.noise = Frame(data)

        # Mask
        self.noise[self.rotation_mask] = 0.0

        # Plot
        #if self.config.plot: plotting.plot_difference(self.frame, self.real_sky, title="original")
        if self.config.plot: plotting.plot_box(self.noise, title="noise")
Example #11
    def make_noise(self, masks=None):

        """
        This function ...
        :param masks:
        :return:
        """

        # Inform the user
        log.info("Adding noise ...")

        # Loop over the frames
        for fltr in self.frames:

            # Get the frame
            frame = self.frames[fltr]

            # Make noise and add it
            data = make_noise_image(frame.shape, type='gaussian', mean=0., stddev=self.config.noise_stddev)
            frame += data

            # Mask
            if masks is not None and fltr in masks: frame[masks[fltr]] = 0.0
Example #12
    def make_noise(self):
        """
        This function ...
        :return:
        """

        # Inform the user
        log.info("Making noise map ...")

        # Make noise
        data = make_noise_image(self.config.shape,
                                type='gaussian',
                                mean=0.,
                                stddev=self.config.noise_stddev,
                                random_state=12345)
        self.noise = Frame(data)

        # Mask
        self.noise[self.rotation_mask] = 0.0

        # Plot
        #if self.config.plot: plotting.plot_difference(self.frame, self.real_sky, title="original")
        if self.config.plot: plotting.plot_box(self.noise, title="noise")
Example #13
"""

# exactly sample from this page:
# Building an effective Point Spread Function (ePSF)
# https://photutils.readthedocs.io/en/stable/epsf.html#build-epsf

from photutils import datasets

hdu = datasets.load_simulated_hst_star_image()
data = hdu.data

from photutils.datasets import make_noise_image

data += make_noise_image(data.shape,
                         distribution='gaussian',
                         mean=10.,
                         stddev=5.,
                         random_state=12345)

import matplotlib.pyplot as plt
from astropy.visualization import simple_norm
from photutils import datasets
from photutils.datasets import make_noise_image

hdu = datasets.load_simulated_hst_star_image()
data = hdu.data
data += make_noise_image(data.shape,
                         distribution='gaussian',
                         mean=10.,
                         stddev=5.,
                         random_state=12345)
Example #14
def fake_image(n_sources=100,
               shape=[512, 512],
               amplitude_r=[0, 20000],
               std_dev=[0, 7],
               random_state=666,
               noise={
                   'type': None,
                   'mean': None,
                   'stddev': None
               }):
    """Creates fake image with Gaussian sources.

    Creates a fake image with gaussian sources, whose parameters can be
    adjusted based on the following arguments.

    A background with spatial fluctuations at various scales is
    created separately. This is achieved by first creating the desired 2D
    power spectrum, which is a radial power law in Fourier space. According
    to the literature, the index of the ISM power-law distribution is -2.9.
    Taking the inverse FFT of this p_law array gives a background with the
    desired levels of spatial fluctuations in real space.

        Args:
            n_sources(int):       Number of sources
            shape(2-tuple):       Dimensions of the image
            amplitude_r(list):    Range of amplitudes of sources
            std_dev(list):        Range of standard deviations of sources
            random_state(int):    Seed for random number generator
            noise(dictionary):    Parameters for noise
                (i)   type:       Gaussian or Poisson
                (ii)  mean:       Mean value of noise
                (iii) stddev:     Standard deviation of gaussian noise
    """

    param_ranges = [('amplitude', [amplitude_r[0], amplitude_r[1]]),
                    ('x_mean', [0, shape[1]]), ('y_mean', [0, shape[0]]),
                    ('x_stddev', [std_dev[0], std_dev[1]]),
                    ('y_stddev', [std_dev[0], std_dev[1]]),
                    ('theta', [0, np.pi])]
    param_ranges = OrderedDict(param_ranges)
    sources = make_random_models_table(n_sources,
                                       param_ranges,
                                       random_state=random_state)

    if noise['type'] is None:
        sources = make_gaussian_sources_image(shape, sources)
    else:
        sources = (make_gaussian_sources_image(shape, sources) +
                   make_noise_image(shape,
                                    type=noise['type'],
                                    mean=noise['mean'],
                                    stddev=noise['stddev']))

    # CREATING BACKGROUND (ISM)
    # The objective is to create a background with different levels of
    # spatial fluctuations built in. This is achieved by first creating the
    # desired 2D power spectrum, which is a radial power law in Fourier space.
    # According to the literature, the index of the ISM power-law distribution
    # is -2.9. Taking the inverse FFT of this p_law array gives a background
    # with the desired levels of spatial fluctuations in real space.

    p_law = np.zeros(shape, dtype=float)
    y, x = np.indices(p_law.shape)
    center = np.array([(y.max() - y.min()) / 2.0, (x.max() - x.min()) / 2.0])
    r = np.hypot(x - center[1], y - center[0])

    r_ind = r.astype(int)
    r_max = r.max().astype(int)

    a = np.arange(0.1, r_max + 1.1, 1)  # These values control size of clouds
    b = 10**11 * a**(-2.9)  # This controls magnitude of background

    for i in range(0, r_max + 1):
        p_law[r_ind > i] = b[i]

    magnitude = np.sqrt(p_law)
    phase = 2 * np.pi * np.random.randn(shape[0], shape[1])
    FFT = magnitude * np.exp(1j * phase)
    background = np.abs((fftpack.ifft2(fftpack.fftshift(FFT))))

    sim_sky = sources + background

    return sources, background, sim_sky
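A usage sketch for fake_image, assuming the module-level imports it relies on (numpy, scipy.fftpack, collections.OrderedDict and the photutils dataset helpers) are in place; the noise settings below are illustrative:

sources_img, background, sim_sky = fake_image(
    n_sources=50,
    shape=[256, 256],
    amplitude_r=[1000, 20000],
    std_dev=[2, 4],
    random_state=42,
    noise={'type': 'gaussian', 'mean': 0., 'stddev': 5.})

# sim_sky combines the Gaussian sources and the power-law ISM background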
Example #15
def make_gaia(coords,
              image=os.path.join('.', 'synthetic_gaia.fits'),
              epoch="J2020.5"):
    """
    Creates a synthetic image from GAIA DR2 photometry along the specified coordinates.

    Returns the synthetic image in fits format.

    Parameters:
    coords: Coordinates of centre of image.
    image: File name of image. Default is synthetic_gaia.fits
    epoch: Epoch to translate image. Default is J2020.5
    """
    #####
    ##### Define GMOS parameters
    ##### GMOS FoV is 330 arcsec
    ##### GMOS slit length is 330 arcsec
    ##### GMOS fwhm chosen in 0.3 arcsec
    ##### Image created is 390 arcsec, to accommodate additional overlays
    ##### GMOS read noise is 3.96  (e-/ADU)
    ##### GMOS gain is 1.829  (e- rms)
    #####

    gmosfov = 390 * u.arcsec
    fwhm = 0.3 * u.arcsec
    shape = (1300, 1300)
    zeroarr = np.zeros(shape)
    width = 390 * u.arcsec
    height = 390 * u.arcsec

    ##### Read coordinates and epoch
    coords = coords
    epoch = Time(epoch, format='jyear_str')

    ##### Query GAIA DR2 using astroquery TAP+
    ##### Synchronous query, limit of 2000 rows
    print('Searching GAIA TAP+ for stars over the GMOS FoV...')
    gaiasearch = Gaia.cone_search_async(coordinate=coords,
                                        radius=gmosfov,
                                        verbose=False)
    searchresults = gaiasearch.get_results()
    nstars = len(searchresults)
    print(nstars, 'stars recovered from GAIA TAP+ query over the GMOS FoV')
    if nstars == 2000:
        warnings.warn(
            'Asynchronous TAP+ service query limit reached, image is incomplete.'
        )

    fitshdr = mkfitshdr(coords, zeroarr, image, epoch)
    hdr = fitshdr[0]
    hdu = fitshdr[1]
    hdul = fitshdr[2]
    w = fitshdr[3]

    c = SkyCoord(ra=searchresults['ra'],
                 dec=searchresults['dec'],
                 pm_ra_cosdec=searchresults['pmra'],
                 pm_dec=searchresults['pmdec'],
                 obstime=Time(searchresults['ref_epoch'],
                              format='decimalyear'))

    cnew = c
    ###### in the current implementation, transformation of the image to the epoch is not applied

    ######    cnew = c.apply_space_motion(new_obstime=Time(epoch))
    star_coords = w.wcs_world2pix([cnew.ra.deg], [cnew.dec.deg], 0)

    xvalue = star_coords[0].ravel()
    yvalue = star_coords[1].ravel()
    flux = (searchresults['phot_g_mean_flux'] * u.mag).value

    ##### Error values
    cerr = SkyCoord(ra=searchresults['ra_error'],
                    dec=searchresults['dec_error'])

    xerrvalue = cerr.ra.value
    yerrvalue = cerr.dec.value

    xerrmask = np.nan_to_num(xerrvalue, nan=0.0, posinf=0.0, neginf=0.0)
    yerrmask = np.nan_to_num(yerrvalue, nan=0.0, posinf=0.0, neginf=0.0)
    xerrmask[xerrmask < 1.0] = 1.0
    yerrmask[yerrmask < 1.0] = 1.0
    ##### Error values less than 1.0 lead to incorrect values in photutils. Error values of 1.0 lead to no significant differences in images compared to no error.

    ##### Create Table of results

    t = Table([flux, xvalue, yvalue, xerrmask, yerrmask],
              names=('flux', 'x_mean', 'y_mean', 'x_stddev', 'y_stddev'),
              dtype=('i8', 'i8', 'i8', 'i8', 'i8'))
    print('Table of stars is being generated as an image')
    zeroarr = make_gaussian_sources_image(shape, t)

    ##### Read noise

    read_noise = np.random.normal(scale=(3.92 / 1.829), size=shape)
    noise_image = make_noise_image(shape, distribution='poisson', mean=0.5)
    #####

    synth_image = zeroarr + noise_image
    #####
    ###### read_noise is disabled
    hdu = fits.PrimaryHDU(synth_image, header=hdr)
    hdul = fits.HDUList([hdu])
    hdul.writeto(image, overwrite=True)
    print('Synthetic GAIA DR2 image is saved as', image)
    return image
Example #16
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from photutils.datasets import make_gaussian_sources_image
from photutils.datasets import make_noise_image
from astropy.io import fits

nstar = 400
sigma_psf = 2.0
shape = (500, 500)
noise_mean = 5.0
np.random.seed(1000)
# make a table of Gaussian sources
table = Table()
table['flux'] = 1000 - 1000 * np.random.power(2.35, size=nstar)
table['x_mean'] = np.random.randint(0, high=shape[1] - 1, size=nstar)
table['y_mean'] = np.random.randint(0, high=shape[0] - 1, size=nstar)
table['x_stddev'] = sigma_psf * np.ones(nstar)
table['y_stddev'] = table['x_stddev']
table['theta'] = np.radians(np.zeros(nstar))

# make an image of the sources with Poisson noise
image1 = make_gaussian_sources_image(shape, table)
image2 = image1 + make_noise_image(
    shape, distribution='poisson', mean=noise_mean)

fig = plt.figure(figsize=(10, 10))
plt.imshow(image2, origin='lower', interpolation='nearest', cmap='gray')
plt.savefig('image.jpg')

hdu = fits.PrimaryHDU(image2)
hdu.writeto('image.fits', overwrite=True)
Example #17
    def prepare_image(self):
        """Creates the image that will be fetched."""

        # Default values
        exposure_params: Dict[str, Any] = dict(
            seed=None,
            shape=[
                self.state["lr_x"] - self.state["ul_x"],
                self.state["lr_y"] - self.state["ul_y"],
            ],
            sources=False,
            noise=False,
            apply_poison_noise=False,
        )

        if isinstance(self._exposure_params, list):
            if len(self._exposure_params) == 0:
                # If the simulation configuration doesn't include an "exposures"
                # section, just add some default noise.
                exposure_params["noise"] = {
                    "distribution": "gaussian",
                    "mean": 1000,
                    "stddev": 20.0,
                }
            else:
                this_exposure = self._exposure_params[self._exposure_idx]

                # If str, file is the image to return
                if isinstance(this_exposure, str):
                    data = astropy.io.fits.getdata(this_exposure)
                    return data

                exposure_params.update(this_exposure)

        image = numpy.zeros(exposure_params["shape"][::-1], dtype="float32")
        if "seed" in exposure_params and exposure_params["seed"] is not None:
            numpy.random.seed(exposure_params["seed"])

        if exposure_params["noise"]:
            image += make_noise_image(image.shape, **exposure_params["noise"])

        if exposure_params["sources"]:
            if "source_table" in exposure_params["sources"]:
                source_table = astropy.table.Table.read(
                    exposure_params["sources"]["source_table"])
            else:
                n_sources = exposure_params["sources"]["n_sources"]
                if isinstance(n_sources, list):
                    n_sources = numpy.random.randint(*n_sources)
                param_ranges = exposure_params["sources"]["param_ranges"]
                source_table = get_source_table(param_ranges, n_sources)

            source_image = make_gaussian_sources_image(
                image.shape,
                source_table=source_table,
            )
            source_image *= self.state["exposure_time"] / 1000.0
            image += source_image

            if exposure_params["apply_poison_noise"]:
                image = apply_poisson_noise(image,
                                            seed=exposure_params["seed"])

        assert isinstance(image, numpy.ndarray)

        image[image > 2**16] = 2**16 - 1

        self.image = image.astype("uint16")
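For reference, a hypothetical entry of self._exposure_params that exercises both the noise and the sources branches of prepare_image could look like the sketch below. Only the keys actually read in the method above come from the code; the contents of param_ranges depend on get_source_table, which is not shown, so their structure is an assumption:

exposure_entry = {
    "seed": 42,
    "noise": {"distribution": "gaussian", "mean": 1000, "stddev": 20.0},
    "sources": {
        "n_sources": [5, 15],              # a list is drawn with numpy.random.randint
        "param_ranges": {                  # assumed structure, passed to get_source_table
            "amplitude": [500, 5000],
            "x_mean": [0, 512],
            "y_mean": [0, 512],
            "x_stddev": [1.0, 2.0],
            "y_stddev": [1.0, 2.0],
            "theta": [0, 3.14159],
        },
    },
    "apply_poison_noise": True,            # key name as spelled in the code above
}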
Example #18
import numpy as np
from astropy.table import Table
from photutils.datasets import (make_random_gaussians_table, make_noise_image,
                                make_gaussian_sources_image)

sigma_psf = 2.0
sources = Table()
sources['flux'] = [700, 800, 700, 800]
sources['x_mean'] = [12, 17, 12, 17]
sources['y_mean'] = [15, 15, 20, 20]
sources['x_stddev'] = sigma_psf * np.ones(4)
sources['y_stddev'] = sources['x_stddev']
sources['theta'] = [0, 0, 0, 0]
sources['id'] = [1, 2, 3, 4]
tshape = (32, 32)
image = (
    make_gaussian_sources_image(tshape, sources) +
    make_noise_image(tshape, distribution='poisson', mean=6., random_state=1) +
    make_noise_image(
        tshape, distribution='gaussian', mean=0., stddev=2., random_state=1))

from matplotlib import rcParams

rcParams['font.size'] = 13
import matplotlib.pyplot as plt

plt.imshow(image,
           cmap='viridis',
           aspect=1,
           interpolation='nearest',
           origin='lower')
plt.title('Simulated data')
plt.colorbar(orientation='horizontal', fraction=0.046, pad=0.04)
Example #19
    def data_assemble(self, x, y, r_cut, add_mask=5, pick_choice=False):
        """
       Function to pick up the pieces of source, lens light, pollution(mask)
       :param x: x coordinate in pixel unit
       :param y: y coordinate in pixel unit
       :param r_cut: radius size of the data.
       :param add_mask: number of pixels adding around picked pieces
       :param pick_choice: True to only select source region to fit, False to use whole observation to fit
       :return: kwargs_data
       """
        #segmentation components
        obj_masks, center_mask_info, segments_deblend_list = self._seg_image(
            x, y, r_cut=r_cut)
        data_masks_center, _, xcenter, ycenter, c_index = center_mask_info
        image = self.cut_image(x, y, r_cut)
        self.raw_image = image
        src_mask = np.zeros_like(image)
        lens_mask = np.zeros_like(image)
        plu_mask = np.zeros_like(image)
        lenslight_mask_index = []
        if self.interaction:
            self.plot_segmentation(image, segments_deblend_list, xcenter,
                                   ycenter, c_index)
            #source light
            if pick_choice:
                source_mask_index = [
                    int(sidex) for sidex in input(
                        'Selection of data via segmentation index separated by space, e.g., 0 1 :'
                    ).split()
                ]
                for i in source_mask_index:
                    src_mask = src_mask + obj_masks[i]
            #lens light
            lenslightyn = input('Hint: is there lens light? (y/n): ')
            if lenslightyn == 'y':
                lenslight_mask_index = [
                    int(lidex) for lidex in input(
                        'Selection of lens-plane light via segmentation index separated by space, e.g., 0 1 :'
                    ).split()
                ]
                for i in lenslight_mask_index:
                    lens_mask = (lens_mask + obj_masks[i])
            elif lenslightyn == 'n':
                lenslight_mask_index = []
            else:
                raise ValueError("Please input 'y' or 'n' !")
            # contamination
            pluyn = input('Hint: is there contamination? (y/n): ')
            if pluyn == 'y':
                plution_mask_index = [
                    int(pidex) for pidex in input(
                        'Selection of contamination via segmentation index separated by space, e.g., 0 1 :'
                    ).split()
                ]
                for i in plution_mask_index:
                    plu_mask = (plu_mask + obj_masks[i])
            elif pluyn == 'n':
                plu_mask = np.zeros_like(image)
            else:
                raise ValueError("Please input 'y' or 'n' !")
        else:
            src_mask = data_masks_center

        #adding pixels around the selected masks
        selem = np.ones((add_mask, add_mask))
        src_mask = ndimage.binary_dilation(src_mask.astype(bool), selem)
        plu_mask_out = ndimage.binary_dilation(plu_mask.astype(bool), selem)
        plu_mask_out = (plu_mask_out - 1) * -1

        #select source region to fit, or to use whole observation to fit
        ##select source region to fit
        snr = self.snr
        source_mask = image * src_mask
        _, _, std = sigma_clipped_stats(image, sigma=snr, mask=source_mask)
        tshape = image.shape
        img_bkg = make_noise_image(tshape,
                                   distribution='gaussian',
                                   mean=0.,
                                   stddev=std,
                                   seed=12)
        no_source_mask = (src_mask * -1 + 1) * img_bkg
        picked_data = source_mask + no_source_mask
        ##use whole observation to fit while mask out the contamination
        maskedimg = image * plu_mask_out

        ##organize the output 'kwargs_data'
        kwargs_data = {}
        if pick_choice:
            kwargs_data[
                'image_data'] = picked_data  #select source region to fit
        else:
            kwargs_data[
                'image_data'] = maskedimg  #use whole observation to fit while mask out the contamination

        if self.background_rms is None:
            kwargs_data['background_rms'] = std
            self.background_rms = std
        else:
            kwargs_data['background_rms'] = np.mean(self.background_rms)
        kwargs_data['exposure_time'] = self.exp_time
        kwargs_data['transform_pix2angle'] = np.array([[1, 0], [0, 1]
                                                       ]) * self.deltaPix
        ra_at_xy_0 = (y - r_cut) * self.deltaPix  # (ra,dec) is (y_img,x_img)
        dec_at_xy_0 = (x - r_cut) * self.deltaPix
        kwargs_data['ra_at_xy_0'] = ra_at_xy_0
        kwargs_data['dec_at_xy_0'] = dec_at_xy_0

        #coordinate of the lens light
        xlenlight, ylenlight = [], []
        if lenslight_mask_index != []:
            for i in lenslight_mask_index:
                xlenlight.append(ra_at_xy_0 + int(xcenter[i]) * self.deltaPix)
                ylenlight.append(dec_at_xy_0 + int(ycenter[i]) * self.deltaPix)

        #for output
        self.data = kwargs_data['image_data']
        self.kwargs_data = kwargs_data
        self.data_mask = src_mask
        self.lens_mask = lens_mask
        self.plu_mask = plu_mask_out
        self.obj_masks = obj_masks
        imageData = ImageData(**kwargs_data)
        self.imageData = imageData
        kwargs_seg = [segments_deblend_list, xcenter, ycenter, c_index]

        return kwargs_data, kwargs_seg, [xlenlight, ylenlight]
Example #20
num_sources = 250
min_flux = 500
max_flux = 1000
min_xmean = 16
max_xmean = 240
sigma_psf = 2.0
# generate artificial image
ground_truth = make_random_gaussians(num_sources, [min_flux, max_flux],
                                     [min_xmean, max_xmean],
                                     [min_xmean, max_xmean],
                                     [sigma_psf, sigma_psf],
                                     [sigma_psf, sigma_psf],
                                     random_state=123)
shape = (256, 256)
image = (make_gaussian_sources(shape, ground_truth) +
         make_noise_image(shape, type='poisson', mean=1., random_state=123))

ground_truth.write('input.html')
# estimate background as the median after sigma clipping the sources
_, bkg, std = sigma_clipped_stats(image, sigma=3.0, iters=5)

# find potential sources with daofind
sources = daofind(image - bkg, threshold=4.0*std,
                  fwhm=sigma_psf*gaussian_sigma_to_fwhm)
intab = Table(names=['id', 'x_0', 'y_0', 'flux_0'],
              data=[sources['id'], sources['xcentroid'],
                    sources['ycentroid'], sources['flux']])
intab.write('intab.html')

groups = daogroup(intab, crit_separation=2.0*sigma_psf*gaussian_sigma_to_fwhm)
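This example uses the long-removed make_random_gaussians, make_gaussian_sources, daofind and daogroup interfaces. A minimal sketch of the image-simulation part with the current photutils names (assuming photutils >= 1.0; the detection and grouping steps are omitted here):

from collections import OrderedDict

from photutils.datasets import (make_random_gaussians_table,
                                make_gaussian_sources_image,
                                make_noise_image)

param_ranges = OrderedDict([('flux', [500, 1000]),
                            ('x_mean', [16, 240]),
                            ('y_mean', [16, 240]),
                            ('x_stddev', [2.0, 2.0]),
                            ('y_stddev', [2.0, 2.0]),
                            ('theta', [0, 0])])
ground_truth = make_random_gaussians_table(250, param_ranges, seed=123)

shape = (256, 256)
image = (make_gaussian_sources_image(shape, ground_truth) +
         make_noise_image(shape, distribution='poisson', mean=1., seed=123))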
Example #21
from astropy.visualization import simple_norm
from photutils import datasets

hdu = datasets.load_simulated_hst_star_image()
data = hdu.data
from photutils.datasets import make_noise_image
data +=  make_noise_image(data.shape, type='gaussian', mean=10.,
                          stddev=5., random_state=12345)

from photutils import find_peaks
peaks_tbl = find_peaks(data, threshold=500.)

from astropy.table import Table
stars_tbl = Table()
stars_tbl['x'] = peaks_tbl['x_peak']
stars_tbl['y'] = peaks_tbl['y_peak']

from astropy.stats import sigma_clipped_stats
mean_val, median_val, std_val = sigma_clipped_stats(data, sigma=2.,
                                                    iters=None)
data -= median_val

from astropy.nddata import NDData
nddata = NDData(data=data)

from photutils.psf import extract_stars
stars = extract_stars(nddata, stars_tbl, size=25)

import matplotlib.pyplot as plt
nrows = 5
ncols = 5
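The snippet stops just before the ePSF is actually built; a minimal continuation following the same photutils ePSF tutorial, using the stars cutouts extracted above:

from photutils.psf import EPSFBuilder

# build the effective PSF from the extracted star cutouts
epsf_builder = EPSFBuilder(oversampling=4, maxiters=3, progress_bar=False)
epsf, fitted_stars = epsf_builder(stars)

norm = simple_norm(epsf.data, 'log', percent=99.)
plt.imshow(epsf.data, norm=norm, origin='lower', cmap='viridis')
plt.colorbar()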
Example #22
	def generate_bkg(self):
		""" Generate bkg data """
		shape = (self.ny, self.nx)
		bkg_data = make_noise_image(shape, type='gaussian', mean=self.bkg_level, stddev=self.bkg_rms)
		return bkg_data
Example #23
impact_parameter = [0.2, 0.8, 1.2, 1.6, 2.0]
FWHM_qso = 1.57
plate_scale = 1
for i in range(0, 5):

    x_qso = 50
    y_qso = 50
    x_host = x_qso + impact_parameter[i]
    y_host = 51
    x = np.arange(0, 101, plate_scale)
    y = np.arange(0, 101, plate_scale)
    X, Y = np.meshgrid(x, y)
    QSO = gaussian_2d(X, Y, x_qso, y_qso, FWHM_qso / 2.355, FWHM_qso / 2.355)
    host = gaussian_2d(X, Y, x_host, y_host, FWHM_qso / 2.355,
                       FWHM_qso / 2.355)
    plt.imshow(QSO + make_noise_image((101, 101), type='poisson', mean=0.001))
    print(x_qso)
    print(y_qso)
    print(X)
    print(Y)

shape = (101, 101)
image1 = make_noise_image(shape, type='gaussian', mean=0., stddev=5.)
image2 = make_noise_image(shape, type='poisson', mean=5.)

# plot the images
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(image1, origin='lower', interpolation='nearest')
ax1.set_title(r'Gaussian noise ($\mu=0$, $\sigma=5.$)')
ax2.imshow(image2, origin='lower', interpolation='nearest')
ax2.set_title(r'Poisson noise ($\mu=5$)')
Example #24
from astropy.modeling.models import Gaussian2D
import matplotlib.pyplot as plt
import numpy as np

from photutils.datasets import make_noise_image
from photutils.isophote import EllipseGeometry, Ellipse
from photutils import EllipticalAperture

g = Gaussian2D(100., 75, 75, 20, 12, theta=40. * np.pi / 180.)
ny = nx = 150
y, x = np.mgrid[0:ny, 0:nx]

noise = make_noise_image((ny, nx),
                         distribution='gaussian',
                         mean=0.,
                         stddev=2.,
                         random_state=12345)
data = g(x, y) + noise

#==========================================

geometry = EllipseGeometry(x0=75, y0=75, sma=20, eps=0.5, pa=20. * np.pi / 180)

aper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma,
                          geometry.sma * (1 - geometry.eps), geometry.pa)

plt.imshow(data, origin='lower')
aper.plot(color='white')
plt.show()

ellipse = Ellipse(data, geometry)
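The example stops after constructing the Ellipse object; fitting the isophotes and collecting the results follows the standard photutils isophote workflow:

# fit isophotes to the data, starting from the initial geometry above
isolist = ellipse.fit_image()

# summary table of the fitted isophotes (sma, intensity, ellipticity, pa, ...)
print(isolist.to_table())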
Example #25
def limiting_magnitude_prob(syntax, image, model=None, r_table=None):
    '''
    syntax - dict
        Dictionary of input parameters
    image - np.array
        Image of region of interest with target in center of image
    model - function
        - psf function from autophot
    '''
    try:

        from photutils import CircularAperture
        import matplotlib.pyplot as plt
        import numpy as np
        import matplotlib.gridspec as gridspec
        import random
        from scipy.optimize import curve_fit
        import warnings
        from photutils.datasets import make_noise_image
        # from autophot.packages.functions import mag
        from photutils import DAOStarFinder
        from astropy.stats import sigma_clipped_stats
        # from matplotlib.ticker import MultipleLocator
        from mpl_toolkits.axes_grid1 import make_axes_locatable
        from autophot.packages.rm_bkg import rm_bkg

        from astropy.visualization import ZScaleInterval

        import logging

        logger = logging.getLogger(__name__)

        limiting_mag_figure = plt.figure(figsize=set_size(240, aspect=1.5))

        gs = gridspec.GridSpec(2, 2, hspace=0.5, wspace=0.2)
        ax0 = limiting_mag_figure.add_subplot(gs[:, :-1])

        ax1 = limiting_mag_figure.add_subplot(gs[-1, -1])
        ax2 = limiting_mag_figure.add_subplot(gs[:-1, -1])

        # level for detection - Rule of thumb ~ 5 is a good detection level
        level = syntax['lim_SNR']

        logger.info('Limiting threshold: %d sigma' % level)

        image_no_surface, surface = rm_bkg(image, syntax, image.shape[0] / 2,
                                           image.shape[0] / 2)

        # =============================================================================
        # find and mask sources in close up
        # =============================================================================

        image_mean, image_median, image_std = sigma_clipped_stats(
            image,
            sigma=syntax['source_sigma_close_up'],
            maxiters=syntax['iters'])

        daofind = DAOStarFinder(fwhm=syntax['fwhm'],
                                threshold=syntax['bkg_level'] * image_std)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # ignore no sources warning
            sources = daofind(image - image_median)

        if sources is not None:
            positions = list(
                zip(np.array(sources['xcentroid']),
                    np.array(sources['ycentroid'])))

            positions.append((image.shape[0] / 2, image.shape[1] / 2))

        else:
            positions = [(image.shape[0] / 2, image.shape[1] / 2)]

        # "size" of source
        source_size = syntax['image_radius']

        pixel_number = int(np.ceil(np.pi * source_size**2))

        # Mask out target region
        mask_ap = CircularAperture(positions, r=source_size)

        mask = mask_ap.to_mask(method='center')

        mask_sumed = [i.to_image(image.shape) for i in mask]

        if len(mask_sumed) != 1:
            mask_sumed = sum(mask_sumed)
        else:
            mask_sumed = mask_sumed[0]

        mask_sumed[mask_sumed > 0] = 1

        logging.info('Number of pixels in star: %d' % pixel_number)

        # Mask out center region
        mask_image = (image_no_surface) * (1 - mask_sumed)

        vmin, vmax = (ZScaleInterval(nsamples=1500)).get_limits(mask_image)

        excluded_points = np.where(mask_image == 0)
        exclud_x = list(excluded_points[0])
        exclud_y = list(excluded_points[1])

        exclud_zip = list(zip(exclud_x, exclud_y))

        included_points = np.where(mask_image != 0)

        includ_x = list(included_points[0])
        includ_y = list(included_points[1])

        includ_zip = list(zip(includ_x, includ_y))

        # ax2.scatter(exclud_y,exclud_x,color ='black',marker = 'X',alpha = 0.5  ,label = 'excluded_pixels',zorder = 1)
        ax2.scatter(includ_y,
                    includ_x,
                    color='red',
                    marker='x',
                    alpha=0.5,
                    label='included_pixels',
                    zorder=2)

        number_of_points = 300

        fake_points = {}

        if len(includ_zip) < pixel_number:
            includ_zip = includ_zip + exclud_zip

        for i in range(number_of_points):
            fake_points[i] = []
            # count = 0
            random_pixels = random.sample(includ_zip, pixel_number)
            xp_ran = [i[0] for i in random_pixels]
            yp_ran = [i[1] for i in random_pixels]

            fake_points[i].append([xp_ran, yp_ran])

        fake_sum = {}
        for i in range(number_of_points):

            fake_sum[i] = []

            for j in fake_points[i]:

                for k in range(len(j[0])):

                    fake_sum[i].append(image_no_surface[j[0][k]][j[1][k]])

        fake_mags = {}

        for f in fake_sum.keys():

            fake_mags[f] = np.sum(fake_sum[f])

# =============================================================================
#     Histogram
# =============================================================================

        hist, bins = np.histogram(list(fake_mags.values()),
                                  bins=len(list(fake_mags.values())),
                                  density=True)

        center = (bins[:-1] + bins[1:]) / 2

        sigma_guess = np.nanstd(list(fake_mags.values()))
        mean_guess = np.nanmean(list(fake_mags.values()))
        A_guess = np.nanmax(hist)

        def gauss(x, a, x0, sigma):
            return a * np.exp(-(x - x0)**2 / (2 * sigma**2))

        popt, pcov = curve_fit(gauss,
                               center,
                               hist,
                               p0=[A_guess, mean_guess, sigma_guess],
                               absolute_sigma=True)

        mean = popt[1]
        std = abs(popt[2])

        logging.info('Mean: %s - std: %s' % (round(mean, 3), round(std, 3)))

        if syntax['probable_detection_limit']:

            beta = float(syntax['probable_detection_limit_beta'])

            def detection_probability(n, sigma, beta):
                from scipy.special import erfinv
                '''

                Probabilistic upper limit computation based on:
                http://web.ipac.caltech.edu/staff/fmasci/home/mystats/UpperLimits_FM2011.pdf

                Assuming a Gaussian noise distribution

                n: commonly used integer threshold value above some background level

                sigma: sigma value from noise distribution found from local area around source

                beta: Detection probability


                '''
                flux_upper_limit = (n +
                                    np.sqrt(2) * erfinv(2 * beta - 1)) * sigma

                return flux_upper_limit

            logging.info("Using Probable detection limit [b' = %d%% ]" %
                         (100 * beta))

            f_ul = mean + detection_probability(level, std, beta)

            logging.info("Flux Upper limit: %.3f" % f_ul)

        else:
            f_ul = abs(mean + level * std)
            logging.info('Detection at %s std: %.3f' % (level, f_ul))

        # =============================================================================
        # Plot histogram of background values
        # =============================================================================

        line_kwargs = dict(alpha=0.5, color='black', ls='--')

        # the histogram of the data
        n, bins, patches = ax0.hist(list(fake_mags.values()),
                                    density=True,
                                    bins=30,
                                    facecolor='blue',
                                    alpha=1,
                                    label='Pseudo-Flux\nDistribution')

        ax0.axvline(mean, **line_kwargs)
        ax0.axvline(mean + 1 * std, **line_kwargs)
        ax0.text(mean + 1 * std,
                 np.max(n),
                 r'$1\sigma$',
                 rotation=-90,
                 va='top')
        ax0.axvline(mean + 2 * std, **line_kwargs)
        ax0.text(mean + 2 * std,
                 np.max(n),
                 r'$2\sigma$',
                 rotation=-90,
                 va='top')

        if syntax['probable_detection_limit']:

            ax0.axvline(f_ul, **line_kwargs)
            ax0.text(f_ul,
                     np.max(n),
                     r"$\beta'$ = %d%%" % (100 * beta),
                     rotation=-90,
                     va='top')

        else:
            ax0.axvline(f_ul, **line_kwargs)
            ax0.text(mean + level * std,
                     np.max(n),
                     r'$' + str(level) + r'\sigma$',
                     rotation=-90,
                     va='top')

        x_fit = np.linspace(ax0.get_xlim()[0], ax0.get_xlim()[1], 250)

        ax0.plot(x_fit, gauss(x_fit, *popt), label='Gaussian Fit', color='red')

        ax0.ticklabel_format(axis='y', style='sci', scilimits=(-2, 0))
        ax0.yaxis.major.formatter._useMathText = True

        ax0.set_xlabel('Pseudo-Flux')
        ax0.set_ylabel('Normalised Probability')

        im2 = ax2.imshow(image - surface,
                         origin='lower',
                         aspect='auto',
                         interpolation='nearest')
        divider = make_axes_locatable(ax2)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cb = limiting_mag_figure.colorbar(im2, cax=cax)
        cb.ax.set_ylabel('Counts', rotation=270, labelpad=10)

        cb.formatter.set_powerlimits((0, 0))
        cb.ax.yaxis.set_offset_position('left')
        cb.update_ticks()

        ax2.set_title('Image - Surface')

        # =============================================================================
        # Convert counts to magnitudes
        # =============================================================================

        flux = f_ul / syntax['exp_time']

        mag_level = -2.5 * np.log10(flux)

        # =============================================================================
        # We now have an upper and lower estimate of the the limiting magnitude
        # =============================================================================
        '''
        Visual display of limiting case

        if PSF model is available use that

        else

        use a gaussian profile with the same number of counts
        '''
        fake_sources = np.zeros(image.shape)

        try:

            if syntax['c_counts']:
                pass

            model_label = 'PSF'

            def mag2image(m):
                '''
                Convert magnitude to height of PSF
                '''
                Amplitude = (syntax['exp_time'] /
                             (syntax['c_counts'] + syntax['r_counts'])) * (
                                 10**(m / -2.5))

                return Amplitude

            # PSF model that matches close-up shape around target
            def input_model(x, y, flux):
                return model(x, y, 0, flux, r_table, pad_shape=image.shape)

        except:
            '''
            if PSF model isn't available - use Gaussian instead

            '''
            logging.info('PSF model not available - Using Gaussian')
            model_label = 'Gaussian'

            sigma = syntax['fwhm'] / (2 * np.sqrt(2 * np.log(2)))  # sigma from FWHM

            def mag2image(m):
                '''
                Convert magnitude to height of Gaussian
                '''

                #  Volumne/counts under 2d gaussian for a magnitude m
                volume = (10**(m / -2.5)) * syntax['exp_time']

                # https://en.wikipedia.org/wiki/Gaussian_function
                Amplitude = volume / (2 * np.pi * sigma**2)

                return Amplitude

            #  Set up grid

            def input_model(x, y, A):

                x = np.arange(0, image.shape[0])
                xx, yy = np.meshgrid(x, x)

                from autophot.packages.functions import gauss_2d, moffat_2d

                if syntax['use_moffat']:
                    model = moffat_2d(
                        (xx, yy), x, y, 0, A,
                        syntax['image_params']).reshape(image.shape)

                else:
                    model = gauss_2d(
                        (xx, yy), x, y, 0, A,
                        syntax['image_params']).reshape(image.shape)

                return model

        # =============================================================================
        #  What magnitude do you want this target to be?
        # =============================================================================

        mag2image = mag2image

        inject_source_mag = mag2image(mag_level)

        # =============================================================================
        # Random well-spaced points to plot
        # =============================================================================

        random_sources = sample_with_minimum_distance(
            n=[int(syntax['fwhm']),
               int(image.shape[0] - syntax['fwhm'])],
            k=syntax['inject_sources_random_number'],
            d=int(syntax['fwhm'] / 2))
        import math

        def PointsInCircum(r, n=100):
            return [(math.cos(2 * math.pi / n * x) * r + image.shape[1] / 2,
                     math.sin(2 * math.pi / n * x) * r + image.shape[0] / 2)
                    for x in range(0, n)]

        random_sources = PointsInCircum(2 * syntax['fwhm'], n=3)
        x = [abs(i[0]) for i in random_sources]
        y = [abs(i[1]) for i in random_sources]

        print(x)
        print(y)

        # =============================================================================
        # Inject sources
        # =============================================================================

        try:
            if syntax['inject_source_random']:

                for i in range(0, len(x)):

                    fake_source_i = input_model(x[i], y[i], inject_source_mag)

                    if syntax['inject_source_add_noise']:

                        nan_idx = np.isnan(fake_source_i)
                        fake_source_i[nan_idx] = 0
                        fake_source_i[fake_source_i < 0] = 0

                        fake_source_i = make_noise_image(
                            fake_source_i.shape,
                            distribution='poisson',
                            mean=fake_source_i,
                            random_state=np.random.randint(0, 1e3))
                        # fake_source_i[nan_idx] = np.nan1

                    fake_sources += fake_source_i
                    ax1.scatter(x[i],
                                y[i],
                                marker='o',
                                s=150,
                                facecolors='none',
                                edgecolors='r',
                                alpha=0.5)
                    ax1.annotate(str(i), (x[i], -.5 + y[i]),
                                 color='r',
                                 alpha=0.5,
                                 ha='center')

            if syntax['inject_source_on_target']:

                fake_source_on_target = input_model(image.shape[1] / 2,
                                                    image.shape[0] / 2,
                                                    inject_source_mag)

                if syntax['inject_source_add_noise']:
                    nan_idx = np.isnan(fake_source_on_target)
                    fake_source_on_target[nan_idx] = 1e-6
                    fake_source_on_target[fake_source_on_target < 0] = 0

                    fake_source_on_target = make_noise_image(
                        fake_source_on_target.shape,
                        distribution='poisson',
                        mean=fake_source_on_target,
                        random_state=np.random.randint(0, 1e3))

                fake_sources += fake_source_on_target

                ax1.scatter(image.shape[1] / 2,
                            image.shape[0] / 2,
                            marker='o',
                            s=150,
                            facecolors='none',
                            edgecolors='black',
                            alpha=0.5)
                ax1.annotate('On\nTarget',
                             (image.shape[1] / 2, -1 + image.shape[0] / 2),
                             color='black',
                             alpha=0.5,
                             ha='center')

            im1 = ax1.imshow(
                image - surface + fake_sources,
                # vmin = vmin,
                # vmax = vmax,
                aspect='auto',
                # norm = norm,
                origin='lower',
                interpolation='nearest')
            ax1.set_title(' Fake [%s] Sources ' % model_label)

        except Exception as e:
            logging.exception(e)
            im1 = ax1.imshow(
                image - surface,
                origin='lower',
                aspect='auto',
            )
            ax1.set_title('[ERROR] Fake Sources [%s]' % model_label)

        # plt.colorbar(im1,ax=ax1)
        divider = make_axes_locatable(ax1)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cb = limiting_mag_figure.colorbar(im1, cax=cax)
        cb.ax.set_ylabel('Counts', rotation=270, labelpad=10)
        # cb = fig.colorbar(im)
        cb.formatter.set_powerlimits((0, 0))
        cb.ax.yaxis.set_offset_position('left')
        cb.update_ticks()

        ax0.legend(loc='lower center',
                   bbox_to_anchor=(0.5, 1.02),
                   ncol=2,
                   frameon=False)

        limiting_mag_figure.savefig(
            syntax['write_dir'] + 'limiting_mag_porb.pdf',
            # box_extra_artists=([l]),
            bbox_inches='tight',
            format='pdf')
        plt.close('all')

    # master try/except
    except Exception as e:
        print('limit issue')
        logging.exception(e)

    syntax['maglim_mean'] = mean
    syntax['maglim_std'] = std

    return mag_level, syntax
Example #26
def test_detection(StarFinder_Algorithm, sigma_psf, amplitude, PSF_size=1):
    '''create an image with mock sources and try to detect them
    
    Parameters
    ----------
        
    StarFinder_Algorithm:
         Class to detect stars
    
    sigma_psf:
        standard deviation of the PSF of the mock sources 
    
    amplitude:
        amplitude of the mock sources
        
    PSF_size:
        The StarFinder_Algorithm needs to know the sigma of the sources
        it tries to detect. This parameter scales the provided size
        relative to the sigma of the mock sources.
    '''

    # create mock data
    n_sources = 20
    tshape = (256, 256)

    param_ranges = OrderedDict([('amplitude', [amplitude, amplitude * 1.2]),
                                ('x_mean', [0, tshape[0]]),
                                ('y_mean', [0, tshape[1]]),
                                ('x_stddev', [sigma_psf, sigma_psf]),
                                ('y_stddev', [sigma_psf, sigma_psf]),
                                ('theta', [0, 0])])

    sources = make_random_gaussians_table(n_sources,
                                          param_ranges,
                                          random_state=1234)

    image = (make_gaussian_sources_image(tshape, sources) + make_noise_image(
        tshape, type='poisson', mean=6., random_state=1) + make_noise_image(
            tshape, type='gaussian', mean=0., stddev=2., random_state=34234))

    fwhm = gaussian_sigma_to_fwhm * sigma_psf

    mean, median, std = sigma_clipped_stats(image, sigma=3.0)

    StarFinder = StarFinder_Algorithm(fwhm=fwhm * PSF_size,
                                      threshold=3. * std,
                                      sharplo=0.1,
                                      sharphi=1.0,
                                      roundlo=-.2,
                                      roundhi=.2)

    sources_mock = StarFinder(image)

    # for consistent table output
    for col in sources_mock.colnames:
        sources_mock[col].info.format = '%.8g'

    string = str(StarFinder_Algorithm).split(
        '.')[-1][:-2] + f' sig={sigma_psf} A={amplitude}'
    print(f'{string}: {len(sources_mock):} of {n_sources} sources found')

    positions = np.transpose(
        [sources_mock['xcentroid'], sources_mock['ycentroid']])
    apertures = CircularAperture(positions, r=fwhm)

    return image, apertures, sources, string
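A usage sketch for test_detection, assuming the surrounding module already imports numpy, OrderedDict, sigma_clipped_stats, gaussian_sigma_to_fwhm, CircularAperture and the photutils dataset helpers used above:

from photutils.detection import DAOStarFinder, IRAFStarFinder

# compare the two star finders on identical mock images
for finder in (DAOStarFinder, IRAFStarFinder):
    image, apertures, sources, label = test_detection(finder,
                                                      sigma_psf=2.0,
                                                      amplitude=100,
                                                      PSF_size=1)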