Example No. 1
def centroid_airy(data, coords, rad=30, returnFit=False):
    if isinstance(data, str):
        data = pyfits.getdata(data)

    # Swap x and y to account for the array's (row, column) indexing
    center_y, center_x = coords
    dslice = data[center_x - rad:center_x + rad, center_y - rad:center_y + rad]

    # Construct a grid of coordinates
    x, y = np.mgrid[0:dslice.shape[0], 0:dslice.shape[1]]
    # Center the grid on the cutout (mgrid returns integer arrays, so avoid in-place float subtraction)
    x = x - dslice.shape[0] / 2.
    y = y - dslice.shape[1] / 2.

    p_init = models.AiryDisk2D(np.max(dslice), 0, 0, rad)
    p = fitter(p_init, x, y, dslice)

    # Rescale coordinates to match data
    px = center_y + p.y_0
    py = center_x + p.x_0

    if returnFit:
        return px, py, p

    else:
        return px, py
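
The snippet above relies on module-level names (`pyfits`, `np`, `models`, and a `fitter` instance) that are not shown. A minimal sketch of how it might be set up and called; the fitter choice, FITS filename, and coordinates are hypothetical placeholders:

# Sketch of the module-level setup this snippet assumes; the filename and
# coordinates below are hypothetical placeholders.
import numpy as np
from astropy.io import fits as pyfits
from astropy.modeling import models, fitting

fitter = fitting.LevMarLSQFitter()  # the module-level `fitter` used above

px, py, fit = centroid_airy('image.fits', coords=(512, 480), rad=30, returnFit=True)
print(px, py, fit.radius.value)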
Example No. 2
def log_likelihoodG(theta, x, y, data, var, size=21):
    """
    Logarithm of the likelihood function.
    """
    #unpack the parameters
    amplitude, center_x, center_y, radius, focus, width_x, width_y = theta

    #1)Generate a model Airy disc
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape((size, size))

    #2)Apply Focus
    f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
    focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape((size, size))
    model = signal.convolve2d(adata, focusdata, mode='same')

    #3)Apply CCD diffusion, approximated with a Gaussian
    CCD = models.Gaussian2D(1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.)
    CCDdata = CCD.eval(x, y, 1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.).reshape((size, size))
    model = signal.convolve2d(model, CCDdata, mode='same').flatten()

    #true for Gaussian errors, but not really true here because of the mixture of Poisson and Gaussian noise
    lnL = - 0.5 * np.sum((data - model)**2 / var)

    return lnL
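
The three numbered steps form a simple forward model: an Airy disc, blurred by a Gaussian "focus" kernel and then by a Gaussian approximation of CCD diffusion. A sketch of the same pipeline against the current astropy API (the snippet above uses the older `Model.eval`, which recent releases call `evaluate`); all parameter values below are illustrative assumptions:

# Same three-step forward model, written by calling the model instances
# directly on the pixel grid (parameter values are illustrative only).
import numpy as np
from scipy import signal
from astropy.modeling import models

size = 21
y, x = np.mgrid[:size, :size]
amplitude, cx, cy, radius, focus, wx, wy = 1e5, 10.2, 9.8, 0.5, 0.4, 0.3, 0.35

adata = models.AiryDisk2D(amplitude, cx, cy, radius)(x, y)           # 1) Airy disc
focusdata = models.Gaussian2D(1., cx, cy, focus, focus, 0.)(x, y)    # 2) focus blur
model = signal.convolve2d(adata, focusdata, mode='same')

ccddata = models.Gaussian2D(1., size / 2. - 0.5, size / 2. - 0.5, wx, wy, 0.)(x, y)
model = signal.convolve2d(model, ccddata, mode='same')               # 3) CCD diffusion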
Example No. 3
def fit_airy_2d(data, x=None, y=None):
    """Fit an AiryDisk2D model to the data."""
    delta = int(len(data) / 2)  # guess the center
    ldata = len(data)

    if x is None:
        x = delta
    if y is None:
        y = delta
    fixed_pars = {"x_0": True, "y_0": True}  # hold these constant
    yy, xx = np.mgrid[:ldata, :ldata]

    # fit model to the data
    fit = fitting.LevMarLSQFitter()

    # AiryDisk2D(amplitude, x_0, y_0, radius)
    model = models.AiryDisk2D(np.max(data),
                              x_0=x,
                              y_0=y,
                              radius=delta,
                              fixed=fixed_pars)
    with warnings.catch_warnings():
        # Ignore model warnings for new_plot_window
        warnings.simplefilter('ignore')
        results = fit(model, xx, yy, data)

    return results
Example No. 4
def _simpleExample(CCDx=10, CCDy=10):
    spot = np.zeros((21, 21))
    #Create the coordinates x and y
    x = np.arange(0, spot.shape[1])
    y = np.arange(0, spot.shape[0])
    #Put the coordinates in a mesh
    xx, yy = np.meshgrid(x, y)

    peak, center_x, center_y, radius, focus, width_x, width_y = (200000, 10.1,
                                                                 9.95, 0.5,
                                                                 0.5, 0.03,
                                                                 0.06)
    amplitude = _amplitudeFromPeak(peak,
                                   center_x,
                                   center_y,
                                   radius,
                                   x_0=CCDx,
                                   y_0=CCDy)
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(xx, yy, amplitude, center_x, center_y,
                      radius).reshape(spot.shape)
    f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
    focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus,
                       0.).reshape(spot.shape)
    foc = signal.convolve2d(adata, focusdata, mode='same')
    fileIO.writeFITS(foc, 'TESTfocus.fits', int=False)
    CCDdata = np.array(
        [[0.0, width_y, 0.0],
         [width_x, (1. - width_y - width_y - width_x - width_x), width_x],
         [0.0, width_y, 0.0]])
    model = signal.convolve2d(foc, CCDdata, mode='same')
    #save model
    fileIO.writeFITS(model, 'TESTkernel.fits', int=False)
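
`fileIO.writeFITS` and `_amplitudeFromPeak` are helpers from the surrounding project and are not shown here. For the FITS output alone, a rough stand-in using plain astropy might look like this (the helper name is hypothetical):

# Rough stand-in for the project's fileIO.writeFITS helper (hypothetical name).
from astropy.io import fits

def write_fits(data, filename):
    """Write a 2D array to a single-extension FITS file, overwriting any existing file."""
    fits.PrimaryHDU(data=data).writeto(filename, overwrite=True)

# e.g. write_fits(model, 'TESTkernel.fits')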
Example No. 5
def log_likelihoodC(theta, x, y, data, var, size=21):
    """
    Logarithm of the likelihood function.
    """
    #unpack the parameters
    amplitude, center_x, center_y, radius, focus, width_x, width_y, width_d = theta

    #1)Generate a model Airy disc
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape((size, size))

    #2)Apply Focus
    f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
    focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape((size, size))
    focusmodel = signal.convolve2d(adata, focusdata, mode='same')

    #3)Apply CCD diffusion kernel
    kernel = np.array([[width_d, width_y, width_d],
                       [width_x, 1., width_x],
                       [width_d, width_y, width_d]])
    kernel /= kernel.sum()
    #model = ndimage.convolve(focusmodel, kernel)
    model = signal.convolve2d(focusmodel, kernel, mode='same').flatten()

    #true for Gaussian errors, but not really true here because of the mixture of Poisson and Gaussian noise
    lnL = - 0.5 * np.sum((data - model)**2 / var)

    return lnL
Example No. 6
	def model_psf(self, model, radius, psf_resolution, shape=256, **kwargs):
		"""Models the PSF given the desired model function and kwargs.

		Args:
			model (str):
				Must be either 'airydisk' or 'gaussian'.
			radius (int, float, astropy.unit.Quantity):
				Radius of the PSF model that is the radius of the first zero in an AiryDisk model or the standard
				deviation of the Gaussian model. Scalar values will be interpreted in units of arcseconds.
			psf_resolution (int, float, astropy.unit.Quantity):
				Resolution of the model PSF, equivalent to the pixel scale of the array. Scalar values will be
				interpreted in units of arcseconds.
			shape (int, optional):
				Size of the model PSF along both axes.
			kwargs are forwarded to the model function.
		"""

		# Check input parameters
		if not isinstance(model, str):
			raise SpecklepyTypeError('model_psf', 'model', type(model), 'str')

		if isinstance(radius, Quantity):
			self.radius = radius
		elif isinstance(radius, (int, float)):
			logger.warning(f"Interpreting scalar type radius as {radius} arcsec")
			self.radius = Quantity(f"{radius} arcsec")
		elif isinstance(radius, str):
			self.radius = Quantity(radius)
		else:
			raise SpecklepyTypeError('model_psf', 'radius', type(radius), 'Quantity')

		if isinstance(psf_resolution, Quantity):
			self.psf_resolution = psf_resolution
		elif isinstance(psf_resolution, (int, float)):
			logger.warning(f"Interpreting scalar type psf_resolution as {psf_resolution} arcsec")
			self.psf_resolution = Quantity(f"{psf_resolution} arcsec")
		elif isinstance(psf_resolution, str):
			self.psf_resolution = Quantity(psf_resolution)
		else:
			raise SpecklepyTypeError('model_psf', 'psf_resolution', type(psf_resolution), 'Quantity')

		if isinstance(shape, int):
			center = (shape / 2, shape / 2)
			shape = (shape, shape)
		elif isinstance(shape, tuple):
			center = (shape[0] / 2, shape[1] / 2)
		else:
			raise SpecklepyTypeError('model_psf', 'shape', type(shape), 'int or tuple')

		if model.lower() == 'airydisk':
			model = models.AiryDisk2D(x_0=center[0], y_0=center[1], radius=float(self.radius / self.psf_resolution))
		elif model.lower() == 'gaussian':
			stddev = float(self.radius / self.psf_resolution)
			model = models.Gaussian2D(x_mean=center[0], y_mean=center[1], x_stddev=stddev, y_stddev=stddev)
		else:
			raise SpecklepyValueError('model_psf', 'model', model, 'either AiryDisk or Gaussian')

		y, x = np.mgrid[0:shape[0], 0:shape[1]]
		self.psf = model(x, y)
		self.psf = self.normalize(self.psf)
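
Stripped of the class bookkeeping, logging, and the Specklepy error types, the 'airydisk' branch of this method boils down to the following standalone sketch; the radius and pixel-scale values are assumptions, and the class's own `normalize` is replaced by a plain unit-sum normalization:

# Standalone sketch of the 'airydisk' branch above (values are assumptions).
import numpy as np
from astropy.units import Quantity
from astropy.modeling import models

radius = Quantity('0.1 arcsec')           # radius of the first Airy zero
psf_resolution = Quantity('0.01 arcsec')  # pixel scale of the PSF array
shape = (256, 256)
center = (shape[0] / 2, shape[1] / 2)

psf_model = models.AiryDisk2D(x_0=center[0], y_0=center[1],
                              radius=float(radius / psf_resolution))
y, x = np.mgrid[0:shape[0], 0:shape[1]]
psf = psf_model(x, y)
psf /= psf.sum()                          # simple flux normalization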
Example No. 7
def test_model_integer_indexing(int_type):
    """Regression for PR 12561; verify that compound model components
     can be accessed by integer index"""
    gauss = models.Gaussian2D()
    airy = models.AiryDisk2D()
    compound = gauss + airy

    assert compound[int_type(0)] == gauss
    assert compound[int_type(1)] == airy
Example No. 8
def fit_airy_2d(data, x=None, y=None, sigma_factor=0):
    """Fit an AiryDisk2D model to the data.

    Parameters
    ----------

    data: 2D float array
        The 2D image to fit; the center of the array is used as the
        initial estimate of the fit center
    x: float (optional)
        xcenter location
    y: float (optional)
        ycenter location
    sigma_factor: float (optional)
        If sigma_factor > 0 then clipping will be performed
        on the data during the model fit

    Returns
    -------
    The fitted AiryDisk2D + constant model

    """
    delta = int(len(data) / 2)  # guess the center
    ldata = len(data)

    if x is None:
        x = delta
    if y is None:
        y = delta
    fixed_pars = {"x_0": True, "y_0": True}  # hold these constant
    yy, xx = np.mgrid[:ldata, :ldata]

    # Initialize the fitter
    fitter = fitting.LevMarLSQFitter()
    if sigma_factor > 0:
        fit = fitting.FittingWithOutlierRemoval(fitter,
                                                sigma_clip,
                                                niter=3,
                                                sigma=sigma_factor)
    else:
        fit = fitter

    # AiryDisk2D(amplitude, x_0, y_0, radius) + constant
    model = (models.AiryDisk2D(
        np.max(data), x_0=x, y_0=y, radius=delta, fixed=fixed_pars) +
             models.Polynomial2D(c0_0=data.min(), degree=0))
    with warnings.catch_warnings():
        # Ignore model warnings for new_plot_window
        warnings.simplefilter('ignore')
        results = fit(model, xx, yy, data)

    if sigma_factor > 0:
        return results[1]
    else:
        return results
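
A sketch of calling this routine on a synthetic spot (the imports, and the module-level `sigma_clip` used above, are assumptions about the surrounding module):

# Sketch: fit a noisy synthetic Airy spot with fit_airy_2d (assumes the
# module-level imports used above: numpy, warnings, astropy fitting/models, sigma_clip).
import numpy as np
from astropy.modeling import models

size = 25
yy, xx = np.mgrid[:size, :size]
truth = models.AiryDisk2D(amplitude=100., x_0=12., y_0=12., radius=4.)
data = truth(xx, yy) + np.random.normal(0., 0.5, (size, size))

fitted = fit_airy_2d(data)          # sigma_factor=0: plain least-squares fit
print(fitted)                       # AiryDisk2D + Polynomial2D(0) compound model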
Example No. 9
def test_model_string_indexing():
    """Regression for PR 12561; verify that compound model components
     can be accessed by indexing with model name"""
    gauss = models.Gaussian2D()
    gauss.name = 'Model1'
    airy = models.AiryDisk2D()
    airy.name = 'Model2'
    compound = gauss + airy

    assert compound['Model1'] == gauss
    assert compound['Model2'] == airy
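
Together with the integer-indexing test above, this pins down how compound model components are retrieved. A brief sketch of the behaviour the two tests exercise (for astropy versions that include PR 12561):

# Sketch of the indexing behaviour the two tests above verify.
from astropy.modeling import models

gauss = models.Gaussian2D(amplitude=1., x_mean=0., y_mean=0., x_stddev=1., y_stddev=1.)
gauss.name = 'Model1'
airy = models.AiryDisk2D(amplitude=1., x_0=0., y_0=0., radius=2.)
airy.name = 'Model2'
compound = gauss + airy

assert compound[0] == gauss          # component by integer index
assert compound['Model2'] == airy    # component by sub-model name
value = compound(0.3, -0.2)          # evaluate the summed model at a point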
Example No. 10
def _peakFromTruth(theta, size=21):
    """
    Derive the peak value from the parameters used for simulations.
    """
    amplitude, center_x, center_y, radius, focus, width_x, width_y = theta
    x = np.arange(0, size)
    y = np.arange(0, size)
    x, y = np.meshgrid(x, y)
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(x, y, amplitude, center_x, center_y, radius)
    return adata.max()
Example No. 11
	def compute_airy_model(self, size=256, wavelength=None, first_zero_radius=4):
		"""
		compute_airy_model() computes a diffraction-limited PSF, based on an
		Airy model, on a grid with shape = (size, size). The corresponding pixel
		scale (the psf_resolution) is computed from the radius of the first zero
		(first_zero_radius, in pixels), the telescope diameter and a wavelength.
		Therefore, please provide the telescope instance with a wavelength
		attribute.
		"""
		if wavelength is None:
			try:
				wavelength = self.wavelength
			except AttributeError:
				wavelength = 1*u.micron
		center = int(size / 2)
		Airy = models.AiryDisk2D(amplitude=1e6, x_0=center, y_0=center, radius=first_zero_radius)
		xdata, ydata = np.meshgrid(np.arange(0, size), np.arange(0, size))
		self.psf = self._normalize(Airy(xdata, ydata))
		self.psf_resolution = 1.2196698912665045 * np.arctan(wavelength / self.diameter).to(u.mas) / first_zero_radius
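
The last line uses the fact that the first zero of the Airy pattern lies at an angle of roughly 1.22 λ/D; dividing that angle by the zero's radius in pixels gives the pixel scale. A quick check with astropy units (the telescope diameter here is an assumed value):

# Quick check of the pixel-scale relation used above (diameter is an assumed value).
import numpy as np
import astropy.units as u

wavelength = 1 * u.micron
diameter = 8.2 * u.m              # assumed telescope diameter
first_zero_radius = 4             # radius of the first Airy zero, in pixels

theta_first_zero = 1.2196698912665045 * np.arctan(wavelength / diameter).to(u.mas)
pixel_scale = theta_first_zero / first_zero_radius   # angular size of one pixel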
Example No. 12
def log_likelihoodJoint(theta, x, y, data, var, size=21):
    """
    Logarithm of the likelihood function for joint fitting. Not really sure if this is right...
    """
    #unpack the parameters
    #theta = [xpos, ypos]*images + [amplitude, radius, focus, width_x, width_y]
    images = len(theta[:-5]) / 2
    amplitude, radius, focus, width_x, width_y = theta[-5:]

    data[data < 1.] = 0.

    lnL = 0.
    for tmp in xrange(images):
        #X and Y are always in pairs
        center_x = theta[2*tmp]
        center_y = theta[2*tmp+1]

        #1)Generate a model Airy disc
        airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
        adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape((size, size))

        #2)Apply Focus, no normalisation as smoothing
        f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
        focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape((size, size))
        model = signal.convolve2d(adata, focusdata, mode='same')

        #3)Apply CCD diffusion, approximated with a Gaussian -- max = 1 as centred
        CCD = models.Gaussian2D(1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.)
        CCDdata = CCD.eval(x, y, 1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.).reshape((size, size))
        model = signal.convolve2d(model, CCDdata, mode='same').flatten()

        lnL += - 0.5 * np.sum((data[tmp].flatten() - model)**2 / var[tmp].flatten())
        # model[model < 1.] = 1.
        # lnL1 = - np.sum(model - data*np.log(model))
        # lnL2 = - 0.5 * np.sum((data - model)**2 / var)
        # #lnL += np.logaddexp(lnL1, lnL2)
        # lnL += lnL1 + lnL2

    return lnL
Example No. 13
def log_likelihood(theta, x, y, data, var, size):
    """
    Logarithm of the likelihood function.
    """
    #unpack the parameters
    peak, center_x, center_y, radius, focus, width_x, width_y = theta

    #1)Generate a model Airy disc
    amplitude = _amplitudeFromPeak(peak,
                                   center_x,
                                   center_y,
                                   radius,
                                   x_0=int(size[0] / 2. - 0.5),
                                   y_0=int(size[1] / 2. - 0.5))
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(x, y, amplitude, center_x, center_y,
                      radius).reshape(size)

    #2)Apply Focus
    f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
    focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus,
                       0.).reshape(size)
    model = signal.convolve2d(adata, focusdata, mode='same')

    #3)Apply CCD diffusion, approximated with a Gaussian
    CCDdata = np.array(
        [[0.0, width_y, 0.0],
         [width_x, (1. - width_y - width_y - width_x - width_x), width_x],
         [0.0, width_y, 0.0]])
    model = signal.convolve2d(model, CCDdata, mode='same').flatten()

    #true for Gaussian errors
    #lnL = - 0.5 * np.sum((data - model)**2 / var)
    #Gary B. said that this should be from the model not data so recompute var (now contains rn**2)
    var += model.copy()
    lnL = -(np.size(var) * np.sum(np.log(var))) - (0.5 * np.sum(
        (data - model)**2 / var))

    return lnL
Example No. 14
def log_likelihood(theta, x, y, data, var, size=21):
    """
    Logarithm of the likelihood function.
    """
    #unpack the parameters
    amplitude, center_x, center_y, radius, focus, width_x, width_y = theta

    #1)Generate a model Airy disc
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape((size, size))

    #2)Apply Focus
    f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
    focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape((size, size))
    model = signal.convolve2d(adata, focusdata, mode='same')

    #3)Apply CCD diffusion, approximated with a Gaussian
    CCD = models.Gaussian2D(1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.)
    CCDdata = CCD.eval(x, y, 1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.).reshape((size, size))
    model = signal.convolve2d(model, CCDdata, mode='same').flatten()

    #true for Gaussian errors, but not really true here because of the mixture of Poisson and Gaussian noise
    #lnL = - 0.5 * np.sum((data - model)**2 / var)
    #others...
    lnL = - 2. * np.sum((((data - model)**2) + np.abs(data - model))/var) #does not get the amplitude easily right
    #using L1 norm would be true for exponential distribution
    #lnL = - np.sum(np.abs(data - model) / var)

    # data[data < 1.] = 0.
    # model[model < 1.] = 1.
    # lnL1 = - np.sum(model - data*np.log(model))
    # lnL2 = - 0.5 * np.sum((data - model)**2 / var)
    # #lnL = np.logaddexp(lnL1, lnL2)
    # lnL = lnL1 + lnL2

    return lnL
Example No. 15
 astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
 astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
 astmodels.Scale(3.4),
 astmodels.RotateNative2Celestial(5.63, -72.5, 180),
 astmodels.Multiply(3),
 astmodels.Multiply(10 * u.m),
 astmodels.RotateCelestial2Native(5.63, -72.5, 180),
 astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
 astmodels.Mapping((0, 1), n_inputs=3),
 astmodels.Shift(2. * u.deg),
 astmodels.Scale(3.4 * u.deg),
 astmodels.RotateNative2Celestial(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
 astmodels.RotateCelestial2Native(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
 astmodels.RotationSequence3D([1.2, 2.3, 3.4, .3], 'xyzx'),
 astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, .3], 'xyzy'),
 astmodels.AiryDisk2D(amplitude=10., x_0=0.5, y_0=1.5),
 astmodels.Box1D(amplitude=10., x_0=0.5, width=5.),
 astmodels.Box2D(amplitude=10., x_0=0.5, x_width=5., y_0=1.5, y_width=7.),
 astmodels.Const1D(amplitude=5.),
 astmodels.Const2D(amplitude=5.),
 astmodels.Disk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5.),
 astmodels.Ellipse2D(amplitude=10., x_0=0.5, y_0=1.5, a=2., b=4.,
                     theta=0.1),
 astmodels.Exponential1D(amplitude=10., tau=3.5),
 astmodels.Gaussian1D(amplitude=10., mean=5., stddev=3.),
 astmodels.Gaussian2D(amplitude=10.,
                      x_mean=5.,
                      y_mean=5.,
                      x_stddev=3.,
                      y_stddev=3.),
 astmodels.KingProjectedAnalytic1D(amplitude=10., r_core=5., r_tide=2.),
Example No. 16
def forwardModelTest(file, CCDPSFmodel='Gaus', out='Data', gain=3.1, size=10, spotx=2888, spoty=3514,
                     burn=100, run=200, nwalkers=1000):
    """
    Processes a single file to quickly test whether the method works
    """
    #get data and convert to electrons
    print '\n\n\n'
    print '_'*120
    print 'Processing:', file

    o = pf.getdata(file)*gain

    #roughly the correct location - to avoid identifying e.g. cosmic rays
    data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy()

    #maximum position within the cutout
    y, x = m.maximum_position(data)

    #spot and the peak pixel within the spot, this is also the CCD kernel position
    spot = data[y-size:y+size+1, x-size:x+size+1].copy()
    CCDy, CCDx = m.maximum_position(spot)

    bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) #works for read o
    rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20])

    print 'Readnoise (e):', rn
    if rn < 2. or rn > 6.:
        print 'NOTE: suspicious readout noise estimate...'
    print 'ADC offset (e):', bias

    #remove bias
    spot -= bias

    #save to file
    fileIO.writeFITS(spot, out+'small.fits', int=False)

    #make a copy to generate the error array
    data = spot.copy().flatten()
    data[data + rn**2 < 0.] = 0.  #set highly negative values to zero
    #assume errors scale as sqrt of the values + readnoise
    sigma = np.sqrt(data + rn**2)
    #variance is the true noise model
    var = sigma**2

    #maximum value
    max = np.max(spot)
    print 'Maximum Value:', max

    #fit a simple model
    print 'Least Squares Fitting...'
    gaus = models.Gaussian2D(spot.max(), size, size, x_stddev=0.5, y_stddev=0.5)
    gaus.theta.fixed = True  #fix angle
    p_init = gaus
    fit_p = fitting.LevMarLSQFitter()
    stopy, stopx = spot.shape
    X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1))
    p = fit_p(p_init, X, Y, spot)
    print p
    model = p(X, Y)

    fileIO.writeFITS(model, out+'BasicModelG.fits', int=False)
    fileIO.writeFITS(model - spot, out+'BasicModelResidualG.fits', int=False)

    airy = models.AiryDisk2D(spot.max(), size, size, 0.6)
    p_init = airy
    fit_p = fitting.LevMarLSQFitter()
    a = fit_p(p_init, X, Y, spot)
    print a
    model = a(X, Y)  #use the Airy fit for the Airy-model outputs

    fileIO.writeFITS(model, out+'BasicModelA.fits', int=False)
    fileIO.writeFITS(model - spot, out+'BasicModelResidualA.fits', int=False)

    #goodness of fit
    gof = (1./(len(data)-5.)) * np.sum((model.flatten() - data)**2 / var)
    print 'GoF:', gof
    print 'Done'

    #MCMC based fitting
    if 'Gaus' in CCDPSFmodel:
        ndim = 7
        print 'Model with a Gaussian CCD PSF, %i dimensions' % ndim

        #Choose an initial set of positions for the walkers - fairly large area not to bias the results
        #amplitude, center_x, center_y, radius, focus, width_x, width_y = theta
        p0 = np.zeros((nwalkers, ndim))
        p0[:, 0] = np.random.uniform(max, 2.*max, size=nwalkers)     # amplitude
        p0[:, 1] = np.random.uniform(7., 14., size=nwalkers)         # x
        p0[:, 2] = np.random.uniform(7., 14., size=nwalkers)         # y
        p0[:, 3] = np.random.uniform(.1, 1., size=nwalkers)          # radius
        p0[:, 4] = np.random.uniform(.1, 1., size=nwalkers)          # focus
        p0[:, 5] = np.random.uniform(.1, 0.5, size=nwalkers)         # width_x
        p0[:, 6] = np.random.uniform(.1, 0.5, size=nwalkers)         # width_y

        # Initialize the sampler with the chosen specs.
        #Create the coordinates x and y
        x = np.arange(0, spot.shape[1])
        y = np.arange(0, spot.shape[0])
        #Put the coordinates in a mesh
        xx, yy = np.meshgrid(x, y)

        #Flatten the arrays
        xx = xx.flatten()
        yy = yy.flatten()

        #initiate sampler
        pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword
        sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posteriorG, args=[xx, yy, data, var], pool=pool)

        # Run a burn-in and set new starting position
        print "Burning-in..."
        pos, prob, state = sampler.run_mcmc(p0, burn)
        best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]
        pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)
        # Reset the chain to remove the burn-in samples.
        sampler.reset()

        # Starting from the final position in the burn-in chain
        print "Running MCMC..."
        pos, prob, state = sampler.run_mcmc(pos, burn)
        sampler.reset()
        pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)

        # Print out the mean acceptance fraction
        print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)

        #Get the index with the highest probability
        maxprob_index = np.argmax(prob)

        #Get the best parameters and their respective errors and print best fits
        params_fit = pos[maxprob_index]
        errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]
        _printResults2(params_fit, errors_fit, model=CCDPSFmodel)

        #Best fit model
        amplitude, center_x, center_y, radius, focus, width_x, width_y = params_fit
        airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
        adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape)
        f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
        focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape)
        foc = signal.convolve2d(adata, focusdata, mode='same')
        CCD = models.Gaussian2D(1., CCDx, CCDy, width_x, width_y, 0.)
        CCDdata = CCD.eval(xx, yy, 1., CCDx, CCDy, width_x, width_y, 0.).reshape(spot.shape)
        model = signal.convolve2d(foc, CCDdata, mode='same')
        #save model
        fileIO.writeFITS(model, out+'model.fits', int=False)

        #residuals
        fileIO.writeFITS(model - spot, out+'residual.fits', int=False)
        fileIO.writeFITS(((model-spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False)

        #results
        _printFWHM(width_x, width_y, errors_fit[5], errors_fit[6])

        #plot
        samples = sampler.chain[:, burn:, :].reshape((-1, ndim))
        fig = triangle.corner(samples,
                              labels=['amplitude', 'center_x', 'center_y', 'radius', 'focus', 'width_x', 'width_y'])
        fig.savefig(out+'Triangle.png')

    elif 'Cross' in CCDPSFmodel:
        ndim = 8
        print 'Model with a Cross CCD PSF, %i dimensions' % ndim

        #amplitude, center_x, center_y, radius, focus, width_x, width_y, width_d = theta
        # Choose an initial set of positions for the walkers using the Gaussian fit
        p0 = [np.asarray([1.3*max,#p.amplitude.value,
                          p.x_mean.value,
                          p.y_mean.value,
                          np.max([p.x_stddev.value, p.y_stddev.value]),
                          0.5,
                          0.08,
                          0.1,
                          0.01]) + 1e-3*np.random.randn(ndim) for i in xrange(nwalkers)]

        # Initialize the sampler with the chosen specs.
        #Create the coordinates x and y
        x = np.arange(0, spot.shape[1])
        y = np.arange(0, spot.shape[0])
        #Put the coordinates in a mesh
        xx, yy = np.meshgrid(x, y)

        #Flatten the arrays
        xx = xx.flatten()
        yy = yy.flatten()

        #initiate sampler
        pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword
        sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posteriorC, args=[xx, yy, data, var], pool=pool)


        # Run a burn-in and set new starting position
        print "Burning-in..."
        pos, prob, state = sampler.run_mcmc(p0, burn)
        best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]
        pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)
        # Reset the chain to remove the burn-in samples.
        sampler.reset()

        # Starting from the final position in the burn-in chain
        print "Running MCMC..."
        pos, prob, state = sampler.run_mcmc(pos, burn)
        sampler.reset()
        pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)

        # Print out the mean acceptance fraction
        print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)

        #Get the index with the highest probability
        maxprob_index = np.argmax(prob)

        #Get the best parameters and their respective errors and print best fits
        params_fit = pos[maxprob_index]
        errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]
        _printResults2(params_fit, errors_fit, model=CCDPSFmodel)

        #Best fit model
        amplitude, center_x, center_y, radius, focus, width_x, width_y, width_d = params_fit
        airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
        adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape)
        f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
        focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape)
        foc = signal.convolve2d(adata, focusdata, mode='same')

        #3)Apply CCD diffusion kernel
        kernel = np.array([[width_d, width_y, width_d],
                           [width_x, 1., width_x],
                           [width_d, width_y, width_d]])
        kernel /= kernel.sum()
        model = signal.convolve2d(foc, kernel, mode='same')

        #save model
        fileIO.writeFITS(model, out+'model.fits', int=False)

        #residuals
        fileIO.writeFITS(model - spot, out+'residual.fits', int=False)
        fileIO.writeFITS(((model-spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False)

        #results
        print kernel
        gaus = models.Gaussian2D(kernel.max(), 1.5, 1.5, x_stddev=0.3, y_stddev=0.3)
        gaus.theta.fixed = True
        p_init = gaus
        fit_p = fitting.LevMarLSQFitter()
        stopy, stopx = kernel.shape
        X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1))
        p = fit_p(p_init, X, Y, kernel)
        #print p
        _printFWHM(p.x_stddev.value, p.y_stddev.value, errors_fit[5], errors_fit[6])

        #plot
        samples = sampler.chain[:, burn:, :].reshape((-1, ndim))
        fig = triangle.corner(samples,
                              labels=['amplitude', 'center_x', 'center_y', 'radius', 'focus', 'width_x', 'width_y', 'width_d'])
        fig.savefig(out+'Triangle.png')

    # a simple goodness of fit
    gof = (1./(len(data)-ndim)) * np.sum((model.flatten() - data)**2 / var)
    print 'GoF:', gof, ' Maximum difference:', np.max(np.abs(model - spot))
Example No. 17
def forwardModel(file,
                 out='Data',
                 wavelength=None,
                 gain=3.1,
                 size=10,
                 burn=500,
                 spotx=2888,
                 spoty=3514,
                 run=700,
                 simulation=False,
                 truths=None):
    """
    Forward models the spot data found from the input file. Can be used with simulated and real data.

    Notes:
    - emcee is run three times as it is important to have a good starting point for the final run.
    """
    print '\n\n\n'
    print '_' * 120
    print 'Processing:', file
    #get data and convert to electrons
    o = pf.getdata(file) * gain

    if simulation:
        data = o
    else:
        #roughly the correct location - to avoid identifying e.g. cosmic rays
        data = o[spoty - (size * 3):spoty + (size * 3) + 1,
                 spotx - (size * 3):spotx + (size * 3) + 1].copy()

    #maximum position within the cutout
    y, x = m.maximum_position(data)

    #spot and the peak pixel within the spot, this is also the CCD kernel position
    spot = data[y - size:y + size + 1, x - size:x + size + 1].copy()
    CCDy, CCDx = m.maximum_position(spot)
    print 'CCD Kernel Position (within the postage stamp):', CCDx, CCDy

    #bias estimate
    if simulation:
        bias = 9000.
        rn = 4.5
    else:
        bias = np.median(o[spoty - size:spoty + size,
                           spotx - 220:spotx - 20])  #works for read o
        rn = np.std(o[spoty - size:spoty + size, spotx - 220:spotx - 20])

    print 'Readnoise (e):', rn
    if rn < 2. or rn > 6.:
        print 'NOTE: suspicious readout noise estimate...'
    print 'ADC offset (e):', bias

    #remove bias
    spot -= bias

    #save to file
    fileIO.writeFITS(spot, out + 'small.fits', int=False)

    #make a copy to generate the error array
    data = spot.copy().flatten()
    #assume that uncertainties scale as sqrt of the values + readnoise
    #sigma = np.sqrt(data/gain + rn**2)
    tmp = data.copy()
    tmp[tmp + rn**2 < 0.] = 0.  #set highly negative values to zero
    var = tmp.copy() + rn**2
    #Gary B. said that actually this should be from the model or is biased,
    #so I only pass the readout noise part now

    #fit a simple model
    print 'Least Squares Fitting...'
    gaus = models.Gaussian2D(spot.max(),
                             size,
                             size,
                             x_stddev=0.5,
                             y_stddev=0.5)
    gaus.theta.fixed = True  #fix angle
    p_init = gaus
    fit_p = fitting.LevMarLSQFitter()
    stopy, stopx = spot.shape
    X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1))
    p = fit_p(p_init, X, Y, spot)
    print p
    model = p(X, Y)
    fileIO.writeFITS(model, out + 'BasicModel.fits', int=False)
    fileIO.writeFITS(model - spot, out + 'BasicModelResidual.fits', int=False)

    #goodness of fit
    gof = (1. / (np.size(data) - 5.)) * np.sum(
        (model.flatten() - data)**2 / var)
    print 'GoF:', gof
    print 'Done\n\n'

    #maximum value
    max = np.max(spot)
    peakrange = (0.9 * max, 1.7 * max)
    sum = np.sum(spot)

    print 'Maximum Value:', max
    print 'Sum of the values:', sum
    print 'Peak Range:', peakrange

    #MCMC based fitting
    print 'Bayesian Model Fitting...'
    nwalkers = 1000

    # Initialize the sampler with the chosen specs.
    #Create the coordinates x and y
    x = np.arange(0, spot.shape[1])
    y = np.arange(0, spot.shape[0])
    #Put the coordinates in a mesh
    xx, yy = np.meshgrid(x, y)

    #Flatten the arrays
    xx = xx.flatten()
    yy = yy.flatten()

    print 'Fitting full model...'
    ndim = 7

    #Choose an initial set of positions for the walkers - fairly large area not to bias the results
    p0 = np.zeros((nwalkers, ndim))
    #peak, center_x, center_y, radius, focus, width_x, width_y = theta
    p0[:, 0] = np.random.normal(max, max / 100., size=nwalkers)  # peak value
    p0[:, 1] = np.random.normal(p.x_mean.value, 0.1, size=nwalkers)  # x
    p0[:, 2] = np.random.normal(p.y_mean.value, 0.1, size=nwalkers)  # y
    print 'Using initial guess [radius, focus, width_x, width_y]:', [
        1.5, 0.6, 0.02, 0.03
    ]
    p0[:, 3] = np.random.normal(1.5, 0.01, size=nwalkers)  # radius
    p0[:, 4] = np.random.normal(0.6, 0.01, size=nwalkers)  # focus
    p0[:, 5] = np.random.normal(0.02, 0.0001, size=nwalkers)  # width_x
    p0[:, 6] = np.random.normal(0.03, 0.0001, size=nwalkers)  # width_y

    #initiate sampler
    pool = Pool(
        cores
    )  #A hack Dan gave me to not have ghost processes running as with threads keyword
    #sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[xx, yy, data, var, peakrange, spot.shape],
    sampler = emcee.EnsembleSampler(
        nwalkers,
        ndim,
        log_posterior,
        args=[xx, yy, data, rn**2, peakrange, spot.shape],
        pool=pool)

    # Run a burn-in and set new starting position
    print "Burning-in..."
    pos, prob, state = sampler.run_mcmc(p0, burn)
    maxprob_index = np.argmax(prob)
    params_fit = pos[maxprob_index]
    print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)
    print 'Estimate:', params_fit
    sampler.reset()

    print "Running MCMC..."
    pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)
    print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)

    #Get the index with the highest probability
    maxprob_index = np.argmax(prob)

    #Get the best parameters and their respective errors and print best fits
    params_fit = pos[maxprob_index]
    errors_fit = [sampler.flatchain[:, i].std() for i in xrange(ndim)]
    _printResults(params_fit, errors_fit)

    #Best fit model
    peak, center_x, center_y, radius, focus, width_x, width_y = params_fit
    amplitude = _amplitudeFromPeak(peak,
                                   center_x,
                                   center_y,
                                   radius,
                                   x_0=CCDx,
                                   y_0=CCDy)
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(xx, yy, amplitude, center_x, center_y,
                      radius).reshape(spot.shape)
    f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
    focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus,
                       0.).reshape(spot.shape)
    foc = signal.convolve2d(adata, focusdata, mode='same')
    CCDdata = np.array(
        [[0.0, width_y, 0.0],
         [width_x, (1. - width_y - width_y - width_x - width_x), width_x],
         [0.0, width_y, 0.0]])
    fileIO.writeFITS(CCDdata, 'kernel.fits', int=False)
    model = signal.convolve2d(foc, CCDdata, mode='same')
    #save model
    fileIO.writeFITS(model, out + 'model.fits', int=False)

    #residuals
    fileIO.writeFITS(model - spot, out + 'residual.fits', int=False)
    fileIO.writeFITS(((model - spot)**2 / var.reshape(spot.shape)),
                     out + 'residualSQ.fits',
                     int=False)

    # a simple goodness of fit
    gof = (1. / (np.size(data) - ndim)) * np.sum(
        (model.flatten() - data)**2 / var)
    maxdiff = np.max(np.abs(model - spot))
    print 'GoF:', gof, ' Maximum difference:', maxdiff
    if maxdiff > 2e3 or gof > 4.:
        print '\nFIT UNLIKELY TO BE GOOD...\n'
    print 'Amplitude estimate:', amplitude

    #plot
    samples = sampler.chain.reshape((-1, ndim))
    extents = None
    if simulation:
        extents = [(0.91 * truth, 1.09 * truth) for truth in truths]
        extents[1] = (truths[1] * 0.995, truths[1] * 1.005)
        extents[2] = (truths[2] * 0.995, truths[2] * 1.005)
        extents[3] = (0.395, 0.425)
        extents[4] = (0.503, 0.517)
        truths[0] = _peakFromTruth(truths)
        print truths
    fig = triangle.corner(
        samples,
        labels=['peak', 'x', 'y', 'radius', 'focus', 'width_x', 'width_y'],
        truths=truths)  #, extents=extents)
    fig.savefig(out + 'Triangle.png')
    plt.close()
    pool.close()
Example No. 18
data = hdu[0].data.squeeze()
w = WCS(hdu[0].header, naxis=2)
y, x = np.mgrid[:data.shape[0], :data.shape[1]]

# In[17]:

# Fit the data using a Gaussian
g_init = models.Gaussian2D(amplitude=1.,
                           x_mean=data.shape[0] // 2,
                           y_mean=data.shape[1] // 2,
                           x_stddev=1.,
                           y_stddev=1.)
g_init.amplitude.fixed = True
fit_g = fitting.LevMarLSQFitter()
g_air = models.AiryDisk2D(amplitude=1.,
                          x_0=data.shape[0] // 2,
                          y_0=data.shape[1] // 2,
                          radius=1)
g_air.amplitude.fixed = True
g = fit_g(g_init, x, y, data, weights=(data / data.max())**2)
g_air = fit_g(g_air, x, y, data, weights=(data / data.max())**2)

# In[18]:

fig = plt.figure(figsize=(27, 10))
gs = GridSpec(nrows=1, ncols=4, wspace=0.05)
ax = fig.add_subplot(gs[0], projection=w)
ax.imshow(data,
          origin='lower',
          interpolation='nearest',
          vmin=0,
          vmax=1,
Example No. 19
def forwardModel(file, out='Data', gain=3.1, size=10, burn=20, spotx=2888, spoty=3514, run=50,
                 simulation=False, truths=None):
    """
    Forward models the spot data found from the input file. Can be used with simulated and real data.

    Notes:
    - emcee is run three times, as it is important to have a good starting point for the final run.
    - It is very important to have the amplitude well estimated, otherwise it is difficult to get good parameter estimates.
    """
    print '\n\n\n'
    print '_'*120
    print 'Processing:', file
    #get data and convert to electrons
    o = pf.getdata(file)*gain

    if simulation:
        data = o
    else:
        #roughly the correct location - to avoid identifying e.g. cosmic rays
        data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy()

    #maximum position within the cutout
    y, x = m.maximum_position(data)

    #spot and the peak pixel within the spot, this is also the CCD kernel position
    spot = data[y-size:y+size+1, x-size:x+size+1].copy()
    CCDy, CCDx = m.maximum_position(spot)
    print 'CCD Kernel Position (within the postage stamp):', CCDx, CCDy

    #bias estimate
    if simulation:
        bias = 9000.
        rn = 4.5
    else:
        bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) #works for read o
        rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20])

    print 'Readnoise (e):', rn
    if rn < 2. or rn > 6.:
        print 'NOTE: suspicious readout noise estimate...'
    print 'ADC offset (e):', bias

    #remove bias
    spot -= bias

    #save to file
    fileIO.writeFITS(spot, out+'small.fits', int=False)

    #make a copy to generate the error array
    data = spot.copy().flatten()
    data[data + rn**2 < 0.] = 0.  #set highly negative values to zero
    #assume errors scale as sqrt of the values + readnoise
    #sigma = np.sqrt(data/gain + rn**2)
    var = data.copy() + rn**2

    #maximum value
    max = np.max(spot)
    print 'Maximum Value:', max

    #MCMC based fitting
    print 'Bayesian Fitting...'
    ndim = 7
    nwalkers = 1000

    #Choose an initial set of positions for the walkers - fairly large area not to bias the results
    #amplitude, center_x, center_y, radius, focus, width_x, width_y = theta
    p0 = np.zeros((nwalkers, ndim))
    p0[:, 0] = np.random.uniform(max, 2.*max, size=nwalkers)     # amplitude
    p0[:, 1] = np.random.uniform(7., 14., size=nwalkers)         # x
    p0[:, 2] = np.random.uniform(7., 14., size=nwalkers)         # y
    p0[:, 3] = np.random.uniform(.1, 1., size=nwalkers)          # radius
    p0[:, 4] = np.random.uniform(.1, 1., size=nwalkers)          # focus
    p0[:, 5] = np.random.uniform(.1, 0.5, size=nwalkers)         # width_x
    p0[:, 6] = np.random.uniform(.1, 0.5, size=nwalkers)         # width_y

    # Initialize the sampler with the chosen specs.
    #Create the coordinates x and y
    x = np.arange(0, spot.shape[1])
    y = np.arange(0, spot.shape[0])
    #Put the coordinates in a mesh
    xx, yy = np.meshgrid(x, y)

    #Flatten the arrays
    xx = xx.flatten()
    yy = yy.flatten()

    #initiate sampler
    pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[xx, yy, data, var], pool=pool)

    # Run a burn-in and set new starting position
    print "Burning-in..."
    pos, prob, state = sampler.run_mcmc(p0, burn)
    best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]
    pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)
    # Reset the chain to remove the burn-in samples.
    sampler.reset()

    # Starting from the final position in the burn-in chain
    print "Running MCMC..."
    pos, prob, state = sampler.run_mcmc(pos, burn)
    sampler.reset()
    pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)

    # Print out the mean acceptance fraction
    print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)

    #Get the index with the highest probability
    maxprob_index = np.argmax(prob)

    #Get the best parameters and their respective errors and print best fits
    params_fit = pos[maxprob_index]
    errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]
    amplitudeE, center_xE, center_yE, radiusE, focusE, width_xE, width_yE = errors_fit
    _printResults(params_fit, errors_fit)

    #Best fit model
    amplitude, center_x, center_y, radius, focus, width_x, width_y = params_fit
    airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
    adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape)
    f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
    focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape)
    foc = signal.convolve2d(adata, focusdata, mode='same')
    CCD = models.Gaussian2D(1., CCDx, CCDy, width_x, width_y, 0.)
    CCDdata = CCD.eval(xx, yy, 1., CCDx, CCDy, width_x, width_y, 0.).reshape(spot.shape)
    model = signal.convolve2d(foc, CCDdata, mode='same')
    #save model
    fileIO.writeFITS(model, out+'model.fits', int=False)

    #residuals
    fileIO.writeFITS(model - spot, out+'residual.fits', int=False)
    fileIO.writeFITS(((model - spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False)

    # a simple goodness of fit
    gof = (1./(np.size(data) - ndim)) * np.sum((model.flatten() - data)**2 / var)
    print 'GoF:', gof, ' Maximum difference:', np.max(np.abs(model - spot))

    #results and save results
    _printFWHM(width_x, width_y, errors_fit[5], errors_fit[6])
    res = dict(wx=width_x, wy=width_y, wxerr=width_xE, wyerr=width_yE, out=out,
               peakvalue=max, CCDmodel=CCD, CCDmodeldata=CCDdata, GoF=gof)
    fileIO.cPickleDumpDictionary(res, out+'.pkl')

    #plot
    samples = sampler.chain.reshape((-1, ndim))
    extents = None
    if simulation:
        extents = [(0.91*truth, 1.09*truth) for truth in truths]
        extents[1] = (truths[1]*0.995, truths[1]*1.005)
        extents[2] = (truths[2]*0.995, truths[2]*1.005)
        extents[3] = (0.395, 0.425)
        extents[4] = (0.503, 0.517)
    fig = triangle.corner(samples,
                          labels=['amplitude', 'x', 'y', 'radius', 'focus', 'width_x', 'width_y'],
                          truths=truths)#, extents=extents)
    fig.savefig(out+'Triangle.png')

    pool.close()
Example No. 20
def create_synth_psf(model='gauss',
                     shape=(9, 9),
                     amplitude=1,
                     x_mean=None,
                     y_mean=None,
                     fwhm=4,
                     theta=0,
                     gamma=None,
                     alpha=1.5,
                     radius=None,
                     msdi=False):
    """ Creates a synthetic 2d or 3d PSF with a 2d model: Airy disk, Gaussian or
    Moffat, depending on ``model``.

    Parameters
    ----------
    model : {'gauss', 'moff', 'airy'}, str optional
        Model to be used to create the synthetic PSF.
    shape : tuple of ints, optional
        Shape of the output 2d array.
    amplitude : float, optional
        Value of the amplitude of the 2d distribution.
    x_mean : float or None, optional
        Value of the centroid in X of the distributions: the mean of the
        Gaussian or the location of the maximum of the Moffat or Airy disk
        models. If None, the centroid is placed at the center of the array.
    y_mean : float or None, optional
        Value of the centroid in Y of the distributions: the mean of the
        Gaussian or the location of the maximum of the Moffat or Airy disk
        models. If None, the centroid is placed at the center of the array.
    fwhm : float, tuple of floats, list or np.ndarray, optional
        FWHM of the model in pixels. For the Gaussian case, it controls the
        standard deviation of the Gaussian. If a tuple is given, then the
        Gaussian will be elongated (fwhm in x, fwhm in y). For the Moffat, it is
        related to the gamma and alpha parameters. For the Airy disk, it is
        related to the radius (of the first zero) parameter. If ``msdi`` is True
        then ``fwhm`` must be a list of 1d np.ndarray (for example for
        SPHERE/IFS this sounds like a reasonable FWHM: np.linspace(4.5,6.7,39)).
    theta : float, optional
        Rotation angle in degrees of the Gaussian.
    gamma : float or None, optional
        Gamma parameter of core width of the Moffat model. If None, then it is
        calculated to correspond to the given ``fwhm``.
    alpha : float, optional
        Power index of the Moffat model.
    radius : float or None, optional
        The radius of the Airy disk (radius of the first zero). If None, then it
        is calculated to correspond to the given ``fwhm``.
    msdi : bool, optional
        Creates a 3d PSF, for emulating an IFS PSF.

    Returns
    -------
    im : numpy ndarray
        2d array with given ``shape`` and containing the synthetic PSF.

    Notes
    -----
    http://docs.astropy.org/en/stable/api/astropy.modeling.functional_models.Gaussian2D.html
    http://docs.astropy.org/en/stable/api/astropy.modeling.functional_models.Moffat2D.html
    http://docs.astropy.org/en/stable/api/astropy.modeling.functional_models.AiryDisk2D.html

    https://www.gnu.org/software/gnuastro/manual/html_node/PSF.html
    web.ipac.caltech.edu/staff/fmasci/home/astro_refs/PSFtheory.pdf
    web.ipac.caltech.edu/staff/fmasci/home/astro_refs/PSFsAndSampling.pdf
    """
    # 2d case
    if not msdi:
        sizex, sizey = shape
        if x_mean is None or y_mean is None:
            y_mean, x_mean = frame_center(np.zeros((sizey, sizex)))
        x = np.arange(sizex)
        y = np.arange(sizey)
        x, y = np.meshgrid(x, y)

        if model == 'gauss':
            if np.isscalar(fwhm):
                fwhm_y = fwhm
                fwhm_x = fwhm
            else:
                fwhm_x, fwhm_y = fwhm
            gauss = models.Gaussian2D(amplitude=amplitude,
                                      x_mean=x_mean,
                                      y_mean=y_mean,
                                      x_stddev=fwhm_x * gaussian_fwhm_to_sigma,
                                      y_stddev=fwhm_y * gaussian_fwhm_to_sigma,
                                      theta=np.deg2rad(theta))
            im = gauss(x, y)
        elif model == 'moff':
            if gamma is None and fwhm is not None:
                gamma = fwhm / (2. * np.sqrt(2**(1 / alpha) - 1))
            moffat = models.Moffat2D(amplitude=amplitude,
                                     x_0=x_mean,
                                     y_0=y_mean,
                                     gamma=gamma,
                                     alpha=alpha)
            im = moffat(x, y)
        elif model == 'airy':
            if radius is None and fwhm is not None:
                diam_1st_zero = (fwhm * 2.44) / 1.028
                radius = diam_1st_zero / 2.
            airy = models.AiryDisk2D(amplitude=amplitude,
                                     x_0=x_mean,
                                     y_0=y_mean,
                                     radius=radius)
            im = airy(x, y)
        return im
    # 3d case
    else:
        if np.isscalar(fwhm):
            raise ValueError('`Fwhm` must be a 1d vector')

        cube = []
        for fwhm_i in fwhm:
            cube.append(
                create_synth_psf(model, shape, amplitude, x_mean, y_mean,
                                 fwhm_i, theta, gamma, alpha, radius))
        cube = np.array(cube)
        return cube
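
For the 'airy' branch, the FWHM is converted to the radius of the first zero via radius = (fwhm * 2.44) / 1.028 / 2. A standalone sketch of that branch, with the package's `frame_center` helper replaced by an explicit (shape - 1) / 2 centring:

# Standalone sketch of the 'airy' branch above.
import numpy as np
from astropy.modeling import models

fwhm = 4.0
radius = (fwhm * 2.44) / 1.028 / 2.      # radius of the first Airy zero
shape = (9, 9)
y, x = np.mgrid[:shape[0], :shape[1]]
cy, cx = (shape[0] - 1) / 2., (shape[1] - 1) / 2.

psf = models.AiryDisk2D(amplitude=1., x_0=cx, y_0=cy, radius=radius)(x, y)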
Example No. 21
def fit_2D_Airy(box,
                center=None,
                fixed_center=False,
                max_center_offset=None,
                radius=None,
                zoom_factor=1.0,
                mask=None,
                amplitude=None):
    """
    This function ...
    :param box:
    :param center:
    :param fixed_center:
    :param deviation_center:
    :param radius:
    :param x_shift:
    :param y_shift:
    :param zoom_factor:
    :param mask:
    :return:
    """

    # Get the dimensions of the box
    box_ysize = box.shape[0]
    box_xsize = box.shape[1]

    # Set the initial guess for the center of the model (the one that is specified, otherwise the center of the box)
    init_x0 = center.x if center is not None else 0.5 * (box_xsize - 1)
    init_y0 = center.y if center is not None else 0.5 * (box_ysize - 1)

    # Set the initial radius for the model (the one that is specified, otherwise one tenth of the width of the box)
    init_radius = radius if radius is not None else 0.1 * box_xsize

    # Initialize an empty dictionary to specify fixed parameters
    fixed_parameters = {}

    if fixed_center:

        fixed_parameters['x_0'] = True
        fixed_parameters['y_0'] = True

    # Initialize an empty dictionary to specify bounds
    bounds = {}

    if max_center_offset is not None:

        # AiryDisk2D names its center parameters x_0 and y_0
        bounds['x_0'] = [
            init_x0 - max_center_offset, init_x0 + max_center_offset
        ]
        bounds['y_0'] = [
            init_y0 - max_center_offset, init_y0 + max_center_offset
        ]

    # Fit the data using astropy.modeling
    airy_init = models.AiryDisk2D(amplitude=1.,
                                  x_0=init_x0,
                                  y_0=init_y0,
                                  radius=init_radius,
                                  fixed=fixed_parameters,
                                  bounds=bounds)
    fit_model = fitting.LevMarLSQFitter()

    x_values = []
    y_values = []
    z_values = []

    for x in range(box_xsize):
        for y in range(box_ysize):

            # If no mask is specified or the pixel is not masked, add the coordinates and value to the appropriate lists
            if mask is None or not mask[y, x]:

                x_values.append(x)
                y_values.append(y)
                z_values.append(box[y, x])

    # Ignore model linearity warning from the fitter
    with warnings.catch_warnings():

        warnings.simplefilter('ignore')
        airy = fit_model(
            airy_init, x_values, y_values,
            z_values)  # What comes out is the model with the parameters set

    # Adjust the position of the model to a different coordinate frame
    if zoom_factor > 1.0:

        airy.x_0.value = airy.x_0.value / zoom_factor
        airy.y_0.value = airy.y_0.value / zoom_factor
        airy.radius /= zoom_factor

    # Return the fitted two-dimensional Airy Disk model
    return airy
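
A sketch of calling this routine on a synthetic box; passing center=None sidesteps the project-specific position object expected by `center` (the names used inside the function, such as `models`, `fitting`, and `warnings`, are assumed to be available at module level):

# Sketch: fit a synthetic box with fit_2D_Airy (module-level imports assumed).
import numpy as np
from astropy.modeling import models

yy, xx = np.mgrid[:30, :30]
box = models.AiryDisk2D(amplitude=50., x_0=14.2, y_0=15.1, radius=3.)(xx, yy)
box = box + np.random.normal(0., 0.1, box.shape)

airy_fit = fit_2D_Airy(box)
print(airy_fit.x_0.value, airy_fit.y_0.value, airy_fit.radius.value)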
Example No. 22
def autocorrelation_fit(im2, rough_sep = 44.93, rough_pa=0., cutout_sz=5, background_subtracted=False,
             plot_cutout=False, plot_resids=False):
    """ Fits to the distance of the second peak in the autocorrelation (i.e. the binary separation).
        This will perform a Levenberg-Marquardt fit over a small, fixed region around the peak. 
        The model used for the fit will depend on the options set.
        
        rough_sep, rough_pa: Defaults 44.93 pixels and 0 radians
            These define the position of the region used for the fit (rough_pa is used in radians).
        cutout_sz: Default 5 pixels
            The radius of the box used for the fit.
        background_subtracted: Default False
            If True, the model used for the fit will be an Airy Disk + constant background.
            If False, the model will be a Gaussian + planar background.
            These seemed to work best.
    """
    
    rough_pos = np.round([rough_sep*np.sin(rough_pa),rough_sep*np.cos(rough_pa)]).astype(int)
    calc_pos = [rough_sep*np.sin(rough_pa),rough_sep*np.cos(rough_pa)]
    
    cutout = np.copy(im2[im2.shape[0]//2+rough_pos[0]-cutout_sz:im2.shape[0]//2+rough_pos[0]+cutout_sz,
                 im2.shape[1]//2+rough_pos[1]-cutout_sz:im2.shape[1]//2+rough_pos[1]+cutout_sz])

    cutout /= np.max(cutout)
    if plot_cutout:
        plt.imshow(cutout)

    x,y = np.indices(cutout.shape)
    x = x + rough_pos[0] - cutout_sz
    y = y + rough_pos[1] - cutout_sz
    
    # Set up the fitter (the model below is an Airy disk or a Gaussian, depending on background_subtracted)
    fit = fitting.LevMarLSQFitter()
    
    if background_subtracted:
        gauss = models.AiryDisk2D(amplitude=cutout.max(),x_0=calc_pos[0],y_0=calc_pos[1],radius=3.54)        
        bckgd = models.Const2D(amplitude=0.6)
    else:
        gauss = models.Gaussian2D(amplitude = cutout.max(),x_stddev=1.36,y_stddev=1.36,
                                  x_mean=calc_pos[0],y_mean=calc_pos[1])

        bckgd = models.Planar2D(slope_x = -0.00564816,slope_y=-0.02378304,intercept=1.01)
        gauss.fixed['theta']=True
        
    cutout_model = gauss + bckgd

    # fit the data with the fitter
    fitted_model = fit(cutout_model,x,y,cutout,maxiter=100000,acc=1e-7);
    
    # Rename the parameters so the output looks the same
    if background_subtracted:
        fitted_model.x_mean_0 = fitted_model.x_0_0
        fitted_model.y_mean_0 = fitted_model.y_0_0
        
    if plot_resids:
        test = fitted_model(x,y)
        plt.figure(figsize=(12,4))
        plt.clf()
        plt.subplot(131)
        plt.imshow(cutout,origin='lower',vmin=0.7,vmax=1);plt.colorbar()
        plt.subplot(132)
        plt.imshow(test,origin='lower',vmin=0.7,vmax=1);plt.colorbar()
        plt.subplot(133)
        plt.imshow(cutout-test,origin='lower',vmin=-0.05,vmax=0.05);plt.colorbar()
    return fitted_model
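
A sketch of feeding this routine an autocorrelation built with FFTs from a synthetic binary image (the separation, flux ratio and Airy radius below are illustrative assumptions):

# Sketch: synthetic binary -> FFT autocorrelation -> fit of the secondary peak.
import numpy as np
from astropy.modeling import models

yy, xx = np.mgrid[:128, :128]
im = (models.AiryDisk2D(1., 64., 64., 3.)(xx, yy) +
      0.3 * models.AiryDisk2D(1., 64. + 45., 64., 3.)(xx, yy))   # companion 45 px along x

# Autocorrelation = inverse FFT of the power spectrum, shifted to the centre
im2 = np.fft.fftshift(np.abs(np.fft.ifft2(np.abs(np.fft.fft2(im)) ** 2)))

fitted = autocorrelation_fit(im2, rough_sep=45., rough_pa=0., background_subtracted=True)
print(fitted.parameters)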
Example No. 23
def larkin_transform_fit(params,transformed_im,plot=True,fit_airy=False,rough_sep_pix = 45):
    """ Function to fit to the Larkin Transform of the image.
    params = [x0, y0, flux0, width0, sep, pa, flux1, width1]
    x0, y0 = position of primary relative to the central pixel
    flux0, flux1 = flux of primary and secondary
    width0,width1 = width of model for the primary and secondary
    sep,pa = separation and position angle of the binary
    
    transformed_im = larkin transformed image
    rough_sep_pix = approximate separation of the binary. Used for the fixed windowing, not the fit.
    
    plot: If set, will display a plot of the primary, model and residuals, and secondary, model and residuals
    fit_airy: If False (default), will fit sqrt(AiryDisk) to the data. If True, will fit AiryDisk.
    
    returns:
        [primary_fit,secondary_fit] astropy models
    """
    x0,y0,flux0,width0,sep,pa,flux1,width1 = params
    
    rough_sep_pix = int(round(rough_sep_pix))
    
    im_shape = transformed_im.shape
    y,x = np.indices(im_shape)
    y -= im_shape[0]//2
    x -= im_shape[1]//2
    
    x1 = x0 + sep*np.cos(pa*np.pi/180.)
    y1 = y0 + sep*np.sin(pa*np.pi/180.)

    fit = fitting.LevMarLSQFitter()
    
    if fit_airy:
        prim = models.AiryDisk2D(amplitude=flux0,x_0=x0,y_0=y0,radius=width0)
        sec = models.AiryDisk2D(amplitude=flux1,x_0=x1,y_0=y1,radius=width1)
    else:
        # Sqrt(AiryDisk), i.e. J1(r)/r
        prim = larkin_model(amplitude=flux0,x_0=x0,y_0=y0,radius=width0,background=0.)
        sec = larkin_model(amplitude=flux1,x_0=x1,y_0=y1,radius=width1,background=0.)

    
    # Fit the primary and secondary separately
    # Cut out a region
    cutsz = 6 # was 4
    cut_prim_im = transformed_im[im_shape[0]//2-cutsz:im_shape[0]//2+cutsz,
                                im_shape[1]//2-cutsz:im_shape[1]//2+cutsz]
    x_prim = x[im_shape[0]//2-cutsz:im_shape[0]//2+cutsz,
               im_shape[1]//2-cutsz:im_shape[1]//2+cutsz]
    y_prim = y[im_shape[0]//2-cutsz:im_shape[0]//2+cutsz,
               im_shape[1]//2-cutsz:im_shape[1]//2+cutsz]
    prim_fit = fit(prim,x_prim,y_prim,cut_prim_im,maxiter=100000,acc=1e-9)
    
    cut_sec_im = transformed_im[im_shape[0]//2-cutsz:im_shape[0]//2+cutsz,
                                im_shape[1]//2-cutsz+rough_sep_pix:im_shape[1]//2+cutsz+rough_sep_pix]
    x_sec = x[im_shape[0]//2-cutsz:im_shape[0]//2+cutsz,
               im_shape[1]//2-cutsz+rough_sep_pix:im_shape[1]//2+cutsz+rough_sep_pix]
    y_sec = y[im_shape[0]//2-cutsz:im_shape[0]//2+cutsz,
               im_shape[1]//2-cutsz+rough_sep_pix:im_shape[1]//2+cutsz+rough_sep_pix]
    sec_fit = fit(sec,x_sec,y_sec,cut_sec_im,maxiter=100000,acc=1e-9)

    
    if plot:
#         plt.imshow(model(x,y))
        m1 = prim_fit(x_prim,y_prim)
        m2 = sec_fit(x_sec,y_sec)
        
        plt.subplot(231)
        plt.imshow(cut_prim_im,origin='lower')
        plt.subplot(232)
        plt.imshow(m1,origin='lower')
        plt.subplot(233)
        plt.imshow(cut_prim_im-m1,origin='lower',vmin=-0.05,vmax=0.05)
        
        plt.subplot(234)
        plt.imshow(cut_sec_im,origin='lower')
        plt.subplot(235)
        plt.imshow(m2,origin='lower')
        plt.subplot(236)
        plt.imshow(cut_sec_im-m2,origin='lower',vmin=-0.05,vmax=0.05)
        print(prim_fit.parameters)
        print(sec_fit.parameters)
    
    sep = np.sqrt((prim_fit.x_0-sec_fit.x_0)**2 + (prim_fit.y_0-sec_fit.y_0)**2)

#     return sep
    return prim_fit,sec_fit
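
The larkin_model used above (the J1(r)/r profile, i.e. the square root of the Airy intensity pattern) is defined elsewhere in the source. Purely as an illustrative sketch, such a profile could be written with astropy's custom_model along these lines (a hypothetical stand-in, not the original implementation):

import numpy as np
from scipy.special import j1
from astropy.modeling import custom_model

@custom_model
def sqrt_airy(x, y, amplitude=1., x_0=0., y_0=0., radius=1., background=0.):
    """2*J1(r)/r profile, i.e. the square root of the Airy intensity pattern (array inputs)."""
    rz = 1.2196698912665045  # first zero of J1(pi*x) divided by pi, as in AiryDisk2D
    r = np.pi * np.hypot(x - x_0, y - y_0) * rz / radius
    z = np.ones_like(r, dtype=float)
    mask = r > 0
    z[mask] = 2. * j1(r[mask]) / r[mask]
    return amplitude * z + background

# e.g. prim = sqrt_airy(amplitude=flux0, x_0=x0, y_0=y0, radius=width0, background=0.)
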
Ejemplo n.º 24
0
    def __init__(self, radius, **kwargs):
        self._model = models.AiryDisk2D(1, 0, 0, radius)
        self._default_size = _round_up_to_odd_integer(8 * radius)
        super().__init__(**kwargs)
        self.normalize()
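
This constructor follows the usual discrete-kernel pattern: evaluate a unit-amplitude AiryDisk2D on an odd-sized grid roughly eight radii across, then normalize to unit sum. A standalone sketch of the same idea (independent of the surrounding class):

import numpy as np
from astropy.modeling import models

def airy_kernel_array(radius):
    """Unit-sum Airy-disk kernel on an odd-sized grid about 8*radius across."""
    size = int(np.ceil(8 * radius)) // 2 * 2 + 1   # round up to an odd integer
    half = size // 2
    y, x = np.mgrid[-half:half + 1, -half:half + 1]
    kern = models.AiryDisk2D(1, 0, 0, radius)(x, y)
    return kern / kern.sum()
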
Ejemplo n.º 25
0
def forwardModelJointFit(files, out, wavelength, gain=3.1, size=10, burn=50, run=100,
                         spotx=2888, spoty=3514, simulated=False, truths=None):
    """
    Forward models the spot data found in the input files. All data are modelled simultaneously, so the Airy
    disc centroid is allowed to shift from file to file, while the spot intensity, focus, and the CCD PSF kernel
    are assumed to be the same for each file. Can be used with simulated and real data.
    """
    print('\n\n\n')
    print('_' * 120)

    images = len(files)
    orig = []
    image = []
    noise = []
    peakvalues = []
    for file in files:
        print(file)
        #get data and convert to electrons
        o = pf.getdata(file)*gain

        if simulated:
            data = o
        else:
            #roughly the correct location - to avoid identifying e.g. cosmic rays
            data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy()

        #maximum position within the cutout
        y, x = m.maximum_position(data)

        #spot and the peak pixel within the spot, this is also the CCD kernel position
        spot = data[y-size:y+size+1, x-size:x+size+1].copy()
        orig.append(spot.copy())

        #bias estimate
        if simulated:
            bias = 9000.
            rn = 4.5
        else:
            bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20])
            rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20])

        print('Readnoise (e):', rn)
        if rn < 2. or rn > 6.:
            print('NOTE: suspicious readout noise estimate...')
        print('ADC offset (e):', bias)

        #remove bias
        spot -= bias

        #set highly negative values to zero
        spot[spot + rn**2 < 0.] = 0.

        peak = np.max(spot)
        print('Maximum Value:', peak)
        peakvalues.append(peak)

        #noise model
        variance = spot.copy() + rn**2

        #save to a list
        image.append(spot)
        noise.append(variance)

    #sanity check: try to verify that all the files in the fit are from the same dataset
    if np.std(peakvalues) > 5*np.sqrt(np.median(peakvalues)):
        #check for more than 5sigma outliers, however, this is very sensitive to the centroiding of the spot...
        print('POTENTIAL OUTLIER, please check the input files...')
        print(np.std(peakvalues), 5*np.sqrt(np.median(peakvalues)))

    #MCMC based fitting
    ndim = 2*images + 5  #xpos, ypos for each image and single amplitude, radius, focus, and sigmaX and sigmaY
    nwalkers = 1000
    print('Bayesian Fitting, model has %i dimensions' % ndim)

    # Choose an initial set of positions for the walkers using the Gaussian fit
    p0 = np.zeros((nwalkers, ndim))
    for x in range(images):
        p0[:, 2*x] = np.random.uniform(7., 14., size=nwalkers)      # x
        p0[:, 2*x+1] = np.random.uniform(7., 14., size=nwalkers)    # y
    p0[:, -5] = np.random.uniform(peak, 2.*peak, size=nwalkers)     # amplitude
    p0[:, -4] = np.random.uniform(.1, 1., size=nwalkers)            # radius
    p0[:, -3] = np.random.uniform(.1, 1., size=nwalkers)            # focus
    p0[:, -2] = np.random.uniform(.1, 0.5, size=nwalkers)           # width_x
    p0[:, -1] = np.random.uniform(.1, 0.5, size=nwalkers)           # width_y

    # Initialize the sampler with the chosen specs.
    #Create the coordinates x and y
    x = np.arange(0, spot.shape[1])
    y = np.arange(0, spot.shape[0])
    #Put the coordinates in a mesh
    xx, yy = np.meshgrid(x, y)

    #Flatten the arrays
    xx = xx.flatten()
    yy = yy.flatten()

    #initiate sampler
    pool = Pool(7) #A hack Dan gave me to not have ghost processes running as with threads keyword
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posteriorJoint, args=[xx, yy, image, noise], pool=pool)

    # Run a burn-in and set new starting position
    print "Burning-in..."
    pos, prob, state = sampler.run_mcmc(p0, burn)
    best_pos = sampler.flatchain[sampler.flatlnprobability.argmax()]
    pos = emcee.utils.sample_ball(best_pos, best_pos/100., size=nwalkers)
    # Reset the chain to remove the burn-in samples.
    sampler.reset()

    # Starting from the final position in the burn-in chain
    print "Running MCMC..."
    pos, prob, state = sampler.run_mcmc(pos, burn)
    sampler.reset()
    pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)

    # Print out the mean acceptance fraction
    print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)

    #Get the index with the highest probability
    maxprob_index = np.argmax(prob)

    #Get the best parameters and their respective errors and print best fits
    params_fit = pos[maxprob_index]
    errors_fit = [sampler.flatchain[:, i].std() for i in range(ndim)]
    print(params_fit)

    #unpack the fixed parameters
    amplitude, radius, focus, width_x, width_y = params_fit[-5:]
    amplitudeE, radiusE, focusE, width_xE, width_yE = errors_fit[-5:]

    #print results
    _printFWHM(width_x, width_y, width_xE, width_yE)

    #save the best models per file
    size = size*2 + 1
    gofs = []
    for index, file in enumerate(files):
        #path, file = os.path.split(file)
        id = 'test/' + out + str(index)
        #X and Y are always in pairs
        center_x = params_fit[2*index]
        center_y = params_fit[2*index+1]

        #1)Generate a model Airy disc
        airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
        adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape((size, size))

        #2)Apply Focus
        f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
        focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape((size, size))
        model = signal.convolve2d(adata, focusdata, mode='same')

        #3)Apply CCD diffusion, approximated with a Gaussian
        CCD = models.Gaussian2D(1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.)
        CCDdata = CCD.eval(xx, yy, 1., size/2.-0.5, size/2.-0.5, width_x, width_y, 0.).reshape((size, size))
        model = signal.convolve2d(model, CCDdata, mode='same')

        #save the data, model and residuals
        fileIO.writeFITS(orig[index], id+'data.fits', int=False)
        fileIO.writeFITS(image[index], id+'datafit.fits', int=False)
        fileIO.writeFITS(model, id+'model.fits', int=False)
        fileIO.writeFITS(model - image[index], id+'residual.fits', int=False)
        fileIO.writeFITS(((model - image[index])**2 / noise[index]), id+'residualSQ.fits', int=False)

        #a simple goodness of fit
        gof = (1./(np.size(image[index])*images - ndim)) * np.sum((model - image[index])**2 / noise[index])
        print('GoF:', gof, ' Max difference', np.max(np.abs(model - image[index])))
        gofs.append(gof)

    #save results
    res = dict(wx=width_x, wy=width_y, wxerr=width_xE, wyerr=width_yE, files=files, out=out,
               wavelength=wavelength, peakvalues=np.asarray(peakvalues), CCDmodel=CCD, CCDmodeldata=CCDdata,
               GoFs=gofs)
    fileIO.cPickleDumpDictionary(res, 'test/' + out + '.pkl')

    #plot
    samples = sampler.chain.reshape((-1, ndim))
    #extents = None
    #if simulated:
    #    extents = [(0.9*truth, 1.1*truth) for truth in truths]
    #    print extents
    fig = triangle.corner(samples, labels=['x', 'y']*images + ['amplitude', 'radius', 'focus', 'width_x', 'width_y'],
                          truths=truths)#, extents=extents)
    fig.savefig('test/' + out + 'Triangle.png')

    pool.close()
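
The joint parameter vector packs one (x, y) centre per image followed by the five shared parameters, which is why the code above indexes params_fit[2*index] and params_fit[-5:]. A hypothetical helper (not part of the original module) that makes the layout explicit:

def unpack_joint_params(theta, n_images):
    """Split a joint-fit vector into per-image centres and the shared parameters.

    Layout assumed above: [x_0, y_0, x_1, y_1, ..., amplitude, radius, focus, width_x, width_y]
    """
    centres = [(theta[2*i], theta[2*i + 1]) for i in range(n_images)]
    amplitude, radius, focus, width_x, width_y = theta[-5:]
    return centres, amplitude, radius, focus, width_x, width_y
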
Ejemplo n.º 26
0
def fit_2dairydisk(array,
                   crop=False,
                   cent=None,
                   cropsize=15,
                   fwhm=4,
                   threshold=False,
                   sigfactor=6,
                   full_output=True,
                   debug=True):
    """ Fitting a 2D Airy to the 2D distribution of the data.

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub image will be cropped equal to cropsize.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage.
        If None the center of the frame is used for cropping the subframe (the
        PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhm : float, optional
        Initial values for the FWHM of the fitted 2d Airy disk, in px.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Airy disk
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
        noise.
    full_output : bool, optional
        If False it returns just the centroid, if True it also returns the
        FWHM, the first-zero radius, the amplitude and their uncertainties.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    'amplitude' : Float value. Airy disk amplitude.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm' : Float value. FWHM [px].
    'radius' : Float value. Radius of the first dark ring [px].
    Each parameter also has a matching '_err' column (None when the fit
    covariance matrix could not be estimated).

    """
    check_array(array, dim=2, msg='array')

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              ceny,
                                              cenx,
                                              position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    # Creating the 2d Airy disk model
    init_amplitude = np.ptp(psf_subimage)
    xcom, ycom = cen_com(psf_subimage)
    diam_1st_zero = (fwhm * 2.44) / 1.028
    airy = models.AiryDisk2D(amplitude=init_amplitude,
                             x_0=xcom,
                             y_0=ycom,
                             radius=diam_1st_zero / 2.)
    # Levenberg-Marquardt algorithm
    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(airy, x, y, psf_subimage)

    if crop:
        mean_y = fit.y_0.value + suby
        mean_x = fit.x_0.value + subx
    else:
        mean_y = fit.y_0.value
        mean_x = fit.x_0.value

    amplitude = fit.amplitude.value
    radius = fit.radius.value
    fwhm = ((radius * 1.028) / 2.44) * 2

    # compute uncertainties
    if fitter.fit_info['param_cov'] is not None:
        perr = np.sqrt(np.diag(fitter.fit_info['param_cov']))
        amplitude_err, mean_x_err, mean_y_err, radius_err = perr
        fwhm_err = ((radius_err * 1.028) / 2.44) * 2
    else:
        amplitude_err, mean_x_err, mean_y_err = None, None, None
        radius_err, fwhm_err = None, None

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage - fit(x, y)),
                    grid=True,
                    grid_spacing=1,
                    label=label)
        print('FWHM =', fwhm)
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_0.value)
        print('centroid x subim =', fit.x_0.value, '\n')
        print('amplitude =', amplitude)
        print('radius =', radius)

    if full_output:
        return pd.DataFrame(
            {
                'centroid_y': mean_y,
                'centroid_x': mean_x,
                'fwhm': fwhm,
                'radius': radius,
                'amplitude': amplitude,
                'centroid_y_err': mean_y_err,
                'centroid_x_err': mean_x_err,
                'fwhm_err': fwhm_err,
                'radius_err': radius_err,
                'amplitude_err': amplitude_err
            },
            index=[0])
    else:
        return mean_y, mean_x
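
The FWHM and the first-zero radius used above are tied together by the Airy pattern: the FWHM is about 1.028 lambda/D while the first dark ring sits at 1.22 lambda/D, so the diameter of the first dark ring is 2.44 lambda/D. A quick round-trip check of that conversion:

fwhm = 4.0                                  # assumed FWHM in pixels
radius = (fwhm * 2.44) / 1.028 / 2.         # first-zero radius, as in the initial guess
fwhm_back = ((radius * 1.028) / 2.44) * 2.  # back-conversion, as applied to the fitted radius
assert abs(fwhm_back - fwhm) < 1e-12
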
Ejemplo n.º 27
0
def find_stars_wfc3ir(img_file, threshold=4, N_passes=2):
    print("Working on image: ", img_file)
    img = fits.getdata(img_file)

    # Calculate the background and noise (iteratively)
    print("\t Calculating background")
    bkg_threshold = 3
    for nn in range(5):
        if nn == 0:
            bkg_mean = img.mean()
            bkg_std = img.std()
        else:
            bkg_mean = img[good_pix].mean()
            bkg_std = img[good_pix].std()

        bad_hi = bkg_mean + (bkg_threshold * bkg_std)
        bad_lo = 0.0

        good_pix = np.where((img < bad_hi) & (img > bad_lo))

    bkg_mean = img[good_pix].mean()
    bkg_std = img[good_pix].std()
    img_threshold = threshold * bkg_std
    print('\t Bkg = {0:.2f} +/- {1:.2f}'.format(bkg_mean, bkg_std))
    print('\t Bkg Threshold = {0:.2f}'.format(img_threshold))

    # Detect stars
    print('\t Detecting Stars')
    radius_init_guess = 1.4
    fwhm = 2.0  # assumed starting FWHM (pix) for the first pass; refined below

    # Each pass will have an updated fwhm for the PSF.
    for nn in range(N_passes):
        print('\t Pass {0:d} assuming FWHM = {1:.1f}'.format(nn, fwhm))
        daofind = DAOStarFinder(fwhm=fwhm,
                                threshold=img_threshold,
                                exclude_border=True)
        sources = daofind(img - bkg_mean)

        # Calculate FWHM for each detected star.
        x_fwhm = np.zeros(len(sources), dtype=float)
        y_fwhm = np.zeros(len(sources), dtype=float)
        theta = np.zeros(len(sources), dtype=float)

        cutout_half_size = int(round(fwhm * 2))
        cutout_size = 2 * cutout_half_size

        cutouts = np.zeros((len(sources), cutout_size, cutout_size),
                           dtype=float)
        g2d_model = models.AiryDisk2D(1.0, cutout_half_size, cutout_half_size,
                                      radius_init_guess)
        g2d_fitter = fitting.LevMarLSQFitter()
        cut_y, cut_x = np.mgrid[:cutout_size, :cutout_size]

        for ss in range(len(sources)):
            x_lo = int(round(sources[ss]['xcentroid'] - cutout_half_size))
            x_hi = x_lo + cutout_size
            y_lo = int(round(sources[ss]['ycentroid'] - cutout_half_size))
            y_hi = y_lo + cutout_size

            cutout_tmp = img[y_lo:y_hi, x_lo:x_hi].astype(float)
            if ((cutout_tmp.shape[0] != cutout_size) |
                (cutout_tmp.shape[1] != cutout_size)):
                # Edge source... fitting is no good
                continue

            cutouts[ss] = cutout_tmp
            cutouts[ss] /= cutouts[ss].sum()

            # Fit the Airy disk model to the cutout image.
            g2d_params = g2d_fitter(g2d_model, cut_x, cut_y, cutouts[ss])

            # AiryDisk2D is circularly symmetric: convert the fitted first-zero
            # radius (1.22 lambda/D) to a FWHM (~1.028 lambda/D); no position angle.
            fwhm_fit = g2d_params.radius.value * 1.028 / 1.22
            x_fwhm[ss] = fwhm_fit
            y_fwhm[ss] = fwhm_fit
            theta[ss] = 0.0

        sources['x_fwhm'] = x_fwhm
        sources['y_fwhm'] = y_fwhm
        sources['theta'] = theta

        # Drop sources whose flux (significance) isn't good enough.
        # Empirically this is < 1.2
        good = np.where(sources['flux'] > 1.2)[0]
        sources = sources[good]

        x_fwhm_med = np.median(sources['x_fwhm'])
        y_fwhm_med = np.median(sources['y_fwhm'])

        print('\t    Number of sources = ', len(sources))
        print('\t    Median x_fwhm = {0:.1f} +/- {1:.1f}'.format(
            x_fwhm_med, sources['x_fwhm'].std()))
        print('\t    Median y_fwhm = {0:.1f} +/- {1:.1f}'.format(
            y_fwhm_med, sources['y_fwhm'].std()))

        fwhm = np.mean([x_fwhm_med, y_fwhm_med])

        formats = {
            'xcentroid': '%8.3f',
            'ycentroid': '%8.3f',
            'sharpness': '%.2f',
            'roundness1': '%.2f',
            'roundness2': '%.2f',
            'peak': '%10.1f',
            'flux': '%10.6f',
            'mag': '%6.2f',
            'x_fwhm': '%5.2f',
            'y_fwhm': '%5.2f',
            'theta': '%6.3f'
        }

        sources.write(img_file.replace('.fits', '_stars.txt'),
                      format='ascii.fixed_width',
                      delimiter=None,
                      bookend=False,
                      formats=formats)

    return
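
As a minimal, self-contained sketch of the detect-then-fit pattern above (synthetic data and assumed parameter values; DAOStarFinder returns None when nothing is detected):

import numpy as np
from astropy.modeling import models, fitting
from photutils.detection import DAOStarFinder

# Synthetic frame with a single Airy-disk source plus Gaussian noise.
y, x = np.mgrid[:64, :64]
img = models.AiryDisk2D(100., 30.6, 31.2, 1.7)(x, y) + np.random.normal(0., 1., x.shape)

sources = DAOStarFinder(fwhm=1.4, threshold=5.)(img - np.median(img))

# Refit the brightest detection with an AiryDisk2D to recover the first-zero radius.
s = sources[np.argmax(sources['flux'])]
airy_fit = fitting.LevMarLSQFitter()(
    models.AiryDisk2D(s['peak'], s['xcentroid'], s['ycentroid'], 1.4), x, y, img)
print(s['xcentroid'], s['ycentroid'], airy_fit.radius.value)
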