Example #1
File: imager.py Project: AlanLoh/nenupy-tv
    def make_psf(self, npix=None, fi=None):
        """ Make the PSF regarding the UV distribution

            :param npix:
                Size in pixels of the image
            :type npix: int, optional
            :param fi:
                Frequency index
            :type fi: int, optional
        """
        from astropy.modeling.models import Gaussian2D
        from astropy.modeling.fitting import LevMarLSQFitter

        print('\tMaking PSF')
        if fi is None:
            fi = self._fi
        if npix is None:
            npix = self.npix * 2
        na = np.newaxis

        # Average over time
        uvw = np.mean(self.uvw, axis=0)
        # Flatten the array
        uvw = uvw[:, self.array.tri_y, self.array.tri_x, :]
        # Transform UVW in lambdas units, take fi frequency
        uvw = uvw[fi, ...] / wavelength(self.freq[fi, na, na])

        # Prepare l, m grid
        lm_psf = np.cos(np.radians(90 - self.fov))
        l = np.linspace(-lm_psf, lm_psf, npix)
        m = np.linspace(lm_psf, -lm_psf, npix)
        lg, mg = np.meshgrid(l, m)

        # Make the TF of UV distribution
        u = uvw[..., 0][:, na, na]
        v = uvw[..., 1][:, na, na]

        # Slow
        # expo = np.exp(
        #     2j * np.pi * (u * lg[na, ...] + v * mg[na, ...])
        # )
        # Faster
        # lg = lg[na, ...]
        # mg = mg[na, ...]
        # pi = np.pi
        # expo = ne.evaluate('exp(2j*pi*(u*lg+v*mg))')
        # psf = np.real(
        #     np.mean(
        #         expo,
        #         axis=0
        #     )
        # )
        # Multiprocessing
        expo = mp_expo(npix, self.ncpus, lg, mg, u, v)
        psf = np.real(np.mean(expo, axis=0))
        self.psf = psf / psf.max()

        # Get clean beam
        print('\tComputing clean beam')
        # Put most of the PSF to 0 to help the fit
        simple_psf = self.psf.copy()
        simple_psf[self.psf <= np.std(self.psf)] = 0
        nsize = int(simple_psf.shape[0])
        fit_init = Gaussian2D(amplitude=1,
                              x_mean=npix / 2,
                              y_mean=npix / 2,
                              x_stddev=0.2,
                              y_stddev=0.2)
        fit_algo = LevMarLSQFitter()
        yi, xi = np.indices(simple_psf.shape)
        gaussian = fit_algo(fit_init, xi, yi, simple_psf)
        clean_beam = gaussian(xi, yi)
        clean_beam /= clean_beam.max()
        self.clean_beam = clean_beam[int(npix / 2 / 2):int(npix / 2 * 3 / 2),
                                     int(npix / 2 / 2):int(npix / 2 * 3 / 2)]
        return
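
The commented-out "Slow" block above is the core of the computation: a direct Fourier transform of the sampled UV points onto the (l, m) grid. A minimal self-contained sketch of that step, with random baselines standing in for self.uvw and no multiprocessing (all values illustrative):

import numpy as np

# Random baselines in wavelength units stand in for the real UV coverage.
rng = np.random.default_rng(0)
u = rng.normal(scale=50.0, size=128)
v = rng.normal(scale=50.0, size=128)

# (l, m) direction-cosine grid, as in the function above.
npix = 64
lm = 0.1
l = np.linspace(-lm, lm, npix)
m = np.linspace(lm, -lm, npix)
lg, mg = np.meshgrid(l, m)

# PSF = real part of the mean of exp(2i*pi*(u*l + v*m)) over all baselines.
expo = np.exp(2j * np.pi * (u[:, None, None] * lg + v[:, None, None] * mg))
psf = np.real(np.mean(expo, axis=0))
psf /= psf.max()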
Example #2
def run_CARMA(time,
              y,
              ysig,
              maxp,
              nsamples,
              aic_file,
              carma_sample_file,
              psd_file,
              psd_plot,
              fit_quality_plot,
              pl_plot,
              do_mags=True):
    # Calculate the order p and q of the CARMA(p,q) process, then run CARMA
    # with the selected values of p and q.

    # maxp: maximum value allowed for p; the maximum value for q defaults to p-1.

    time = time - time[0]

    model = cm.CarmaModel(time, y, ysig)
    MAP, pqlist, AIC_list = model.choose_order(maxp, njobs=1)

    # Convert the lists to numpy arrays, which are easier to manipulate.
    # The results of the AIC test are stored for future reference.
    pqarray = np.array(pqlist)
    pmodels = pqarray[:, 0]
    qmodels = pqarray[:, 1]
    AICc = np.array(AIC_list)

    np.savetxt(aic_file,
               np.transpose([pmodels, qmodels, AICc]),
               header='p  q  AICc')

    p = model.p
    q = model.q

    #running the sampler
    carma_model = cm.CarmaModel(time, y, ysig, p=p, q=q)
    carma_sample = carma_model.run_mcmc(nsamples)
    carma_sample.add_mle(MAP)

    #getting the PSD
    ax = plt.subplot(111)
    print('Getting bounds on PSD...')
    psd_low, psd_hi, psd_mid, frequencies = carma_sample.plot_power_spectrum(
        percentile=95.0, sp=ax, doShow=False, color='SkyBlue', nsamples=5000)

    psd_mle = cm.power_spectrum(frequencies,
                                carma_sample.mle['sigma'],
                                carma_sample.mle['ar_coefs'],
                                ma_coefs=np.atleast_1d(
                                    carma_sample.mle['ma_coefs']))

    #saving the psd
    np.savetxt(psd_file,
               np.transpose([frequencies, psd_low, psd_hi, psd_mid, psd_mle]),
               header='frequencies  psd_low  psd_hi  psd_mid psd_mle')

    ax.loglog(frequencies, psd_mle, '--b', lw=2)
    dt = time[1:] - time[0:-1]
    noise_level = 2.0 * np.median(dt) * np.mean(ysig**2)
    mean_noise_level = noise_level
    median_noise_level = 2.0 * np.median(dt) * np.median(ysig**2)
    ax.loglog(frequencies,
              np.ones(frequencies.size) * noise_level,
              color='grey',
              lw=2)
    ax.loglog(frequencies,
              np.ones(frequencies.size) * median_noise_level,
              color='green',
              lw=2)

    ax.set_ylim(bottom=noise_level / 100.0)

    ax.annotate("Measurement Noise Level",
                (3.0 * ax.get_xlim()[0], noise_level / 2.5))
    ax.set_xlabel('Frequency [1 / day]')
    if do_mags:
        ax.set_ylabel('Power Spectral Density [mag$^2$ day]')
    else:
        ax.set_ylabel('Power Spectral Density [flux$^2$ day]')
    #plt.title(title)
    plt.savefig(psd_plot)
    plt.close('all')

    print('Assessing the fit quality...')
    fig = carma_sample.assess_fit(doShow=False)
    ax_again = fig.add_subplot(2, 2, 1)
    #ax_again.set_title(title)
    if do_mags:
        ylims = ax_again.get_ylim()
        ax_again.set_ylim(ylims[1], ylims[0])
        ax_again.set_ylabel('magnitude')
    else:
        ax_again.set_ylabel('ln Flux')
    plt.savefig(fit_quality_plot)

    pfile = open(carma_sample_file, 'wb')
    cPickle.dump(carma_sample, pfile)
    pfile.close()

    params = {
        param: carma_sample.get_samples(param)
        for param in carma_sample.parameters
    }
    params['p'] = model.p
    params['q'] = model.q

    print "fitting bending power-law"
    nf = np.where(psd_mid >= median_noise_level)

    psdfreq = frequencies[nf]
    psd_low = psd_low[nf]
    psd_hi = psd_hi[nf]
    psd_mid = psd_mid[nf]

    A, v_bend, a_low, a_high, blpfit = fit_BendingPL(psdfreq, psd_mid)

    pl_init = models.BrokenPowerLaw1D(amplitude=2,
                                      x_break=0.002,
                                      alpha_1=1,
                                      alpha_2=2)
    fit = LevMarLSQFitter()
    pl = fit(pl_init, psdfreq, psd_mid)

    amplitude = pl.amplitude.value
    x_break = pl.x_break.value
    alpha_1 = pl.alpha_1.value
    alpha_2 = pl.alpha_2.value

    print(amplitude, x_break, alpha_1, alpha_2)

    print "BendingPL fit parameters = ", A, v_bend, a_low, a_high
    print "BrokenPL fit parameters = ", amplitude, x_break, alpha_1, alpha_2

    plt.clf()
    plt.subplot(111)
    plt.loglog(psdfreq, psd_mid, color='green')
    plt.fill_between(psdfreq, psd_low, psd_hi, facecolor='green', alpha=0.3)
    plt.plot(psdfreq, blpfit, 'r--', lw=2)
    plt.plot(psdfreq, pl(psdfreq), 'k--', lw=2)
    plt.savefig(pl_plot)
    plt.close('all')

    return (params, mean_noise_level, median_noise_level, A, v_bend, a_low,
            a_high, amplitude, x_break, alpha_1, alpha_2)
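
The broken power-law fit near the end uses astropy's stock BrokenPowerLaw1D model; a self-contained sketch of just that step on synthetic data (values illustrative):

import numpy as np
from astropy.modeling import models
from astropy.modeling.fitting import LevMarLSQFitter

# Synthetic PSD following a broken power law, with lognormal scatter.
rng = np.random.default_rng(1)
freq = np.logspace(-3, 0, 200)
truth = models.BrokenPowerLaw1D(amplitude=2.0, x_break=0.02,
                                alpha_1=1.0, alpha_2=2.0)
psd = truth(freq) * rng.lognormal(sigma=0.1, size=freq.size)

pl_init = models.BrokenPowerLaw1D(amplitude=1.0, x_break=0.01,
                                  alpha_1=0.5, alpha_2=1.5)
fit = LevMarLSQFitter()
pl = fit(pl_init, freq, psd)
print(pl.amplitude.value, pl.x_break.value, pl.alpha_1.value, pl.alpha_2.value)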
Example #3
zuccaPHI=np.array([-2.1,-2.168,-2.288,-2.448,-2.708,-3.344])
zuccaPHIERR=np.array([0.06,0.036,0.002,0.036,0.052,0.08])
sfilein='/home/lrhunt/LUM_FUNC/FULLLFOUT/LF_ZUCCA_10_35.txt'
zfilein='/home/lrhunt/LUM_FUNC/ZUCCA_DATA/VMAX/LF_Vmax_B_tot_FIXA0.dat'

MBinMid,MBINAVE,LumFunc,LumFuncErr,LogErr,NGal,AveCMV,AveWeight=np.loadtxt(sfilein,unpack=True,skiprows=1)
MBINAVEz,LogLumFuncz,LogErrzup,LogErrzdown=np.loadtxt(zfilein,unpack=True)
zuccaerr=np.power(10,LogLumFuncz[0:5])*np.log(10)*LogErrzdown[0:5]

with open(sfilein,'r') as lf:
	specvals=lf.readline().strip().split()
zmax=float(specvals[1])
zmin=float(specvals[2])
mbin=float(specvals[3])
Mbinsize=float(specvals[4])
fit=LevMarLSQFitter()

LumFunc2=np.ma.array(LumFunc,mask=False)
MBINAVE2=np.ma.array(MBINAVE,mask=False)
LumFuncErr2=np.ma.array(LumFuncErr,mask=False)

LumFunc2.mask[np.where(LumFunc2==0)[0]]=True
MBINAVE2.mask[np.where(LumFunc2==0)[0]]=True
LumFuncErr2.mask[np.where(LumFunc2==0)[0]]=True

LumFunc2.mask[11:17]=True
MBINAVE2.mask[11:17]=True
LumFuncErr2.mask[11:17]=True

LUMFUNC_ZUCCA_FIT1035=fit(LCBGFIT_init,MBINAVE2.compressed(),LumFunc2.compressed(),weights=1/LumFuncErr2.compressed())
LUMFUNC_ZUCCA_FIT1035z=fit(LCBGFIT_init,MBINAVEz[0:5],np.power(10,LogLumFuncz[0:5]),weights=1/zuccaerr[0:5])
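
The masking pattern above (flag empty luminosity-function bins in the data, abscissa, and errors alike, then fit the compressed arrays with inverse-error weights) can be sketched in a self-contained way; LCBGFIT_init is defined elsewhere in the original script, so a plain Gaussian1D stands in for it here:

import numpy as np
from astropy.modeling.models import Gaussian1D
from astropy.modeling.fitting import LevMarLSQFitter

x = np.linspace(-5, 5, 40)
y = 3.0 * np.exp(-0.5 * (x / 1.2) ** 2)
yerr = np.full_like(x, 0.1)
y[10] = 0.0  # an empty bin, as in the luminosity function above

xm = np.ma.array(x, mask=False)
ym = np.ma.array(y, mask=False)
em = np.ma.array(yerr, mask=False)
bad = np.where(ym == 0)[0]  # mask the empty bins in all three arrays
for arr in (xm, ym, em):
    arr.mask[bad] = True

fit = LevMarLSQFitter()
g = fit(Gaussian1D(amplitude=1., mean=0., stddev=1.),
        xm.compressed(), ym.compressed(), weights=1 / em.compressed())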
Example #4
File: oned.py Project: TESScience/zachopy
def peaks(x,
          y,
          plot=False,
          xsmooth=30,
          threshold=100,
          edgebuffer=10,
          widthguess=1,
          maskwidth=3,
          returnfiltered=False):
    '''Return the significant peaks in a 1D array.

			required:
				x, y = two 1D arrays

			optional:
				plot		# should we show a plot?
				xsmooth		# half-width for median smoothing
				threshold	# how many MADs above background for peaks?
				edgebuffer	# reject peaks within this distance of an edge
				widthguess	# about how wide will the peaks be?
				maskwidth   # peak fits use x's within (maskwidth)*(widthguess)

			If returnfiltered==True, then will return filtered arrays:
				(xPeaks, yPeaks, xfiltered, yfiltered).

			If returnfiltered==False, then only returns the peaks:
				(xPeaks, yPeaks)
	'''

    # calculate a smoothed version of the curve
    smoothed = mediansmooth(x, y, xsmooth=xsmooth)

    filtered = (y - smoothed)

    # calculate the mad of the whole thing
    mad = np.median(np.abs(filtered))

    # normalize the filtered timeseries
    filtered /= mad

    # calculate the derivatives
    derivatives = (filtered[1:] - filtered[:-1]) / (x[1:] - x[:-1])

    # estimate peaks as zero crossings
    guesses = np.zeros_like(x).astype(bool)
    guesses[1:-1] = (derivatives[:-1] > 0) * (derivatives[1:] <= 0)

    # make sure the peak is high enough to be interesting
    guesses *= filtered > threshold

    # make sure the peak isn't too close to an edge
    guesses *= (x > np.min(x) + edgebuffer) * (x < np.max(x) - edgebuffer)

    if plot:
        # turn on interactive plotting
        plt.ion()

        # create a figure and gridspec
        fi = plt.figure('peak finding')
        gs = plt.matplotlib.gridspec.GridSpec(2, 1, hspace=0.03)

        # create axes for two kinds of plots
        ax_raw = plt.subplot(gs[0])
        plt.setp(ax_raw.get_xticklabels(), visible=False)
        ax_filtered = plt.subplot(gs[1], sharex=ax_raw)

        # plot the input vector
        kw = dict(alpha=1, color='gray', linewidth=1)
        ax_raw.plot(x, y, **kw)
        ax_filtered.plot(x, filtered, **kw)

        # plot the threshold
        kw = dict(alpha=0.5, color='royalblue', linewidth=1)
        ax_raw.plot(x, threshold * mad + smoothed, **kw)
        ax_filtered.plot(x, threshold + np.zeros_like(x), **kw)

        # set the scale
        ax_raw.set_yscale('log')
        ax_filtered.set_yscale('log')
        ax_filtered.set_ylim(mad, np.max(filtered))

        # plot the peak guesses
        markerkw = dict(marker='o',
                        markersize=6,
                        color='none',
                        markeredgecolor='tomato',
                        alpha=0.5)
        ax_raw.plot(x[guesses], y[guesses], **markerkw)
        ax_filtered.plot(x[guesses], filtered[guesses], **markerkw)

        # create an empty plot object for showing the fits in progress
        fitplotter = ax_filtered.plot([], [],
                                      alpha=0.5,
                                      color='red',
                                      linewidth=1)[0]

        plt.draw()
        a = raw_input("how 'bout them peaks?")

    # create empty lists of peaks
    xPeaks, yPeaks = [], []

    # create a fitter object
    fitter = LevMarLSQFitter()
    for g in np.nonzero(guesses)[0]:

        # initialize an approximate Gaussian
        gauss = Gaussian1D(mean=x[g], amplitude=filtered[g], stddev=widthguess)

        # which points are relevant to this fit?
        mask = np.abs(x - x[g]) <= maskwidth * widthguess

        # use LM to fit the peak position and width
        fit = fitter(gauss, x[mask], filtered[mask])

        # store the peak values
        distancemoved = np.abs((fit.mean.value - x[g]) / fit.stddev.value)
        if distancemoved <= 3.0:
            xPeaks.append(fit.mean.value)
            yPeaks.append(fit.amplitude.value)

            if plot:

                # update the Gaussian's parameters, and plot it
                gauss.parameters = fit.parameters
                xfine = np.linspace(*minmax(x[mask]), num=50)
                fitplotter.set_data(xfine, gauss(xfine))

                # plot the fitted peak
                markerkw['color'] = markerkw['markeredgecolor']
                markerkw['alpha'] = 1
                ax_filtered.plot(xPeaks[-1], yPeaks[-1], **markerkw)

                # set the xlimits
                #ax_filtered.set_xlim(*minmax(x[mask]))

                plt.draw()
                a = input('  and this one in particular?')

    if returnfiltered:
        return np.array(xPeaks), np.array(yPeaks), x, filtered
    else:
        return np.array(xPeaks), np.array(yPeaks)
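
The peak-guessing logic above reduces to: subtract a smooth baseline, normalize by the MAD, and take positive-to-negative zero crossings of the derivative that clear the threshold. A minimal sketch of just that core (no plotting, synthetic data):

import numpy as np

rng = np.random.default_rng(2)
x = np.linspace(0, 100, 1001)
y = (np.exp(-0.5 * ((x - 30) / 0.5) ** 2)
     + 0.5 * np.exp(-0.5 * ((x - 70) / 0.5) ** 2)
     + rng.normal(scale=0.01, size=x.size))

# Normalize by the median absolute deviation (the baseline is ~0 here).
mad = np.median(np.abs(y - np.median(y)))
filtered = (y - np.median(y)) / mad

# Peaks = positive-to-negative zero crossings of the derivative,
# kept only when they clear a significance threshold.
derivatives = (filtered[1:] - filtered[:-1]) / (x[1:] - x[:-1])
guesses = np.zeros_like(x, dtype=bool)
guesses[1:-1] = (derivatives[:-1] > 0) & (derivatives[1:] <= 0)
guesses &= filtered > 20
print(x[guesses])  # approximately [30, 70]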
Example #5
    mod_x = np.linspace(0.0, 65000.0, num=100)
    ax[3].plot(mod_x, mod(mod_x) / lincormod(mod_x), label="full/linear")

    ints = range(nints)

    # apply the correction

    line_init = (Exponential1D(
        x_0=-2.0,
        amplitude=-500.,
        bounds={
            "amplitude": [-100000.0, -100.0],
            "x_0": [-4.0, 0.0]
        },
    ) + Linear1D())
    fit_line = LevMarLSQFitter()
    mult_comp = True

    line_init = Linear1D()
    fit_line = LinearLSQFitter()
    mult_comp = False

    intslopes = np.zeros((nints))
    linfit_metric = np.zeros((nints))
    intexpamp = np.zeros((nints))
    for k in range(nints):
        gnum, ydata = get_ramp(hdu[0],
                               pix_x,
                               pix_y,
                               k,
                               rampoffval=rampoffvals[0])
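
The fragment above sets up (and then swaps out) a bounded Exponential1D + Linear1D compound model. Note that astropy's own Exponential1D is parameterized by amplitude and tau rather than x_0, so the x_0 here suggests a project-local variant; a sketch with the stock model and illustrative values:

import numpy as np
from astropy.modeling.models import Exponential1D, Linear1D
from astropy.modeling.fitting import LevMarLSQFitter

x = np.linspace(1.0, 60000.0, 200)
y = -800.0 * np.exp(x / -2000.0) + 0.01 * x + 5.0  # decaying term plus a line

line_init = (Exponential1D(amplitude=-500.0, tau=-2500.0,
                           bounds={"amplitude": (-100000.0, -100.0),
                                   "tau": (-4000.0, -1.0)})
             + Linear1D())
fit_line = LevMarLSQFitter()
best = fit_line(line_init, x, y, maxiter=1000)
print(best.amplitude_0.value, best.tau_0.value, best.slope_1.value)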
Example #6
File: core.py Project: ysBach/photutils
def centroid_1dg(data, error=None, mask=None):
    """
    Calculate the centroid of a 2D array by fitting 1D Gaussians to the
    marginal ``x`` and ``y`` distributions of the array.

    Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
    arrays are automatically masked.  The mask for invalid values
    represents the combination of the invalid-value masks for the
    ``data`` and ``error`` arrays.

    Parameters
    ----------
    data : array_like
        The 2D data array.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    centroid : `~numpy.ndarray`
        The ``x, y`` coordinates of the centroid.
    """

    data = np.ma.asanyarray(data)

    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        data.mask |= mask

    if np.any(~np.isfinite(data)):
        data = np.ma.masked_invalid(data)
        warnings.warn(
            'Input data contains non-finite values (e.g. NaNs or infs), '
            'which were automatically masked.', AstropyUserWarning)

    if error is not None:
        error = np.ma.masked_invalid(error)
        if data.shape != error.shape:
            raise ValueError('data and error must have the same shape.')
        data.mask |= error.mask

        error.mask = data.mask
        xy_error = [np.sqrt(np.ma.sum(error**2, axis=i)) for i in [0, 1]]
        xy_weights = [(1.0 / xy_error[i].clip(min=1.e-30)) for i in [0, 1]]
    else:
        xy_weights = [np.ones(data.shape[i]) for i in [1, 0]]

    # assign zero weight where an entire row or column is masked
    if np.any(data.mask):
        bad_idx = [np.all(data.mask, axis=i) for i in [0, 1]]
        for i in [0, 1]:
            xy_weights[i][bad_idx[i]] = 0.

    xy_data = [np.ma.sum(data, axis=i).data for i in [0, 1]]

    constant_init = np.ma.min(data)
    centroid = []
    for (data_i, weights_i) in zip(xy_data, xy_weights):
        params_init = gaussian1d_moments(data_i)
        g_init = Const1D(constant_init) + Gaussian1D(*params_init)
        fitter = LevMarLSQFitter()
        x = np.arange(data_i.size)
        g_fit = fitter(g_init, x, data_i, weights=weights_i)
        centroid.append(g_fit.mean_1.value)

    return np.array(centroid)
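
A usage sketch for the function above (in current photutils it is assumed importable as photutils.centroids.centroid_1dg):

import numpy as np
from photutils.centroids import centroid_1dg

# A synthetic 2D Gaussian source with a known center.
yy, xx = np.mgrid[0:25, 0:25]
data = np.exp(-((xx - 12.4) ** 2 + (yy - 10.7) ** 2) / (2 * 2.0 ** 2))

print(centroid_1dg(data))  # approximately [12.4, 10.7]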
Example #7
File: fit_2d.py Project: bpairet/VIP
def fit_2dgaussian(array,
                   crop=False,
                   cent=None,
                   cropsize=15,
                   fwhmx=4,
                   fwhmy=4,
                   theta=0,
                   threshold=False,
                   sigfactor=6,
                   full_output=False,
                   debug=False):
    """ Fitting a 2D Gaussian to the 2D distribution of the data with photutils.
    
    Parameters
    ----------
    array : array_like
        Input frame with a single PSF.
    crop : {False, True}, optional
        If True, a square subimage will be cropped.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage. 
        If None the center of the frame is used for cropping the subframe (the 
        PSF is assumed to be ~ at the center of the frame). 
    cropsize : int, optional
        Size of the subimage.
    fwhmx, fwhmy : float, optional
        Initial values for the standard deviation of the fitted Gaussian, in px.
    theta : float, optional
        Angle of inclination of the 2d Gaussian counting from the positive X
        axis.
    threshold : {False, True}, optional
        If True the background pixels will be replaced by small random Gaussian 
        noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Gaussian
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian 
        noise. 
    full_output : {False, True}, optional
        If False it returns just the centroid, if True also returns the 
        FWHM in X and Y (in pixels), the amplitude and the rotation angle.
    debug : {True, False}, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.
        
    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting. 
    mean_x : float
        Source centroid x position on input array from fitting.
        
    If *full_output* is True it returns:
    mean_y, mean_x : floats
        Centroid. 
    fwhm_y : float
        FWHM in Y in pixels.
    fwhm_x : float
        FWHM in X in pixels.
    amplitude : float
        Amplitude of the Gaussian.
    theta : float
        Rotation angle.
    
    """
    if not array.ndim == 2:
        raise TypeError('Input array is not a frame or 2d array')

    # If frame size is even we drop last row and last column
    if array.shape[0] % 2 == 0:
        array = array[:-1, :].copy()
    if array.shape[1] % 2 == 0:
        array = array[:, :-1].copy()

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              ceny,
                                              cenx,
                                              position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * 50
        psf_subimage[indi] = subimnoise[indi]

    yme, xme = np.where(psf_subimage == psf_subimage.max())
    # Creating the 2D Gaussian model
    gauss = models.Gaussian2D(amplitude=psf_subimage.max(),
                              x_mean=xme,
                              y_mean=yme,
                              x_stddev=fwhmx * gaussian_fwhm_to_sigma,
                              y_stddev=fwhmy * gaussian_fwhm_to_sigma,
                              theta=theta)
    # Levenberg-Marquardt algorithm
    fitter = LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(gauss, x, y, psf_subimage, maxiter=1000, acc=1e-08)

    if crop:
        mean_y = fit.y_mean.value + suby
        mean_x = fit.x_mean.value + subx
    else:
        mean_y = fit.y_mean.value
        mean_x = fit.x_mean.value
    fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
    fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
    amplitude = fit.amplitude.value
    theta = fit.theta.value

    if debug:
        if threshold: msg = 'Subimage thresholded / Model / Residuals'
        else: msg = 'Subimage (no threshold) / Model / Residuals'
        pp_subplots(psf_subimage,
                    fit(x, y),
                    psf_subimage - fit(x, y),
                    colorb=True,
                    grid=True,
                    title=msg)
        print('FWHM_y =', fwhm_y)
        print('FWHM_x =', fwhm_x)
        print()
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_mean.value)
        print('centroid x subim =', fit.x_mean.value)
        print()
        print('peak =', amplitude)
        print('theta =', theta)

    if full_output:
        return mean_y, mean_x, fwhm_y, fwhm_x, amplitude, theta
    else:
        return mean_y, mean_x
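
The gaussian_fwhm_to_sigma and gaussian_sigma_to_fwhm factors used above come from astropy.stats; a quick check of what they are:

import numpy as np
from astropy.stats import gaussian_fwhm_to_sigma, gaussian_sigma_to_fwhm

# FWHM = 2*sqrt(2*ln 2) * sigma for a Gaussian, so the two are reciprocals.
assert np.isclose(gaussian_sigma_to_fwhm, 2 * np.sqrt(2 * np.log(2)))
assert np.isclose(gaussian_fwhm_to_sigma * gaussian_sigma_to_fwhm, 1.0)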
Example #8
    def compute(self, psi, ford, fext, ford_err=None, fext_err=None,
                logger=None):
        """Compute the polarimetry.

        Parameters
        ----------
        psi : array_like
            Retarder positions in degrees
        ford, fext : array_like
            Fluxes of ordinary (ford) and extraordinary (fext) beams.
        ford_err, fext_err : array_like
            Statistical errors of ordinary and extraordinary fluxes.
        logger : `logging.Logger`
            Python logger of the function.

        Notes
        -----
        * `psi`, `ford` and `fext` must have matching dimensions.

        * If each data array has just one dimension, it will be considered
          a single star.

        * If each data array has two dimensions, it will be considered multiple
          stars, where each row represents one star.
        """
        logger = logger or self.logger

        self._filter_neg(ford, fext)  # inplace
        z, z_err = self.calc_z(psi, ford, fext, ford_err, fext_err)

        n_stars = len(z)
        logger.info(f'Computing polarimetry for {n_stars} stars.')

        # Variables to store the results
        res = Table()
        res['z'] = z
        res['z_err'] = z_err
        for i in ('q', 'u'):
            res[i] = np.zeros(n_stars, dtype='f8')
            res[i+"_err"] = np.zeros(n_stars, dtype='f8')
            res[i].fill(np.nan)  # fill with nan to be safer
            res[i+"_err"].fill(np.nan)
        if self.retarder != 'half':
            res['v'] = np.zeros(n_stars, dtype='f8')
            res['v_err'] = np.zeros(n_stars, dtype='f8')
            res['v'].fill(np.nan)
            res['v_err'].fill(np.nan)

        for i in range(n_stars):
            fitter = LevMarLSQFitter()
            model = self._model()
            if z_err is not None:
                m_fit = fitter(model, psi[i], z[i], weights=1/z_err[i])
            else:
                m_fit = fitter(model, psi[i], z[i])
            info = fitter.fit_info
            for n, v, err in zip(m_fit.param_names, m_fit.parameters,
                                 np.sqrt(np.diag(info['param_cov']))):
                res[n][i] = v
                res[n+"_err"] = err

        res['p'] = np.hypot(res['q'], res['u'])
        res['p_err'] = np.sqrt(((res['q']/res['p'])**2)*(res['q_err']**2) +
                               ((res['u']/res['p'])**2)*(res['u_err']**2))
        res['theta'] = compute_theta(res['q'], res['u'])
        res['theta_err'] = 28.65*res['p_err']/res['p']

        return res
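
The per-parameter errors above are read from the covariance matrix that LevMarLSQFitter leaves in fit_info['param_cov']; a standalone illustration of that pattern:

import numpy as np
from astropy.modeling.models import Gaussian1D
from astropy.modeling.fitting import LevMarLSQFitter

rng = np.random.default_rng(3)
x = np.linspace(-5, 5, 100)
y = Gaussian1D(amplitude=2.0, mean=0.5, stddev=1.0)(x) \
    + rng.normal(scale=0.05, size=x.size)

fitter = LevMarLSQFitter()
m_fit = fitter(Gaussian1D(amplitude=1., mean=0., stddev=1.), x, y)
cov = fitter.fit_info['param_cov']  # None when the fit fails
for n, v, err in zip(m_fit.param_names, m_fit.parameters,
                     np.sqrt(np.diag(cov))):
    print(f'{n} = {v:.3f} +/- {err:.3f}')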
Example #9
def gauss_fit_2p(velo, spec, toWrite=False, source='', size=0, paras=None):
    @custom_model
    def gaussian_2peak(x,
                       amplitude1=1.,
                       mean1=-1.,
                       sigma1=1.,
                       amplitude2=1.,
                       mean2=1.,
                       sigma2=1.):
        return (amplitude1 * np.exp(-0.5 * ((x - mean1) / sigma1)**2) +
                amplitude2 * np.exp(-0.5 * ((x - mean2) / sigma2)**2))

    plt.gcf()
    if paras is None:
        plt.clf()
        paras = [0.1, 40., 10., 0.1, 100., 10.]
        plt.plot(velo, spec)
        plt.xlim(-200, 200)
        plt.title(source + ' - Radius {:3.0f}"'.format(size), size='x-large')
        plt.xlabel('V$_{LSR}$ (km/s)', size='x-large')
        plt.ylabel('Jy/beam', size='x-large')
    else:
        plt.gcf()

    r_s2f = 2.35482  # sigma to fwhm: FWHM = 2.355*sigma
    gauss2 = gaussian_2peak(amplitude1=paras[0],
                            mean1=paras[1],
                            sigma1=paras[2] / r_s2f,
                            amplitude2=paras[3],
                            mean2=paras[4],
                            sigma2=paras[5] / r_s2f)
    #    print(gauss2.param_names)
    gauss2.amplitude1.min = 0.
    gauss2.amplitude2.min = 0.
    gauss2.mean1.bounds = [0., 150.]
    gauss2.mean2.bounds = [0., 150.]
    gauss2.sigma1.bounds = [5. / r_s2f, 40. / r_s2f]
    gauss2.sigma2.bounds = [5. / r_s2f, 40. / r_s2f]
    fit = LevMarLSQFitter()
    sp_fit = fit(gauss2, velo, spec, maxiter=50000)
    print('fitting error: ', fit.fit_info['ierr'])
    print(fit.fit_info['message'])
    fpeak1 = sp_fit.amplitude1.value
    vlsr1 = sp_fit.mean1.value
    fwhm1 = sp_fit.sigma1.value * r_s2f
    fpeak2 = sp_fit.amplitude2.value
    vlsr2 = sp_fit.mean2.value
    fwhm2 = sp_fit.sigma2.value * r_s2f
    try:
        if fit.fit_info['param_cov'] is not None:
            para_err = np.sqrt(np.diag(fit.fit_info['param_cov']))
        else:
            para_err = np.zeros(len(paras))
    except:
        para_err = np.zeros(len(paras))

    print('Flux1: {:6.4f}; Vlsr1: {:5.1f}; FWHM1: {:4.1f}'.format(
        fpeak1, vlsr1, fwhm1))
    print('Flux2: {:6.4f}; Vlsr2: {:5.1f}; FWHM2: {:4.1f}'.format(
        fpeak2, vlsr2, fwhm2))
    print(para_err)

    for old_line in plt.gca().lines + plt.gca().collections:
        print('oldline')
        old_line.remove()
    plt.plot(velo, spec, color='C0')
    plt.plot(velo, sp_fit(velo), color='C3')
    if toWrite:
        plt.savefig(source + '.png', dpi=300, bbox_inches='tight')
    plt.show()

    return fpeak1, vlsr1, fwhm1, fpeak2, vlsr2, fwhm2, para_err[0], para_err[
        1], para_err[2] * r_s2f, para_err[3], para_err[4], para_err[5] * r_s2f
Example #10
def fit_pah6_2(spectrum=None, lmin=5.2, lmax=6.7, poly_deg=3,
               include_pah5_27=True, include_pah5_70=True, include_pah6_70=True,
               include_h2s6=True, include_h2s7=True, include_feII=True, model_only=False):
	"""
	Fit the 6.2 micron PAH complex with a combined model of Drude profiles,
	Gaussian emission lines, and a polynomial continuum.
	"""
	
	# Cubic polynomial to model the continuum
	cont_mod = Polynomial1D(degree=poly_deg, name='Continuum')
	
	# One Drude profile to model the 6.22 micron PAH feature
	pah6_22 = Drude(x_0=6.22, fwhm=0.187, amplitude=0.1, fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 6.22')
	
	# Gaussian lines to model the [FeII] and H2 S(6) and S(7) emission
	# Drude profiles to model the 5.27, 5.70, and 6.7 PAH features
	feII = GaussianLine(x_0=5.34, fwhm=0.053, amplitude=0.1,
	                  fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='[FeII]')
	feII.amplitude.min = 0

	h2s7 = GaussianLine(x_0=5.511, fwhm=0.053, amplitude=0.1,
	                  fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='H2 S(7)')
	h2s7.amplitude.min = 0
	h2s6 = GaussianLine(x_0=6.109, fwhm=0.053, amplitude=0.1,
	                  fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='H2 S(6)')
	h2s6.amplitude.min = 0
	pah5_27 = Drude(x_0=5.27, fwhm=0.179, amplitude=0.1, fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 5.27')
	pah5_70 = Drude(x_0=5.70, fwhm=0.416, amplitude=0.1, fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 5.70')
	pah6_70 = Drude(x_0=6.69, fwhm=0.468, amplitude=0.1, fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 6.70')


	# Full model
	model = cont_mod + pah6_22
	if include_h2s6:
		model += h2s6
	if include_h2s7:
		model += h2s7
	if include_pah5_27:
		model += pah5_27
	if include_pah5_70:
		model += pah5_70
	if include_pah6_70:
		model += pah6_70
	if include_feII:
		model += feII
	
	if not model_only:
		lmFitter = LevMarLSQFitter()
		bayesFitter = SpectraBayesFitter(threads=1, nsteps=5000, nburn=1000)
	 
		lam = spectrum.waves_rest.value
		flux = spectrum.flux.value
		err = spectrum.error.value
		ind = (lam >= lmin) & (lam <= lmax)
		x = lam[ind]
		y = flux[ind]
		e = err[ind]
		w = 1/e
	
		init_mod = lmFitter(model, x, y, maxiter=2000, weights=w)
		result = bayesFitter.fit(init_mod, x, y, yerr=e)
	else:
		result = model

	return result
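
Drude and GaussianLine here are project-local model classes; astropy ships a comparable Drude1D, so the fixed-center, free-amplitude fits above can be sketched with stock models (synthetic spectrum, illustrative values):

import numpy as np
from astropy.modeling.models import Drude1D, Polynomial1D
from astropy.modeling.fitting import LevMarLSQFitter

lam = np.linspace(5.2, 6.7, 300)
flux = 0.02 * lam + 0.1 * Drude1D(amplitude=1.0, x_0=6.22, fwhm=0.187)(lam)

cont = Polynomial1D(degree=3, name='Continuum')
pah6_22 = Drude1D(amplitude=0.05, x_0=6.22, fwhm=0.187,
                  fixed={'x_0': True, 'fwhm': True}, name='PAH 6.22')
model = cont + pah6_22
fit = LevMarLSQFitter()(model, lam, flux, maxiter=2000)
print(fit['PAH 6.22'].amplitude.value)  # component access by name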
Example #11
def plot_fit_ice():
    fs = 16
    plt.rc("xtick", direction="in", labelsize=fs * 0.8)
    plt.rc("ytick", direction="in", labelsize=fs * 0.8)

    fig, ax = plt.subplots(2, 1, figsize=(9, 6), sharex=True)

    # read the spectrum for the H2O ice
    file = "H2O_NASA.dat"
    table = pd.read_table(file, comment="#", sep=r"\s+")
    table = table[2531:]
    waves = 1 / table["Freq."] * 1e4
    norm = np.max(-table["%T,10K"] + 100)
    absorbs = (-table["%T,10K"] + 100) / norm

    # plot the spectrum
    ax[0].plot(waves, absorbs, color="k", label=r"H$_2$O ice")
    ax[1].plot(waves,
               absorbs,
               color="k",
               lw=1,
               ls="--",
               alpha=0.5,
               label=r"H$_2$O ice")

    # fit the spectrum
    drude = Drude1D(x_0=3.03, fixed={"amplitude": True})
    gauss = Gaussian1D(mean=3.03, stddev=0.13, fixed={"amplitude": True})
    lorentz = Lorentz1D(x_0=3.03, fixed={"amplitude": True})

    fit = LevMarLSQFitter()
    fit_result1 = fit(drude, waves, absorbs)
    fit_result2 = fit(gauss, waves, absorbs, maxiter=1000)
    fit_result3 = fit(lorentz, waves, absorbs, maxiter=10000)

    # calculate the residuals
    res1 = absorbs - fit_result1(waves)
    res2 = absorbs - fit_result2(waves)
    res3 = absorbs - fit_result3(waves)

    print("D", np.sum(res1**2))
    print("G", np.sum(res2**2))
    print("L", np.sum(res3**2))

    # plot the fits
    ax[0].plot(
        waves,
        fit_result1(waves),
        ls="--",
        label="Drude",
    )
    ax[0].plot(
        waves,
        fit_result2(waves),
        ls=":",
        label="Gaussian",
    )
    ax[0].plot(
        waves,
        fit_result3(waves),
        ls="-.",
        label="Lorentz",
    )
    ax[0].legend()

    # read the spectrum for the mixed ice
    file = "mix_NASA.dat"
    table = pd.read_table(file, comment="#", sep=r"\s+")
    print(np.max(table["%T,10K"]))

    table = table[2531:]
    waves = 1 / table["Freq."] * 1e4
    norm = np.max(-table["%T,10K"] + 95)
    absorbs = (-table["%T,10K"] + 95) / norm

    # plot the spectrum
    plt.plot(waves, absorbs, color="k", label=r"mixed ice")

    # fit the spectrum
    drude = Drude1D(x_0=3.03, fixed={"amplitude": True})
    gauss = Gaussian1D(mean=3.03, stddev=0.13, fixed={"amplitude": True})
    lorentz = Lorentz1D(x_0=3.03, fixed={"amplitude": True})

    fit = LevMarLSQFitter()
    fit_result1 = fit(drude, waves, absorbs, maxiter=1000)
    fit_result2 = fit(gauss, waves, absorbs, maxiter=10000)
    fit_result3 = fit(lorentz, waves, absorbs, maxiter=10000)

    # calculate the residuals
    res1 = absorbs - fit_result1(waves)
    res2 = absorbs - fit_result2(waves)
    res3 = absorbs - fit_result3(waves)

    print("D", np.sum(res1**2))
    print("G", np.sum(res2**2))
    print("L", np.sum(res3**2))

    print(fit_result1, fit_result2, fit_result3)

    plt.plot(
        waves,
        fit_result1(waves),
        ls="--",
        label="Drude",
    )
    plt.plot(
        waves,
        fit_result2(waves),
        ls=":",
        label="Gaussian",
    )
    plt.plot(
        waves,
        fit_result3(waves),
        ls="-.",
        label="Lorentz",
    )
    ax[1].legend()

    plt.xlim(2.5, 4)
    plt.xlabel(r"$\lambda$ [$\mu m$]", fontsize=fs)
    fig.text(
        0.06,
        0.5,
        "Normalized absorbance",
        rotation="vertical",
        va="center",
        ha="center",
        fontsize=fs,
    )
    plt.subplots_adjust(hspace=0)
    fig.savefig("/Users/mdecleir/spex_nir_extinction/Figures/lab_ice.pdf",
                bbox_inches="tight")
Example #12
def fit_pah7_7(spectrum=None, lmin=6.5, lmax=9.5, poly_deg=3,
               include_arII=True, include_h2s5=True,
               include_h2s4=True, include_pah8_6=True,
               include_neVI=True, include_pah8_3=True, 
               model_only=False):
	"""
	Fit the 7.7 micron PAH complex with the combined model of three Drude profiles
	and a cubic polynomial for the continuum
	"""
	
	# Cubic polynomial to model the continuum
	#cont_mod = Continuum(c0=0.01, c1=0.01, c2=0.01, c3=0.01, name='Continuum')
	cont_mod = Polynomial1D(degree=poly_deg, name='Continuum')

	# Three Drude profiles to model the 7.7 micron PAH complex
	pah7_42 = Drude(x_0=7.42, fwhm=0.935, amplitude=0.1, fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 7.42')
	pah7_60 = Drude(x_0=7.60, fwhm=0.334, amplitude=0.1, fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 7.60')
	pah7_85 = Drude(x_0=7.85, fwhm=0.416, amplitude=0.1, fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 7.85')
	
	# Gaussian lines to model the [ArII], [NeVI], and H2 S(5) and S(4) emission
	arII = GaussianLine(x_0=6.985, fwhm=0.053, amplitude=0.1,
	                  fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='[ArII]')
	h2s5 = GaussianLine(x_0=6.909, fwhm=0.053, amplitude=0.1,
	                  fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='H2 S(5)')
	h2s4 = GaussianLine(x_0=8.026, fwhm=0.1, amplitude=0.1,
	                  fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='H2 S(4)')
	neVI = GaussianLine(x_0=7.652, fwhm=0.053, amplitude=0.3,
	                  fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='[NeVI]')
	pah8_6 = Drude(x_0=8.61, fwhm=0.336, amplitude=0.1,
	              fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 8.60')
	pah8_3 = Drude(x_0=8.33, fwhm=0.417, amplitude=0.1,
	              fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 8.33')
	#pah6_22 = Drude(x_0=6.22, fwhm=0.187, amplitude=0.1, fixed={'x_0':True, 'fwhm':True, 'amplitude':False}, name='PAH 6.22')
	# Full model
	model = cont_mod + pah7_42 + pah7_60 + pah7_85
	if include_arII:
		model += arII
	if include_h2s5:
		model += h2s5
	if include_h2s4:
		model += h2s4
	if include_pah8_6:
		model += pah8_6
	if include_neVI:
		model += neVI
	if include_pah8_3:
		model += pah8_3
	#if include_pah6_22:
	#	model += pah6_22

	if not model_only:
		lmFitter = LevMarLSQFitter()
		bayesFitter = SpectraBayesFitter(threads=1, nsteps=500, nburn=100)
	 
		lam = spectrum.waves_rest.value
		flux = spectrum.flux.value
		err = spectrum.error.value
		ind = (lam >= lmin) & (lam <= lmax)
		x = lam[ind]
		y = flux[ind]
		e = err[ind]
		w = 1/e
	
		init_mod = lmFitter(model, x, y, maxiter=2000, weights=w)
		result = bayesFitter.fit(init_mod, x, y, yerr=e)
		#result = init_mod
	else:
		result = model

	return result
Example #13
def std1dspec(infile, startz=2000, nsigma=5, overwrite=False):
    print('\n#############################')
    print('Making 1D spectrum')
                              
    hdl = fits.open(infile)
    hdr = hdl[0].header
    
    scidata = hdl[0].data
    binfac1 = hdr['BIN-FCT1']

    # Showing the image 
    aspect = 0.43/(0.104*binfac1)
    fig=plt.figure()
    plt.title('Click on the star. ')
    plt.imshow(scidata[startz,:,:], aspect=aspect, \
               interpolation='nearest', origin='lower')

    global xc,yc
    xc = 0.0
    yc = 0.0

    def star_center(event):
        global xc,yc
        xc= event.xdata
        yc = event.ydata
        plt.close()
        return

    
    cid = fig.canvas.mpl_connect('button_press_event', star_center)
    print('\n\t Click near the star center.')
    plt.show()

    print('\t Initial star location: (%.2f, %.2f)'%(xc,yc))
    initc = np.array((xc,yc))
    cutdata, initp = cutout(scidata[startz,:,:], initc ,w=10)
    g_init = Gaussian2D(amplitude=np.max(cutdata),
                         x_mean=initc[0]-initp[0],
                         y_mean=initc[1]-initp[1],
                         x_stddev=2.0,
                         y_stddev=1.0,
                         theta=3.1416/2)
    g_init.theta.fixed = True
    fitter = LevMarLSQFitter()
    y, x = np.indices(cutdata.shape)
    gfit = fitter(g_init, x, y, cutdata)
    print('\t Initial 2D Gaussian fitting result:')
    print(gfit)
    position0 = np.array([gfit.x_mean.value, gfit.y_mean.value])
    position0 = position0 + initp
    position = position0

    a = gfit.x_stddev.value * nsigma
    b = gfit.y_stddev.value * nsigma
    theta = gfit.theta.value

    plt.imshow(scidata[startz,:,:], aspect=aspect, \
               interpolation='nearest', origin='lower')
    apertures = EllipticalAperture(position, a=a ,b=b,theta=theta)
    apertures.plot()
    print('\n\t Check the aperture, and close the plot window.')
    plt.title('Check the aperture')
    plt.show()

    global coords, ii, std1ddata, lam

    std1ddata = np.zeros(scidata.shape[0], dtype=np.float32)
    positions = np.zeros((scidata.shape[0], 2), dtype=float)

    # Aperture photometry with increasing wavelength pix from startz
    for i in range(startz,scidata.shape[0]):
        cutdata, initp = cutout(scidata[i,:,:],position)
        if np.min(cutdata) == np.max(cutdata):
            print('\t Cutdata is empty at '+str(i)+' pix.')
            break
        position_pre = position
        position = centroid_com(cutdata)
        position = position + initp
        if np.linalg.norm(position-position_pre) > 2.:
            print('\t Centroid is not good at '+str(i)+' pix.')
            break
        positions[i,:] = position
        #apertures = EllipticalAperture(position, a=a ,b=b,theta=theta)
        #phot_table = aperture_photometry(scidata[i,:,:], apertures)   
        #std1ddata[i] = phot_table['aperture_sum'].data[0]

    # Aperture photometry with decreasing wavelength pix from startz 
    position = position0
    for i in range(startz-1,0,-1):
        cutdata, initp = cutout(scidata[i,:,:],position)
        if np.min(cutdata) == np.max(cutdata):
            print('\t Cutdata is empty! at ' + str(i) + ' pix.')
            break
        position_pre = position
        position = centroid_com(cutdata)
        position = position + initp
        if np.linalg.norm(position-position_pre) > 2.:
            print('\t Centroid is not good at ' + str(i) + ' pix.')
            break
        positions[i,:] = position
        #apertures = EllipticalAperture(position, a=a ,b=b,theta=theta)
        #phot_table = aperture_photometry(scidata[i,:,:], apertures)   
        #std1ddata[i] = phot_table['aperture_sum'].data[0]


    # Plotting the 1D data & selecting the spectral range.
    crpix = hdr['CRPIX3']
    crval = hdr['CRVAL3']
    #cdelt = hdr['CDELT3']
    cdelt = hdr['CD3_3']
    object_name = hdr['OBJECT']

    npix = len(std1ddata)
    start = crval - (crpix-1)*cdelt
    stop = crval + (npix - crpix + 0.5)*cdelt
    lam = np.arange(start ,stop, cdelt)
    
    plt.plot(lam,positions[:,0])
    plt.title(object_name)
    plt.xlabel('Lambda (Angstrom)')
    plt.ylabel('X (pix)')
    plt.grid()
    plt.show()

    plt.plot(lam,positions[:,1])
    plt.title(object_name)
    plt.xlabel('Lambda (Angstrom)')
    plt.ylabel('Y (pix)')
    plt.grid()
    plt.show()
    
    return
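
The tracking loops above repeat one step per wavelength channel: cut out a box around the previous position, recenter with photutils' centroid_com, and convert back to full-frame pixels. A standalone sketch of that step (this cutout is a simplified stand-in for the script's helper of the same name):

import numpy as np
from photutils.centroids import centroid_com

def cutout(image, center, w=10):
    # Simplified stand-in: a w-pixel box around `center`, plus the
    # offset of its lower-left corner.
    x0 = int(center[0]) - w // 2
    y0 = int(center[1]) - w // 2
    return image[y0:y0 + w, x0:x0 + w], np.array([x0, y0])

yy, xx = np.mgrid[0:50, 0:50]
frame = np.exp(-((xx - 24.3) ** 2 + (yy - 26.8) ** 2) / (2 * 1.5 ** 2))

position = np.array([24.0, 27.0])         # centroid from the previous channel
cutdata, initp = cutout(frame, position)
position = centroid_com(cutdata) + initp  # refined (x, y) in full-frame pixels
print(position)                           # approximately [24.3, 26.8]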
Example #14
def model_Keplerian(self,
                    threshold,
                    source_distance,
                    fit_method=LevMarLSQFitter(),
                    flag_singularity=True,
                    flag_radius=None,
                    flag_intervals=None,
                    velocity_interval=None,
                    channel_interval=None,
                    weak_quadrants=False,
                    return_stddevs=True,
                    plot=False,
                    write_table_to=None,
                    debug=False):
    """Model a keplerian profile to PVdata.

    Args:
        self (PVdata):
            PVdata object to compute the data from.
        threshold (int/ float):
            Set as multiples of PVdata.noise (for instance 3 sigma).
        source_distance (any):
            Distance to the source, which is necessary for computing physical distances.
        fit_method (any, optional):
            Method to fit the model to the data.
        flag_singularity (bool, optional):
            Flag the zero position data points, to avoid running in trouble there during fitting.
        flag_radius (astropy.units.Quantity, optional):
            If given, then all data points within this given radius from the position_reference are flagged.
        flag_intervals (list of tuples of astropy.units.Quantity, optional):
            Similar to flag_radius, but arbitrary intervals may be flagged. Each interval is
            given as a tuple of two radial distances from the position_reference.
        velocity_interval (any, optional):
            Velocity interval to restrict the fitting to.
        channel_interval (any, optional):
            Channel interval to restrict the fitting to.
        weak_quadrants (bool, optional):
            Fit the model to the signal in the weaker opposing quadrants.
        return_stddevs (boolean, optional):
            The fit method LevMarLSQFitter is able to return the standard deviation of the fit parameters. Default is
            True.
        plot (boolean, optional):
            If True, the fit will be displayed as a matplotlib pyplot.
        write_table_to (str, optional):
            Name of a file to write the data points to, formatted as a table.
        debug (bool, optional):
            Stream debugging information to the terminal.

    Returns:
        best_fit (astropy.modeling.models.custom_model):
            Best fitting model.
        stddevs (numpy.array):
            Only if return_stddevs is True. The array entries correspond to the best_fit instance parameters in the
            same order.
        chi2 (float):
            chi-squared residual of the fit to the unflagged data.
    """

    # Compute the velocity table
    if velocity_interval is not None:
        channel_interval = self._velocity_to_channel(velocity_interval)
        indices = {
            'min': channel_interval[0],
            'max': channel_interval[1],
            'central': int((channel_interval[1] + channel_interval[0]) / 2)
        }
        table = self.estimate_extreme_velocities(
            threshold=threshold,
            source_distance=source_distance,
            plot=False,
            weak_quadrants=weak_quadrants,
            velocity_interval=velocity_interval,
            writeto=write_table_to,
            debug=debug)
    elif channel_interval is not None:
        indices = {
            'min': channel_interval[0],
            'max': channel_interval[1],
            'central': int((channel_interval[1] + channel_interval[0]) / 2)
        }
        table = self.estimate_extreme_velocities(
            threshold=threshold,
            source_distance=source_distance,
            plot=False,
            weak_quadrants=weak_quadrants,
            channel_interval=channel_interval,
            writeto=write_table_to,
            debug=debug)
    else:
        indices = None
        table = self.estimate_extreme_velocities(
            threshold=threshold,
            source_distance=source_distance,
            weak_quadrants=weak_quadrants,
            writeto=write_table_to,
            debug=debug)

    # Extract xy data from table
    xdata = table['Distance'].to('au').value
    ydata = table['Velocity'].to('km/ s').value
    xdata = np.ma.masked_array(xdata, np.zeros(xdata.shape, dtype=bool))
    ydata = np.ma.masked_array(ydata, np.zeros(ydata.shape, dtype=bool))

    # Apply flagging masks
    if flag_singularity:
        print('Flagging the singularity:')
        i = np.where(np.abs(table['Distance'].value) < 1e-6)[0]
        xdata.mask[i] = True
        ydata.mask[i] = True
        print(f">> Flagged the elements {i}.")
    if flag_radius is not None:
        print('Flagging towards a radial distance of {}:'.format(flag_radius))
        flag_radius = flag_radius.to('au').value
        i = np.where(np.abs(table['Distance'].value) < flag_radius)[0]
        xdata.mask[i] = True
        ydata.mask[i] = True
        print(f">> Flagged the elements {i}.")
    if flag_intervals is not None:
        print('Flagging intervals:')
        flagged = np.empty(shape=(0, ), dtype=int)
        for interval in flag_intervals:
            i1 = np.where(interval[0].value < table['Distance'].value)[0]
            i2 = np.where(table['Distance'].value < interval[1].value)[0]
            i = np.intersect1d(i1, i2)
            xdata.mask[i] = True
            ydata.mask[i] = True
            flagged = np.append(flagged, i)
        if len(np.unique(flagged)) < 10:
            print('>> Flagged the elements {}.'.format(np.unique(flagged)))
        else:
            print('>> Flagged {} elements.'.format(len(np.unique(flagged))))

    # choose and initialize the fit model
    if self.start_low(indices=indices, weak_quadrants=weak_quadrants):
        model = Keplerian1D(mass=10.,
                            v0=self.vLSR.value,
                            r0=0,
                            bounds={'mass': (0.0, None)})
    else:
        model = Keplerian1D_neg(mass=10.,
                                v0=self.vLSR.value,
                                r0=0,
                                bounds={'mass': (0.0, None)})

    # Fit the chosen model to the data
    best_fit = fit_method(model, xdata.compressed(), ydata.compressed())

    # Estimate chi2
    chi2 = np.sum(np.square(best_fit(xdata) - ydata))

    # Plot
    if plot:
        plt.plot(table['Distance'], table['Velocity'], 'o', label='data')
        plt.xlabel('Position offset ({})'.format(table['Distance'].unit))
        plt.ylabel('Velocity ({})'.format(table['Velocity'].unit))
        plt.axhline(self.vLSR.value, c='k', ls='--', label=r'$v_\mathrm{LSR}$')
        plt.plot(xdata, best_fit(xdata), label='model')
        plt.fill_between(xdata,
                         best_fit(xdata),
                         best_fit.v0,
                         facecolor='tab:orange',
                         alpha=.5)
        if debug:
            plt.plot(xdata, model(xdata), label='init')
        plt.grid()
        plt.legend()
        plt.show()
        plt.close()

    # return
    if not isinstance(fit_method, LevMarLSQFitter):
        return_stddevs = False
    if return_stddevs:
        covariance = fit_method.fit_info['param_cov']
        try:
            stddevs = np.sqrt(np.diag(covariance))
            return best_fit, stddevs, chi2
        except ValueError as e:
            if covariance is None:
                print(
                    'Caught the following error, which is due to an unsuccessful fit (covariance matrix is {}):'
                    .format(covariance))
                print('ValueError: {}'.format(e))
            return best_fit, chi2
    if debug:
        print(fit_method.fit_info['message'])

    return best_fit, chi2
Example #15
def test_NFW_fit():
    """Test linear fitting of NFW model."""
    # Fixed parameters
    redshift = 0.63
    cosmo = cosmology.Planck15

    # Radial set
    r = np.array([
        1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
        7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04
    ]) * u.kpc

    # 200c Overdensity
    massfactor = ("critical", 200)

    density_r = np.array([
        1.77842761e+08, 9.75233623e+06, 2.93789626e+06, 1.90107238e+06,
        1.30776878e+06, 7.01004140e+05, 4.20678479e+05, 1.57421880e+05,
        7.54669701e+04, 2.56319769e+04, 6.21976562e+03, 3.96522424e+02,
        7.39336808e+01
    ]) * (u.solMass / u.kpc**3)

    fitter = LevMarLSQFitter()

    n200c = NFW(mass=1.8E15 * u.M_sun,
                concentration=7.0,
                redshift=redshift,
                cosmo=cosmo,
                massfactor=massfactor)
    n200c.redshift.fixed = True

    n_fit = fitter(n200c, r, density_r, maxiter=1000)

    assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)

    # 200m Overdensity
    massfactor = ("mean", 200)

    density_r = np.array([
        1.35677282e+08, 7.95392979e+06, 2.50352599e+06, 1.64535870e+06,
        1.14642248e+06, 6.26805453e+05, 3.81691731e+05, 1.46294819e+05,
        7.11559560e+04, 2.45737796e+04, 6.05459585e+03, 3.92183991e+02,
        7.34674416e+01
    ]) * (u.solMass / u.kpc**3)

    fitter = LevMarLSQFitter()

    n200m = NFW(mass=1.8E15 * u.M_sun,
                concentration=7.0,
                redshift=redshift,
                cosmo=cosmo,
                massfactor=massfactor)
    n200m.redshift.fixed = True

    n_fit = fitter(n200m, r, density_r, maxiter=1000)

    assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)

    # Virial mass
    massfactor = ("virial", 200)

    density_r = np.array([
        1.44573515e+08, 8.34873998e+06, 2.60137484e+06, 1.70348738e+06,
        1.18337370e+06, 6.43994654e+05, 3.90800249e+05, 1.48930537e+05,
        7.21856397e+04, 2.48289464e+04, 6.09477095e+03, 3.93248818e+02,
        7.35821787e+01
    ]) * (u.solMass / u.kpc**3)

    fitter = LevMarLSQFitter()

    nvir = NFW(mass=1.8E15 * u.M_sun,
               concentration=7.0,
               redshift=redshift,
               cosmo=cosmo,
               massfactor=massfactor)
    nvir.redshift.fixed = True

    n_fit = fitter(nvir, r, density_r, maxiter=1000)

    assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)
Example #16
            wl_image = im_file[1].data

        # definitions for the PSF fitting
        epsf, gauss_std, n_resample = phot_funcs.get_psf(wl_image, psf_stars_x,
                                                         psf_stars_y)
        # aperture radius used for first flux guess
        aper_rad = 4 * gauss_std / n_resample

        # fix the positions to only fit the flux of the star, not its position
        epsf.x_0.fixed = True
        epsf.y_0.fixed = True

        phot = BasicPSFPhotometry(group_maker=DAOGroup(15.),
                                  psf_model=epsf,
                                  bkg_estimator=None,
                                  fitter=LevMarLSQFitter(),
                                  fitshape=(17, 17),
                                  aperture_radius=aper_rad)

        # IDs and positions from input file
        ctable = pd.read_csv(cfile)
        ids, xpos, ypos = ctable['id'], ctable['x'], ctable['y']
        ras, decs = ctable['ra'], ctable['dec']
        uv_mags, ir_mags = ctable['f336_mag'], ctable['f814_mag']

        # star to extract spectrum for (first one in the input file)
        star = phot_funcs.Star(ids[0], xpos[0], ypos[0], ras[0], decs[0],
                               uv_mags[0], ir_mags[0],
                               fname=(outfolder + fname))

        # star positions to consider when fitting the star of interest
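
A self-contained sketch of the BasicPSFPhotometry setup in this fragment, with photutils' analytic IntegratedGaussianPRF standing in for the empirical ePSF (assumes a photutils version that still provides BasicPSFPhotometry):

import numpy as np
from astropy.table import Table
from astropy.modeling.fitting import LevMarLSQFitter
from photutils.psf import BasicPSFPhotometry, DAOGroup, IntegratedGaussianPRF

psf_model = IntegratedGaussianPRF(sigma=1.5)
psf_model.x_0.fixed = True  # fit only the flux, as in the fragment above
psf_model.y_0.fixed = True

phot = BasicPSFPhotometry(group_maker=DAOGroup(15.),
                          psf_model=psf_model,
                          bkg_estimator=None,
                          fitter=LevMarLSQFitter(),
                          fitshape=(7, 7),
                          aperture_radius=5.)

# One synthetic star at (12, 14) on a 25x25 frame.
yy, xx = np.mgrid[0:25, 0:25]
image = 100 * np.exp(-((xx - 12) ** 2 + (yy - 14) ** 2) / (2 * 1.5 ** 2))
init = Table({'x_0': [12.0], 'y_0': [14.0]})
result = phot(image=image, init_guesses=init)
print(result['flux_fit'])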
Example #17
File: core.py Project: ysBach/photutils
def fit_2dgaussian(data, error=None, mask=None):
    """
    Fit a 2D Gaussian plus a constant to a 2D image.

    Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
    arrays are automatically masked.  The mask for invalid values
    represents the combination of the invalid-value masks for the
    ``data`` and ``error`` arrays.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    result : A `GaussianConst2D` model instance.
        The best-fitting Gaussian 2D model.
    """

    from ..morphology import data_properties  # prevent circular imports

    data = np.ma.asanyarray(data)

    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        data.mask |= mask

    if np.any(~np.isfinite(data)):
        data = np.ma.masked_invalid(data)
        warnings.warn(
            'Input data contains non-finite values (e.g. NaNs or infs), '
            'which were automatically masked.', AstropyUserWarning)

    if error is not None:
        error = np.ma.masked_invalid(error)
        if data.shape != error.shape:
            raise ValueError('data and error must have the same shape.')
        data.mask |= error.mask
        weights = 1.0 / error.clip(min=1.e-30)
    else:
        weights = np.ones(data.shape)

    if np.ma.count(data) < 7:
        raise ValueError('Input data must have at least 7 unmasked values to '
                         'fit a 2D Gaussian plus a constant.')

    # assign zero weight to masked pixels
    if data.mask is not np.ma.nomask:
        weights[data.mask] = 0.

    mask = data.mask
    data.fill_value = 0.0
    data = data.filled()

    # Subtract the minimum of the data as a crude background estimate.
    # This will also make the data values positive, preventing issues with
    # the moment estimation in data_properties (moments from negative data
    # values can yield undefined Gaussian parameters, e.g. x/y_stddev).
    props = data_properties(data - np.min(data), mask=mask)

    init_const = 0.  # subtracted data minimum above
    init_amplitude = np.ptp(data)
    g_init = GaussianConst2D(constant=init_const,
                             amplitude=init_amplitude,
                             x_mean=props.xcentroid.value,
                             y_mean=props.ycentroid.value,
                             x_stddev=props.semimajor_axis_sigma.value,
                             y_stddev=props.semiminor_axis_sigma.value,
                             theta=props.orientation.value)
    fitter = LevMarLSQFitter()
    y, x = np.indices(data.shape)
    gfit = fitter(g_init, x, y, data, weights=weights)

    return gfit
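
GaussianConst2D used above is photutils' constant-plus-Gaussian composite; an equivalent can be composed directly from astropy primitives (a sketch, not the photutils class itself):

import numpy as np
from astropy.modeling.models import Const2D, Gaussian2D
from astropy.modeling.fitting import LevMarLSQFitter

yy, xx = np.mgrid[0:40, 0:40]
data = 3.0 + 5.0 * np.exp(-((xx - 22.0) ** 2 + (yy - 18.0) ** 2)
                          / (2 * 2.5 ** 2))  # constant background + Gaussian

g_init = (Const2D(amplitude=np.min(data))
          + Gaussian2D(amplitude=np.ptp(data), x_mean=20., y_mean=20.,
                       x_stddev=2., y_stddev=2.))
gfit = LevMarLSQFitter()(g_init, xx, yy, data)
print(gfit.amplitude_0.value,                    # background level
      gfit.x_mean_1.value, gfit.y_mean_1.value)  # Gaussian center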
Example #18
def fit_2dgaussian(data, error=None, mask=None):
    """
    Fit a 2D Gaussian plus a constant to a 2D image.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    result : A `GaussianConst2D` model instance.
        The best-fitting Gaussian 2D model.
    """

    if data.size < 7:
        warnings.warn(
            'data array must have at least 7 values to fit a 2D '
            'Gaussian plus a constant', AstropyUserWarning)
        return None

    if error is not None:
        weights = 1.0 / error
    else:
        weights = None

    if mask is not None:
        mask = np.asanyarray(mask)
        if weights is None:
            weights = np.ones(data.shape)
        # down-weight masked pixels (a float array guarantees the tiny
        # weight is not truncated to zero for integer input data)
        weights[mask] = 1.e-30

    # Subtract the minimum of the data as a crude background estimate.
    # This will also make the data values positive, preventing issues with
    # the moment estimation in data_properties (moments from negative data
    # values can yield undefined Gaussian parameters, e.g. x/y_stddev).
    shift = np.min(data)
    data = np.copy(data) - shift
    props = data_properties(data, mask=mask)
    init_values = np.array([
        props.xcentroid.value, props.ycentroid.value,
        props.semimajor_axis_sigma.value, props.semiminor_axis_sigma.value,
        props.orientation.value
    ])

    init_const = 0.  # subtracted data minimum above
    init_amplitude = np.nanmax(data) - np.nanmin(data)
    g_init = GaussianConst2D(init_const, init_amplitude, *init_values)
    fitter = LevMarLSQFitter()
    y, x = np.indices(data.shape)
    gfit = fitter(g_init, x, y, data, weights=weights)
    gfit.amplitude_0 = gfit.amplitude_0 + shift
    return gfit
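
Design note on the variant above: the data minimum is subtracted before the moment estimate, so data_properties only ever sees non-negative values, and the shift is then added back through gfit.amplitude_0. If GaussianConst2D is a Const2D + Gaussian2D compound model, amplitude_0 is the constant term, so the Gaussian's own amplitude is untouched and the fitted model evaluates in the original data units.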
Example #19
def extractFlux(cnam, ccd, rccd, read, gain, ccdwin, rfile, store):
    """This extracts the flux of all apertures of a given CCD.

    The steps are (1) creation of PSF model, (2) PSF fitting, (3)
    flux extraction. The apertures are assumed to be correctly positioned.

    It returns the results as a dictionary keyed on the aperture label. Each
    entry returns a list:

    [x, ex, y, ey, fwhm, efwhm, beta, ebeta, counts, countse, sky, esky,
    nsky, nrej, flag]

    flag = bitmask. See hipercam.core for all the options, which are
    referred to by name in the code, e.g. ALL_OK. The various flags can
    signal that there are no sky pixels (NO_SKY), that the sky aperture
    was off the edge of the window (SKY_AT_EDGE), etc.

    This code::

       >> bset = flag & TARGET_SATURATED

    determines, for example, whether the data saturation flag is set.

    Arguments::

       cnam : string
          CCD identifier label

       ccd : CCD
          the debiassed, flat-fielded CCD.

       rccd : CCD
          corresponding raw CCD, used to work out whether data are
          saturated in the target aperture.

       read : CCD
          readnoise divided by the flat-field

       gain : CCD
          gain multiplied by the flat-field

       ccdwin : dictionary of strings
          the Window label corresponding to each Aperture

       rfile : Rfile
          reduce file configuration parameters

       store : dict of dicts
          see moveApers for what this contains.

    """

    # initialise flag
    flag = hcam.ALL_OK

    ccdaper = rfile.aper[cnam]

    results = {}
    # get profile params from aperture store
    mfwhm = store['mfwhm']
    mbeta = store['mbeta']
    method = 'm' if mbeta > 0.0 else 'g'

    if mfwhm <= 0:
        # die hard, die soon as there's nothing we can do.
        print((' *** WARNING: CCD {:s}: no measured FWHM to create PSF model'
               '; no extraction possible').format(cnam))
        # set flag to indicate no FWHM
        flag = hcam.NO_FWHM

        for apnam, aper in ccdaper.items():
            info = store[apnam]
            results[apnam] = {
                'x': aper.x,
                'xe': info['xe'],
                'y': aper.y,
                'ye': info['ye'],
                'fwhm': info['fwhm'],
                'fwhme': info['fwhme'],
                'beta': info['beta'],
                'betae': info['betae'],
                'counts': 0.,
                'countse': -1,
                'sky': 0.,
                'skye': 0.,
                'nsky': 0,
                'nrej': 0,
                'flag': flag
            }
        return results

    # all apertures have to be in the same window, or we can't easily make a
    # postage stamp of the data
    wnames = set(ccdwin.values())
    if len(wnames) != 1:
        print((' *** WARNING: CCD {:s}: not all apertures'
               ' lie within the same window; no extraction possible'
               ).format(cnam))

        # set flag to indicate no extraction
        flag = hcam.NO_EXTRACTION

        # return empty results
        for apnam, aper in ccdaper.items():
            info = store[apnam]
            results[apnam] = {
                'x': aper.x,
                'xe': info['xe'],
                'y': aper.y,
                'ye': info['ye'],
                'fwhm': info['fwhm'],
                'fwhme': info['fwhme'],
                'beta': info['beta'],
                'betae': info['betae'],
                'counts': 0.,
                'countse': -1,
                'sky': 0.,
                'skye': 0.,
                'nsky': 0,
                'nrej': 0,
                'flag': flag
            }
        return results
    wnam = wnames.pop()

    # PSF params are in binned pixels, so find binning
    bin_fac = ccd[wnam].xbin

    # create PSF model
    if method == 'm':
        psf_model = MoffatPSF(beta=mbeta, fwhm=mfwhm / bin_fac)
    else:
        psf_model = IntegratedGaussianPRF(sigma=mfwhm *
                                          gaussian_fwhm_to_sigma / bin_fac)

    # force photometry only at aperture positions
    # this means PSF shape and positions are fixed, we are only fitting flux
    if rfile['psf_photom']['positions'] == 'fixed':
        psf_model.x_0.fixed = True
        psf_model.y_0.fixed = True

    # create instances for PSF photometry
    gfac = float(rfile['psf_photom']['gfac'])
    sclip = float(rfile['sky']['thresh'])
    daogroup = DAOGroup(gfac * mfwhm / bin_fac)
    mmm_bkg = MMMBackground(sigma_clip=SigmaClip(sclip))
    fitter = LevMarLSQFitter()
    fitshape_box_size = int(2 * int(rfile['psf_photom']['fit_half_width']) + 1)
    fitshape = (fitshape_box_size, fitshape_box_size)

    photometry_task = BasicPSFPhotometry(group_maker=daogroup,
                                         bkg_estimator=mmm_bkg,
                                         psf_model=psf_model,
                                         fitter=fitter,
                                         fitshape=fitshape)

    # initialise flag
    flag = hcam.ALL_OK

    # extract Windows relevant for these apertures
    wdata = ccd[wnam]
    wraw = rccd[wnam]

    # extract sub-windows that include all of the apertures, plus a little
    # extra around the edges.
    x1 = min([ap.x - ap.rsky2 - wdata.xbin for ap in ccdaper.values()])
    x2 = max([ap.x + ap.rsky2 + wdata.xbin for ap in ccdaper.values()])
    y1 = min([ap.y - ap.rsky2 - wdata.ybin for ap in ccdaper.values()])
    y2 = max([ap.y + ap.rsky2 + wdata.ybin for ap in ccdaper.values()])

    # extract sub-Windows
    swdata = wdata.window(x1, x2, y1, y2)
    swraw = wraw.window(x1, x2, y1, y2)

    # compute pixel positions of apertures in windows
    xpos, ypos = zip(
        *((swdata.x_pixel(ap.x), swdata.y_pixel(ap.y)) \
          for ap in ccdaper.values())
    )
    positions = Table(names=['x_0', 'y_0'], data=(xpos, ypos))

    # do the PSF photometry
    photom_results = photometry_task(swdata.data, init_guesses=positions)
    slevel = mmm_bkg(swdata.data)

    # unpack the results and check apertures
    for apnam, aper in ccdaper.items():
        try:
            # reset flag
            flag = hcam.ALL_OK

            result_row = photom_results[photom_results['id'] == int(apnam)]
            if len(result_row) == 0:
                flag |= hcam.NO_DATA
                raise hcam.HipercamError(
                    'no source in PSF photometry for this aperture')
            elif len(result_row) > 1:
                flag |= hcam.NO_EXTRACTION
                raise hcam.HipercamError(
                    'ambiguous lookup for this aperture in PSF photometry')
            else:
                result_row = result_row[0]

            # compute X, Y arrays over the sub-window relative to the centre
            # of the aperture and the distance squared from the centre (Rsq)
            # to save a little effort.
            x = swdata.x(np.arange(swdata.nx)) - aper.x
            y = swdata.y(np.arange(swdata.ny)) - aper.y
            X, Y = np.meshgrid(x, y)
            Rsq = X**2 + Y**2

            # size of a pixel which is used to taper pixels as they approach
            # the edge of the aperture to reduce pixellation noise
            size = np.sqrt(wdata.xbin * wdata.ybin)

            # target selection, accounting for extra apertures and allowing
            # pixels to contribute if their centres are as far as size/2 beyond
            # the edge of the circle (but with a tapered weight)
            dok = Rsq < (aper.rtarg + size / 2.)**2
            if not dok.any():
                # check there are some valid pixels
                flag |= hcam.NO_DATA
                raise hcam.HipercamError('no valid pixels in aperture')

            # check for saturation and nonlinearity
            if cnam in rfile.warn:
                if swraw.data[dok].max() >= rfile.warn[cnam]['saturation']:
                    flag |= hcam.TARGET_SATURATED

                if swraw.data[dok].max() >= rfile.warn[cnam]['nonlinear']:
                    flag |= hcam.TARGET_NONLINEAR
            else:
                warnings.warn(
                    'CCD {:s} has no nonlinearity or saturation '
                    'levels set'.format(cnam))

            counts = result_row['flux_fit']
            countse = result_row['flux_unc']
            info = store[apnam]

            results[apnam] = {
                'x': aper.x,
                'xe': info['xe'],
                'y': aper.y,
                'ye': info['ye'],
                'fwhm': info['fwhm'],
                'fwhme': info['fwhme'],
                'beta': info['beta'],
                'betae': info['betae'],
                'counts': counts,
                'countse': countse,
                'sky': slevel,
                'skye': 0,
                'nsky': 0,
                'nrej': 0,
                'flag': flag
            }

        except hcam.HipercamError as err:

            info = store[apnam]
            flag |= hcam.NO_EXTRACTION

            results[apnam] = {
                'x': aper.x,
                'xe': info['xe'],
                'y': aper.y,
                'ye': info['ye'],
                'fwhm': info['fwhm'],
                'fwhme': info['fwhme'],
                'beta': info['beta'],
                'betae': info['betae'],
                'counts': 0.,
                'countse': -1,
                'sky': 0.,
                'skye': 0.,
                'nsky': 0,
                'nrej': 0,
                'flag': flag
            }

    # finally, we are done
    return results
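
A hedged sketch of consuming the returned dictionary, following the bitmask convention described in the docstring; the pipeline objects passed in are assumed to be in scope as in the argument list above:

results = extractFlux(cnam, ccd, rccd, read, gain, ccdwin, rfile, store)
for apnam, res in results.items():
    # bitwise test against a named flag from hipercam.core, as in the docstring
    if res['flag'] & hcam.TARGET_SATURATED:
        print('aperture {:s} saturated; counts = {:.1f}'.format(
            apnam, res['counts']))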
Example #20
def fit_dust_wg00():
    """
    Fit WG00 as flexible model
    """
    from dust_attenuation import averages, shapes, radiative_transfer
    from astropy.modeling.fitting import LevMarLSQFitter
    #from importlib import reload
    #reload(averages); reload(shapes)

    shapes.x_range_N09 = [0.01, 1000]
    averages.x_range_C00 = [0.01, 1000]
    averages.x_range_L02 = [0.01, 0.18]

    tau_V = 1.0
    tau_V_grid = np.array([
        0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0,
        7.0, 8.0, 9.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0
    ])

    tau_V_grid = tau_V_grid[tau_V_grid < 11]

    params = []
    for i in range(len(tau_V_grid)):
        tau_V = tau_V_grid[i]
        wg00 = radiative_transfer.WG00(tau_V=tau_V, **WG00_DEFAULTS)

        model = shapes.N09()
        model.fixed['x0'] = True

        fitter = LevMarLSQFitter()

        wave = np.logspace(np.log10(0.18), np.log10(3), 100)
        y = wg00(wave * u.micron)
        _fit = fitter(model, wave, y)

        plt.plot(wave,
                 y,
                 label='tau_V = {0:.2f}'.format(tau_V),
                 color='k',
                 alpha=0.4)

        #plt.plot(wave, _fit(wave))

        shapes.x_range_N09 = [0.01, 1000]
        averages.x_range_C00 = [0.01, 1000]

        wfull = np.logspace(-1.5, np.log10(20.1), 10000)
        plt.plot(wfull, _fit(wfull), color='r', alpha=0.4)
        params.append(_fit.parameters)

    params = np.array(params)
    N = params.shape[1]
    vx = np.linspace(0, 10, 100)
    order = [3, 5, 5, 5, 5]

    coeffs = {}

    for i in range(N):
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(tau_V_grid,
                params[:, i],
                marker='o',
                label=_fit.param_names[i])

        c = np.polyfit(tau_V_grid, params[:, i], order[i])
        ax.plot(vx, np.polyval(c, vx))
        ax.legend()
        ax.grid()
        coeffs[_fit.param_names[i]] = c
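
fit_dust_wg00 ends by polynomial-fitting each N09 parameter against tau_V_grid, so the coeffs dictionary can reproduce the parameters at an arbitrary optical depth. A sketch, assuming the function is modified to end with `return coeffs` (as written it returns nothing):

coeffs = fit_dust_wg00()  # assumes a final `return coeffs` is added

tau_V = 2.3
# evaluate each parameter's polynomial at the requested tau_V
params_at_tau = {name: np.polyval(c, tau_V) for name, c in coeffs.items()}
print(params_at_tau)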
Example #21
    def Flux(self, x, y, ll, ul, r):
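        """
        Measure the PSF flux of a single source near pixel position
        (x, y); see the code below for the authoritative behaviour.

        Steps: cut a (2r x 2r) stamp around (x, y), clean cosmic rays
        with lacosmic, detect sources with IRAFStarFinder, fit them with
        an IntegratedGaussianPRF via IterativelySubtractedPSFPhotometry,
        and keep the single source whose fitted centre lies between ll
        and ul on both axes.

        Returns (flux_counts, fractional flux uncertainty); (0, 0) if no
        unique source is found.
        """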
        x = int(x)
        y = int(y)
        data = self.hdulist[self.fz].data[x - r:x + r, y - r:y + r]
        data = (lacosmic.lacosmic(data,
                                  2,
                                  10,
                                  10,
                                  effective_gain=self.gain,
                                  readnoise=self.readnoise))[0]
        bkgrms = MADStdBackgroundRMS()
        std = bkgrms(data)
        iraffind = IRAFStarFinder(threshold=self.limit * std,
                                  fwhm=self.sigma_psf * gaussian_sigma_to_fwhm,
                                  minsep_fwhm=0.01,
                                  roundhi=5.0,
                                  roundlo=-5.0,
                                  sharplo=0.0,
                                  sharphi=2.0)
        daogroup = DAOGroup(2.0 * self.sigma_psf * gaussian_sigma_to_fwhm)
        mmm_bkg = MMMBackground()
        psf_model = IntegratedGaussianPRF(sigma=self.sigma_psf)
        from photutils.psf import IterativelySubtractedPSFPhotometry
        photometry = IterativelySubtractedPSFPhotometry(
            finder=iraffind,
            group_maker=daogroup,
            bkg_estimator=mmm_bkg,
            psf_model=psf_model,
            fitter=LevMarLSQFitter(),
            niters=1,
            fitshape=(21, 21))

        result_tab = photometry(image=data)
        """
        if plot == 1:
            residual_image = photometry.get_residual_image()
            print(result_tab['x_fit','y_fit'])
            plt.figure(self.filename+' data')
            plt.imshow(data, cmap='viridis',
                       aspect=1, interpolation='nearest', origin='lower')
            plt.show()
            plt.figure(self.filename+' residual')
            plt.imshow(residual_image, cmap='viridis',
                       aspect=1, interpolation='nearest', origin='lower')
            plt.show()
            plt.figure(self.filename+' PSF')
            plt.imshow(data-residual_image, cmap='viridis',
                       aspect=1, interpolation='nearest', origin='lower')
            plt.show()
        """

        if len(result_tab) > 5:
            return (0, 0)
        if len(result_tab) == 0:
            print('None')
            return (0, 0)
        result_tab['Minus'] = np.zeros(len(result_tab))
        for i in range(len(result_tab)):
            # flag sources whose fitted centre lies inside the (ll, ul) box
            if ll < result_tab['x_fit'][i] < ul and \
               ll < result_tab['y_fit'][i] < ul:
                result_tab['Minus'][i] = 1
            else:
                result_tab['Minus'][i] = 0
        mask = result_tab['Minus'] == 1.0
        result_tab = result_tab[mask]
        if len(result_tab) != 1:
            return (0, 0)
        flux_counts = float(result_tab['flux_fit'][0])
        flux_unc = float(result_tab['flux_unc'][0])
        flux_unc = flux_unc / flux_counts
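        # CCD-style noise estimate: a Poisson term (counts / gain) plus a
        # read-noise term scaled by an area of order the PSF FWHM.  Note
        # that this estimate is assigned to `error` but the function
        # returns the fitter's fractional uncertainty instead.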
        error = np.sqrt((flux_counts / self.gain) +
                        (self.sigma_psf * gaussian_sigma_to_fwhm**2 * np.pi *
                         self.readnoise))
        return (flux_counts, flux_unc)
Example #22
def centroid_2dg(data, error=None, mask=None):
    """
    Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
    a constant) to the array.

    Non-finite values (e.g., NaN or inf) in the ``data`` or ``error``
    arrays are automatically masked. These masks are combined.

    Parameters
    ----------
    data : array_like
        The 2D data array.

    error : array_like, optional
        The 2D array of the 1-sigma errors of the input ``data``.

    mask : array_like (bool), optional
        A boolean mask, with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.

    Returns
    -------
    centroid : `~numpy.ndarray`
        The ``x, y`` coordinates of the centroid.
    """
    from ..morphology import data_properties  # prevent circular imports

    data = np.ma.asanyarray(data)

    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        data.mask |= mask

    if np.any(~np.isfinite(data)):
        data = np.ma.masked_invalid(data)
        warnings.warn('Input data contains non-finite values (e.g., NaN or '
                      'infs) that were automatically masked.',
                      AstropyUserWarning)

    if error is not None:
        error = np.ma.masked_invalid(error)
        if data.shape != error.shape:
            raise ValueError('data and error must have the same shape.')
        data.mask |= error.mask
        weights = 1.0 / error.clip(min=1.e-30)
    else:
        weights = np.ones(data.shape)

    if np.ma.count(data) < 7:
        raise ValueError('Input data must have at least 7 unmasked values to '
                         'fit a 2D Gaussian plus a constant.')

    # assign zero weight to masked pixels
    if data.mask is not np.ma.nomask:
        weights[data.mask] = 0.

    mask = data.mask
    data.fill_value = 0.
    data = data.filled()

    # Subtract the minimum of the data as a rough background estimate.
    # This will also make the data values positive, preventing issues with
    # the moment estimation in data_properties. Moments from negative data
    # values can yield undefined Gaussian parameters, e.g., x/y_stddev.
    props = data_properties(data - np.min(data), mask=mask)

    constant_init = 0.  # subtracted data minimum above
    g_init = (Const2D(constant_init)
              + Gaussian2D(amplitude=np.ptp(data),
                           x_mean=props.xcentroid,
                           y_mean=props.ycentroid,
                           x_stddev=props.semimajor_sigma.value,
                           y_stddev=props.semiminor_sigma.value,
                           theta=props.orientation.value))
    fitter = LevMarLSQFitter()
    y, x = np.indices(data.shape)
    gfit = fitter(g_init, x, y, data, weights=weights)
    return np.array([gfit.x_mean_1.value, gfit.y_mean_1.value])
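
A minimal usage sketch for centroid_2dg; the synthetic Gaussian image is illustrative only:

import numpy as np

yy, xx = np.mgrid[0:40, 0:40]
image = np.exp(-((xx - 17.3)**2 + (yy - 21.8)**2) / (2 * 3.0**2))

xc, yc = centroid_2dg(image)
print(xc, yc)  # should land close to (17.3, 21.8)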
Example #23
allfitspath.sort()
allfitsname.sort()

print(len(allfitspath), "items found")
#%%

images = []

#%%
DISPAXIS = 2  # 1 = line = python_axis_1 // 2 = column = python_axis_0
FONTSIZE = 12  # Change it on your computer if you wish.
rcParams.update({'font.size': FONTSIZE})
COMPIMAGE = os.path.join(ppdpath,
                         'Comp-master.fits')  # Change directory if needed!
OBJIMAGE = os.path.join(newfitspath, 'HD207673-0001.fits')
LINE_FITTER = LevMarLSQFitter()

#%%
lamphdu = fits.open(COMPIMAGE)
objhdu = fits.open(OBJIMAGE)
lampimage = lamphdu[0].data
objimage = objhdu[0].data

if lampimage.shape != objimage.shape:
    raise ValueError('lamp and obj images should have the same shape!')

if DISPAXIS == 2:
    lampimage = lampimage.T
    objimage = objimage.T
elif DISPAXIS != 1:
    raise ValueError('DISPAXIS must be 1 (line) or 2 (column)!')
Example #24
def cheating_astrometry(image,
                        input_table,
                        psf: np.ndarray,
                        filename: str = '?',
                        config: Config = Config.instance()):
    """
    Evaluate the maximum achievable precision of the EPSF fitting approach by using a hand-defined psf
    :param input_table:
    :param image:
    :param filename:
    :param psf:
    :param config:
    :return:
    """
    try:
        print(f'starting job on image {filename} with {config}')
        origin = np.array(psf.shape) / 2
        # type: ignore
        epsf = photutils.psf.EPSFModel(psf,
                                       flux=1,
                                       origin=origin,
                                       oversampling=1,
                                       normalize=False)
        epsf = photutils.psf.prepare_psf_model(epsf, renormalize_psf=False)

        finder = get_finder(image, config)

        #fwhm = estimate_fwhm(epsf.psfmodel)
        fwhm = config.fwhm_guess
        grouper = DAOGroup(config.separation_factor * fwhm)

        epsf.fwhm = astropy.modeling.Parameter(
            'fwhm', 'this is not the way to add this I think')
        epsf.fwhm.value = fwhm
        bkgrms = MADStdBackgroundRMS()

        photometry = BasicPSFPhotometry(finder=finder,
                                        group_maker=grouper,
                                        bkg_estimator=bkgrms,
                                        psf_model=epsf,
                                        fitter=LevMarLSQFitter(),
                                        fitshape=config.fitshape)

        guess_table = input_table.copy()
        guess_table = cut_edges(guess_table, 101, image.shape[0])
        guess_table.rename_columns(['x', 'y'], ['x_0', 'y_0'])

        guess_table['x_0'] += np.random.uniform(-0.1,
                                                +0.1,
                                                size=len(guess_table['x_0']))
        guess_table['y_0'] += np.random.uniform(-0.1,
                                                +0.1,
                                                size=len(guess_table['y_0']))

        result_table = photometry(image, guess_table)

        return PhotometryResult(image, input_table, result_table, epsf, None,
                                config, filename)
    except Exception as ex:
        import traceback
        print(f'error in cheating_astrometry({filename}, {psf}, {config})')
        error = ''.join(
            traceback.format_exception(type(ex), ex, ex.__traceback__))
        print(error)
        return error
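
A hedged invocation sketch for cheating_astrometry; image, input_table, and psf are placeholders that would come from the surrounding pipeline, and the filename is illustrative:

result = cheating_astrometry(image, input_table, psf,
                             filename='synthetic_0.fits',
                             config=Config.instance())
if isinstance(result, str):
    # the function returns a formatted traceback string on failure
    print('photometry failed:\n', result)
else:
    print(result.result_table)  # assumed PhotometryResult attribute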