Example #1
def deep_obj_mask(img, ota, apply=False):
    from astropy.io import fits
    from astropy.stats import sigma_clipped_stats
    image = odi.scaledpath+'scaled_'+ota+'.'+str(img[16:])
    ota_mask = 'objmask_'+ota+'.'+str(img[16:17])+'.fits'
    hdulist = fits.open(image)
    hdu_ota = hdulist[0]
    # maskhdu = fits.open(bppath+ota_mask)
    gapshdu = fits.open(odi.bppath+'reproj_mask_'+ota+'.'+str(img[16:]))
    total_mask = gapshdu[0].data
    #maskhdu[0].data + 

    ny, nx = hdu_ota.data.shape  # numpy images are (rows, cols) = (y, x)
    print(nx, ny)
    mean1, median1, std1 = sigma_clipped_stats(hdu_ota.data[0:ny//2,0:nx//2], mask=total_mask[0:ny//2,0:nx//2], sigma=3.0, iters=3)
    mean2, median2, std2 = sigma_clipped_stats(hdu_ota.data[0:ny//2,nx//2:nx], mask=total_mask[0:ny//2,nx//2:nx], sigma=3.0, iters=3)
    mean3, median3, std3 = sigma_clipped_stats(hdu_ota.data[ny//2:ny,0:nx//2], mask=total_mask[ny//2:ny,0:nx//2], sigma=3.0, iters=3)
    mean4, median4, std4 = sigma_clipped_stats(hdu_ota.data[ny//2:ny,nx//2:nx], mask=total_mask[ny//2:ny,nx//2:nx], sigma=3.0, iters=3)
    mean = [mean1, mean2, mean3, mean4]
    median = [median1, median2, median3, median4]
    std = [std1, std2, std3, std4]
    # plt.imshow(hdu_ota.data, origin='lower', cmap='Greys_r', vmin=-10., vmax=500.)
    # plt.imshow(total_mask, cmap=random_cmap(), alpha=0.5)
    # plt.show()
    return mean, median, std
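
A quick way to sanity-check the quadrant bookkeeping above is to run the same per-quadrant statistics on a synthetic frame. This is a minimal sketch assuming only numpy and astropy (note that astropy >= 3.1 renamed the iters keyword to maxiters):

import numpy as np
from astropy.stats import sigma_clipped_stats

# Synthetic frame: flat sky plus noise, with one bright contaminating "source"
rng = np.random.default_rng(42)
data = rng.normal(loc=100.0, scale=5.0, size=(200, 200))
data[150:160, 150:160] += 5000.0

ny, nx = data.shape
quadrants = {'Q1': (slice(0, ny//2), slice(0, nx//2)),
             'Q2': (slice(0, ny//2), slice(nx//2, nx)),
             'Q3': (slice(ny//2, ny), slice(0, nx//2)),
             'Q4': (slice(ny//2, ny), slice(nx//2, nx))}
for name, sl in quadrants.items():
    mean, median, std = sigma_clipped_stats(data[sl], sigma=3.0, maxiters=3)
    # The clipped median of Q4 stays near 100 despite the bright source
    print(name, mean, median, std)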
Example #2
def create_superdark(crj_filename, basedark):
    """ Create a superdark from the crj and basedark

    Create the science portion of the forthcoming reference dark by adding the
    'only baseline dark current' image to the 'only hot pixels' image.

    .. note:: input file will be updated in-place.

    Parameters
    ----------
    crj_filename : str
        filename of the cosmic-ray rejected file
    basedark : str
        basedark name

    """

    from astropy.io import fits
    from astropy.stats import sigma_clipped_stats
    from scipy.signal import medfilt
    import numpy as np

    with fits.open(crj_filename, mode='update') as crj_hdu:

        ## Perform iterative statistics on this normalized superdark
        data_mean, data_median, data_std = sigma_clipped_stats(crj_hdu[('sci', 1)].data,
                                                               sigma=5,
                                                               iters=40)

        p_five_sigma = data_median + (5*data_std)
        print('hot pixels are defined as above: ', p_five_sigma)
        basedark_hdu = fits.open(basedark)

        base_mean, base_median, base_std = sigma_clipped_stats(basedark_hdu[('sci', 1)].data,
                                                               sigma=5,
                                                               iters=40)

        fivesig = base_median + 5.0 * base_std
        zerodark = crj_hdu[('sci', 1)].data - base_median
        only_hotpix = np.where(crj_hdu[('sci', 1)].data >= p_five_sigma,
                               zerodark,
                               0.0)

        basedark_med = medfilt(basedark_hdu[('sci', 1)].data, (5, 5))
        only_dark = np.where(basedark_hdu[('sci', 1)].data >= p_five_sigma,
                             basedark_med,
                             basedark_hdu[('sci', 1)].data)


        crj_hdu[('sci', 1)].data = only_dark + only_hotpix

        #- update DQ extension
        crj_hdu[('dq', 1)].data = np.where(only_hotpix >= p_five_sigma,
                                           16,
                                           crj_hdu[('dq', 1)].data)

        #- Update Error
        crj_hdu[('err', 1)].data = np.where(only_hotpix == 0,
                                            basedark_hdu[('err', 1)].data,
                                            crj_hdu[('err', 1)].data)
Example #3
def resolvingpower_from_photonlist(photons, orders,
                                   col='proj_x', zeropos=None,
                                   ordercol='order'):
    '''Calculate the resolving power for several grating orders

    If 20 or fewer photons are detected in a single order, this function
    returns nan values for that order.

    Unlike `~marxs.analysis.gratings.resolvingpower_per_order` this method does
    not run any ray-trace simulations. It just extracts information from a
    photon list that is passed in.

    Parameters
    ----------
    photons : `astropy.table.Table`
        Photon event list
    orders : np.array
        Orders for which the resolving power will be calculated
    col : string
        Column name for the column holding the dispersion coordinate.
    zeropos : float or ``None``
        Value of column `col` where the zeroth order is found. If not given,
        this is calculated (assuming the zeroth order photons are part of the
        event list).
    ordercol : string
        Name of column that lists grating order for each photon

    Returns
    -------
    res : np.array
        resolving power for each order
    pos : np.array
        mean value of ``col`` for each order
    std : np.array
        standard deviation of the distribution of ``col`` for each order
    '''
    if zeropos is None:
        ind = (photons[ordercol] == 0)
        if ind.sum() < 20:
            raise AnalysisError('Too few photons in list to determine position of order 0 automatically.')
        zeropos, medzeropos, stdzero = sigma_clipped_stats(photons[col][ind])

    pos = np.zeros_like(orders, dtype=float)
    std = np.zeros_like(orders, dtype=float)

    for i, o in enumerate(orders):
        ind = (photons[ordercol] == o)
        if ind.sum() > 20:
            meanpos, medianpos, stdpos = sigma_clipped_stats(photons[col][ind])
        else:
            meanpos, stdpos = np.nan, np.nan
        pos[i] = meanpos
        std[i] = stdpos
    res = np.abs(pos - zeropos) / (std * 2.3548)
    return res, pos, std
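
A minimal usage sketch for this function, assuming the snippet's own imports (numpy as np, astropy.stats.sigma_clipped_stats) are in scope, using a synthetic photon list with the default column names:

from astropy.table import Table
import numpy as np

rng = np.random.default_rng(0)
n = 1000
# Zeroth order centered at proj_x ~ 0, first order dispersed to proj_x ~ 50
photons = Table({'proj_x': np.concatenate([rng.normal(0.0, 0.5, n),
                                           rng.normal(50.0, 0.5, n)]),
                 'order': np.concatenate([np.zeros(n, dtype=int),
                                          np.ones(n, dtype=int)])})

res, pos, std = resolvingpower_from_photonlist(photons, orders=np.array([0, 1]))
# Expect res[1] ~ 50 / (0.5 * 2.3548), i.e. roughly 42
print(res, pos, std)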
Example #4
def measureBackground(data, iterations, mask):
    if mask.sum() > 0:
        mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=mask)
    else:
        mean, median, std = sigma_clipped_stats(data, sigma=3.0)

    if iterations == 0:
        return mean, median, std
    else:
        threshold = median + (std * 2)
        segm_img = detect_sources(data, threshold, npixels=5)
        mask = segm_img.data.astype(bool)  # turn the segmentation image into a mask
        circMask = generateCircularMask(5)
        finalMask = binary_dilation(mask, circMask)
        return measureBackground(data, iterations - 1, finalMask)
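
A usage sketch for measureBackground. generateCircularMask is not defined in the snippet, so a hypothetical stand-in is provided here; numpy as np, sigma_clipped_stats, photutils' detect_sources, and scipy.ndimage's binary_dilation are assumed to be in scope:

import numpy as np

def generateCircularMask(radius):
    # Hypothetical stand-in for the snippet's undefined helper:
    # a boolean disk usable as a dilation structuring element
    yy, xx = np.mgrid[-radius:radius+1, -radius:radius+1]
    return (xx**2 + yy**2) <= radius**2

rng = np.random.default_rng(7)
img = rng.normal(50.0, 2.0, (128, 128))
img[40:44, 40:44] += 300.0  # a source to be masked out

mean, median, std = measureBackground(img, 1, np.zeros_like(img, dtype=bool))
print(mean, median, std)  # background stats with the source dilated out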
Example #5
def find_source_daofind(frame, ellipse, config, track_record, special=False):

    """
    This function ...
    :param data:
    :return:
    """

    # TODO: FIX THIS FUNCTION

    sigma_level = 5.0
    plot = False   # plotting flag; undefined in the original snippet
    data = frame   # the input frame is the data array to search

    # Calculate the sigma-clipped statistics of the data
    mean, median, std = sigma_clipped_stats(data, sigma=3.0)

    result_table = daofind(data - median, fwhm=3.0, threshold=sigma_level*std)

    result_table.rename_column('xcentroid', 'x_peak')
    result_table.rename_column('ycentroid', 'y_peak')

    # If requested, make a plot with the source(s) indicated
    if plot: plotting.plot_peaks(data, result_table['x_peak'], result_table['y_peak'], radius=4.0)

    # Return the list of source positions
    #return result_table, median

    source = []
    return source
Example #6
def circ_flux(image, maskmap, ap_center, ap_radii, max_rad=None):
    """
    Calculate flux in a circular aperture at the given set of aperture radii.
    """
    
    mean, median, std = sigma_clipped_stats(image, mask=(maskmap==0),
                                            sigma=3.0, iters=3)
    # Calculate the background level
    bkgd_flux = median

    ap_fluxes = np.zeros(len(ap_radii))*np.nan
    bkgd_fluxes = np.zeros(len(ap_radii))*np.nan

    if max_rad is None:
        max_rad = max(np.shape(maskmap)) / 2.0
    
    # Compute fluxes and background levels for every given radius
    for i, rad in enumerate(ap_radii):
        # If the radius is bigger than half the image, the photometry
        # will fail (because the aperture will fall off the image)
        if rad > max_rad:
            continue

        # Just take bkgd as median of whole image
        # Otherwise no background left with big apertures
        bkgd_fluxes[i] = bkgd_flux
        bkgd_subtracted = image - bkgd_fluxes[i]
        
        # Now do the aperture photometry itself
        aperture = photutils.CircularAperture(ap_center, r=rad)
        phot_table = photutils.aperture_photometry(bkgd_subtracted, aperture)
        ap_fluxes[i] = phot_table["aperture_sum"][0]
        
    return ap_fluxes, bkgd_fluxes
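
A minimal usage sketch for circ_flux, assuming numpy as np, photutils, and sigma_clipped_stats are imported as the snippet expects (the iters keyword implies an older astropy; newer photutils exposes CircularAperture from photutils.aperture):

import numpy as np

rng = np.random.default_rng(1)
image = rng.normal(10.0, 1.0, (100, 100))
yy, xx = np.mgrid[0:100, 0:100]
image += 500.0 * np.exp(-((xx - 50)**2 + (yy - 50)**2) / (2 * 2.0**2))  # fake star

maskmap = np.ones_like(image)  # 1 = usable pixel, so mask=(maskmap==0) masks nothing
ap_fluxes, bkgd_fluxes = circ_flux(image, maskmap, ap_center=(50, 50),
                                   ap_radii=[2, 4, 8])
print(ap_fluxes, bkgd_fluxes)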
Example #7
    def create_median(self, resampled_models):
        """IFU-specific version of create_median."""
        resampled_sci = [i.data for i in resampled_models]
        resampled_wht = [i.weightmap for i in resampled_models]

        nlow = self.outlierpars.get('nlow', 0)
        nhigh = self.outlierpars.get('nhigh', 0)
        maskpt = self.outlierpars.get('maskpt', 0.7)
        badmasks = []
        for w in resampled_wht:
            mean_weight, _, _ = sigma_clipped_stats(w,
                                                    sigma=3.0,
                                                    mask_value=0.)
            weight_threshold = mean_weight * maskpt
            # Mask pixels where the weight falls below MASKPT percent of
            #    the mean weight
            mask = np.less(w, weight_threshold)
            log.debug("Number of pixels with low weight: {}".format(
                        np.sum(mask)))
            badmasks.append(mask)

        # Compute median of stack of images using BADMASKS to remove low-weight
        # values
        median_image = median(resampled_sci, nlow=nlow, nhigh=nhigh,
                              badmasks=badmasks)

        return median_image
Example #8
def find_hotpix(filename):
    """Find hotpixels and update DQ array

    Pixels hotter than median + 5*sigma will be updated to have a
    DQ value of 16.

    .. note:: The input file will be updated in-place.

    Parameters
    ----------
    
    filename: str
        filename of the input biasfile

    """

    with fits.open(filename, mode='update') as hdu:
        im_mean, im_median, im_std = sigma_clipped_stats(hdu[('sci', 1)].data,
                                                         sigma=3,
                                                         iters=40)

        five_sigma = im_median + 5 * im_std
        index = np.where((hdu[('SCI', 1)].data > five_sigma) &
                         (hdu[('SCI', 1)].data > im_mean + 0.1))

        hdu[('DQ', 1)].data[index] = 16
Example #9
def update_sci(filename):
    """Create the science extension of the baseline dark

    .. note:: The input file will be updated in-place.

    Parameters
    ----------
    filename: str
        name of the file to be updated

    """

    with fits.open(filename, mode='update') as hdu:
        im_mean, im_median, im_std = sigma_clipped_stats(hdu[('sci', 1)].data,
                                                         sigma=5,
                                                         iters=50)
        fivesig = im_mean + 5.0 * im_std
        only_hotpix = np.where(hdu[('sci', 1)].data >= fivesig,
                               hdu[('sci', 1)].data - im_mean,
                               0)


        #-- I don't see this being used
        med_im = median_filter(hdu[('sci', 1)].data, (3, 3))
        only_baseline = np.where(hdu[('sci', 1)].data >= fivesig,
                                 med_im,
                                 hdu[('sci', 1)].data)


        hdu[('dq', 1)].data = np.where(only_hotpix >= .1,
                                       16,
                                       hdu[('dq', 1)].data)
Example #10
def compute_stats(x):
    """ Compute stats """

    ## Compute stats
    npixels = np.size(x)
    pixel_min = np.min(x)
    pixel_max = np.max(x)
    mean = np.mean(x)
    stddev = np.std(x, ddof=1)
    median = np.median(x)
    mad = mad_std(x)
    skewness = stats.skew(x)
    kurtosis = stats.kurtosis(x)

    ## Compute robust stats
    niter = 1
    sigmaclip = 3
    [mean_clipped, median_clipped, stddev_clipped] = sigma_clipped_stats(x, sigma=sigmaclip, iters=niter, std_ddof=1)

    print('*** IMG STATS ***')
    print('n=', npixels)
    print('min/max=', pixel_min, '/', pixel_max)
    print('mean=', mean)
    print('stddev=', stddev)
    print('median=', median)
    print('mad=', mad)
    print('skew=', skewness)
    print('kurtosis=', kurtosis)
    print('mean_clipped=', mean_clipped)
    print('median_clipped=', median_clipped)
    print('stddev_clipped=', stddev_clipped)
    print('*****************')
Example #11
def mode(array):
    """An estimate of the statistical mode of this image"""
    # SUPER fast and sloppy mode estimate:
    mean, median, std = sigma_clipped_stats(array)
    quickModeEst = 3*median - 2*mean

    # Compute an approximately 3-sigma range about this
    modeRegion = quickModeEst + std*np.array([-1.5, +1.5])

    # Now compute the number of bins to generate in this range
    binWidth = freedman_bin_width(array.flatten())
    bins     = np.arange(modeRegion[0], modeRegion[1], binWidth)
    numBins  = bins.size  # track the bin count so the shrink loop below is defined

    # Loop through larger and larger binning until find unique solution
    foundMode = False
    while not foundMode:
        # Generate a histogram of the flat field
        hist, flatBins = np.histogram(array.flatten(), bins=bins)

        # Locate the histogram maximum
        maxInds = (np.where(hist == np.max(hist)))[0]
        if maxInds.size == 1:
            # Grab the index of the maximum value and shrink
            maxInd = maxInds[0]
            foundMode = True
        else:
            # Shrink the NUMBER of bins to help find a unique maximum
            numBins = int(0.9 * numBins)
            bins    = np.linspace(modeRegion[0], modeRegion[1], numBins)

    # Estimate flatMode from histogram maximum
    flatMode = np.mean(flatBins[maxInd:maxInd+2])

    return flatMode
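
A quick sanity check for mode, assuming numpy as np, astropy.stats.sigma_clipped_stats, and astropy.stats.freedman_bin_width are in scope; for symmetric noise the mode estimate should land near the mean:

import numpy as np

rng = np.random.default_rng(2)
flat = rng.normal(1.0, 0.01, (512, 512))  # fake flat field
print(mode(flat))  # expected to be close to 1.0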
Example #12
def replace_hot_pix(mean_bias, median_image):
    """ Replace image values in residual single hot pixels

    defined as those having
    values greater than (mean + 5 sigma of Poisson noise) by those in
    median-filtered bias image. This represents the science extension of the
    final output reference superbias.

    mean_bias will be updated in place.

    Parameters
    ----------
    mean_bias : str
        name of the mean bias file
    median_image : np.ndarray
        2d median image of the bias

    """

    print('Replacing hot pixels')
    residual_image = fits.getdata(mean_bias, ext=('sci', 1)) - median_image
    resi_mean, resi_median, resi_std = sigma_clipped_stats(residual_image,
                                                           sigma=5,
                                                           iters=40)

    fivesig = resi_mean + (5.0 * resi_std)
    print("  hot is > {}".format(fivesig))
    index = np.where(residual_image >= fivesig)

    with fits.open(mean_bias, mode='update') as hdu:
        hdu[('sci', 1)].data[index] = median_image[index]
Example #13
def build_starMask(arr, sigmaThresh = 2.0, neighborThresh = 2, kernelSize = 9):
    '''This function will identify in-star pixels using a local median filter.
    This should work for both clean (e.g. PRISM) images and dirty (e.g. Mimir)
    images. The usual "detect_sources" method will identify dirty features as
    false positives, so this method is preferable.
    '''

    # Compute kernel shape
    medianKernShape = (kernelSize, kernelSize)

    # Filter the image
    medImg = median_filter(arr, size = medianKernShape)

    # get stddev of image background
    mean, median, stddev = sigma_clipped_stats(arr)

    # Look for deviates from the filter (positive values only)
    starMask = np.logical_and(np.abs(arr - medImg) > sigmaThresh*stddev,
                               arr > 0)
    # Count the number of masked neighbors for each pixel
    neighborCount = np.zeros_like(arr, dtype=int)
    for dx in range(-1,2,1):
        for dy in range(-1,2,1):
            neighborCount += np.roll(np.roll(starMask, dy, axis=0),
                                     dx, axis=1).astype(int)

    # Subtract the self-counting pixel count
    neighborCount -= 1

    # Find pixels with more than neighborThresh masked neighbors (excluding self)
    starMask = np.logical_and(starMask, neighborCount > neighborThresh)

    return starMask
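
A minimal sketch exercising build_starMask, assuming numpy as np, scipy.ndimage.median_filter, and sigma_clipped_stats are imported as the snippet expects:

import numpy as np

rng = np.random.default_rng(3)
arr = rng.normal(100.0, 3.0, (128, 128))
arr[60:64, 60:64] += 500.0  # a small, bright "star"

starMask = build_starMask(arr)
# Essentially only the star pixels survive the neighbor-count filter
print(starMask.sum(), starMask[60:64, 60:64].all())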
Example #14
def _lookup_via_photutils(fits_file, wcs=None, *args, **kwargs):
    from photutils import DAOStarFinder
    from astropy.io import fits
    from astropy.stats import sigma_clipped_stats
    from astropy.wcs import WCS
    data = fits.getdata(fits_file) - 2048  # Camera bias
    mean, median, std = sigma_clipped_stats(data)

    fwhm = kwargs.get('fwhm', 3.0)
    threshold = kwargs.get('threshold', 3.0)

    daofind = DAOStarFinder(fwhm=fwhm, threshold=threshold * std)
    sources = daofind(data - median).to_pandas()

    sources.rename(columns={
        'xcentroid': 'x',
        'ycentroid': 'y',
    }, inplace=True)

    if wcs is None:
        header = fits_utils.getheader(fits_file)
        wcs = WCS(header)

    coords = wcs.all_pix2world(sources['x'], sources['y'], 1)

    sources['ra'] = coords[0]
    sources['dec'] = coords[1]

    return sources
Example #15
def makeRGB(rimg,gimg,bimg,minsigma=1.,maxpercentile=99.9,color_scaling=None,sigmaclip=3,iters=20,nonlinear=8.):
    ''' minsigma -- black level is set this many clipped-sigma below sky
       maxpercentile -- white level is set at this percentile of the pixel levels
       color_scaling -- list or array: maximum value is divided by this; 
                        so if you want the brightest pixels to be reddish, try, say 1.0,0.2, 0.1
       sigmaclip -- clipping threshold for iterative sky determination
       iters -- number of iterations for iterative sky determination
    '''
    bands = ['r','g','b']
    # Color scaling
    if color_scaling is None:
        cfactor = {'r':1.,'g':1.0,'b':1.0}
    else:
        cfactor = {}
        for i,b in enumerate(bands):
            cfactor[b] = color_scaling[i]
    images = {'r':rimg, 'g':gimg, 'b':bimg}
    rescaled_img = {}
    for b in ['r','g','b']:
        mean, median, stddev = astats.sigma_clipped_stats(images[b],sigma=sigmaclip,iters=iters)
        imin = median-minsigma*stddev
        imax = np.percentile(images[b],maxpercentile)
        rescaled_img[b] = img_scale.asinh(images[b],scale_min=imin,scale_max=imax/cfactor[b],non_linear=nonlinear)
        print(imin, imax)
    rgbimg = np.zeros((rescaled_img['r'].shape[0],rescaled_img['r'].shape[1],3),dtype=np.float64)
    rgbimg[:,:,0]=rescaled_img['r']
    rgbimg[:,:,1]=rescaled_img['g']
    rgbimg[:,:,2]=rescaled_img['b']
    return rgbimg
Example #16
def gaussian_fit_to_histogram(dataset):
    """ fit a gaussian function to the histogram of the given dataset
    :param dataset: a series of measurements that is presumed to be normally
       distributed, probably around a mean that is close to zero.
    :return: mean, mu and width, sigma of the gaussian model fit.
    """
    def gauss(x, mu, sigma):
        return np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))

    if np.ndim(dataset) == 2:
        musigma = np.array([gaussian_fit_to_histogram(dataset[:, i])
                            for i in range(np.shape(dataset)[1])])
        return musigma[:, 0], musigma[:, 1]

    dataset = dataset[np.isfinite(dataset)]
    ndatapoints = len(dataset)
    stdmean, stdmedian, stderr = sigma_clipped_stats(dataset, sigma=5.0)
    nhistbins = max(10, int(ndatapoints / 10))
    histbins = np.linspace(stdmedian - 3 * stderr, stdmedian + 3 * stderr,
                           nhistbins)
    yhist, xhist = np.histogram(dataset, bins=histbins)
    binwidth = np.mean(np.diff(xhist))
    binpeak = float(np.max(yhist))
    param0 = [stdmedian, stderr]  # initial guesses for gaussian mu and sigma
    xval = xhist[:-1] + (binwidth / 2)
    yval = yhist / binpeak
    minparam, cov = curve_fit(gauss, xval, yval, p0=param0)
    mumin, sigmamin = minparam
    return mumin, sigmamin
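
A minimal check for gaussian_fit_to_histogram, assuming numpy as np, scipy.optimize.curve_fit, and sigma_clipped_stats are in scope; the fit should recover the generating parameters:

import numpy as np

rng = np.random.default_rng(4)
residuals = rng.normal(0.0, 0.3, 5000)
mu, sigma = gaussian_fit_to_histogram(residuals)
print(mu, sigma)  # should be close to 0.0 and 0.3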
Example #17
def source_detection(ccd, fwhm=3.0, sigma=3.0, iters=5, threshold=5.0):
    """
    Returns an astropy table containing the position of sources within the image.

    Parameters
    ----------------

    ccd : `~astropy.nddata.CCDData`
        The CCD image; its ``data`` attribute holds the image array.

    fwhm : float, optional
        Full-width half-max of stars in the image.

    sigma : float, optional.
        The number of standard deviations to use as the lower and upper clipping limit.

    iters : int, optional
        The number of iterations to perform sigma clipping
    
    threshold : float, optional.
        The absolute image value above which to select sources.
    
    Returns
    -----------

    sources
        an astropy table of the positions of sources in the image.
    """
    data = ccd.data
    mean, median, std = sigma_clipped_stats(data, sigma=sigma, iters=iters)
    sources = daofind(data - median, fwhm=fwhm, threshold=threshold*std)
    return sources
Example #18
def measure_FWHM(data):
    '''Obtain the FWHM of some quantity in an event list.

    This function provides a robust measure of the FWHM of a quantity.
    It ignores outliers by first sigma clipping the input data, then it
    fits a Gaussian to the histogram of the sigma-clipped data.

    Even if the data is not Gaussian distributed, this should provide a good
    estimate of the real FWHM, and it is a lot more stable than calculating the
    "raw" FWHM of the histogram, which can depend sensitively on the bin size
    if the max value is driven by noise.

    Parameters
    ----------
    data : np.array
        unbinned input data

    Returns
    -------
    FWHM : float
        robust estimate for FWHM
    '''
    # Get an estimate of a sensible bin width for histogram
    mean, median, std = sigma_clipped_stats(data)
    n = len(data)
    hist, bin_edges = np.histogram(data, range=mean + np.array([-3, 3]) * std, bins=n // 10)
    g_init = models.Gaussian1D(amplitude=hist.max(), mean=mean, stddev=std)
    fit_g = fitting.LevMarLSQFitter()
    g = fit_g(g_init, (bin_edges[:-1] + bin_edges[1:]) / 2., hist)
    return 2.3548 * g.stddev
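
A minimal usage sketch for measure_FWHM, assuming numpy as np and astropy.modeling's models and fitting are imported as the snippet expects:

import numpy as np

rng = np.random.default_rng(5)
data = rng.normal(0.0, 2.0, 10000)  # true FWHM = 2.3548 * 2.0, about 4.7
print(measure_FWHM(data))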
Example #19
def GetImage(image, mask):
    Passed = 0
    try:
        imageData = fits.getdata(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        hdr = fits.getheader(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        w = WCS(reducedpath+date+'.{:0>3}.reduced.fits'.format(image))
        Passed = 1
    except OSError:
        print('Trying a different file name.')
        Passed = 0
    if Passed == 0:
        try:
            imageData = fits.getdata(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
            hdr = fits.getheader(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
            w = WCS(reducedpath+date+'.f{:0>3}.reduced.fits'.format(image))
        except OSError:
            raise OSError('We do not know that filename: %s' % (reducedpath+date+'.f{:0>3}.reduced.fits'.format(image)))

    # Parse the header to get the object name
    ObjName = hdr['OBJECT'].split(',')[0].split(' ')[0]
    band    = hdr['FILTER2']

    # Create the masked Image Data
    imageDataM = np.ma.array(imageData, mask=mask)

    # Computed the background levels
    mean, median, std = stats.sigma_clipped_stats(imageData, mask=mask, sigma=3.0)
    print('mean', 'median', 'std', 'BACKGROUND')
    print(mean, median, std)

    # Remove the background
    imageDataRed = imageDataM - median
    return imageDataRed, median, ObjName, band, std, hdr, w
Example #20
def simple_extract_order(hrs, y1, y2, binsum=1, median_filter_size=None, interp=False):
    """Simple_extract_order transforms the observed spectra into a square form, 
       extracts all of the pixels between y1 and y2, and then sums the results.

    Parameters
    ----------
    hrs: ~HRSOrder
        An HRSOrder with a calibrated wavelength property

    y1: int
        Lower limit to extract the hrs data.  It should be in terms of the
        rectangular representation of the flattened order

    y2: int
        Upper limit to extract the hrs data.  It should be in terms of the
        rectangular representation of the flattened order

    binsum: int
        Amount by which to increase the binning in wavelength space

    median_filter_size: None or int
        Size for median filter to be run on the data and remove the general
        shape of the spectrum


    Returns
    -------
    wavelength: ~numpy.ndarray
        1D representation of the wavelength

    flux: ~numpy.ndarray
        1D representation of the flux
    """
    
    #set up the initial wavelength and flux 
    data, coef = hrs.create_box(hrs.flux, interp=interp)
    wdata, coef = hrs.create_box(hrs.wavelength)

    w1 = hrs.wavelength[hrs.wavelength>0].min()
    w2 = hrs.wavelength.max()
    dw = binsum * (w2-w1)/(len(data[0]))
    warr = np.arange(w1, w2, dw)
    farr = np.zeros_like(warr)

    y,x = np.indices(data.shape)
    for i in range(len(warr)):
        mask = (wdata >= warr[i]-0.5*dw) * (wdata < warr[i]+0.5*dw)
        mask = mask * (y >= y1) * (y < y2)
        if np.any(data[mask].ravel()):
            farr[i] = stats.sigma_clipped_stats(1.0*data[mask].ravel())[0]

    if median_filter_size:
        sf = nd.filters.median_filter(farr, size=median_filter_size)
        farr = farr / sf 
 
    #step to clean up the spectrum
    mask = (warr > 0) * (farr > 0) 
    return warr[mask], farr[mask]
Example #21
def get_noise(fitsfile, vm=False):
    dat = fits.getdata(fitsfile)
    if np.sum(np.isfinite(dat)) == 0:
        return np.nan, np.nan, np.nan
    mean, median, stddev = sigma_clipped_stats(dat, sigma=5, iters=10)
    if not vm:
        return mean, median, stddev
    else:
        vmax = np.median(dat[dat > (mean + 5*stddev)])
        return mean, median, stddev, vmax
Example #22
def evaluate(self, data):
    mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5)
    daofind = DAOStarFinder(fwhm=3.0, threshold=5.*std)
    self.sources = daofind(data - median)
    for col in self.sources.colnames:
        self.sources[col].info.format = "%.8g"
    if self.num() > 0:
        for p in ("sharpness", "roundness1", "roundness2"):
            self.par[p] = self.calc(p)
    return self.sources
Example #23
def manual_bg(data):
    # directly from photutils example
    from numpy import ones, iinfo
    from numpy.ma import masked_where
    from matplotlib.pyplot import subplots
    from scipy.ndimage import binary_dilation
    from photutils import detect_sources
    mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5)
    threshold = median + (std * 2.)
    segm_img = detect_sources(data, threshold, npixels=5)
    #%% morphological
    mask = segm_img.data.astype(bool)
    strel = ones((3,3))
    dilated = binary_dilation(mask,strel)
    mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=dilated)
    #%% use mean or median for background? Let's do what sextractor does
    if (mean - median) / std > 0.3:
        bgval = median
    else:
        bgval = mean
    #%% background subtract
    dinf = iinfo(data.dtype)

    imgmask = masked_where(~dilated,100+data).clip(0,dinf.max).astype(data.dtype)
    datashift=(data-bgval).clip(0,dinf.max).astype(data.dtype)
#%%
    fg,axs = subplots(2,2)
    ax = axs[0,0]
    hi=ax.imshow(data,interpolation='none',cmap='gray')
    ax.imshow(imgmask,interpolation='none',cmap='gray')
    ax.set_title('original image with stars highlighted')
    fg.colorbar(hi,ax=ax)
    ax.grid(False)

    ax=axs[0,1]
    ax.imshow(dilated,interpolation='none')
    ax.set_title('dilated')
    ax.grid(False)

    ax = axs[1,0]
    hi=ax.imshow(datashift,interpolation='none',cmap='gray')
    ax.set_title('background subtracted image {:.3f}'.format(bgval))
    fg.colorbar(hi,ax=ax)
    ax.grid(False)

    fg.tight_layout()

    return datashift
Example #24
def find_stars(im):
    "Locate stars in an array, using DAOphot. Returns N x 2 array with xy positions. No magnitudes."
         
    mean, median, std = sigma_clipped_stats(im, sigma=3.0, iters=5)
    sources = daofind(im - median, fwhm=3.0, threshold=5.*std)
    x_phot = sources['xcentroid']
    y_phot = sources['ycentroid']
        
    points_phot = np.transpose((x_phot, y_phot)) # Create an array N x 2

    return points_phot
Example #25
    def snr(self, x, y, radius, data, fwhm_x, fwhm_y, flux, aperture_area):
        x0, y0, arr = self.cut_region(x, y, radius, data)
        mean, median, std = sigma_clipped_stats(arr, sigma=3.0)  # sigma was undefined in the snippet; 3.0 assumed
        try:
            signal_to_noise = flux / math.sqrt(flux +
                                               aperture_area * math.pow(std, 2))
        except ValueError:
            signal_to_noise = 0
        print('snr:', signal_to_noise)

        return signal_to_noise, median
Example #26
def find_2MASS_flux(array):
    # Identify which pixels have acceptable "background" levels. Start by
    # grabbing the image statistics
    mean, median, stddev = sigma_clipped_stats(array)

    # Identify pixels more than 2-sigma above the background
    fgdThresh = median + 2.0*stddev
    fgdRegion = array > fgdThresh

    # Repeat the classification without the *definitely* nebular pixels
    bkgPix = np.logical_not(fgdRegion)
    mean, median, stddev = sigma_clipped_stats(array[bkgPix])
    fgdThresh = median + 2.0*stddev
    fgdRegion = array > fgdThresh

    # Clean the foreground ID region
    all_labels  = measure.label(fgdRegion)
    all_labels1 = morphology.remove_small_objects(all_labels, min_size=50)
    fgdRegion   = all_labels1 > 0

    # Dilate a ton to be conservative...
    sigma = 20.0 * gaussian_fwhm_to_sigma    # FWHM = 20.0

    # Build a kernel for detecting pixels above the threshold
    kernel = Gaussian2DKernel(sigma, x_size=41, y_size=41)
    kernel.normalize()
    fgdRegion= convolve_fft(
        fgdRegion.astype(float),
        kernel.array
    )
    fgdRegion = (fgdRegion > 0.01)

    # Expand a second time to be conservative
    fgdRegion= convolve_fft(
        fgdRegion.astype(float),
        kernel.array
    )
    fgdRegion = (fgdRegion > 0.01)

    # Return the flux-bright pixels to the user
    return fgdRegion
Example #27
def flag_hot_pixels(refbias_name):
    """Flag hotpixels in the DQ array

    Pixels more than (mean + 3*sigma) away from a median smoothed image
    are flagged as DQ=16 in the DQ array.

    .. note:: The input file is updated in-place.

    .. note::
      The IRAF version of this pipeline specified a 2x15 pixel median filter
      to calculate the smoothed image, but the IRAF task documentation says that
      even sizes are increased by 1 for the computation.  A 3x15 pixel filter is
      used directly here.

    Parameters
    ----------
    refbias_name : str
        name of the reference file to flag

    """

    with fits.open(refbias_name, mode='update') as refbias_hdu:
        smooth_bias = medfilt(refbias_hdu[('sci', 1)].data, (3, 15))

        smooth_bias_mean, smooth_bias_med, smooth_bias_std = sigma_clipped_stats(smooth_bias, sigma=3, iters=30)
        bias_mean, bias_median, bias_std = sigma_clipped_stats(refbias_hdu[('sci', 1)].data, sigma=3, iters=30)


        smooth_bias += (bias_mean - smooth_bias_mean)

        bias_residual = refbias_hdu[('sci', 1)].data - smooth_bias

        resid_mean, resid_median, resid_std = sigma_clipped_stats(bias_residual,
                                                                  sigma=3,
                                                                  iters=30)
        r_five_sigma = resid_mean + 5.0 * resid_std

        print('Updating DQ values of hot pixels above a level of ', r_five_sigma)
        refbias_hdu[('dq', 1)].data = np.where(bias_residual > r_five_sigma,
                                               16,
                                               refbias_hdu[('dq', 1)].data)
Example #28
def sigma_clipped_std(data, **kwargs):
    '''Return stddev of sigma-clipped data.

    Parameters
    ----------
    data : np.array
        unbinned input data
    kwargs : see `astropy.stats.sigma_clipped_stats`
        All keyword arguments are passed to `astropy.stats.sigma_clipped_stats`.
    '''
    mean, median, std = sigma_clipped_stats(data, **kwargs)
    return std
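
A short usage example for this wrapper; a handful of outliers inflates np.std but barely moves the clipped estimate:

import numpy as np

rng = np.random.default_rng(6)
data = rng.normal(0.0, 1.0, 10000)
data[:50] += 100.0  # inject outliers

print(np.std(data))                        # inflated to roughly 7
print(sigma_clipped_std(data, sigma=3.0))  # stays close to 1.0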
Example #29
def get_adaptive_aperture(target_stamp, return_snr=False, cutoff_value=1):
    aperture_pixels = dict()
    snr = dict()

    rgb_data = get_rgb_data(target_stamp)

    for color, i in zip('rgb', range(3)):
        color_data = rgb_data[i]

        # Get the background
        s_mean, s_med, s_std = sigma_clipped_stats(color_data.compressed())

        # Subtract background
        color_data = color_data - s_med

        # Get SNR of each pixel
        noise0 = np.sqrt(np.abs(color_data) + 10.5**2)
        snr0 = color_data / noise0

        # Weight each pixel according to SNR
        w0 = snr0**2 / (snr0).sum()
        weighted_snr = snr0 * w0

        weighted_sort_snr = np.sort(weighted_snr.flatten().filled(0))[::-1]
        weighted_sort_idx = np.argsort((weighted_snr).flatten().filled(0))[::-1]

        # Running sum of SNR
        snr_pixel_sum = np.cumsum(weighted_sort_snr)

        # Snip to first fourth
        snr_pixel_sum = snr_pixel_sum[:int(len(weighted_sort_snr) / 4)]

        # Use gradient to determine cutoff (to zero)
        snr_pixel_gradient = np.gradient(snr_pixel_sum)

        # Get gradient above cutoff value
        top_snr_gradient = snr_pixel_gradient[snr_pixel_gradient > cutoff_value]

        # Get the positions for the matching pixels
        best_pixel_idx = weighted_sort_idx[:len(top_snr_gradient)]

        # Get the original index position in the unflattened matrix
        aperture_pixels[color] = [idx
                                  for idx
                                  in zip(*np.unravel_index(best_pixel_idx,
                                                           color_data.shape))]

        snr[color] = (snr_pixel_sum, snr_pixel_gradient)

    if return_snr:
        return aperture_pixels, snr
    else:
        return aperture_pixels
Example #30
def make_residual(mean_bias, kern=(3, 15)):
    """Create residual image

    Median filter the median with a 15 x 3 box and subtract from the mean
    to produce the residual image.


    """
    mean_hdu = pyfits.open(mean_bias)
    mean_image = mean_hdu[('sci', 1)].data

    median_image = median_filter(mean_image, kern)

    medi_mean = sigma_clipped_stats(median_image, sigma=3, iters=40)[0]
    mean_mean = sigma_clipped_stats(mean_image, sigma=3, iters=40)[0]
    diffmean = mean_mean - medi_mean

    median_image += diffmean
    residual_image = mean_image - median_image

    return residual_image, median_image
Example #31
def fit_2dgaussian(array,
                   crop=False,
                   cent=None,
                   cropsize=15,
                   fwhmx=4,
                   fwhmy=4,
                   theta=0,
                   threshold=False,
                   sigfactor=6,
                   full_output=False,
                   plot=True,
                   verbose=True):
    """ Fitting a 2D Gaussian to the 2D distribution of the data with photutils.
    
    Parameters
    ----------
    array : array_like
        Input frame with a single PSF.
    crop : {False, True}, optional
        If True an square sub image will be cropped.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage. 
        If None the center of the frame is used for cropping the subframe (the 
        PSF is assumed to be ~ at the center of the frame). 
    cropsize : int, optional
        Size of the subimage.
    fwhmx, fwhmy : float, optional
        Initial values for the standard deviation of the fitted Gaussian, in px.
    theta : float, optional
        Angle of inclination of the 2d Gaussian counting from the positive X
        axis.
    threshold : {False, True}, optional
        If True the background pixels will be replaced by small random Gaussian 
        noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Gaussian
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian 
        noise. 
    full_output : {False, True}, optional
        If False it returns just the centroid, if True also returns the 
        FWHM in X and Y (in pixels), the amplitude and the rotation angle.
    plot : {True, False}, optional
        If True, the function plots the data and residuals with contours.
    verbose : {True, False}, optional
        If True, the function prints out parameters of the fit.
        
    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting. 
    mean_x : float
        Source centroid x position on input array from fitting.
        
    If *full_output* is True it returns:
    mean_y, mean_x : floats
        Centroid. 
    fwhm_y : float
        FHWM in Y in pixels. 
    fwhm_x : float
        FHWM in X in pixels.
    amplitude : float
        Amplitude of the Gaussian.
    theta : float
        Rotation angle.
    
    """
    if not array.ndim == 2:
        raise TypeError('Input array is not a frame or 2d array')

    if crop:
        if cent is None:
            ceny, cenx = array.shape[0] // 2, array.shape[1] // 2
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              ceny,
                                              cenx,
                                              position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd  #*50
        psf_subimage[indi] = subimnoise[indi]

    yme, xme = np.unravel_index(np.argmax(psf_subimage), psf_subimage.shape)  # scalar peak indices
    # Creating the 2D Gaussian model
    gauss = models.Gaussian2D(amplitude=psf_subimage.max(),
                              x_mean=xme,
                              y_mean=yme,
                              x_stddev=fwhmx * gaussian_fwhm_to_sigma,
                              y_stddev=fwhmy * gaussian_fwhm_to_sigma,
                              theta=theta)
    # Levenberg-Marquardt algorithm
    fitter = LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(gauss, x, y, psf_subimage, maxiter=1000, acc=1e-08)

    if crop:
        mean_y = fit.y_mean.value + suby
        mean_x = fit.x_mean.value + subx
    else:
        mean_y = fit.y_mean.value
        mean_x = fit.x_mean.value
    fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
    fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
    amplitude = fit.amplitude.value
    theta = fit.theta.value

    if plot:
        plot_image_fit_residuals(psf_subimage,
                                 psf_subimage - fit(x, y),
                                 save=None)
    if verbose:
        print('FWHM_y =', fwhm_y)
        print('FWHM_x =', fwhm_x)
        print()
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_mean.value)
        print('centroid x subim =', fit.x_mean.value)
        print()
        print('peak =', amplitude)
        print('theta =', theta)

    if full_output:
        return pd.DataFrame({
            'centroid_y': mean_y,
            'centroid_x': mean_x,
            'fwhm_y': fwhm_y,
            'fwhm_x': fwhm_x,
            'amplitude': amplitude,
            'theta': theta
        }, index=[0])
    else:
        return mean_y, mean_x
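
A minimal usage sketch for fit_2dgaussian under the snippet's assumed imports (numpy as np, pandas as pd, astropy.modeling.models, LevMarLSQFitter, and the gaussian_fwhm_to_sigma / gaussian_sigma_to_fwhm constants from astropy.stats); get_square and plot_image_fit_residuals are only needed when crop or plot are enabled:

import numpy as np

yy, xx = np.mgrid[0:64, 0:64]
psf = 200.0 * np.exp(-((xx - 30.2)**2 + (yy - 33.7)**2) / (2 * 2.0**2))

cy, cx = fit_2dgaussian(psf, crop=False, threshold=False,
                        full_output=False, plot=False, verbose=False)
print(cy, cx)  # should recover roughly (33.7, 30.2)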
Example #32
        for img in AimgList:
            # Grab the image timestamp
            thisDate = (img.header['DATE-OBS'])[0:10]
            thisDate = datetime.datetime.strptime(thisDate, "%Y-%m-%d")
            thisTime = (img.header['UTCSTART'])
            h, m, s  = thisTime.split(':')
            timeStp  = thisDate + datetime.timedelta(
                hours=int(h),
                minutes=int(m),
                seconds=(float(s) + 0.5*img.header['EXPTIME']))
            AimgTimes.append(timeStp.timestamp())

            # Grab the image sky value
            ny, nx = img.arr.shape
            subSampled = (img.arr[0:ny:10, 0:nx:10]).flatten()
            mean, median, stddev = sigma_clipped_stats(subSampled)
            AimgSky.append(mean)


        # Convert the image time and sky values to arrays
        AimgTimes = np.array(AimgTimes)
        AimgSky   = np.array(AimgSky)

        # Cleanup memory
        del AimgList

        # Read in the B images
        BimgList = np.array([AstroImage(file1) for file1 in Bgroup['Filename'].data])

        # Grab the timestamp for each image in the list
        BimgTimes = []
Example #33
def analyse():
    completed = 0
    textOutput.append("Analysing...")
    textOutput.append(
        "-----------------------------------------------------------------------------"
    )

    for field in header:
        textOutput.append(
            str(field) + ":  \t " + str(header[field]) + "\t //" +
            str(header.comments[field]))
        completed += 0.5
        progress.setValue(completed)
        time.sleep(0.02)

    textOutput.append(
        "-----------------------------------------------------------------------------"
    )
    canvas.plotFitLog()
    completed += 5
    progress.setValue(completed)
    time.sleep(1)

    hist.plotHist(10000)
    completed += 5
    progress.setValue(completed)

    hist.plotHist(159999)
    completed += 5
    progress.setValue(completed)

    textOutput.append(str('Min:') + str(np.min(image_data)))
    textOutput.append(str('Max:') + str(np.max(image_data)))
    textOutput.append(str('Mean:') + str(np.mean(image_data)))
    textOutput.append(str('Stdev:') + str(np.std(image_data)))

    mean, median, std = sigma_clipped_stats(image_data, sigma=3.0, iters=5)
    noiseLevel = mean

    completed = 45
    progress.setValue(completed)

    #canvas.plotApertures()

    mean, median, std = sigma_clipped_stats(image_data, sigma=3.0, iters=5)
    daofind = DAOStarFinder(fwhm=3.0, threshold=5. * std)
    sources = daofind(image_data - median)

    completed = 47
    progress.setValue(completed)

    canvas.addPicture()
    pix_coords = []
    for point in sources:
        pix_coords.append((point['xcentroid'], point['ycentroid']))
    w = WCS(hdu.header)
    world_coords = w.wcs_pix2world(pix_coords, 0)
    sky_coords = coordinates.SkyCoord(world_coords * u.deg, frame='fk4')
    sr = 0.2 * u.degree
    num = 0

    completed = 50
    progress.setValue(completed)
    res = []
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')

    for cord in sky_coords:
        canvas.plotPoint(pix_coords[num][0], pix_coords[num][1], 'red')
        if (completed < 99):
            completed += 1
            progress.setValue(completed)
            time.sleep(0.3)
            completed += 1
            progress.setValue(completed)
        textOutput.append(
            "-----------------------------------------------------------------------------"
        )
        textOutput.append('Found star #' + str(num + 1))
        textOutput.append('Constellation name: ' + cord.get_constellation())
        textOutput.append(str(cord))
        result = conesearch.conesearch(
            cord,
            sr,
            catalog_db=
            'The HST Guide Star Catalog, Version 1.2 (Lasker+ 1996) 1')
        res.append(result)
        textOutput.append('Number of stars from Catalogue: ' +
                          str(len(result.array.data)))
        textOutput.append(str(result.array.dtype.names))
        textOutput.append(
            str(result.array.data[result.array.data['Pmag'].argmax()]))
        textOutput.append("My mag: " + str(sources[num]['mag']) +
                          " Catalog mag: " +
                          str(result.array.data['Pmag'].max()))
        canvas.plotPoint(pix_coords[num][0], pix_coords[num][1], 'lightgreen')
        num = num + 1

    num = 0
    for cord in sky_coords:
        x = int(pix_coords[num][1])
        y = int(pix_coords[num][0])
        peak = image_data[x][y]
        if (getMyMagnitude(image_data, x, y, peak) == 71437):
            mags.append((getMyMagnitude(image_data, x, y, peak),
                         0.5 + res[num].array.data['Pmag'].max()))
        elif (getMyMagnitude(image_data, x, y, peak) == 10200):
            mags.append(
                (getMyMagnitude(image_data, x, y,
                                peak), -1 + res[num].array.data['Pmag'].max()))
        else:
            mags.append(
                (getMyMagnitude(image_data, x, y,
                                peak), res[num].array.data['Pmag'].max()))
        print("Peak: " + str(peak) + " " + str(x) + " " + str(y) +
              " My mag: " + str(getMyMagnitude(image_data, x, y, peak)) +
              " Catalog mag: " + str(res[num].array.data['Pmag'].max()))
        num = num + 1
    magsorted = sorted(mags, key=lambda x: x[0])
    plt.plot([x[0] for x in magsorted], [x[1] for x in magsorted])
    plt.show()
    completed = 100
    progress.setValue(completed)
Example #34
#skymask = thismask & (edgmask == False)
objmask = np.zeros_like(thismask)

thisimg = image * thismask * mask
#  Smash the image (for this slit) into a single flux vector.  How many pixels wide is the slit at each Y?
xsize = slit_righ - slit_left
nsamp = np.ceil(np.median(xsize))
# Mask sky pixels within 2 FWHM of the edge
left_asym = np.outer(slit_left, np.ones(int(nsamp))) + np.outer(
    xsize / nsamp, np.arange(nsamp))
righ_asym = left_asym + np.outer(xsize / nsamp, np.ones(int(nsamp)))
# This extract_asymbox2 call smashes the image in the spectral direction along the curved object traces
flux_spec = extract_asymbox2(thisimg, left_asym, righ_asym)
flux_mean, flux_median, flux_sig = sigma_clipped_stats(flux_spec,
                                                       axis=0,
                                                       sigma=4.0)
if (nsamp < 9.0 * FWHM):
    fluxsub = flux_mean.data - np.median(flux_mean.data)
else:
    kernel_size = int(np.ceil(BG_SMTH * FWHM) // 2 * 2 +
                      1)  # This ensure kernel_size is odd
    fluxsub = flux_mean.data - scipy.signal.medfilt(flux_mean.data,
                                                    kernel_size=kernel_size)
    # This little bit below deals with degenerate cases for which the slit gets brighter toward the edge, i.e. when
    # alignment stars saturate and bleed over into other slits. In this case the median smoothed profile is the nearly
    # everywhere the same as the profile itself, and fluxsub is full of zeros (bad!). If 90% or more of fluxsub is zero,
    # default to use the unfiltered case
    isub_bad = (fluxsub == 0.0)
    frac_bad = np.sum(isub_bad) / nsamp
    if frac_bad > 0.9:
Example #35
def approx_stellar_position(cube, fwhm, return_test=False, verbose=False):
    """FIND THE APPROX COORDS OF THE STAR IN EACH CHANNEL (even the ones
    dominated by noise)

    Parameters
    ----------
    cube : array_like
        Input 3d cube
    fwhm : float or array 1D
        Input full width half maximum value of the PSF for each channel.
        This will be used as the standard deviation for Gaussian kernel
        of the Gaussian filtering.
        If float, it is assumed the same for all channels.
    return_test: bool, optional
        Whether the test result vector (a bool vector of whether the star
        centroid could be found in the corresponding channel) should be
        returned as well, along with the approx stellar coordinates.
    verbose: bool, optional
        Chooses whether to print some additional information.

    Returns
    -------
    Array of y and x approx coordinates of the star in each channel of the cube
    if return_test: it also returns the test result vector
    """
    from ..metrics import peak_coordinates

    obj_tmp = cube.copy()
    n_z = obj_tmp.shape[0]

    if isinstance(fwhm, float) or isinstance(fwhm, int):
        fwhm_scal = fwhm
        fwhm = np.zeros((n_z))
        fwhm[:] = fwhm_scal

    # 1/ Write a 2-columns array with indices of all max pixel values in the cube
    star_tmp_idx = np.zeros([n_z, 2])
    star_approx_idx = np.zeros([n_z, 2])
    test_result = np.ones(n_z)
    for zz in range(n_z):
        star_tmp_idx[zz] = peak_coordinates(obj_tmp[zz], fwhm[zz])

    # 2/ Detect the outliers in each column
    _, med_y, stddev_y = sigma_clipped_stats(star_tmp_idx[:, 0], sigma=2.5)
    _, med_x, stddev_x = sigma_clipped_stats(star_tmp_idx[:, 1], sigma=2.5)
    lim_inf_y = med_y - 3 * stddev_y
    lim_sup_y = med_y + 3 * stddev_y
    lim_inf_x = med_x - 3 * stddev_x
    lim_sup_x = med_x + 3 * stddev_x

    if verbose:
        print("median y of star - 3sigma = ", lim_inf_y)
        print("median y of star + 3sigma = ", lim_sup_y)
        print("median x of star - 3sigma = ", lim_inf_x)
        print("median x of star + 3sigma = ", lim_sup_x)

    for zz in range(n_z):
        if ((star_tmp_idx[zz, 0] < lim_inf_y) or (
                star_tmp_idx[zz, 0] > lim_sup_y) or
                (star_tmp_idx[zz, 1] < lim_inf_x) or (
                        star_tmp_idx[zz, 1] > lim_sup_x)):
            test_result[zz] = 0

    # 3/ Replace by the median of neighbouring good coordinates if need be
    for zz in range(n_z):
        if test_result[zz] == 0:
            ii = 1
            inf_neigh = max(0, zz - ii)
            sup_neigh = min(n_z - 1, zz + ii)
            while test_result[inf_neigh] == 0 and test_result[sup_neigh] == 0:
                ii = ii + 1
                inf_neigh = max(0, zz - ii)
                sup_neigh = min(n_z - 1, zz + ii)
            if test_result[inf_neigh] == 1 and test_result[sup_neigh] == 1:
                star_approx_idx[zz] = np.floor((star_tmp_idx[sup_neigh] + \
                                                star_tmp_idx[inf_neigh]) / 2)
            elif test_result[inf_neigh] == 1:
                star_approx_idx[zz] = star_tmp_idx[inf_neigh]
            else:
                star_approx_idx[zz] = star_tmp_idx[sup_neigh]
        else:
            star_approx_idx[zz] = star_tmp_idx[zz]

    if return_test:
        return star_approx_idx, test_result.astype(bool)
    else:
        return star_approx_idx
Example #36
def fit_2dairydisk(array,
                   crop=False,
                   cent=None,
                   cropsize=15,
                   fwhm=4,
                   threshold=False,
                   sigfactor=6,
                   full_output=True,
                   debug=True):
    """ Fitting a 2D Airy to the 2D distribution of the data.

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub image will be cropped equal to cropsize.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage.
        If None the center of the frame is used for cropping the subframe (the
        PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhm : float, optional
        Initial values for the FWHM of the fitted 2d Airy disk, in px.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Airy disk
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
        noise.
    full_output : bool, optional
        If False it returns just the centroid, if True also returns the
        FWHM in X and Y (in pixels), the amplitude and the rotation angle.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    'amplitude' : Float value. Airy disk amplitude.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm' : Float value. FHWM [px].

    """
    check_array(array, dim=2, msg='array')

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              ceny,
                                              cenx,
                                              position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    # Creating the 2d Airy disk model
    init_amplitude = np.ptp(psf_subimage)
    xcom, ycom = cen_com(psf_subimage)
    diam_1st_zero = (fwhm * 2.44) / 1.028
    airy = models.AiryDisk2D(amplitude=init_amplitude,
                             x_0=xcom,
                             y_0=ycom,
                             radius=diam_1st_zero / 2.)
    # Levenberg-Marquardt algorithm
    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(airy, x, y, psf_subimage)

    if crop:
        mean_y = fit.y_0.value + suby
        mean_x = fit.x_0.value + subx
    else:
        mean_y = fit.y_0.value
        mean_x = fit.x_0.value

    amplitude = fit.amplitude.value
    radius = fit.radius.value
    fwhm = ((radius * 1.028) / 2.44) * 2

    # compute uncertainties
    if fitter.fit_info['param_cov'] is not None:
        perr = np.sqrt(np.diag(fitter.fit_info['param_cov']))
        amplitude_err, mean_x_err, mean_y_err, radius_err = perr
        fwhm_err = ((radius_err * 1.028) / 2.44) * 2
    else:
        amplitude_err, mean_x_err, mean_y_err = None, None, None
        radius_err, fwhm_err = None, None

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage - fit(x, y)),
                    grid=True,
                    grid_spacing=1,
                    label=label)
        print('FWHM =', fwhm)
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_0.value)
        print('centroid x subim =', fit.x_0.value, '\n')
        print('amplitude =', amplitude)
        print('radius =', radius)

    if full_output:
        return pd.DataFrame(
            {
                'centroid_y': mean_y,
                'centroid_x': mean_x,
                'fwhm': fwhm,
                'radius': radius,
                'amplitude': amplitude,
                'centroid_y_err': mean_y_err,
                'centroid_x_err': mean_x_err,
                'fwhm_err': fwhm_err,
                'radius_err': radius_err,
                'amplitude_err': amplitude_err
            },
            index=[0])
    else:
        return mean_y, mean_x
Example #37
    def catalogue(self):

        imagedir = self.filesdir + Constants.working_directory + Constants.image_directory

        file = None

        if not self.has_sets:
            file = Constants.reduced_prefix + self.image_names + "0001" + Constants.fits_extension
        else:
            file = Constants.reduced_prefix + self.image_names + "_1_001" + Constants.fits_extension

        if not self.has_sets:
            self.n_sets = 1

        for i in range(1, self.n_sets + 1):

            image_data = fits.getdata(imagedir + file, ext=0)
            mean, median, std = sigma_clipped_stats(image_data,
                                                    sigma=3.0,
                                                    iters=5)

            sources = self.find_stars(image_data, std)

            self.convert_to_ra_and_dec(sources, imagedir + file)
            if self.has_sets:
                filepath = self.filesdir + Constants.working_directory + Constants.catalogue_prefix + self.image_names + "_" + str(
                    i) + Constants.standard_file_extension
            else:
                filepath = self.filesdir + Constants.working_directory + Constants.catalogue_prefix + self.image_names + Constants.standard_file_extension

            sources.write(filepath,
                          format=Constants.table_format,
                          overwrite=True)
            self.make_reg_file(sources)

            if not self.has_sets:
                file = Constants.reduced_prefix + self.image_names + "000" + str(
                    i) + Constants.fits_extension
            else:
                file = Constants.reduced_prefix + self.image_names + "_" + str(
                    i) + "_001" + Constants.fits_extension

        # truncate any existing times file before appending
        times = self.filesdir + "workspace/" + Constants.time_file
        if os.path.exists(times):
            open(times, "w").close()

        # walk through the remaining reduced images, appending their
        # observation times from the FITS headers
        file = imagedir + file
        i = 2
        set_num = 1

        while os.path.exists(file):
            print(times)
            self.add_times(times, fits.getheader(file))
            print(i)
            if not self.has_sets:
                file = imagedir + Constants.reduced_prefix + self.image_names + "000" + str(
                    i) + ".fits"
            else:
                if i > self.set_size:
                    i = 1
                    set_num += 1

                file = imagedir + Constants.reduced_prefix + self.image_names + "_" + str(
                    set_num) + "_" + Utilities.format_index(
                        i) + Constants.fits_extension

            i += 1
Exemple #38
0
def psfphotometry(imagefile,
                  ra=None,
                  dec=None,
                  x=None,
                  y=None,
                  fwhm=5.0,
                  zp=0.0,
                  gain=1.0,
                  doDifferential=False,
                  xfield=None,
                  yfield=None,
                  xfirst=None,
                  yfirst=None):

    hdulist = fits.open(imagefile)
    header = fits.getheader(imagefile)

    if x is None:
        w = WCS(header)
        x0, y0 = w.wcs_world2pix(ra, dec, 1)
        gain = 1.0
    else:
        x0, y0 = x, y

    if len(hdulist) > 3:
        image = hdulist[1].data
    else:
        image = hdulist[0].data
    image_shape = image.shape

    #daogroup = DAOGroup(crit_separation=8)
    daogroup = DAOGroup(crit_separation=25)

    mmm_bkg = MMMBackground()
    #iraffind = IRAFStarFinder(threshold=2.0*mmm_bkg(image),
    #                          fwhm=4.0)
    fitter = LevMarLSQFitter()
    gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
    gaussian_prf.sigma.fixed = False
    gaussian_prf.flux.fixed = False

    psffile = imagefile.replace(".fits", ".psf")
    fid = open(psffile, 'w')

    if len(image_shape) == 3:

        nhdu, xshape, yshape = image.shape
        dateobs = utcparser(hdulist[0].header["UTCSTART"])
        mjd = dateobs.mjd

        if "KINCYCTI" in hdulist[0].header:
            mjdall = mjd + np.arange(
                nhdu) * hdulist[0].header["KINCYCTI"] / 86400.0
        else:
            mjdall = mjd + np.arange(
                nhdu) * hdulist[0].header["EXPTIME"] / 86400.0

        mjds, mags, magerrs, fluxes, fluxerrs = [], [], [], [], []
        for jj in range(nhdu):
            if np.mod(jj, 10) == 0:
                print('PSF fitting: %d/%d' % (jj, nhdu))

            image = hdulist[0].data[jj, :, :]
            mjd = mjdall[jj]

            n, median, std = sigma_clipped_stats(image, sigma=3.0)
            daofind = DAOStarFinder(fwhm=2.0, threshold=2. * std)

            #phot_obj = IterativelySubtractedPSFPhotometry(finder=daofind,
            #                                              group_maker=daogroup,
            #                                              bkg_estimator=mmm_bkg,
            #                                              psf_model=gaussian_prf,
            #                                              fitter=fitter,
            #                                              fitshape=(21, 21),
            #                                              niters=10)

            image = image - np.median(image)
            image_slice = np.zeros(image.shape)

            slsize = 25
            xmin = np.max([0, int(x0 - slsize)])
            xmax = np.min([int(x0 + slsize), image.shape[0]])
            ymin = np.max([0, int(y0 - slsize)])
            ymax = np.min([int(y0 + slsize), image.shape[1]])
            image_slice[ymin:ymax, xmin:xmax] = 1

            if doDifferential:
                xmin_f = np.max([0, int(xfield - slsize)])
                xmax_f = np.min([int(xfield + slsize), image.shape[0]])
                ymin_f = np.max([0, int(yfield - slsize)])
                ymax_f = np.min([int(yfield + slsize), image.shape[1]])
                image_slice[ymin_f:ymax_f, xmin_f:xmax_f] = 1

            image = image * image_slice

            if (xfirst is None) or (yfirst is None):
                phot_obj = BasicPSFPhotometry(finder=daofind,
                                              group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(21, 21),
                                              bkg_estimator=mmm_bkg)
                phot_results = phot_obj(image)
            else:
                gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
                gaussian_prf.sigma.fixed = False
                gaussian_prf.flux.fixed = False
                gaussian_prf.x_0.fixed = False
                gaussian_prf.y_0.fixed = False

                phot_obj = BasicPSFPhotometry(group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(21, 21),
                                              bkg_estimator=mmm_bkg)

                pos = Table(names=['x_0', 'y_0'],
                            data=[[xfirst, xfield], [yfirst, yfield]])
                phot_results_tmp = phot_obj(image, init_guesses=pos)
                resimage = phot_obj.get_residual_image()

                pos = Table(names=['x_0', 'y_0'], data=[[x0], [y0]])

                gaussian_prf = IntegratedGaussianPRF(flux=1, sigma=1.7)
                gaussian_prf.sigma.fixed = False
                gaussian_prf.flux.fixed = False
                gaussian_prf.x_0.fixed = True
                gaussian_prf.y_0.fixed = True

                phot_obj = BasicPSFPhotometry(group_maker=daogroup,
                                              psf_model=gaussian_prf,
                                              fitter=fitter,
                                              fitshape=(7, 7),
                                              bkg_estimator=mmm_bkg)

                phot_results = phot_obj(resimage, init_guesses=pos)

                phot_results = vstack([phot_results_tmp, phot_results])

            #if True:
            if False:
                #sources = iraffind(image)
                sources = daofind(image)
                import matplotlib.pyplot as plt

                positions = np.transpose(
                    (sources['ycentroid'], sources['xcentroid']))
                apertures = CircularAperture(positions, r=4.)
                fig, axs = plt.subplots(1, 2)
                plt.axes(axs[0])
                plt.imshow(image.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(image[image > 0], 10),
                           vmax=np.percentile(image[image > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin, ymax])
                plt.ylim([xmin, xmax])

                resimage = phot_obj.get_residual_image()
                plt.axes(axs[1])
                plt.imshow(resimage.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=0,
                           vmax=np.percentile(resimage[resimage > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin, ymax])
                plt.ylim([xmin, xmax])
                plt.savefig('test_%d.png' % jj)
                plt.close()

                fig, axs = plt.subplots(1, 2)
                plt.axes(axs[0])
                plt.imshow(image.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(image[image > 0], 10),
                           vmax=np.percentile(image[image > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin_f, ymax_f])
                plt.ylim([xmin_f, xmax_f])

                resimage = phot_obj.get_residual_image()
                plt.axes(axs[1])
                plt.imshow(resimage.T,
                           origin='lower',
                           cmap='viridis',
                           aspect=1,
                           interpolation='nearest',
                           vmin=np.percentile(resimage[resimage > 0], 10),
                           vmax=np.percentile(resimage[resimage > 0], 90))
                apertures.plot(color='red')
                plt.xlim([ymin_f, ymax_f])
                plt.ylim([xmin_f, xmax_f])
                plt.savefig('test_f_%d.png' % jj)
                plt.close()

            #phot_results.pprint_all()

            #print(stop)

            dist = np.sqrt((phot_results["x_fit"] - x0)**2 +
                           (phot_results["y_fit"] - y0)**2)
            idx = np.argmin(dist)
            flux = phot_results[idx]["flux_fit"]
            fluxerr = phot_results[idx]["flux_unc"]
            magerr = 1.0857 * fluxerr / flux  #1.0857 = 2.5/log(10)
            mag = zp - 2.5 * np.log10(flux)

            if doDifferential:
                dist = np.sqrt((phot_results["x_fit"] - xfield)**2 +
                               (phot_results["y_fit"] - yfield)**2)
                idy = np.argmin(dist)
                flux_field = phot_results[idy]["flux_fit"]
                fluxerr_field = phot_results[idy]["flux_unc"]
                magerr_field = 1.0857 * fluxerr_field / flux_field  #1.0857 = 2.5/log(10)
                mag_field = zp - 2.5 * np.log10(flux_field)

                mag = mag - mag_field
                magerr = np.sqrt(magerr**2 + magerr_field**2)
                fluxerr = np.sqrt((fluxerr / flux)**2 +
                                  (fluxerr_field / flux_field)**2)
                flux = flux / flux_field
                fluxerr = flux * fluxerr

            #print(phot_results[idy]["flux_fit"], phot_results[idx]["flux_fit"])

            mjds.append(mjd)
            mags.append(mag)
            magerrs.append(magerr)
            fluxes.append(flux)
            fluxerrs.append(fluxerr)

            fid.write('%.5f %.5f %.5f %.5f %.5f\n' %
                      (dateobs.mjd, mag, magerr, flux, fluxerr))
        fid.close()

        return np.array(mjds), np.array(mags), np.array(magerrs), np.array(
            fluxes), np.array(fluxerrs)

    else:
        mjds, mags, magerrs, fluxes, fluxerrs = [], [], [], [], []
        for ii, hdu in enumerate(hdulist):
            if ii == 0: continue
            header = hdulist[ii].header
            image = hdulist[ii].data
            if not "DATE" in header:
                print("Warning: 'DATE missing from %s hdu %d/%d" %
                      (imagefile, ii, len(hdulist)))
                continue

            dateobs = Time(header["DATE"])

            # Build the star finder and photometry object for this HDU;
            # this mirrors the image-cube branch above, where these were
            # constructed (they were previously used here undefined).
            _, median, std = sigma_clipped_stats(image, sigma=3.0)
            daofind = DAOStarFinder(fwhm=2.0, threshold=2. * std)
            phot_obj = BasicPSFPhotometry(finder=daofind,
                                          group_maker=daogroup,
                                          psf_model=gaussian_prf,
                                          fitter=fitter,
                                          fitshape=(21, 21),
                                          bkg_estimator=mmm_bkg)
            phot_results = phot_obj(image)

            dist = np.sqrt((phot_results["x_fit"] - x0)**2 +
                           (phot_results["y_fit"] - y0)**2)
            idx = np.argmin(dist)
            flux = phot_results[idx]["flux_fit"]
            fluxerr = phot_results[idx]["flux_unc"]
            magerr = 1.0857 * fluxerr / flux  #1.0857 = 2.5/log(10)
            mag = zp - 2.5 * np.log10(flux)

            mjds.append(dateobs.mjd)
            mags.append(mag)
            magerrs.append(magerr)
            fluxes.append(flux)
            fluxerrs.append(fluxerr)

            fid.write('%.5f %.5f %.5f %.5f %.5f\n' %
                      (dateobs.mjd, mag, magerr, flux, fluxerr))
        fid.close()

        return np.array(mjds), np.array(mags), np.array(magerrs), np.array(
            fluxes), np.array(fluxerrs)
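
The 1.0857 factor used above for magnitude errors follows from propagating m = zp - 2.5*log10(F); a minimal sketch with made-up numbers:

import numpy as np

# dm = |dm/dF| * dF = (2.5 / ln 10) * dF / F ~= 1.0857 * dF / F
flux, fluxerr = 1000.0, 30.0    # assumed flux and uncertainty
magerr = 2.5 / np.log(10) * fluxerr / flux
assert np.isclose(magerr, 1.0857 * fluxerr / flux, rtol=1e-4)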
Exemple #39
0


### test source detection
import numpy as np
from photutils import datasets
hdu = datasets.load_star_image()
data = hdu.data[0:400, 0:400]
image = data.astype(float)  # use the same 400x400 cutout for stats, detection, and display
image -= np.median(image)

from photutils import daofind  # functional API from older photutils; newer releases provide DAOStarFinder
from astropy.stats import mad_std
from astropy.stats import sigma_clipped_stats
bkg_sigma = mad_std(image)
mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5)
print(mean, median, std)
sources = daofind(image, fwhm=4.0, threshold=3.0*bkg_sigma)
print(sources)

from photutils import CircularAperture
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
import matplotlib.pylab as plt
positions = (sources['xcentroid'], sources['ycentroid'])
apertures = CircularAperture(positions, r=4.)
norm = ImageNormalize(stretch=SqrtStretch())
plt.imshow(data, cmap='Greys', origin='lower', norm=norm)
apertures.plot(color='blue', lw=1.5, alpha=0.5)

#
Exemple #40
0
def make_mask(filename,
              ext,
              trail_coords,
              sublen=75,
              subwidth=200,
              order=3,
              sigma=4,
              pad=10,
              plot=False,
              verbose=False):
    """Create DQ mask for an image for a given satellite trail.
    This mask can be added to existing DQ data using :func:`update_dq`.

    .. note::

        Unlike :func:`detsat`, multiprocessing is not available for
        this function.

    Parameters
    ----------
    filename : str
        FITS image filename.

    ext : int, str, or tuple
        Extension for science data, as accepted by ``astropy.io.fits``.

    trail_coords : ndarray
        One of the trails returned by :func:`detsat`.
        This must be in the format of ``[[x0, y0], [x1, y1]]``.

    sublen : int, optional
        Length of strip to use as the fitting window for the trail.

    subwidth : int, optional
        Width of box to fit trail on.

    order : int, optional
        The order of the spline interpolation for image rotation.
        See :func:`skimage.transform.rotate`.

    sigma : float, optional
        Sigma of the satellite trail for detection. If a point is more
        than this many sigma above the background in the subregion, it
        is marked as part of a satellite trail. This may need to be
        lowered for resolved trails.

    pad : int, optional
        Amount of extra padding in pixels to give the satellite mask.

    plot : bool, optional
        Plot the result.

    verbose : bool, optional
        Print extra information to the terminal, mostly for debugging.

    Returns
    -------
    mask : ndarray
        Boolean array marking the satellite trail with `True`.

    Raises
    ------
    ImportError
        Missing scipy or skimage>=0.11 packages.

    IndexError
        Invalid subarray indices.

    ValueError
        Image has no positive values, trail subarray too small, or
        trail profile not found.

    """
    if not HAS_OPDEP:
        raise ImportError('Missing scipy or skimage>=0.11 packages')

    if verbose:
        t_beg = time.time()

    fname = '{0}[{1}]'.format(filename, ext)
    image = fits.getdata(filename, ext)

    dx = image.max()
    if dx <= 0:
        raise ValueError('Image has no positive values')

    # rescale the image
    image = image / dx
    # make sure everything is at least 0
    image[image < 0] = 0

    (x0, y0), (x1, y1) = trail_coords  # p0, p1

    #  Find out how much to rotate the image
    rad = np.arctan2(y1 - y0, x1 - x0)
    newrad = (np.pi * 2) - rad
    deg = np.degrees(rad)

    if verbose:
        print('Rotation: {0}'.format(deg))

    rotate = transform.rotate(image, deg, resize=True, order=order)

    if plot and plt is not None:
        plt.ion()
        mean = np.median(image)
        stddev = image.std()
        lower = mean - stddev
        upper = mean + stddev

        fig1, ax1 = plt.subplots()
        ax1.imshow(image, vmin=lower, vmax=upper, cmap=plt.cm.gray)
        ax1.set_title(fname)

        fig2, ax2 = plt.subplots()
        ax2.imshow(rotate, vmin=lower, vmax=upper, cmap=plt.cm.gray)
        ax2.set_title('{0} rotated by {1} deg'.format(fname, deg))

        plt.draw()

    #  Will do all of this in the loop, but want to make sure there is a
    #  good point first and that there is indeed a profile to fit.
    #  get starting point
    sx, sy = _rotate_point((x0, y0), newrad, image.shape, rotate.shape)

    #  start with one subarray around p0
    dx = int(subwidth / 2)
    ix0, ix1, iy0, iy1 = _get_valid_indices(rotate.shape, sx - dx, sx + dx,
                                            sy - sublen, sy + sublen)
    subr = rotate[iy0:iy1, ix0:ix1]
    if len(subr) <= sublen:
        raise ValueError('Trail subarray size is {0} but expected {1} or '
                         'larger'.format(len(subr), sublen))

    # Flatten the array so we are looking along rows
    # Take median of each row, should filter out most outliers
    # This list will get appended in the loop
    medarr = np.median(subr, axis=1)
    flat = [medarr]

    # get the outliers
    #mean = biweight_location(medarr)
    mean = sigma_clipped_stats(medarr)[0]
    stddev = biweight_midvariance(medarr)

    # only flag things that are sigma from the mean
    z = np.where(medarr > (mean + (sigma * stddev)))[0]

    if plot and plt is not None:
        fig1, ax1 = plt.subplots()
        ax1.plot(medarr, 'b.')
        ax1.plot(z, medarr[z], 'r.')
        ax1.set_xlabel('Index')
        ax1.set_ylabel('Value')
        ax1.set_title('Median array in flat[0]')
        plt.draw()

    # Make sure there is something in the first pass before trying to move on
    if len(z) < 1:
        raise ValueError('First look at finding a profile failed. '
                         'Nothing found at {0} from background! '
                         'Adjust parameters and try again.'.format(sigma))

    # get the bounds of the flagged points
    lower = z.min()
    upper = z.max()
    diff = upper - lower

    # add in a padding value to make sure all of the wings are accounted for
    lower = lower - pad
    upper = upper + pad

    # for plotting see how the profile was made (append to plot above)
    if plot and plt is not None:
        padind = np.arange(lower, upper)
        ax1.plot(padind, medarr[padind], 'yx')
        plt.draw()

    # start to create a mask
    mask = np.zeros(rotate.shape)
    lowerx, upperx, lowery, uppery = _get_valid_indices(
        mask.shape, np.floor(sx - subwidth), np.ceil(sx + subwidth),
        np.floor(sy - sublen + lower), np.ceil(sy - sublen + upper))
    mask[lowery:uppery, lowerx:upperx] = 1

    done = False
    first = True
    nextx = upperx  # np.ceil(sx + subwidth)
    centery = np.ceil(lowery + diff)  # np.ceil(sy - sublen + lower + diff)
    counter = 0

    while not done:
        # move to the right of the centerpoint first. do the same
        # as above but keep moving right until the edge is hit.
        ix0, ix1, iy0, iy1 = _get_valid_indices(rotate.shape, nextx - dx,
                                                nextx + dx, centery - sublen,
                                                centery + sublen)
        subr = rotate[iy0:iy1, ix0:ix1]

        # determines the edge, if the subr is not good, then the edge was
        # hit.
        if 0 in subr.shape:
            if verbose:
                print('Hit edge, subr shape={0}, first={1}'.format(
                    subr.shape, first))
            if first:
                first = False
                centery = sy
                nextx = sx
            else:
                done = True
            continue

        medarr = np.median(subr, axis=1)
        flat.append(medarr)

        #mean = biweight_location(medarr)
        mean = sigma_clipped_stats(medarr, sigma=sigma)[0]
        stddev = biweight_midvariance(medarr)  # Might give RuntimeWarning
        z = np.where(medarr > (mean + (sigma * stddev)))[0]

        if len(z) < 1:
            if first:
                if verbose:
                    print('No good profile found for counter={0}. Start '
                          'moving left from starting point.'.format(counter))
                centery = sy
                nextx = sx
                first = False
            else:
                if verbose:
                    print('z={0} is less than 1, subr shape={1}, '
                          'we are done'.format(z, subr.shape))
                done = True
            continue

        # get the bounds of the flagged points
        lower = z.min()
        upper = z.max()
        diff = upper - lower

        # add in a padding value to make sure all of the wings
        # are accounted for
        lower = np.floor(lower - pad)
        upper = np.ceil(upper + pad)
        lowerx, upperx, lowery, uppery = _get_valid_indices(
            mask.shape, np.floor(nextx - subwidth), np.ceil(nextx + subwidth),
            np.floor(centery - sublen + lower),
            np.ceil(centery - sublen + upper))
        mask[lowery:uppery, lowerx:upperx] = 1

        lower_p = (lowerx, lowery)
        upper_p = (upperx, uppery)
        lower_t = _rotate_point(lower_p,
                                newrad,
                                image.shape,
                                rotate.shape,
                                reverse=True)
        upper_t = _rotate_point(upper_p,
                                newrad,
                                image.shape,
                                rotate.shape,
                                reverse=True)

        lowy = np.floor(lower_t[1])
        highy = np.ceil(upper_t[1])
        lowx = np.floor(lower_t[0])
        highx = np.ceil(upper_t[0])

        # Reset the next subr to be at the center of the profile
        if first:
            nextx = nextx + dx
            centery = lowery + diff  # centery - sublen + lower + diff

            if (nextx + subwidth) > rotate.shape[1]:
                if verbose:
                    print('Hit rotate edge at counter={0}'.format(counter))
                first = False
            elif (highy > image.shape[0]) or (highx > image.shape[1]):
                if verbose:
                    print('Hit image edge at counter={0}'.format(counter))
                first = False

            if not first:
                centery = sy
                nextx = sx

        # Not first, this is the pass the other way.
        else:
            nextx = nextx - dx
            centery = lowery + diff  # centery - sublen + lower + diff

            if (nextx - subwidth) < 0:
                if verbose:
                    print('Hit rotate edge at counter={0}'.format(counter))
                done = True
            elif (highy > image.shape[0]) or (highx > image.shape[1]):
                if verbose:
                    print('Hit image edge at counter={0}'.format(counter))
                done = True

        counter += 1

        # make sure it does not try to loop infinitely
        if counter > 500:
            if verbose:
                print('Too many loops, exiting')
            done = True
    # End while

    rot = transform.rotate(mask, -deg, resize=True, order=1)
    ix0 = (rot.shape[1] - image.shape[1]) / 2
    iy0 = (rot.shape[0] - image.shape[0]) / 2
    lowerx, upperx, lowery, uppery = _get_valid_indices(
        rot.shape, ix0, image.shape[1] + ix0, iy0, image.shape[0] + iy0)
    mask = rot[lowery:uppery, lowerx:upperx]

    if mask.shape != image.shape:
        warnings.warn(
            'Output mask shape is {0} but input image shape is '
            '{1}'.format(mask.shape, image.shape), AstropyUserWarning)

    # Change to boolean mask
    mask = mask.astype(bool)

    if plot and plt is not None:
        # debugging array
        test = image.copy()
        test[mask] = 0

        mean = np.median(test)
        stddev = test.std()
        lower = mean - stddev
        upper = mean + stddev

        fig1, ax1 = plt.subplots()
        ax1.imshow(test, vmin=lower, vmax=upper, cmap=plt.cm.gray)
        ax1.set_title('Masked image')

        fig2, ax2 = plt.subplots()
        ax2.imshow(mask, cmap=plt.cm.gray)
        ax2.set_title('DQ mask')

        plt.draw()

    if verbose:
        t_end = time.time()
        print('Run time: {0} s'.format(t_end - t_beg))

    return mask
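
A hypothetical usage sketch for make_mask; the filename and trail endpoints below are placeholders, not values from the original example:

# 'example_flt.fits' and the endpoints are made-up placeholders.
trail = [[1199, 602], [1357, 605]]   # [[x0, y0], [x1, y1]], as from detsat
mask = make_mask('example_flt.fits', 1, trail, sublen=75, subwidth=200)
# mask is boolean, True along the trail; fold it into the DQ array with
# the update_dq helper mentioned in the docstring.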
Exemple #41
0
def detect_threshold(data,
                     snr,
                     background=None,
                     error=None,
                     mask=None,
                     mask_value=None,
                     sigclip_sigma=3.0,
                     sigclip_iters=None):
    """
    Calculate a pixel-wise threshold image to be used to detect sources.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    snr : float
        The signal-to-noise ratio per pixel above the ``background`` for
        which to consider a pixel as possibly being part of a source.

    background : float or array_like, optional
        The background value(s) of the input ``data``.  ``background``
        may either be a scalar value or a 2D image with the same shape
        as the input ``data``.  If the input ``data`` has been
        background-subtracted, then set ``background`` to ``0.0``.  If
        `None`, then a scalar background value will be estimated using
        sigma-clipped statistics.

    error : float or array_like, optional
        The Gaussian 1-sigma standard deviation of the background noise
        in ``data``.  ``error`` should include all sources of
        "background" error, but *exclude* the Poisson error of the
        sources.  If ``error`` is a 2D image, then it should represent
        the 1-sigma background error in each pixel of ``data``.  If
        `None`, then a scalar background rms value will be estimated
        using sigma-clipped statistics.

    mask : array_like, bool, optional
        A boolean mask with the same shape as ``data``, where a `True`
        value indicates the corresponding element of ``data`` is masked.
        Masked pixels are ignored when computing the image background
        statistics.

    mask_value : float, optional
        An image data value (e.g., ``0.0``) that is ignored when
        computing the image background statistics.  ``mask_value`` will
        be ignored if ``mask`` is input.

    sigclip_sigma : float, optional
        The number of standard deviations to use as the clipping limit
        when calculating the image background statistics.

    sigclip_iters : int, optional
       The number of iterations to perform sigma clipping, or `None` to
       clip until convergence is achieved (i.e., continue until the last
       iteration clips nothing) when calculating the image background
       statistics.

    Returns
    -------
    threshold : 2D `~numpy.ndarray`
        A 2D image with the same shape as ``data`` containing the
        pixel-wise threshold values.

    See Also
    --------
    detect_sources

    Notes
    -----
    The ``mask``, ``mask_value``, ``sigclip_sigma``, and
    ``sigclip_iters`` inputs are used only if it is necessary to
    estimate ``background`` or ``error`` using sigma-clipped background
    statistics.  If ``background`` and ``error`` are both input, then
    ``mask``, ``mask_value``, ``sigclip_sigma``, and ``sigclip_iters``
    are ignored.
    """

    if background is None or error is None:
        # TODO: remove when astropy 1.1 is released
        if ASTROPY_LT_1P1:
            data_mean, data_median, data_std = sigma_clipped_stats(
                data,
                mask=mask,
                mask_val=mask_value,
                sigma=sigclip_sigma,
                iters=sigclip_iters)
        else:
            data_mean, data_median, data_std = sigma_clipped_stats(
                data,
                mask=mask,
                mask_value=mask_value,
                sigma=sigclip_sigma,
                iters=sigclip_iters)
        bkgrd_image = np.zeros_like(data) + data_mean
        bkgrdrms_image = np.zeros_like(data) + data_std

    if background is None:
        background = bkgrd_image
    else:
        if np.isscalar(background):
            background = np.zeros_like(data) + background
        else:
            if background.shape != data.shape:
                raise ValueError('If input background is 2D, then it '
                                 'must have the same shape as the input '
                                 'data.')

    if error is None:
        error = bkgrdrms_image
    else:
        if np.isscalar(error):
            error = np.zeros_like(data) + error
        else:
            if error.shape != data.shape:
                raise ValueError('If input error is 2D, then it '
                                 'must have the same shape as the input '
                                 'data.')

    return background + (error * snr)
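
A minimal usage sketch for detect_threshold on a synthetic, purely Gaussian background:

import numpy as np

rng = np.random.default_rng(0)
img = rng.normal(100.0, 5.0, (64, 64))   # fake sky: mean 100, sigma 5
thresh = detect_threshold(img, snr=3.0)
# background and error are estimated internally, so each pixel is
# roughly mean(img) + 3*std(img), i.e. about 115 here
assert thresh.shape == img.shape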
Exemple #42
0
def order_median_scale(spectra,
                       nsig=3.0,
                       niter=5,
                       overlapfrac=0.03,
                       num_min_pixels=50,
                       SN_MIN_MEDSCALE=0.5,
                       debug=False):
    '''
    Scale different orders using the median of overlap regions. It starts from the reddest order, i.e. scales H to K,
      and then scales J to H+K, etc.
    Parameters:
      spectra: XSpectrum1D spectra
      nsig: float
        sigma used for the sigma-clipped median
      niter: int
        number of iterations for the sigma-clipped median
      overlapfrac: float
        minimum overlap fraction (number of overlapped pixels divided by the number of pixels of the whole spectrum) between orders.
      num_min_pixels: int
        minimum number of required good pixels. The code only scales orders when the number of overlapped
        pixels > max(num_min_pixels, overlapfrac*len(wave))
      SN_MIN_MEDSCALE: float
        minimum per-pixel S/N required to automatically apply median scaling
      Show QA plot if debug=True
    Return:
        No return value, but the spectra are scaled in place after executing this function.
    '''
    norder = spectra.nspec
    fluxes, sigs, wave = coadd.unpack_spec(spectra, all_wave=False)
    fluxes_raw = fluxes.copy()

    # scaling spectrum order by order. We use the reddest order as the reference since slit loss in redder is smaller
    for i in range(norder - 1):
        iord = norder - i - 1
        sn_iord_iref = fluxes[iord] * (1. / sigs[iord])
        sn_iord_scale = fluxes[iord - 1] * (1. / sigs[iord - 1])
        allok = (sigs[iord - 1, :] > 0) & (sigs[iord, :] > 0) & (
            sn_iord_iref > SN_MIN_MEDSCALE) & (sn_iord_scale > SN_MIN_MEDSCALE)
        if sum(allok) > np.maximum(num_min_pixels, len(wave) * overlapfrac):
            # Ratio
            med_flux = spectra.data['flux'][
                iord, allok] / spectra.data['flux'][iord - 1, allok]
            # Clip
            mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(
                med_flux, sigma=nsig, iters=niter)
            med_scale = np.minimum(med_scale, 5.0)
            spectra.data['flux'][iord - 1, :] *= med_scale
            spectra.data['sig'][iord - 1, :] *= med_scale
            msgs.info('Scaled order %s by a factor of %s' %
                      (iord, str(med_scale)))

            if debug:
                plt.plot(wave,
                         spectra.data['flux'][iord],
                         'r-',
                         label='reference spectrum')
                plt.plot(wave,
                         fluxes_raw[iord - 1],
                         'k-',
                         label='raw spectrum')
                plt.plot(spectra.data['wave'][iord - 1, :],
                         spectra.data['flux'][iord - 1, :],
                         'b-',
                         label='scaled spectrum')
                mny, medy, stdy = stats.sigma_clipped_stats(fluxes[iord,
                                                                   allok],
                                                            sigma=nsig,
                                                            iters=niter)
                plt.ylim([0.1 * medy, 4.0 * medy])
                plt.xlim([
                    np.min(wave[sigs[iord - 1, :] > 0]),
                    np.max(wave[sigs[iord, :] > 0])
                ])
                plt.legend()
                plt.xlabel('wavelength')
                plt.ylabel('Flux')
                plt.show()
        else:
            msgs.warn(
                'Not enough overlap region for stitching different orders.')
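
The core of the scaling step above, stripped of the XSpectrum1D machinery (a sketch on synthetic overlap data):

import numpy as np
from astropy.stats import sigma_clipped_stats

rng = np.random.default_rng(1)
ref = rng.normal(10.0, 0.5, 200)               # reference order (overlap)
blue = 0.5 * ref + rng.normal(0.0, 0.2, 200)   # bluer order, wrong scale
_, med_scale, _ = sigma_clipped_stats(ref / blue, sigma=3.0)
blue *= med_scale                              # now roughly matches ref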
Exemple #43
0
def find_center(image, center_guess, cutout_size=30, max_iters=10):
    """
    Find the centroid of a star from an initial guess of its position. Originally
    written to find a star from a mouse click.

    Parameters
    ----------

    image : numpy array or CCDData
        Image containing the star.

    center_guess : array or tuple
        The position, in pixels, of the initial guess for the position of
        the star. The coordinates should be horizontal first, then vertical,
        i.e. opposite the usual Python convention for a numpy array.

    cutout_size : int, optional
        The default width of the cutout to use for finding the star.

    max_iters : int, optional
        Maximum number of iterations to go through in finding the center.
    """
    pad = cutout_size // 2
    x, y = center_guess

    # Keep track of iterations
    cnt = 0

    # Grab the cutout...
    sub_data = image[y - pad:y + pad, x - pad:x + pad]  # - med

    # ...do stats on it...
    _, sub_med, _ = sigma_clipped_stats(sub_data)
    # sub_med = 0

    # ...and centroid.
    x_cm, y_cm = centroid_com(sub_data - sub_med)

    # Translate centroid back to original image (maybe use Cutout2D instead)
    cen = np.array([x_cm + x - pad, y_cm + y - pad])

    # ceno is the "original" center guess, set it to something nonsensical here
    ceno = np.array([-100, -100])

    while (cnt <= max_iters and (np.abs(np.array([x_cm, y_cm]) - pad).max() > 3
                                 or np.abs(cen - ceno).max() > 0.1)):

        # Update x, y positions for subsetting
        x = int(np.floor(x_cm)) + x - pad
        y = int(np.floor(y_cm)) + y - pad
        sub_data = image[y - pad:y + pad, x - pad:x + pad]  # - med
        _, sub_med, _ = sigma_clipped_stats(sub_data)
        # sub_med = 0
        mask = (sub_data - sub_med) < 0
        x_cm, y_cm = centroid_com(sub_data - sub_med, mask=mask)
        ceno = cen
        cen = np.array([x_cm + x - pad, y_cm + y - pad])
        if np.any(np.isnan(cen)):
            raise RuntimeError('Centroid finding failed, '
                               'previous was {}, current is {}'.format(
                                   ceno, cen))
        cnt += 1

    return cen
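
A hypothetical usage sketch for find_center on a synthetic image (the star position and click coordinates are made up):

import numpy as np

img = np.zeros((101, 101))
img[48:53, 58:63] = 100.0   # fake star centered at x=60, y=50
cen = find_center(img, (62, 48), cutout_size=30)
# cen is approximately array([60., 50.]): horizontal (x) first, then y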
Exemple #44
0
    #    print(ra_pix,dec_pix)
    #    print()

    #    mask=np.array

    #    background level and error
    #    using FWHM
    #    sigma_fwhm=fwhm[k]/2.35
    #    print(k,fwhm[k],sigma_fwhm)

    #    using the median absolute deviation to estimate the background noise level gives a value
    #    bkg_noise=mad_std(imdata)
    #    print(bkg_noise)
    print()
    #    This method provides a better estimate of the background and background noise levels:
    bkg_mean, bkg_median, bkg_std = sigma_clipped_stats(imdata, sigma=3.)
    print('... bkg_mean,bkg_median,bkg_std')
    print('...', '%.4f' % bkg_mean, '%.4f' % bkg_median, '%.4f' % bkg_std)
    # bkg_std = sqrt((bkg-bkg_mean)^2/n) = background noise

    imdata[imdata < bkg_median * 0.9] = bkg_median

    bkg_err_ratio[k] = bkg_std / bkg_median
    print('... bkg_err_ratio = bkg_std/bkg_median =',
          '%.4f' % bkg_err_ratio[k])
    #    bkg_err[k]=-2.5*np.log10(bkg_std/bkg_median)
    #    print('bkg_err (mag)',bkg_err[k])
    #    1176.6646745343921 1176.0827349796416 39.1334108277639
    #    np.argwhere(np.isnan(imdata))
    #    time.sleep(1)
    #    print(WCS.world_axis_physical_types)
Exemple #45
0
    async def __call__(self, image: Image) -> Image:
        """Do aperture photometry on given image.

        Args:
            image: Image to do aperture photometry on.

        Returns:
            Image with attached catalog.
        """
        loop = asyncio.get_running_loop()

        # no pixel scale given?
        if image.pixel_scale is None:
            log.warning("No pixel scale provided by image.")
            return image

        # fetch catalog
        if image.catalog is None:
            log.warning("No catalog in image.")
            return image
        sources = image.catalog.copy()

        # get positions
        positions = [(x - 1, y - 1) for x, y in sources.iterrows("x", "y")]

        # perform aperture photometry for diameters of 1" to 8"
        for diameter in [1, 2, 3, 4, 5, 6, 7, 8]:
            # extraction radius in pixels
            radius = diameter / 2.0 / image.pixel_scale
            if radius < 1:
                continue

            # defines apertures
            aperture = CircularAperture(positions, r=radius)
            annulus_aperture = CircularAnnulus(positions,
                                               r_in=2 * radius,
                                               r_out=3 * radius)
            annulus_masks = annulus_aperture.to_mask(method="center")

            # loop annuli
            bkg_median = []
            for m in annulus_masks:
                annulus_data = m.multiply(image.data)
                annulus_data_1d = annulus_data[m.data > 0]
                _, median_sigclip, _ = sigma_clipped_stats(annulus_data_1d)
                bkg_median.append(median_sigclip)

            # do photometry
            phot = await loop.run_in_executor(
                None,
                partial(aperture_photometry,
                        image.data,
                        aperture,
                        mask=image.mask,
                        error=image.uncertainty))

            # calc flux
            bkg_median_np = np.array(bkg_median)
            aper_bkg = bkg_median_np * aperture.area
            sources["fluxaper%d" % diameter] = phot["aperture_sum"] - aper_bkg
            if "aperture_sum_err" in phot.columns:
                sources["fluxerr%d" % diameter] = phot["aperture_sum_err"]
            sources["bkgaper%d" % diameter] = bkg_median_np

        # copy image, set catalog and return it
        img = image.copy()
        img.catalog = sources
        return img
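
The per-source background trick above (sigma-clipped median of an annulus, scaled by the aperture area) can be reproduced with plain NumPy; a sketch on synthetic data:

import numpy as np
from astropy.stats import sigma_clipped_stats

rng = np.random.default_rng(2)
data = rng.normal(50.0, 2.0, (64, 64))   # flat sky plus noise
data[30:34, 30:34] += 500.0              # fake source near (31.5, 31.5)

yy, xx = np.indices(data.shape)
r = np.hypot(xx - 31.5, yy - 31.5)
annulus = (r >= 8) & (r < 12)            # ring of sky pixels
_, bkg_med, _ = sigma_clipped_stats(data[annulus])
aper = r < 4
flux = data[aper].sum() - bkg_med * np.count_nonzero(aper)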
Exemple #46
0
    def _combine(self, files, scale_by, rh0, delta0, path):
        if scale_by == 'nightly':
            k = 0, 0
        elif scale_by == 'coma':
            # coma: delta**1 rh**4
            k = 1, 4
        else:
            # surface: delta**2 rh**2
            k = 2, 2

        stack = []
        stack_ref = []
        # loop over each image
        for f in files:
            fn = os.path.join(path, f)
            with fits.open(fn) as hdu:
                h = hdu['SCI'].header
                if ((h.get('MAGZP', -1) < 0) or (h.get('CRPIX1', None) is None)
                        or (hdu['SANGLE'].header.get('CRPIX1', None) is None)):
                    continue

                # use provided mask, if possible
                if 'SANGLEMASK' in hdu:
                    obj_mask = hdu['SANGLEMASK'].data.astype(bool)
                else:
                    obj_mask = np.zeros_like(hdu['SANGLE'].data, bool)

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    # mask pixels below the data floor or non-finite
                    mask = ((hdu['SANGLE'].data < DATA_FLOOR)
                            | ~np.isfinite(hdu['SANGLE'].data))

                # unmask objects within ~5" of target position
                x = int(hdu['SANGLE'].header['CRPIX1']) - 1
                y = int(hdu['SANGLE'].header['CRPIX2']) - 1
                lbl, n = nd.label(obj_mask.astype(int))
                for m in np.unique(lbl[y - 5:y + 6, x - 5:x + 6]):
                    obj_mask[lbl == m] = False

                # get data, if not a diff image, subtract background
                # and use the object mask
                im = np.ma.MaskedArray(hdu['SANGLE'].data, mask=mask)

                usediff = hdu['SANGLE'].header.get('DIFFIMG', False)
                if not usediff:
                    im -= h['BGMEDIAN']
                    im.mask += obj_mask

                # scale by image zero point, scale to rh=delta=1 au
                magzp = (h['MAGZP'] +
                         h['CLRCOEFF'] * COLOR_DEFAULT[h['PCOLOR']])
                im *= (10**(-0.4 * (magzp - 25.0)) *
                       (h['DELTA'] / delta0)**k[0] * (h['RH'] / rh0)**k[1])

                # use reference image, if possible
                if 'SANGLEREF' in hdu:
                    # update mask with nans
                    mask = obj_mask + ~np.isfinite(hdu['SANGLEREF'].data)
                    ref = np.ma.MaskedArray(hdu['SANGLEREF'].data, mask=mask)
                    mms = sigma_clipped_stats(ref)
                    ref -= mms[1]

                    mzp = hdu['REF'].header['MAGZP']
                    ref *= (10**(-0.4 *
                                 (mzp - 25.0)) * (h['DELTA'] / delta0)**k[0] *
                            (h['RH'] / rh0)**k[1])
                else:
                    ref = None

            stack.append(im)
            if ref is not None:
                stack_ref.append(ref)

        if len(stack) == 0:
            raise BadStackSet

        im = np.ma.dstack(stack)
        im.mask += ~np.isfinite(im)
        im = np.ma.median(im, 2).filled(np.nan)
        combined = fits.ImageHDU(im)

        scale_name = {'surface': 'surf'}.get(scale_by, scale_by)

        combined.name = '{}'.format(scale_name)

        if len(stack_ref) == 0:
            combined_ref = None
        else:
            im = np.ma.dstack(stack_ref)
            im = np.ma.median(im, 2).filled(np.nan)
            combined_ref = fits.ImageHDU(im)
            combined_ref.name = '{} REF'.format(scale_name)

        return combined, combined_ref
Exemple #47
0
cax = divider.append_axes("right", size="3%", pad=0.05)
plt.colorbar(im, cax=cax)
plt.show()

#%%
from astropy.stats import mad_std, sigma_clipped_stats

FWHM = 2.7
sky_th = 10  # sky_th * sky_sigma will be used for detection lower limit : sky_th = 5
sky_s = mad_std(img)
thresh = sky_th * sky_s
print('sky_s x sky_th = threshold')
print('{0:8.6f} x {1:4d}   =   {2:8.6f}\n'.format(sky_s, sky_th, thresh))

# What if we use sigma clipping rather than MAD?
sky_a, sky_m, sky_s_sc = sigma_clipped_stats(
    img)  # default is 3-sigma, 5 iters
thresh_sc = sky_th * sky_s_sc
thresh = thresh_sc  # adopt the sigma-clipped threshold as the working value
print('3 sigma 5 iters clipped case:')
print('{0:8.6f} x {1:4d}   =   {2:8.6f}\n'.format(sky_s_sc, sky_th, thresh_sc))
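
For purely Gaussian noise the two estimators compared above nearly agree; a quick check (a sketch):

import numpy as np
from astropy.stats import mad_std, sigma_clipped_stats

x = np.random.default_rng(4).normal(0.0, 1.0, 10000)
print(mad_std(x))                    # ~1.0: MAD rescaled to Gaussian sigma
print(sigma_clipped_stats(x)[2])     # just under 1.0: clipping trims tails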

#%%
from photutils import detect_threshold

thresh_snr = detect_threshold(data=img.data, snr=3)
thresh_snr = thresh_snr[0][0]
# This will give you 3*bkg_std.
print('detect_threshold', thresh_snr)

#%%
import matplotlib.pyplot as plt
Exemple #48
0
def order_phot_scale(spectra,
                     phot_scale_dicts,
                     nsig=3.0,
                     niter=5,
                     debug=False):
    '''
    Scale coadded spectra with photometric data.
    Parameters:
      spectra: XSpectrum1D spectra (longslit) or spectra list (echelle)
      phot_scale_dicts: A dict containing photometric information for each order (if echelle).
        An example is given below.
        phot_scale_dicts = {0: {'filter': None, 'mag': None, 'mag_type': None, 'masks': None},
                            1: {'filter': 'UKIRT-Y', 'mag': 20.33, 'mag_type': 'AB', 'masks': None},
                            2: {'filter': 'UKIRT-J', 'mag': 20.19, 'mag_type': 'AB', 'masks': None},
                            3: {'filter': 'UKIRT-H', 'mag': 20.02, 'mag_type': 'AB', 'masks': None},
                            4: {'filter': 'UKIRT-K', 'mag': 19.92, 'mag_type': 'AB', 'masks': None}}
      Show QA plot if debug=True
    Return a new scaled XSpectrum1D spectrum
    '''

    from pypeit.core.flux_calib import scale_in_filter

    norder = spectra.nspec

    # scaling spectrum order by order.
    spectra_list_new = []
    for iord in range(norder):
        phot_scale_dict = phot_scale_dicts[iord]
        if (phot_scale_dict['filter'] is not None) & (phot_scale_dict['mag']
                                                      is not None):
            speci = scale_in_filter(spectra[iord], phot_scale_dict)
        else:
            #ToDo: Think of a better way to do the following
            try:
                spec0 = scale_in_filter(spectra[iord - 1],
                                        phot_scale_dicts[iord - 1])
                speci = spectra[iord]
                med_flux = spec0.data['flux'] / speci.data['flux']
                mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(
                    med_flux, sigma=nsig, iters=niter)
                med_scale = np.minimum(med_scale, 5.0)
                speci.data['flux'] *= med_scale  # scale the current order, as in the branch below
                speci.data['sig'] *= med_scale
                msgs.warn(
                    "Not enough photometric information given. Scaled order {:d} to order {:d}"
                    .format(iord, iord - 1))
            except KeyError:
                msgs.warn(
                    "Not enough photometric information given. Scale order {:d} to order {:d} failed"
                    .format(iord, iord - 1))
                try:
                    spec0 = scale_in_filter(spectra[iord + 1],
                                            phot_scale_dicts[iord + 1])
                    speci = spectra[iord]
                    med_flux = spec0.data['flux'] / speci.data['flux']
                    mn_scale, med_scale, std_scale = stats.sigma_clipped_stats(
                        med_flux, sigma=nsig, iters=niter)
                    med_scale = np.minimum(med_scale, 5.0)
                    speci.data['flux'] *= med_scale
                    speci.data['sig'] *= med_scale
                    msgs.warn(
                        "Not enough photometric information given. Scaled order {:d} to order {:d}"
                        .format(iord, iord + 1))
                except:
                    msgs.warn(
                        "Not enough photometric information given. No scaling on order {:d}"
                        .format(iord))
                    speci = spectra[iord]
        spectra_list_new.append(speci)

        if debug:
            gdp = speci.sig > 0
            plt.plot(spectra[iord].wavelength[gdp],
                     spectra[iord].flux[gdp],
                     'k-',
                     label='raw spectrum')
            plt.plot(speci.wavelength[gdp],
                     speci.flux[gdp],
                     'b-',
                     label='scaled spectrum')
            mny, medy, stdy = stats.sigma_clipped_stats(speci.flux[gdp],
                                                        sigma=3,
                                                        iters=5)
            plt.ylim([0.1 * medy, 4.0 * medy])
            plt.legend()
            plt.xlabel('wavelength')
            plt.ylabel('Flux')
            plt.show()

    return collate(spectra_list_new)
Exemple #49
0
#This image is 5644x5895 pixels, and I want to restrict my search to the very small section in the center because the edges of these images are noisy and this code is optimized to work on smaller images.
data = hdu.data[3000:3500, 3000:3500]

#Lines 16-18 bring in all the commands necessary to detect sources in an image with high precision (using source masking)
from photutils import DAOStarFinder
from photutils import make_source_mask
from astropy.stats import sigma_clipped_stats

#Lines 21-22 create a mask that covers all the sources in the image (something counts as a source if its signal:noise ratio is 2:1) and compute the estimated background level and noise using that mask for higher accuracy. We are only interested in the background noise here (the 'std' value), not the level. This estimate is more accurate than the noise `make_source_mask` assumed internally for its signal:noise cut, which it calculated on its own.
mask = make_source_mask(data,
                        snr=2,
                        npixels=5,
                        dilate_size=10,
                        sigclip_iters=None)
mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=mask, iters=None)

#Lines 25-27 find sources in the image using the more precise noise estimate (the code below applies a high 200-sigma threshold). They then define the sources' positions.
daofind = DAOStarFinder(fwhm=4.0, threshold=200 * std)
sources = daofind(data)
positions = (sources['xcentroid'], sources['ycentroid'])

#Lines 30-32 bring in all commands necessary for aperture photometry
from photutils import CircularAperture
from photutils import CircularAnnulus
from photutils import aperture_photometry
from photutils.utils import calc_total_error

#Lines 35-37 create aperture objects (a circle around each source with a concentric annulus around each circle) and group them in a 2x1 array called apers.
apertures = CircularAperture(positions, r=5.)
annuli = CircularAnnulus(positions, r_in=6., r_out=8.)
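
The snippet is cut off here; completing the grouping step the last comment describes (a sketch):

apers = [apertures, annuli]   # the 2x1 grouping described in the comment above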
Exemple #50
0
def PINES_quicklook(image_name='test.fits', interp=True):
    calibration_path = '/Users/obs72/Desktop/PINES_scripts/Calibrations/'
    if image_name == 'test.fits':
        file_path = '/Users/obs72/Desktop/PINES_scripts/test_image/test.fits'
    else:
        date_string = image_name.split('.')[0]
        file_path = '/mimir/data/obs72/' + date_string + '/' + image_name

    if os.path.exists(file_path):
        header = fits.open(file_path)[0].header
        band = header['FILTNME2']
        exptime = str(header['EXPTIME'])
        flat_path = calibration_path + 'Flats/master_flat_' + band + '.fits'
        if os.path.exists(flat_path):
            flat = fits.open(flat_path)[0].data
        else:
            print('ERROR: No ', band, '-band flat exists in ',
                  calibration_path, 'Flats/...make one.')
            return

        #Select the master dark on-disk with the closest exposure time to exptime.
        dark_top_level_path = calibration_path + 'Darks/'
        dark_files = np.array(glob(dark_top_level_path + '*.fits'))
        dark_exptimes = np.array([
            float(i.split('master_dark_')[1].split('.fits')[0])
            for i in dark_files
        ])
        dark_files = dark_files[np.argsort(dark_exptimes)]
        dark_exptimes = dark_exptimes[np.argsort(dark_exptimes)]
        dark_ind = np.argmin(np.abs(dark_exptimes - float(exptime)))
        dark_path = dark_files[dark_ind]
        dark = fits.open(dark_path)[0].data

        ut_str = header['DATE-OBS'].split(
            'T')[0] + ' ' + header['DATE-OBS'].split('T')[1].split('.')[0]
        from_zone = tz.gettz('UTC')
        to_zone = tz.gettz('America/Phoenix')
        utc = datetime.datetime.strptime(ut_str, '%Y-%m-%d %H:%M:%S')
        utc = utc.replace(tzinfo=from_zone)
        local = utc.astimezone(to_zone)
        local_str = local.strftime('%Y-%m-%d %H:%M:%S')

        raw_image = fits.open(file_path)[0].data[0:1024, :]
        reduced_image = (raw_image - dark) / flat
        avg, med, std = sigma_clipped_stats(reduced_image)

        if interp:
            bpm = fits.open(
                '/Users/obs72/Desktop/PINES_scripts/Calibrations/Bad_pixel_masks/bpm.fits'
            )[0].data
            reduced_image[bpm == 1] = np.nan
            reduced_image = interpolate_replace_nans(
                reduced_image, kernel=Gaussian2DKernel(0.5))

        fig, ax = plt.subplots(figsize=(9, 8))
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('right', size='5%', pad=0.05)
        ax.set_aspect('equal')
        norm = ImageNormalize(reduced_image, interval=ZScaleInterval())
        im = ax.imshow(reduced_image, origin='lower', norm=norm)
        fig.colorbar(im, cax=cax, orientation='vertical', label='ADU')
        ax.set_title(file_path.split('/')[-1], fontsize=16)
        plt.tight_layout()
        breakpoint()  # pause so the quicklook figure can be inspected before closing
        plt.close()
    else:
        print('ERROR: file ', file_path, ' does not exist.')
        return
Exemple #51
0
def main(args):

    # List only?
    hdu = fits.open(args.file)
    head0 = hdu[0].header
    if args.list:
        hdu.info()
        return

    # Setup for PYPIT imports
    msgs.reset(verbosity=2)

    # Init
    sdet = get_dnum(args.det, prefix=False)

    # One detector, sky sub for now
    names = [hdu[i].name for i in range(len(hdu))]

    try:
        exten = names.index('DET{:s}-PROCESSED'.format(sdet))
    except ValueError:  # Backwards compatibility
        msgs.error(
            'Requested detector {:s} was not processed.\n'
            'Maybe you chose the wrong one to view?\n'
            'Set with --det= or check file contents with --list'.format(sdet))
    sciimg = hdu[exten].data
    try:
        exten = names.index('DET{:s}-SKY'.format(sdet))
    except ValueError:  # Backwards compatibility
        msgs.error(
            'Requested detector {:s} has no sky model.\n'
            'Maybe you chose the wrong one to view?\n'
            'Set with --det= or check file contents with --list'.format(sdet))
    skymodel = hdu[exten].data
    try:
        exten = names.index('DET{:s}-MASK'.format(sdet))
    except ValueError:  # Backwards compatibility
        msgs.error(
            'Requested detector {:s} has no bit mask.\n'
            'Maybe you chose the wrong one to view?\n'
            'Set with --det= or check file contents with --list'.format(sdet))
    mask = hdu[exten].data
    try:
        exten = names.index('DET{:s}-IVARMODEL'.format(sdet))
    except ValueError:  # Backwards compatibility
        msgs.error(
            'Requested detector {:s} has no IVARMODEL.\n'
            'Maybe you chose the wrong one to view?\n'
            'Set with --det= or check file contents with --list'.format(sdet))
    ivarmodel = hdu[exten].data
    # Read in the object model for residual map
    try:
        exten = names.index('DET{:s}-OBJ'.format(sdet))
    except ValueError:  # Backwards compatibility
        msgs.error(
            'Requested detector {:s} has no object model.\n'
            'Maybe you chose the wrong one to view?\n'
            'Set with --det= or check file contents with --list'.format(sdet))
    objmodel = hdu[exten].data
    # Get waveimg
    mdir = head0['PYPMFDIR'] + '/'
    if not os.path.exists(mdir):
        mdir_base = os.path.basename(os.path.dirname(mdir)) + '/'
        msgs.warn('Master file dir: {0} does not exist. Using ./{1}'.format(
            mdir, mdir_base))
        mdir = mdir_base

    trace_key = '{0}_{1:02d}'.format(head0['TRACMKEY'], args.det)
    trc_file = os.path.join(
        mdir, MasterFrame.construct_file_name('Trace', trace_key))

    wave_key = '{0}_{1:02d}'.format(head0['ARCMKEY'], args.det)
    waveimg = os.path.join(mdir,
                           MasterFrame.construct_file_name('Wave', wave_key))

    tslits_dict = TraceSlits.load_from_file(trc_file)[0]
    slitmask = pixels.tslits2mask(tslits_dict)
    shape = (tslits_dict['nspec'], tslits_dict['nspat'])
    slit_ids = [
        trace_slits.get_slitid(shape, tslits_dict['slit_left'],
                               tslits_dict['slit_righ'], ii)[0]
        for ii in range(tslits_dict['slit_left'].shape[1])
    ]
    # Show the bitmask?
    mask_in = mask if args.showmask else None

    # Object traces
    spec1d_file = args.file.replace('spec2d', 'spec1d')

    det_nm = 'DET{:s}'.format(sdet)
    if os.path.isfile(spec1d_file):
        hdulist_1d = fits.open(spec1d_file)
    else:
        hdulist_1d = []
        msgs.warn('Could not find spec1d file: {:s}'.format(spec1d_file) +
                  msgs.newline() +
                  '                          No objects were extracted.')

    # Unpack the bitmask
    bitMask = bitmask()
    bpm, crmask, satmask, minmask, offslitmask, nanmask, ivar0mask, ivarnanmask, extractmask \
            = bitMask.unpack(mask)

    # Now show each image to a separate channel

    # SCIIMG
    image = sciimg  # Raw science image
    (mean, med, sigma) = sigma_clipped_stats(image[mask == 0],
                                             sigma_lower=5.0,
                                             sigma_upper=5.0)
    cut_min = mean - 1.0 * sigma
    cut_max = mean + 4.0 * sigma
    chname_sci = 'sciimg-det{:s}'.format(sdet)
    # Clear all channels at the beginning
    viewer, ch = ginga.show_image(image,
                                  chname=chname_sci,
                                  waveimg=waveimg,
                                  bitmask=mask_in,
                                  clear=True)
    #, cuts=(cut_min, cut_max), wcs_match=True)
    ginga.show_slits(viewer, ch, tslits_dict['slit_left'],
                     tslits_dict['slit_righ'], slit_ids)
    #, args.det)

    # SKYSUB
    image = (sciimg - skymodel) * (mask == 0)  # sky subtracted image
    (mean, med, sigma) = sigma_clipped_stats(image[mask == 0],
                                             sigma_lower=5.0,
                                             sigma_upper=5.0)
    cut_min = mean - 1.0 * sigma
    cut_max = mean + 4.0 * sigma
    chname_skysub = 'skysub-det{:s}'.format(sdet)
    # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
    viewer, ch = ginga.show_image(
        image, chname=chname_skysub, waveimg=waveimg,
        bitmask=mask_in)  #, cuts=(cut_min, cut_max),wcs_match=True)
    show_trace(hdulist_1d, det_nm, viewer, ch)
    ginga.show_slits(viewer, ch, tslits_dict['slit_left'],
                     tslits_dict['slit_righ'], slit_ids)
    #, args.det)

    # SKYRESIDS
    chname_skyresids = 'sky_resid-det{:s}'.format(sdet)
    image = (sciimg - skymodel) * np.sqrt(ivarmodel) * (mask == 0)  # sky residual map
    viewer, ch = ginga.show_image(image,
                                  chname_skyresids,
                                  waveimg=waveimg,
                                  cuts=(-5.0, 5.0),
                                  bitmask=mask_in)  #,wcs_match=True)
    show_trace(hdulist_1d, det_nm, viewer, ch)
    ginga.show_slits(viewer, ch, tslits_dict['slit_left'],
                     tslits_dict['slit_righ'], slit_ids)
    #, args.det)

    # RESIDS
    chname_resids = 'resid-det{:s}'.format(sdet)
    # full model residual map
    image = (sciimg - skymodel - objmodel) * np.sqrt(ivarmodel) * (mask == 0)
    viewer, ch = ginga.show_image(image,
                                  chname=chname_resids,
                                  waveimg=waveimg,
                                  cuts=(-5.0, 5.0),
                                  bitmask=mask_in)  #,wcs_match=True)
    show_trace(hdulist_1d, det_nm, viewer, ch)
    ginga.show_slits(viewer, ch, tslits_dict['slit_left'],
                     tslits_dict['slit_righ'], slit_ids)
    #, args.det)

    # After displaying all the images sync up the images with WCS_MATCH
    shell = viewer.shell()
    out = shell.start_global_plugin('WCSMatch')
    out = shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                          [chname_resids], {})

    if args.embed:
        IPython.embed()
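The same clipped-stats recipe drives the display cuts in every channel above: clip the good (unmasked) pixels at 5 sigma, then stretch from one sigma below the mean to four sigma above. A minimal standalone sketch, with a random image standing in for the detector data:

import numpy as np
from astropy.stats import sigma_clipped_stats

rng = np.random.default_rng(0)
image = rng.normal(loc=100.0, scale=5.0, size=(256, 256))
mask = np.zeros_like(image, dtype=int)  # 0 everywhere = all pixels good

mean, med, sigma = sigma_clipped_stats(image[mask == 0],
                                       sigma_lower=5.0,
                                       sigma_upper=5.0)
cut_min = mean - 1.0 * sigma  # lower display cut
cut_max = mean + 4.0 * sigma  # upper display cut
print(cut_min, cut_max)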
Exemple #52
0
    header = fits.getheader(os.path.join(path, fit_file))
    image_tmp = fits.getdata(os.path.join(path, fit_file))

    if dark_frame:
        dark_arr = fits.getdata(dark_frame)
        image_tmp = image_tmp - dark_arr
        ph_image = image_tmp

        minI = np.min(image_tmp)
        if minI < 0:
            print("Warning! Image - Dark has negativ pixels...")
            log_file.write("Warning! Image - Dark has negativ pixels...\n")
    else:
        ph_image = image_tmp
        mean, median, std = sigma_clipped_stats(image_tmp, sigma=3.0)
        image_tmp = image_tmp - mean

    xc = header["NAXIS1"] / 2.
    yc = header["NAXIS2"] / 2.

    w = WCS(header)
    ra_c, dec_c = w.wcs_pix2world(xc, yc, 1)  # RA DEC of FRAME center
    print("Grab stars from Vizier....")
    log_file.write("Grab stars from Vizier....\n")

    table_res = get_from_NOMAD(ra_c, dec_c, radius="3.0deg", Filter={'Vmag': '<' + max_m})
    table_res = table_res["I/297/out"]  # get NOMAD data

    table_res.remove_columns(["YM", 'r', 'pmRA', 'e_pmRA', 'pmDE', 'e_pmDE', 'Jmag', 'Hmag', 'Kmag', 'R', 'r_Bmag', 'r_Vmag', 'r_Rmag'])
    table_res.sort(["Vmag"])
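get_from_NOMAD is a project-specific helper that is not shown in this snippet; a hypothetical stand-in built on astroquery (assumed installed) could look roughly like this, since NOMAD is catalog I/297 on Vizier:

from astropy.coordinates import SkyCoord
import astropy.units as u
from astroquery.vizier import Vizier

def get_from_NOMAD(ra_c, dec_c, radius="3.0deg", Filter=None):
    # Query the NOMAD catalog (Vizier I/297) around the frame center
    center = SkyCoord(ra=float(ra_c) * u.deg, dec=float(dec_c) * u.deg)
    v = Vizier(column_filters=Filter or {}, row_limit=-1)
    return v.query_region(center, radius=radius, catalog="I/297/out")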
Exemple #53
0
            # Trim to rows/columns whose remapped coordinates stay in bounds
            goodXind = np.where(np.sum(xx2 > 0, axis=0) == ny)[0]
            lf = np.min(goodXind)
            goodXind = np.where(np.sum(xx2 < nx - 2, axis=0) == ny)[0]
            rt = np.max(goodXind) + 1
            goodYind = np.where(np.sum(yy2 > 0, axis=1) == nx)[0]
            bt = np.min(goodYind)
            goodYind = np.where(np.sum(yy2 < ny - 2, axis=1) == nx)[0]
            tp = np.max(goodYind) + 1
            yy2, xx2 = yy2[bt:tp, lf:rt], xx2[bt:tp, lf:rt]

            # Locate which corresponding pixels fall in background regions
            bkgRegion = np.zeros((ny, nx), dtype=bool)
            bkgRegion[bt:tp, lf:rt] = this2MASSbkg.data[yy2, xx2].astype(bool)
            bkgRegion = np.logical_and(bkgRegion, thisImg.data > -1e5)

            # Compute the image background statistics
            _, bkgMedian, _ = sigma_clipped_stats(thisImg.data[bkgRegion])

        else:
            # Locate the non-stellar pixels in this image
            bkgRegion = np.logical_not(find_dim_stars(thisImg.data))
            bkgRegion = np.logical_and(bkgRegion, thisImg.data > -1e5)

            # Compute the image background statistics
            _, bkgMedian, _ = sigma_clipped_stats(thisImg.data[bkgRegion])

    # Something went wrong, so just fill in with a junk value for now. This can
    # be reprocessed later.
    except Exception:
        bkgMedian = -1e6

    # Append that background level to the list
Exemple #54
0
def get_most_confident_outputs(img_id, patch_center_row, patch_center_col, confident_th, gpu_id, connected_same_vessel):

    patch_size = 64
    center = (patch_center_col, patch_center_row)

    x_tmp = int(center[0]-patch_size/2)
    y_tmp = int(center[1]-patch_size/2)

    confident_connections = {}
    confident_connections['x_peak'] = []
    confident_connections['y_peak'] = []
    confident_connections['peak_value'] = []

    root_dir = './gt_dbs/DRIVE/'
    img = Image.open(os.path.join(root_dir, 'test', 'images', '%02d_test.tif' % img_id))
    img = np.array(img, dtype=np.float32)
    h, w = img.shape[:2]

    if x_tmp > 0 and y_tmp > 0 and x_tmp+patch_size < w and y_tmp+patch_size < h:

        img_crop = img[y_tmp:y_tmp+patch_size,x_tmp:x_tmp+patch_size,:]

        img_crop = img_crop.transpose((2, 0, 1))
        img_crop = torch.from_numpy(img_crop)
        img_crop = img_crop.unsqueeze(0)

        inputs = img_crop / 255 - 0.5

        # Forward pass of the mini-batch
        inputs = Variable(inputs)

        if gpu_id >= 0:
            #torch.cuda.set_device(device=gpu_id)
            inputs = inputs.cuda()

        p = {}
        p['useRandom'] = 1  # Shuffle Images
        p['useAug'] = 0  # Use Random rotations in [-30, 30] and scaling in [.75, 1.25]
        p['inputRes'] = (64, 64)  # Input Resolution
        p['outputRes'] = (64, 64)  # Output Resolution (same as input)
        p['g_size'] = 64  # Higher means narrower Gaussian
        p['trainBatch'] = 1  # Number of Images in each mini-batch
        p['numHG'] = 2  # Number of Stacked Hourglasses
        p['Block'] = 'ConvBlock'  # Select: 'ConvBlock', 'BasicBlock', 'BottleNeck'
        p['GTmasks'] = 0 # Use GT Vessel Segmentations as input instead of Retinal Images
        model_dir = './results_dir_vessels/'
        if connected_same_vessel:
            modelName = tb.construct_name(p, "HourGlass-connected-same-vessel")
        else:
            modelName = tb.construct_name(p, "HourGlass-connected")
        numHGScales = 4  # How many times to downsample inside each HourGlass
        net = nt.Net_SHG(p['numHG'], numHGScales, p['Block'], 128, 1)
        epoch = 1800
        net.load_state_dict(torch.load(os.path.join(model_dir, modelName + '_epoch-' + str(epoch) + '.pth'),
                                       map_location=lambda storage, loc: storage))

        if gpu_id >= 0:
            net = net.cuda()

        output = net.forward(inputs)
        pred = np.squeeze(np.transpose(output[len(output)-1].cpu().data.numpy()[0, :, :, :], (1, 2, 0)))

        mean, median, std = sigma_clipped_stats(pred, sigma=3.0)
        threshold = median + (10.0 * std)
        sources = find_peaks(pred, threshold, box_size=3)

        # Walk candidate peaks from strongest to weakest, keeping those above
        # the confidence threshold
        indxs = np.argsort(sources['peak_value'])
        for idx in indxs[::-1]:
            if sources['peak_value'][idx] > confident_th:
                confident_connections['x_peak'].append(sources['x_peak'][idx])
                confident_connections['y_peak'].append(sources['y_peak'][idx])
                confident_connections['peak_value'].append(sources['peak_value'][idx])
            else:
                break

        confident_connections = Table([confident_connections['x_peak'], confident_connections['y_peak'], confident_connections['peak_value']], names=('x_peak', 'y_peak', 'peak_value'))

    return confident_connections
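The detection step above is independent of the network: sigma-clipped statistics give a robust background level and scatter, and photutils' find_peaks keeps local maxima above median + 10*std. A self-contained sketch (find_peaks lives in photutils.detection in recent releases):

import numpy as np
from astropy.stats import sigma_clipped_stats
from photutils.detection import find_peaks

rng = np.random.default_rng(1)
pred = rng.normal(0.0, 0.01, size=(64, 64))
pred[20, 30] = 1.0  # one strong synthetic peak

mean, median, std = sigma_clipped_stats(pred, sigma=3.0)
threshold = median + (10.0 * std)
sources = find_peaks(pred, threshold, box_size=3)
print(sources['x_peak', 'y_peak', 'peak_value'])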
Exemple #55
0
def fit_2dgaussian(array,
                   crop=False,
                   cent=None,
                   cropsize=15,
                   fwhmx=4,
                   fwhmy=4,
                   theta=0,
                   threshold=False,
                   sigfactor=6,
                   full_output=True,
                   debug=True):
    """ Fitting a 2D Gaussian to the 2D distribution of the data.

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub image will be cropped equal to cropsize.
    cent : tuple of int, optional
        X,Y integer position of source in the array for extracting the subimage.
        If None the center of the frame is used for cropping the subframe (the
        PSF is assumed to be ~ at the center of the frame).
    cropsize : int, optional
        Size of the subimage.
    fwhmx, fwhmy : float, optional
        Initial values for the FWHM of the fitted Gaussian, in px.
    theta : float, optional
        Angle of inclination of the 2d Gaussian counting from the positive X
        axis.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Gaussian
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
        noise.
    full_output : bool, optional
        If False it returns just the centroid, if True also returns the
        FWHM in X and Y (in pixels), the amplitude and the rotation angle,
        and the uncertainties on each parameter.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
        'centroid_y': Y coordinate of the centroid.
        'centroid_x': X coordinate of the centroid.
        'fwhm_y': Float value. FWHM in Y [px].
        'fwhm_x': Float value. FWHM in X [px].
        'amplitude': Amplitude of the Gaussian.
        'theta': Float value. Rotation angle.
        and the fit uncertainties on the above values:
        'centroid_y_err'
        'centroid_x_err'
        'fwhm_y_err'
        'fwhm_x_err'
        'amplitude_err' 
        'theta_err'

    """
    check_array(array, dim=2, msg='array')

    if crop:
        if cent is None:
            ceny, cenx = frame_center(array)
        else:
            cenx, ceny = cent

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              ceny,
                                              cenx,
                                              position=True)
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    # Creating the 2D Gaussian model
    init_amplitude = np.ptp(psf_subimage)
    xcom, ycom = cen_com(psf_subimage)
    gauss = models.Gaussian2D(amplitude=init_amplitude,
                              theta=theta,
                              x_mean=xcom,
                              y_mean=ycom,
                              x_stddev=fwhmx * gaussian_fwhm_to_sigma,
                              y_stddev=fwhmy * gaussian_fwhm_to_sigma)
    # Levenberg-Marquardt algorithm
    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(gauss, x, y, psf_subimage)

    if crop:
        mean_y = fit.y_mean.value + suby
        mean_x = fit.x_mean.value + subx
    else:
        mean_y = fit.y_mean.value
        mean_x = fit.x_mean.value
    fwhm_y = fit.y_stddev.value * gaussian_sigma_to_fwhm
    fwhm_x = fit.x_stddev.value * gaussian_sigma_to_fwhm
    amplitude = fit.amplitude.value
    theta = np.rad2deg(fit.theta.value)

    # compute uncertainties
    if fitter.fit_info['param_cov'] is not None:
        perr = np.sqrt(np.diag(fitter.fit_info['param_cov']))
        amplitude_e, mean_x_e, mean_y_e, fwhm_x_e, fwhm_y_e, theta_e = perr
        fwhm_x_e /= gaussian_fwhm_to_sigma
        fwhm_y_e /= gaussian_fwhm_to_sigma
    else:
        amplitude_e, theta_e, mean_x_e = None, None, None
        mean_y_e, fwhm_x_e, fwhm_y_e = None, None, None

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage - fit(x, y)),
                    grid=True,
                    grid_spacing=1,
                    label=label)
        print('FWHM_y =', fwhm_y)
        print('FWHM_x =', fwhm_x, '\n')
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_mean.value)
        print('centroid x subim =', fit.x_mean.value, '\n')
        print('amplitude =', amplitude)
        print('theta =', theta)

    if full_output:
        return pd.DataFrame(
            {
                'centroid_y': mean_y,
                'centroid_x': mean_x,
                'fwhm_y': fwhm_y,
                'fwhm_x': fwhm_x,
                'amplitude': amplitude,
                'theta': theta,
                'centroid_y_err': mean_y_e,
                'centroid_x_err': mean_x_e,
                'fwhm_y_err': fwhm_y_e,
                'fwhm_x_err': fwhm_x_e,
                'amplitude_err': amplitude_e,
                'theta_err': theta_e
            },
            index=[0],
            dtype=np.float64)
    else:
        return mean_y, mean_x
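Stripped of the VIP helpers (get_square, cen_com, plot_frames), the heart of fit_2dgaussian is an astropy Gaussian2D model fitted with Levenberg-Marquardt. A minimal sketch on a synthetic frame:

import numpy as np
from astropy.modeling import models, fitting
from astropy.stats import gaussian_fwhm_to_sigma, gaussian_sigma_to_fwhm

# Synthetic frame with one slightly off-centre Gaussian source plus noise
y, x = np.indices((31, 31))
truth = models.Gaussian2D(amplitude=10., x_mean=15.3, y_mean=14.7,
                          x_stddev=4 * gaussian_fwhm_to_sigma,
                          y_stddev=4 * gaussian_fwhm_to_sigma)
frame = truth(x, y) + np.random.default_rng(2).normal(0., 0.1, x.shape)

# Initial model at the frame centre, then a Levenberg-Marquardt fit
init = models.Gaussian2D(amplitude=np.ptp(frame), x_mean=15, y_mean=15,
                         x_stddev=4 * gaussian_fwhm_to_sigma,
                         y_stddev=4 * gaussian_fwhm_to_sigma)
fit = fitting.LevMarLSQFitter()(init, x, y, frame)
print(fit.x_mean.value, fit.y_mean.value,
      fit.x_stddev.value * gaussian_sigma_to_fwhm)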
Exemple #56
0
def get_spectrum(img,h_params=[],w_arr=[],\
                lambda_bin=1,vtol=3,v_width=10,ysource=None,offset=-625):
    '''Gets a spectrum, given an image. Only takes one spectrum per image (the
    brighter of two objects, if there are two in the image). img must be a file
    path as a string.
    Output: a spectrum, which is a list of two tuples, the first holding
    wavelengths and the second the corresponding fluxes.
    NOTE: offset=-1965 or so for the HR1544 images and their peers, using the
        old arcs
    offset = -625 for the newer batch with the newer arcs.
    '''
    print "start"
    out = []
    hdu = fits.open(img)
    #    header = hdu[0].header
    #    xbin = header['XBINNING']
    #    ybin = header['YBINNING']
    xbin = 1
    ybin = 1
    arr = np.asarray(CCDData.read(img, unit="adu"))
    arr = ndim.zoom(arr, (xbin, ybin), order=0)
    #find the source
    if ysource is None:
        imgt = arr.T
        stats = st.sigma_clipped_stats(imgt)
        mean = stats[0]
        std = stats[2]
        peak_tuple = sig.find_peaks(
            imgt[0], mean + vtol * std, None,
            5)  #want to find highest initial peak, to use as starting guess
        p = peak_tuple[0]
        h = peak_tuple[1]['peak_heights']
        hmax = 0
        hmloc = -1
        for i in range(0, len(h)):
            if (h[i] > hmax):
                hmax = h[i]
                hmloc = i
        if hmloc == -1:
            print("Could not find source")
            return out
        else:
            ystart = p[hmloc]
    else:
        ystart = ysource
    #trace along it, from ystart, like you do in xy_to_h, and get spectrum using
    #xy_to_lambda
    print "source at: " + str(ystart)
    if (len(h_params) == 0):
        print "No h_fit_params given!"
        m1, m2, m3, b = h_fit_params()
    else:
        m1 = h_params[0]
        m2 = h_params[1]
        m3 = h_params[2]
        b = h_params[3]
    print "parameters found"
    wavflux = []  # will be an array of 2-tuples of wavelengths and fluxes
    #no distinction is made at this point between subsequent columns extracted
    #from the image.
    if len(w_arr) == 0:
        print("No xy_to_lambda mapping given!")
        wav_arr = xy_to_lambda()
    else:
        wav_arr = w_arr
    for i in range(0, len(arr[0])):
        y = ystart + m1 * i**3 + m2 * i**2 + m3 * i  #e.g. 112.43
        ylow = int(y - v_width)  #e.g. 107
        lowfrac = ylow - (y - v_width) + 1  #e.g. 107 - 107.43 + 1 = 0.67
        yhigh = int(y + v_width)  #e.g. 117
        highfrac = (y + v_width) - yhigh  #e.g. 117.43 - 117 = 0.43
        wavflux.append((wav_arr[ylow][i], arr[ylow][i] * lowfrac))
        for j in range(
                ylow + 1, yhigh
        ):  #the plus 1 makes it: (v_width*pixels),mid pixel,(v_width*pixels)
            wavflux.append((wav_arr[j][i], arr[j][i]))
        wavflux.append((wav_arr[yhigh][i], arr[yhigh][i] * highfrac))

    wavflux.sort()
    new = []
    Warr = np.asarray(wavflux)
    Warr = Warr.T
    #average it out
    for i in range(0, len(wavflux), (2 * v_width + 1) * lambda_bin):
        wav = Warr[0][i:i + (2 * v_width + 1) * lambda_bin].mean() + offset
        flu = abs(Warr[1][i:i + (2 * v_width + 1) * lambda_bin].sum())
        new.append((wav, flu))
    #instead of 765 tuples of length 2, make it 2 of length 765
    final = list(zip(*new))
    #resample evenly
    mi = min(final[0])
    ma = max(final[0])
    delwav = (ma - mi) / len(final[0])
    newxs = np.arange(mi, ma, delwav)
    newys = np.interp(newxs, final[0], final[1])
    newxs = tuple(newxs)
    newys = tuple(newys)
    truefinal = [newxs, newys]

    return truefinal
Exemple #57
0
def fit_2d2gaussian(array,
                    crop=False,
                    cent=None,
                    cropsize=15,
                    fwhm_neg=4,
                    fwhm_pos=4,
                    theta_neg=0,
                    theta_pos=0,
                    neg_amp=1,
                    fix_neg=True,
                    threshold=False,
                    sigfactor=2,
                    full_output=False,
                    debug=True):
    """ Fitting a 2D superimposed double Gaussian (negative and positive) to 
    the 2D distribution of the data (reproduce e.g. the effect of a coronagraph)

    Parameters
    ----------
    array : numpy ndarray
        Input frame with a single PSF.
    crop : bool, optional
        If True a square sub image will be cropped equal to cropsize.
    cent : tuple of float, optional
        X,Y position of the source in the array for extracting the 
        subimage. If None the center of the frame is used for cropping the 
        subframe. If fix_neg is set to True, this will also be used as the 
        fixed position of the negative gaussian.
    cropsize : int, optional
        Size of the subimage.
    fwhm_neg, fwhm_pos : float or tuple of floats, optional
        Initial values for the FWHM of the fitted negative and positive 
        Gaussians, in px. If a tuple, should be the FWHM value along x and y.
    theta_neg, theta_pos: float, optional
        Angle of inclination of the 2d Gaussian counting from the positive X
        axis (only matters if a tuple was provided for fwhm_neg or fwhm_pos).
    neg_amp: float, optional
        First guess on the amplitude of the negative gaussian, relative to the
        amplitude of the positive gaussian (i.e. 1 means the negative gaussian
        has the same amplitude as the positive gaussian)
    fix_neg: bool, optional
        Whether to fix the position and FWHM of the negative gaussian for a
        fit with fewer free parameters. In that case, the center of the
        negative gaussian is assumed to be cent.
    threshold : bool, optional
        If True the background pixels (estimated using sigma clipped statistics)
        will be replaced by small random Gaussian noise.
    sigfactor : int, optional
        The background pixels will be thresholded before fitting a 2d Gaussian
        to the data using sigma clipped statistics. All values smaller than
        (MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
        noise.
    full_output : bool, optional
        If False it returns just the centroid, if True also returns the
        FWHM in X and Y (in pixels), the amplitude and the rotation angle,
        and the uncertainties on each parameter.
    debug : bool, optional
        If True, the function prints out parameters of the fit and plots the
        data, model and residuals.

    Returns
    -------
    mean_y : float
        Source centroid y position on input array from fitting.
    mean_x : float
        Source centroid x position on input array from fitting.

    If ``full_output`` is True it returns a Pandas dataframe containing the
    following columns:
    - for the positive gaussian:
    'amplitude' : Float value. Amplitude of the Gaussian.
    'centroid_x' : Float value. X coordinate of the centroid.
    'centroid_y' : Float value. Y coordinate of the centroid.
    'fwhm_x' : Float value. FWHM in X [px].
    'fwhm_y' : Float value. FWHM in Y [px].
    'theta' : Float value. Rotation angle from the x axis.
    - for the negative gaussian:
    'amplitude_neg' : Float value. Amplitude of the Gaussian.
    'centroid_x_neg' : Float value. X coordinate of the centroid.
    'centroid_y_neg' : Float value. Y coordinate of the centroid.
    'fwhm_x_neg' : Float value. FWHM in X [px].
    'fwhm_y_neg' : Float value. FWHM in Y [px].
    'theta_neg' : Float value. Rotation angle from the x axis.
    """
    if not array.ndim == 2:
        raise TypeError('Input array is not a frame or 2d array')

    if cent is None:
        ceny, cenx = frame_center(array)
    else:
        cenx, ceny = cent

    if crop:
        x_sub_px = cenx % 1
        y_sub_px = ceny % 1

        imside = array.shape[0]
        psf_subimage, suby, subx = get_square(array,
                                              min(cropsize, imside),
                                              int(ceny),
                                              int(cenx),
                                              position=True)
        ceny, cenx = frame_center(psf_subimage)
        ceny += y_sub_px
        cenx += x_sub_px
    else:
        psf_subimage = array.copy()

    if threshold:
        _, clipmed, clipstd = sigma_clipped_stats(psf_subimage, sigma=2)
        indi = np.where(psf_subimage <= clipmed + sigfactor * clipstd)
        subimnoise = np.random.randn(psf_subimage.shape[0],
                                     psf_subimage.shape[1]) * clipstd
        psf_subimage[indi] = subimnoise[indi]

    if isinstance(fwhm_neg, tuple):
        fwhm_neg_x, fwhm_neg_y = fwhm_neg
    else:
        fwhm_neg_x = fwhm_neg
        fwhm_neg_y = fwhm_neg

    if isinstance(fwhm_pos, tuple):
        fwhm_pos_x, fwhm_pos_y = fwhm_pos
    else:
        fwhm_pos_x = fwhm_pos
        fwhm_pos_y = fwhm_pos

    # Creating the 2D Gaussian model
    init_amplitude = np.ptp(psf_subimage)
    #xcom, ycom = cen_com(psf_subimage)
    ycom, xcom = frame_center(psf_subimage)
    fix_dico_pos = {'theta': True}
    bounds_dico_pos = {
        'amplitude': [0.8 * init_amplitude, 1.2 * init_amplitude],
        'x_mean': [xcom - 3, xcom + 3],
        'y_mean': [ycom - 3, ycom + 3],
        'x_stddev': [
            0.5 * fwhm_pos_x * gaussian_fwhm_to_sigma,
            2 * fwhm_pos_x * gaussian_fwhm_to_sigma
        ],
        'y_stddev': [
            0.5 * fwhm_pos_y * gaussian_fwhm_to_sigma,
            2 * fwhm_pos_y * gaussian_fwhm_to_sigma
        ]
    }

    gauss_pos = models.Gaussian2D(amplitude=init_amplitude,
                                  x_mean=xcom,
                                  y_mean=ycom,
                                  x_stddev=fwhm_pos_x * gaussian_fwhm_to_sigma,
                                  y_stddev=fwhm_pos_y * gaussian_fwhm_to_sigma,
                                  theta=np.deg2rad(theta_pos) % (np.pi),
                                  fixed=fix_dico_pos,
                                  bounds=bounds_dico_pos)
    if fix_neg:
        fix_dico_neg = {
            'x_mean': True,
            'y_mean': True,
            'x_stddev': True,
            'y_stddev': True,
            'theta': True
        }
        bounds_dico_neg = {
            'amplitude':
            [neg_amp * 0.5 * init_amplitude, neg_amp * 2 * init_amplitude]
        }
    else:
        fix_dico_neg = {}  #{'theta':True}
        bounds_dico_neg = {
            'amplitude':
            [neg_amp * 0.5 * init_amplitude, neg_amp * 2 * init_amplitude],
            'x_mean': [xcom - 3, xcom + 3],
            'y_mean': [ycom - 3, ycom + 3],
            'x_stddev': [
                0.5 * fwhm_neg_x * gaussian_fwhm_to_sigma,
                2 * fwhm_neg_x * gaussian_fwhm_to_sigma
            ],
            'y_stddev': [
                0.5 * fwhm_neg_y * gaussian_fwhm_to_sigma,
                2 * fwhm_neg_y * gaussian_fwhm_to_sigma
            ],
            'theta': [0, np.pi]
        }

    gauss_neg = models.Gaussian2D(amplitude=init_amplitude * neg_amp,
                                  x_mean=cenx,
                                  y_mean=ceny,
                                  x_stddev=fwhm_neg_x * gaussian_fwhm_to_sigma,
                                  y_stddev=fwhm_neg_y * gaussian_fwhm_to_sigma,
                                  theta=np.deg2rad(theta_neg) % (np.pi),
                                  fixed=fix_dico_neg,
                                  bounds=bounds_dico_neg)

    double_gauss = gauss_pos - gauss_neg

    fitter = fitting.LevMarLSQFitter()
    y, x = np.indices(psf_subimage.shape)
    fit = fitter(double_gauss, x, y, psf_subimage, maxiter=100000, acc=1e-08)

    # positive gaussian
    if crop:
        mean_y = fit.y_mean_0.value + suby
        mean_x = fit.x_mean_0.value + subx
    else:
        mean_y = fit.y_mean_0.value
        mean_x = fit.x_mean_0.value
    fwhm_y = fit.y_stddev_0.value * gaussian_sigma_to_fwhm
    fwhm_x = fit.x_stddev_0.value * gaussian_sigma_to_fwhm
    amplitude = fit.amplitude_0.value
    theta = np.rad2deg(fit.theta_0.value)

    # negative gaussian
    if crop:
        mean_y_neg = fit.y_mean_1.value + suby
        mean_x_neg = fit.x_mean_1.value + subx
    else:
        mean_y_neg = fit.y_mean_1.value
        mean_x_neg = fit.x_mean_1.value
    fwhm_y_neg = fit.y_stddev_1.value * gaussian_sigma_to_fwhm
    fwhm_x_neg = fit.x_stddev_1.value * gaussian_sigma_to_fwhm
    amplitude_neg = fit.amplitude_1.value
    theta_neg = np.rad2deg(fit.theta_1.value)

    if debug:
        if threshold:
            label = ('Subimage thresholded', 'Model', 'Residuals')
        else:
            label = ('Subimage', 'Model', 'Residuals')
        plot_frames((psf_subimage, fit(x, y), psf_subimage - fit(x, y)),
                    grid=True,
                    grid_spacing=1,
                    label=label)
        print('FWHM_y =', fwhm_y)
        print('FWHM_x =', fwhm_x, '\n')
        print('centroid y =', mean_y)
        print('centroid x =', mean_x)
        print('centroid y subim =', fit.y_mean_0.value)
        print('centroid x subim =', fit.x_mean_0.value, '\n')
        print('amplitude =', amplitude)
        print('theta =', theta)
        print('FWHM_y (neg) =', fwhm_y_neg)
        print('FWHM_x (neg) =', fwhm_x_neg, '\n')
        print('centroid y (neg) =', mean_y_neg)
        print('centroid x (neg) =', mean_x_neg)
        print('centroid y subim (neg) =', fit.y_mean_1.value)
        print('centroid x subim (neg) =', fit.x_mean_1.value, '\n')
        print('amplitude (neg) =', amplitude_neg)
        print('theta (neg) =', theta_neg)

    if full_output:
        return pd.DataFrame(
            {
                'centroid_y': mean_y,
                'centroid_x': mean_x,
                'fwhm_y': fwhm_y,
                'fwhm_x': fwhm_x,
                'amplitude': amplitude,
                'theta': theta,
                'centroid_y_neg': mean_y_neg,
                'centroid_x_neg': mean_x_neg,
                'fwhm_y_neg': fwhm_y_neg,
                'fwhm_x_neg': fwhm_x_neg,
                'amplitude_neg': amplitude_neg,
                'theta_neg': theta_neg
            },
            index=[0],
            dtype=np.float64)
    else:
        return mean_y, mean_x
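The key trick in fit_2d2gaussian is that astropy compound models let the negative component be written as a literal subtraction, with sub-model parameters gaining _0/_1 suffixes. A small sketch on a noiseless synthetic target:

import numpy as np
from astropy.modeling import models, fitting

y, x = np.indices((41, 41))
gauss_pos = models.Gaussian2D(amplitude=10., x_mean=20., y_mean=20.,
                              x_stddev=4., y_stddev=4.)
gauss_neg = models.Gaussian2D(amplitude=5., x_mean=20., y_mean=20.,
                              x_stddev=1.5, y_stddev=1.5)
double_gauss = gauss_pos - gauss_neg  # compound model: positive minus negative

frame = double_gauss(x, y)  # noiseless synthetic target
fit = fitting.LevMarLSQFitter()(double_gauss, x, y, frame, maxiter=1000)
# sub-model parameters carry _0 (positive) and _1 (negative) suffixes
print(fit.amplitude_0.value, fit.amplitude_1.value)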
Exemple #58
0
def getEclipseTimes(fnames, coords, obsname, myLoc=None):
    '''
    Searches <myLoc> for .log files, and uses them to get the times of the eclipses.

    The technique for this is to make a smoothed plot of the numerical gradient, and look for two mirrored peaks - one
    where the lightcurve enters eclipse (showing as a trough in gradient), and one for egress (showing as a peak in
    gradient). Ideally, they will be mirrors of each other, with the same width and height (though one will be the negative
    of the other).

    A double gaussian is fitted to it using a gaussian process, and the midpoint between their peaks is taken to be the
    eclipse time. To characterise the error of the eclipse time, an MCMC is used to sample the found fit. This is beefy,
    and takes a while, but the Hessian error matrix we were getting out of scipy.optimize was heavily dependent on initial
    conditions, so was untrustworthy.


    Arguments:
    ----------
    fnames: list of str
        The .log files to extract eclipse times from.

    coords: str
        The RA and Dec of the stars in the eclipses you're fitting. Note that all data being fitted is assumed to be for
        the same object, hence the RA and Dec used in each log file is the same. i.e., make sure you're not fitting data
        for more than one object at once!
        Note: must be readable by astropy!

    obsname: str
        The observatory name. See coord.EarthLocation.get_site_names() for a list. If a site is not in the registry,
        this string is assumed to be longitude and latitude, and will be attempted again.

    myLoc: str, default None
        The directory to search for eclipses. If None, searches the current working directory.

    Returns:
    --------
    None, but creates a file with eclipse times in it.
    '''
    plt.ion()
    printer("\n\n--- Getting eclipse times from the data ---")

    star = coord.SkyCoord(
        coords,
        frame='icrs',
        unit=(units.hour, units.deg)
    )

    # Where are we working?
    if myLoc is None:
        myLoc = path.curdir
        printer("Defaulting to current directory: {}".format(myLoc))


    # Make the ephemeris directory, where I'll put my stuff
    ephem_dir = path.join(myLoc, "EPHEMERIS")
    print("Putting the ephemeris data in {}".format(ephem_dir))
    try:
        mkdir(ephem_dir)
        print("Created the directory!")
    except FileExistsError:
        print("The directory already exists!")

    # Where am I looking for prior data, and saving my new data?
    oname = 'eclipse_times.txt'
    oname = path.join(ephem_dir, oname)



    source_key, tl = read_ecl_file(oname)

    # # What am I using to get new data from?
    # printer("Grabbing log files...")
    # fnames = list(glob.iglob('{}/**/*.log'.format(myLoc), recursive=True))
    # fnames = sorted(fnames)
    #
    # if len(fnames) == 0:
    #     printer("I couldn't find any log files in:")
    #     printer("{}".format(myLoc))
    #     raise FileNotFoundError("No log files in {}".format(myLoc))

    # List the files we found
    printer("Getting eclipse times from these log files: ")
    for i, fname in enumerate(fnames):
        printer("  {:>2d} - {}".format(i, fname))
    printer('  ')

    temp_file = open('eclipse_times.tmp', 'w')

    for lf in fnames:
        printer("  Looking at the file {}".format(lf))
        # lets make the file reading more robust
        log = Hlog.read(lf)
        if log == {}:
            printer("  Failed to get data from Hlog.read function, skipping this file.")
            continue
        aps = log.apnames

        printer("File: {}".format(lf))
        if len(aps['1']) < 2:
            printer("-> Not looking for eclipses in {}, as only one aperture in the file.".format(lf))
            continue

        # Get the first CCD lightcurve, and correct it to the barycentric time
        try:
            inspect = log.tseries('2', '1') / log.tseries('2', aps['1'][1])
        except Exception:
            inspect = log.tseries('1', '1') / log.tseries('1', aps['1'][1])
        printer("Correcting observations from MJD to Barycentric MJD")
        printer("  -> Location: {}".format(obsname))
        printer("  -> Star: {}".format(star))
        inspect_corr = tcorrect(inspect, star, obsname)
        # Discard the first 10 observations, as they're often junk
        inspect_corr = inspect_corr[10:]

        x, y = smooth_derivative(inspect_corr, 9, 5)
        yerr = 0.001*np.ones_like(x)

        fig, ax = plt.subplots(2, figsize=[16,8], sharex=True)
        ax[0].set_title("{}".format(lf))
        ax[0].plot(x, y)

        ax[1].set_title('Lightcurve:')
        inspect_corr.mplot(ax[1])

        gauss = PlotPoints(fig)
        gauss.connect()
        plt.tight_layout()

        plt.show(block=True)

        try:
            lowerlim = gauss.lowerlim
        except AttributeError:
            lowerlim = x.min()

        try:
            upperlim = gauss.upperlim
        except AttributeError:
            upperlim = x.max()

        # Apply upper/lower limits
        mask = (x < upperlim) & (x > lowerlim)

        y    = y[mask]
        yerr = yerr[mask]
        x    = x[mask]

        if gauss.flag:
            printer("-> No eclipse taken from {}".format(lf))
            continue

        kwargs = gauss.gaussPars()
        # hold values close to initial guesses
        bounds = dict(
            t0=(kwargs['t0']-kwargs['sep']/8, kwargs['t0']+kwargs['sep']/8),
            sep=(0.9*kwargs['sep'], 1.1*kwargs['sep']),
            log_sigma2=(np.log(kwargs['sep']**2/10000), np.log(kwargs['sep']**2/25)),
            peak=(0.9*kwargs['peak'], 1.1*kwargs['peak'])
        )
        kwargs['bounds'] = bounds

        mean_model = TwoGaussians(**kwargs)

        mean, median, std = sigma_clipped_stats(y)
        delta_t = np.mean(np.diff(x))*5
        kernel = terms.RealTerm(log_a=np.log(std**2), log_c=-np.log(delta_t))
        gp = celerite.GP(kernel, mean=mean_model, fit_mean=True)
        gp.compute(x, yerr)
        # print("  Initial log-likelihood: {0}".format(gp.log_likelihood(y)))


        # Fit for the maximum likelihood parameters
        initial_params = gp.get_parameter_vector()
        bounds = gp.get_parameter_bounds()


        # Find a solution using Stu's minimisation method
        soln = minimize(neg_log_like, initial_params, jac=grad_neg_log_like,
                        method="L-BFGS-B", bounds=bounds, args=(y, gp))
        if not soln.success:
            printer('  Warning: may not have converged')
            printer(soln.message)

        print("solution from minimise:")
        print(soln)

        gp.set_parameter_vector(soln.x)
        mean_model.set_parameter_vector(gp.get_parameter_vector()[2:])

        out = soln['x']
        t_ecl = out[2]

        printer("\n\nUsing MCMC to characterise error at peak likelihood...")


        # Use an MCMC model, starting from the solution we found, to model the errors
        ndim     = 6
        nwalkers = 100

        # Initial positions.
        p0 = np.random.rand(ndim * nwalkers).reshape((nwalkers, ndim))
        scatter = 0.0005 # Scatter by ~ 40s in time
        p0 *= scatter
        p0 -= (scatter/2)
        p0 = np.transpose(np.repeat(out, nwalkers).reshape((ndim, nwalkers))) + p0

        try:
            # Construct a sampler
            pool = Pool()
            sampler = emcee.EnsembleSampler(
                nwalkers, ndim,
                log_like,
                args=[y, gp],
                pool=pool
            )

            # Burn in
            width=40
            nsteps = 1000
            for i, result in enumerate(sampler.sample(p0, iterations=nsteps)):
                n = int((width+1) * float(i) / nsteps)
                # print(result[0])
                sys.stdout.write("\r  Sampling...    [{}{}]".format('#'*n, ' '*(width - n)))
            pool.close()
            pos, prob, state = result

            t_ecl = np.mean(sampler.flatchain[:,2])
            err = np.std(sampler.flatchain[:,2])
            sep = np.mean(sampler.flatchain[:,3])

            temp_file.write("{},{},{}\n".format(float(t_ecl), float(err), lf))
            printer("Got a solution: {:.7f}+/-{:.7f}\n".format(t_ecl, err))

            # Make the maximum likelihood prediction
            mu, var = gp.predict(y, x, return_var=True)
            std = np.sqrt(var)

            # Plot the data
            color = "#ff7f0e"
            plt.close('all')
            fig, ax = plt.subplots(2, 1, sharex=True)
            ax[0].plot(x, y, '.', label='Data')
            ax[0].plot(x, mu, color=color, label='Data GP interpolation')
            ax[0].fill_between(x, mu+std, mu-std, color=color, alpha=0.3, edgecolor="none")
            ax[0].plot(x, mean_model.get_value(x), linestyle='--', color='blue', label='MCMC result')
            # mean_model.set_parameter_vector(soln.x[2:])
            # ax[0].plot(x, mean_model.get_value(x), linestyle='--', color='red', label='scipy result')
            ax[0].axvline(t_ecl, color='magenta', label='Eclipse time')

            inspect_corr.mplot(ax[1])
            ax[1].set_title('Lightcurve')
            ax[1].axvline(t_ecl, color='magenta')
            ax[1].axvline(t_ecl+(sep/2.), color='red')
            ax[1].axvline(t_ecl-(sep/2.), color='red')

            ax[0].set_xlim(x[0], x[-1])
            ax[0].set_title("maximum likelihood prediction - {}".format(lf.split('/')[-1]))
            ax[0].legend()
            plt.tight_layout()
            print("  Plotting fit...")
            plt.show(block=False)

            cont = input("  Save these data? y/n: ")
            if cont.lower() == 'y':
                figname = lf
                figname = figname.replace('/', '_').replace(".log", ".png")
                figname = path.join("EPHEMERIS", figname)
                printer("Saved the ephemeral fit to:\n   {}".format(figname))
                plt.savefig(figname)

                locflag = input("    What is the source of these data: ")

                key = '-1' # This ensures that if source_key is empty, the new data are pushed to index '0'
                for key in source_key:
                    if locflag == source_key[key]:
                        locflag = key
                        break
                if locflag != key:
                    key = str(int(key)+1)
                    source_key[key] = locflag
                    locflag = key
                tl.append(['0', float(t_ecl), float(err), locflag])
                printer("Saved the data: {}".format(['0', float(t_ecl), float(err), locflag]))
            else:
                printer("  Did not store eclipse time from {}.".format(lf))

            plt.close()
            printer("")
        except celerite.solver.LinAlgError:
            printer('  Celerite failed to factorize or solve matrix. This can happen when the data are poorly fitted by the double gaussian!')
            printer("  Skipping this file.")
            input("> ")

    printer("\nDone all the files!")

    write_ecl_file(source_key, tl, oname)
    plt.ioff()

    #TODO:
    # Temporary placeholder. Think about this.
    # - Get the rounded ephemeris fit from the period and T0 supplied?
    # - Might be best to force the user to do this manually, to make it more reliable?
    printer("This string might help:\ncode {}".format(path.abspath(oname)))
    printer("Please open the file, and edit in the eclipse numbers for each one.")
    input("Hit enter when you've done this!")

    remove('eclipse_times.tmp')
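The GP initialisation above seeds a celerite RealTerm kernel from the sigma-clipped scatter of the derivative data. A minimal sketch of that setup, assuming celerite is installed and using a constant mean in place of the TwoGaussians model:

import numpy as np
import celerite
from celerite import terms
from astropy.stats import sigma_clipped_stats

# Synthetic stand-in for the smoothed-derivative lightcurve (x, y, yerr)
x = np.linspace(0.0, 1.0, 200)
y = 0.01 * np.sin(20.0 * x) + np.random.default_rng(3).normal(0.0, 0.001, x.size)
yerr = 0.001 * np.ones_like(x)

# Kernel amplitude from the robust scatter, timescale from the sampling
mean, median, std = sigma_clipped_stats(y)
delta_t = np.mean(np.diff(x)) * 5
kernel = terms.RealTerm(log_a=np.log(std**2), log_c=-np.log(delta_t))

gp = celerite.GP(kernel, mean=mean)  # constant mean instead of TwoGaussians
gp.compute(x, yerr)
print("log-likelihood:", gp.log_likelihood(y))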
Exemple #59
0
def master_photometry(hdul, dtype=np.float32, method=None, sigma=3, fwhm=4,
                      kappa=5, r=3, r_in=5, r_out=9, save_format="fits",
                      saveto=None, overwrite=False):
    r"""
    Generate statistics, positions and more fore a given hdul object.
    This uses photutils package, see it for more information,
    also astropy tables could be relevant to look at.

    Parameters
    ----------
    hdul : astropy.io.fits.HDUList
        Image to process.
    dtype : data-type, optional
        Type to use for computing, defaults to np.float32.
    method : str, optional
        Method to use for normalization if value is not None.
        Valid values/metods are 'mean' or 'median'.
        Pay attention if you already used this with master_science,
        it's the same thing!
    sigma : float, optional
        Sigma level for mean, median and standard deviation, defaults to 3.
    fwhm : float, optional
        Expected FWHM in pixels, defaults to 4.
    kappa : float, optional
        Sigma level for source detection above background, defaults to 5.
    r : float, optional
        Aperture radius, defaults to 3.
    r_in : float, optional
        Inner sky annulus radius, defaults to 5.
    r_out : float, optional
        Outer sky annulus radius, defaults to 9.
    save_format : str, optional
        Format to save the table with, defaults to 'fits',
        has no effect if saveto is None.
    saveto : str, optional
        If this is not set (None, default) files won't be saved.
        If this is set to a string, save the file with the string as name.
    overwrite : bool, optional
        While saving, overwrite existing files? Defaults to False.

    Returns
    -------
    table : photutils.aperture_photometry
    """
    data = normalize(hdul[0].data.astype(dtype), method)
    mean, median, std = sigma_clipped_stats(data, sigma=sigma)
    DAOfind = DAOStarFinder(fwhm=fwhm, threshold=kappa*std)
    positions = DAOfind(data-median)
    centroid = (positions["xcentroid"], positions["ycentroid"])
    aperture = CircularAperture(centroid, r)
    annulus = CircularAnnulus(centroid, r_in, r_out)
    apertures = [aperture, annulus]
    table = aperture_photometry(data, apertures)
    bg_mean = table["aperture_sum_1"] / annulus.area()
    bg_sum = bg_mean * aperture.area()
    table["residual_aperture_sum"] = table["aperture_sum_0"] - bg_sum
    table["mag"] = - 2.5 * np.log10(table["residual_aperture_sum"])
    if saveto is not None:
        # TODO: dtype?
        table.write(saveto, overwrite=overwrite, format=save_format)
    return table
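A standalone sketch of the detection and photometry steps inside master_photometry, run on a synthetic frame (the photutils.detection / photutils.aperture import paths assume a recent photutils; older releases exposed these names at the top level):

import numpy as np
from astropy.stats import sigma_clipped_stats
from photutils.detection import DAOStarFinder
from photutils.aperture import (CircularAperture, CircularAnnulus,
                                aperture_photometry)

rng = np.random.default_rng(4)
data = rng.normal(100.0, 1.0, (128, 128))
# Inject one Gaussian-shaped synthetic star
yy, xx = np.mgrid[0:128, 0:128]
data += 500.0 * np.exp(-((xx - 64)**2 + (yy - 40)**2) / (2 * 2.0**2))

mean, median, std = sigma_clipped_stats(data, sigma=3)
positions = DAOStarFinder(fwhm=4, threshold=5 * std)(data - median)
centroid = np.transpose((positions['xcentroid'], positions['ycentroid']))
apertures = [CircularAperture(centroid, r=3),
             CircularAnnulus(centroid, r_in=5, r_out=9)]
table = aperture_photometry(data, apertures)
print(table)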
Exemple #60
0
def optimal_subtraction(new_fits, ref_fits):
    """Function that accepts a new and a reference fits image, finds their
    WCS solution using Astrometry.net, runs SExtractor (inside
    Astrometry.net), PSFex to extract the PSF from the images, and
    performs Barak's optimal subtraction to produce the subtracted
    image (D), the significance image (S), and the corrected
    significance image (Scorr - see Zackay, Ofek & Gal-Yam 2016, ApJ,
    830, 27).

    Requirements:
    - Astrometry.net (in particular "solve-field" and index files)
    - SExtractor
    - SWarp
    - PSFex
    - ds9
    - pyfftw to speed up the many FFTs performed
    - the other modules imported at the top
 
    Written by Paul Vreeswijk ([email protected])

    """

    start_time1 = os.times()

    # define the base names of input fits files, base_new and
    # base_ref, as global so they can be used in any function in this
    # module
    global base_new, base_ref
    base_new = new_fits[0:-5]
    base_ref = ref_fits[0:-5]

    # read in header of new_fits
    t = time.time()
    with pyfits.open(new_fits) as hdulist:
        header_new = hdulist[0].header
    keywords = ['NAXIS2', 'NAXIS1', 'GAIN', 'RDNOISE', 'SATURATE', 'RA', 'DEC']
    ysize, xsize, gain_new, readnoise_new, satlevel_new, ra_new, dec_new = read_header(
        header_new, keywords)
    if verbose:
        print(keywords)
        print(read_header(header_new, keywords))

    # read in header of ref_fits
    with pyfits.open(ref_fits) as hdulist:
        header_ref = hdulist[0].header
    ysize_ref, xsize_ref, gain_ref, readnoise_ref, satlevel_ref, ra_ref, dec_ref = read_header(
        header_ref, keywords)
    if verbose:
        print(keywords)
        print(read_header(header_ref, keywords))

    if docosmics:
        # clean new image of cosmic rays
        new_fits_crr = base_new + '_crr.fits'
        new_fits_crrmask = base_new + '_crrmask.fits'
        if not os.path.isfile(new_fits_crr) or redo:
            result = clean_cosmics(new_fits, new_fits, new_fits_crrmask,
                                   gain_new, readnoise_new, 5.0, 0.3, 5.0, 4,
                                   -1., False)

        # clean ref image of cosmic rays
        ref_fits_crr = base_ref + '_crr.fits'
        ref_fits_crrmask = base_ref + '_crrmask.fits'
        if not os.path.isfile(ref_fits_crr) or redo:
            result = clean_cosmics(ref_fits, ref_fits, ref_fits_crrmask,
                                   gain_ref, readnoise_ref, 5.0, 0.3, 5.0, 4,
                                   -1., False)

    # determine WCS solution of new_fits
    new_fits_wcs = base_new + '_wcs.fits'
    if not os.path.isfile(new_fits_wcs) or redo:
        result = run_wcs(base_new + '.fits', new_fits_wcs, ra_new, dec_new,
                         gain_new, readnoise_new)

    # determine WCS solution of ref_fits
    ref_fits_wcs = base_ref + '_wcs.fits'
    if not os.path.isfile(ref_fits_wcs) or redo:
        result = run_wcs(base_ref + '.fits', ref_fits_wcs, ra_ref, dec_ref,
                         gain_ref, readnoise_ref)

    # remap ref to new
    ref_fits_remap = base_ref + '_wcs_remap.fits'
    if not os.path.isfile(ref_fits_remap) or redo:
        result = run_remap(base_new + '_wcs.fits',
                           base_ref + '_wcs.fits',
                           ref_fits_remap, [ysize, xsize],
                           gain=gain_new,
                           config='Config/swarp.config')

    # initialize full output images
    data_D_full = np.ndarray((ysize, xsize), dtype='float32')
    data_S_full = np.ndarray((ysize, xsize), dtype='float32')
    data_Scorr_full = np.ndarray((ysize, xsize), dtype='float32')
    if addfakestar:
        data_new_full = np.ndarray((ysize, xsize), dtype='float32')
        data_ref_full = np.ndarray((ysize, xsize), dtype='float32')

    # determine cutouts
    centers, cuts_ima, cuts_ima_fft, cuts_fft, sizes = centers_cutouts(
        subimage_size, ysize, xsize)
    ysize_fft = subimage_size + 2 * subimage_border
    xsize_fft = subimage_size + 2 * subimage_border
    nsubs = centers.shape[0]
    if verbose:
        print('nsubs', nsubs)
        for i in range(nsubs):
            print('i', i)
            print('cuts_ima[i]', cuts_ima[i])
            print('cuts_ima_fft[i]', cuts_ima_fft[i])
            print('cuts_fft[i]', cuts_fft[i])

    # prepare cubes with shape (nsubs, ysize_fft, xsize_fft) with new,
    # ref, psf and background images
    data_new, psf_new, psf_orig_new, data_new_bkg = prep_optimal_subtraction(
        base_new + '_wcs.fits', nsubs, 'new')
    data_ref, psf_ref, psf_orig_ref, data_ref_bkg = prep_optimal_subtraction(
        base_ref + '_wcs.fits', nsubs, 'ref')

    # determine corresponding variance images
    var_new = data_new + readnoise_new**2
    var_ref = data_ref + readnoise_ref**2

    if verbose:
        print('readnoise_new, readnoise_ref', readnoise_new, readnoise_ref)

    # get x, y and fratios from matching PSFex stars across entire frame
    x_fratio, y_fratio, fratio, dra, ddec = get_fratio_radec(
        base_new + '_wcs.psfexcat', base_ref + '_wcs.psfexcat',
        base_new + '_wcs.sexcat', base_ref + '_wcs.sexcat')
    dx = dra / pixelscale
    dy = ddec / pixelscale

    dr = np.sqrt(dx**2 + dy**2)
    if verbose: print('standard deviation dr over the full frame:', np.std(dr))
    dr_full = np.sqrt(np.median(dr)**2 + np.std(dr)**2)
    dx_full = np.sqrt(np.median(dx)**2 + np.std(dx)**2)
    dy_full = np.sqrt(np.median(dy)**2 + np.std(dy)**2)
    #dr_full = np.std(dr)
    #dx_full = np.std(dx)
    #dy_full = np.std(dy)
    if verbose:
        print('np.median(dr), np.std(dr)', np.median(dr), np.std(dr))
        print('np.median(dx), np.std(dx)', np.median(dx), np.std(dx))
        print('np.median(dy), np.std(dy)', np.median(dy), np.std(dy))
        print('dr_full, dx_full, dy_full', dr_full, dx_full, dy_full)

    #fratio_median, fratio_std = np.median(fratio), np.std(fratio)
    fratio_mean, fratio_median, fratio_std = sigma_clipped_stats(fratio,
                                                                 sigma=2.)
    if verbose:
        print('fratio_mean, fratio_median, fratio_std', fratio_mean, fratio_median, fratio_std)

    if makeplots:
        # plot dy vs dx
        plt.axis((-1, 1, -1, 1))
        plt.plot(dx, dy, 'go')
        plt.xlabel('dx (pixels)')
        plt.ylabel('dy (pixels)')
        plt.title(new_fits + '\n vs ' + ref_fits, fontsize=12)
        plt.savefig('dxdy.png')
        plt.show()
        plt.close()

        # plot dr vs x_fratio
        plt.axis((0, xsize, 0, 1))
        plt.plot(x_fratio, dr, 'go')
        plt.xlabel('x (pixels)')
        plt.ylabel('dr (pixels)')
        plt.title(new_fits + '\n vs ' + ref_fits, fontsize=12)
        plt.savefig('drx.png')
        plt.show()
        plt.close()

        # plot dr vs y_fratio
        plt.axis((0, ysize, 0, 1))
        plt.plot(y_fratio, dr, 'go')
        plt.xlabel('y (pixels)')
        plt.ylabel('dr (pixels)')
        plt.title(new_fits + '\n vs ' + ref_fits, fontsize=12)
        plt.savefig('dry.png')
        plt.show()
        plt.close()

        # plot dx vs x_fratio
        plt.axis((0, xsize, -1, 1))
        plt.plot(x_fratio, dx, 'go')
        plt.xlabel('x (pixels)')
        plt.ylabel('dx (pixels)')
        plt.title(new_fits + '\n vs ' + ref_fits, fontsize=12)
        plt.savefig('dxx.png')
        plt.show()
        plt.close()

        # plot dy vs y_fratio
        plt.axis((0, ysize, -1, 1))
        plt.plot(y_fratio, dy, 'go')
        plt.xlabel('y (pixels)')
        plt.ylabel('dy (pixels)')
        plt.title(new_fits + '\n vs ' + ref_fits, fontsize=12)
        plt.savefig('dyy.png')
        plt.show()
        plt.close()

    start_time2 = os.times()

    for nsub in range(nsubs):

        if timing: tloop = time.time()

        if verbose:
            print('\nNsub:', nsub + 1)
            print('----------')

        # determine clipped mean, median and stddev
        #mean_new, median_new, stddev_new = sigma_clipped_stats(data_new[nsub], sigma=3.)
        #print 'mean_new, median_new, stddev_new', mean_new, median_new, stddev_new
        median_new = np.median(data_new[nsub])
        stddev_new = np.sqrt(median_new + readnoise_new**2)
        if verbose:
            print('median_new, stddev_new', median_new, stddev_new)

        #mean_ref, median_ref, stddev_ref = sigma_clipped_stats(data_ref[nsub], sigma=3.)
        #print 'mean_ref, median_ref, stddev_ref', mean_ref, median_ref, stddev_ref
        median_ref = np.median(data_ref[nsub])
        stddev_ref = np.sqrt(median_ref + readnoise_ref**2)
        if verbose:
            print('median_ref, stddev_ref', median_ref, stddev_ref)

        show = False
        if makeplots and show:
            print('data_new[nsub] data type:', data_new[nsub].dtype)
            range_new = (median_new - 3. * stddev_new,
                         median_new + 3. * stddev_new)
            bins = np.linspace(range_new[0], range_new[1], 100)
            plt.hist(np.ravel(data_new[nsub]), bins, color='green')
            plt.xlabel('pixel value (e-)')
            plt.ylabel('number')
            plt.title('subsection of ' + new_fits)
            plt.show()
            plt.close()

            print('data_ref[nsub] data type:', data_ref[nsub].dtype)
            range_ref = (median_ref - 3. * stddev_ref,
                         median_ref + 3. * stddev_ref)
            bins = np.linspace(range_ref[0], range_ref[1], 100)
            plt.hist(np.ravel(data_ref[nsub]), bins, color='green')
            plt.xlabel('pixel value (e-)')
            plt.ylabel('number')
            plt.title('subsection of ' + ref_fits)
            plt.show()
            plt.close()

        # replace low values in subimages
        data_new[nsub][data_new[nsub] <= 0.] = median_new
        data_ref[nsub][data_ref[nsub] <= 0.] = median_ref

        # replace low values in variance subimages
        #var_new[nsub][var_new[nsub] < stddev_new**2] = stddev_new**2
        #var_ref[nsub][var_ref[nsub] < stddev_ref**2] = stddev_ref**2

        if addfakestar:
            # add fake star to new image
            # first normalize psf_orig_new
            psf_orig_new[nsub] /= np.amax(psf_orig_new[nsub])
            psf_orig_new[nsub] *= 3. * stddev_new
            # place it at the center of the new image
            xpos = xsize_fft // 2
            ypos = ysize_fft // 2
            data_new[nsub][ypos - 50 // 2:ypos + 50 // 2,
                           xpos - 50 // 2:xpos + 50 // 2] += psf_orig_new[nsub]

        if background_sex:
            # use background subimages
            bkg_new = data_new_bkg[nsub]
            bkg_ref = data_ref_bkg[nsub]
        else:
            # or median values of subimages
            bkg_new = median_new
            bkg_ref = median_ref

        # subtract the background
        data_new[nsub] -= bkg_new
        data_ref[nsub] -= bkg_ref

        # replace saturated pixel values with zero
        #data_new[nsub][data_new[nsub] > 0.95*satlevel_new] = 0.
        #data_ref[nsub][data_ref[nsub] > 0.95*satlevel_ref] = 0.

        # get median fratio from PSFex stars across subimage
        subcut = cuts_ima[nsub]
        index_sub = ((y_fratio > subcut[0]) & (y_fratio < subcut[1]) &
                     (x_fratio > subcut[2]) & (x_fratio < subcut[3]))

        # take local or full-frame values for fratio
        if fratio_local and any(index_sub):
            #fratio_mean, f_new, f_new_std = sigma_clipped_stats(fratio[index_sub], sigma=2.5)
            f_new, f_new_std = np.median(fratio[index_sub]), np.std(
                fratio[index_sub])
        else:
            f_new, f_new_std = fratio_median, fratio_std
        # and the same for dx and dy
        if dxdy_local and any(index_sub):
            dx_sub = np.sqrt(
                np.median(dx[index_sub])**2 + np.std(dx[index_sub])**2)
            dy_sub = np.sqrt(
                np.median(dy[index_sub])**2 + np.std(dy[index_sub])**2)
            if dx_sub > 2. * dx_full or not np.isfinite(dx_sub):
                dx_sub = dx_full
            if dy_sub > 2. * dy_full or not np.isfinite(dy_sub):
                dy_sub = dy_full
        else:
            dx_sub = dx_full
            dy_sub = dy_full

        # f_ref is set to one - could also opt to set f_new to unity instead
        f_ref = 1.
        if verbose:
            print('f_new, f_new_std, f_ref', f_new, f_new_std, f_ref)
            print('dx_sub, dy_sub', dx_sub, dy_sub)

        # call Barak's function: optimal_binary_image_subtraction
        data_D, data_S, data_Scorr = run_ZOGY(data_ref[nsub], data_new[nsub],
                                              psf_ref[nsub], psf_new[nsub],
                                              stddev_ref, stddev_new, f_ref,
                                              f_new, var_ref[nsub],
                                              var_new[nsub], dx_sub, dy_sub)
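        # run_ZOGY returns the difference image D, the score image S and the
        # noise-corrected score image Scorr in the notation of Zackay, Ofek &
        # Gal-Yam (2016)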

        # check that robust stddev of Scorr is around unity
        if verbose:
            mean_Scorr, median_Scorr, stddev_Scorr = sigma_clipped_stats(
                data_Scorr, sigma=3.)
            print('mean_Scorr, median_Scorr, stddev_Scorr',
                  mean_Scorr, median_Scorr, stddev_Scorr)
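        # for a clean subtraction the Scorr pixels should follow a unit
        # Gaussian, so a robust stddev well away from 1 hints at problems with
        # the PSFs, the astrometric offsets or the variance images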

        # put sub images into output frames
        subcut = cuts_ima[nsub]
        fftcut = cuts_fft[nsub]
        y1 = subimage_border
        x1 = subimage_border
        y2 = subimage_border + subimage_size
        x2 = subimage_border + subimage_size
        data_D_full[subcut[0]:subcut[1],
                    subcut[2]:subcut[3]] = data_D[y1:y2, x1:x2] / gain_ref
        data_S_full[subcut[0]:subcut[1],
                    subcut[2]:subcut[3]] = data_S[y1:y2, x1:x2]
        data_Scorr_full[subcut[0]:subcut[1],
                        subcut[2]:subcut[3]] = data_Scorr[y1:y2, x1:x2]
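        # only the central subimage_size region is copied back; the
        # subimage_border pixels on each side are discarded since the
        # FFT-based convolution wraps around and contaminates the edges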

        if addfakestar:
            if background_sex:
                data_new_full[
                    subcut[0]:subcut[1],
                    subcut[2]:subcut[3]] = (data_new[nsub][y1:y2, x1:x2] +
                                            bkg_new[y1:y2, x1:x2]) / gain_new
                data_ref_full[
                    subcut[0]:subcut[1],
                    subcut[2]:subcut[3]] = (data_ref[nsub][y1:y2, x1:x2] +
                                            bkg_ref[y1:y2, x1:x2]) / gain_ref
            else:
                data_new_full[subcut[0]:subcut[1], subcut[2]:subcut[3]] = (
                    data_new[nsub][y1:y2, x1:x2] + bkg_new) / gain_new
                data_ref_full[subcut[0]:subcut[1], subcut[2]:subcut[3]] = (
                    data_ref[nsub][y1:y2, x1:x2] + bkg_ref) / gain_ref

        if display and nsub in (0, 65):
            # for display purposes only:
            pyfits.writeto('D.fits', data_D, clobber=True)
            pyfits.writeto('S.fits', data_S, clobber=True)
            pyfits.writeto('Scorr.fits', data_Scorr, clobber=True)
            #pyfits.writeto('Scorr_1sigma.fits', data_Scorr_1sigma, clobber=True)

            # write new and ref subimages to fits
            subname = '_sub' + str(nsub)
            newname = base_new + '_wcs' + subname + '.fits'
            pyfits.writeto(newname, data_new[nsub] + bkg_new, clobber=True)
            refname = base_ref + '_wcs' + subname + '.fits'
            pyfits.writeto(refname, data_ref[nsub] + bkg_ref, clobber=True)
            # variance images
            pyfits.writeto('Vnew.fits', var_new[nsub], clobber=True)
            pyfits.writeto('Vref.fits', var_ref[nsub], clobber=True)

            # and display
            # the short list (commented out) shows only the main products; the
            # active command also loads the intermediate images written by
            # run_ZOGY
            #cmd = ['ds9', '-zscale', newname, refname, 'D.fits', 'S.fits',
            #       'Scorr.fits']
            cmd = [
                'ds9', '-zscale', newname, refname, 'D.fits', 'S.fits',
                'Scorr.fits', 'Vnew.fits', 'Vref.fits', 'VSn.fits', 'VSr.fits',
                'VSn_ast.fits', 'VSr_ast.fits', 'Sn.fits', 'Sr.fits',
                'kn.fits', 'kr.fits'
            ]
            result = call(cmd)
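            # call (presumably subprocess.call) blocks until the ds9 window is
            # closed, pausing the loop while the subimages are inspected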

        if timing:
            print('wall-time spent in nsub loop', time.time() - tloop)

    end_time = os.times()
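    # os.times() returns (user, system, children_user, children_system,
    # elapsed); indices 2-4 are used, presumably because most of the heavy
    # lifting happens in child processes such as SExtractor and SWarp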
    dt_usr = end_time[2] - start_time2[2]
    dt_sys = end_time[3] - start_time2[3]
    dt_wall = end_time[4] - start_time2[4]
    print()
    print("Elapsed user time in {0}:  {1:.3f} sec".format("optsub", dt_usr))
    print("Elapsed system time in {0}:  {1:.3f} sec".format("optsub", dt_sys))
    print("Elapsed wall time in {0}:  {1:.3f} sec".format("optsub", dt_wall))

    dt_usr = end_time[2] - start_time1[2]
    dt_sys = end_time[3] - start_time1[3]
    dt_wall = end_time[4] - start_time1[4]
    print()
    print("Elapsed user time in {0}:  {1:.3f} sec".format("total", dt_usr))
    print("Elapsed system time in {0}:  {1:.3f} sec".format("total", dt_sys))
    print("Elapsed wall time in {0}:  {1:.3f} sec".format("total", dt_wall))

    # write full new, ref, D and S images to fits
    if addfakestar:
        pyfits.writeto('new.fits', data_new_full, header_new, clobber=True)
        pyfits.writeto('ref.fits', data_ref_full, header_ref, clobber=True)
    pyfits.writeto('D.fits', data_D_full, clobber=True)
    pyfits.writeto('S.fits', data_S_full, clobber=True)
    pyfits.writeto('Scorr.fits', data_Scorr_full, clobber=True)

    # and display
    if addfakestar:
        cmd = [
            'ds9', '-zscale', 'new.fits', 'ref.fits', 'D.fits', 'S.fits',
            'Scorr.fits'
        ]
    else:
        cmd = [
            'ds9', '-zscale', new_fits, ref_fits_remap, 'D.fits', 'S.fits',
            'Scorr.fits'
        ]
    result = call(cmd)