Example #1
def find_psf(image, fwhm, show=False):
	'''
	Obtain the positions of source centroids in an image matrix
	---
	Input:

	- image:	str		image path including the file name
	- fwhm:		float	full width at half maximum of the sources, in pixels
	- show:		bool	if True, save a plot of the image with the detected sources

	Returns the astropy Table produced by DAOStarFinder.
	'''
	# imports used by this function; fits, np and plt were assumed to be
	# module-level in the original snippet
	from astropy.io import fits
	from astropy.stats import sigma_clipped_stats
	from photutils.detection import DAOStarFinder
	import numpy as np
	import matplotlib.pyplot as plt

	im, hdr = fits.getdata(image, header=True)  # read the FITS image (data + header)
	im = np.array(im, dtype='float64')  # cast the data to a float matrix
	tam = np.shape(im)  # dimensions of the matrix (unused)
	mean, median, std = sigma_clipped_stats(im, sigma=3.0, maxiters=5)  # background stats

	# detect sources brighter than 5 sigma in the background-subtracted image
	# (the original passed threshold=median and a hard-coded fwhm=3.5,
	# which ignored the fwhm argument)
	daofind = DAOStarFinder(fwhm=fwhm, threshold=5. * std)
	sources = daofind.find_stars(im - median)

	if show:
		plt.figure()
		plt.imshow(im, origin='lower', cmap=plt.cm.gray,
		           vmin=mean - std, vmax=mean + std)
		plt.colorbar()
		plt.scatter(sources['xcentroid'], sources['ycentroid'], color='red')
		plt.savefig('psf_sources_.png')

	return sources
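A minimal usage sketch (the file name and FWHM below are placeholders, not from the original):

sources = find_psf('field.fits', fwhm=3.5, show=True)  # hypothetical file name
print(sources['xcentroid', 'ycentroid'])  # astropy Table multi-column selection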
Example #2
    def _detect_sources(self):
        from photutils import DAOStarFinder

        # self._med and self._std are assumed to hold background statistics
        # (e.g. the sigma-clipped median and standard deviation of self.image)
        # computed elsewhere in the class; pl is assumed to be a pyplot alias
        fwhm = 3.
        detection_threshold = 3.
        daofind = DAOStarFinder(threshold=(self._med +
                                           self._std * detection_threshold),
                                fwhm=fwhm)
        sources = daofind.find_stars(self.image)
        pl.plot(sources['xcentroid'], sources['ycentroid'], 'r.')
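A sketch of how the assumed _med and _std attributes could be computed with sigma_clipped_stats (synthetic stand-in image; not taken from the original class):

import numpy as np
from astropy.stats import sigma_clipped_stats

image = np.random.normal(100., 5., (256, 256))  # stand-in for self.image
mean, med, std = sigma_clipped_stats(image, sigma=3.0)  # med, std -> self._med, self._std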
Example #3
def find_sources(file_, fwhm):
    """
    Uses DAOStarFinder to extract source positions from fits file
    :param file_: name of the target .fits file (str)
    :param fwhm: full width at half maximum of the Gaussian kernel, in pixels (float)
    For more configuration options, see
    https://photutils.readthedocs.io/en/stable/api/photutils.detection.DAOStarFinder.html
    """
    # Read in fits file as numpy array
    data = read_fits(file_, return_array=True)
    # Calculate background level
    mean, median, std = stats.sigma_clipped_stats(data)
    print('mean, median, std =', mean, median, std)
    # Set up DAO Finder and run on bg subtracted image, printing results
    # sharplo=.2, sharphi=1., roundlo=-.3, roundhi=.3,
    daofind = DAOStarFinder(exclude_border=True, fwhm=fwhm, threshold=std)
    sources = daofind.find_stars(data - median)
    print('Sources:')
    print(sources)
    # Save positions of detected sources to csv file
    positions = np.transpose((sources['xcentroid'], sources['ycentroid']))  # (N, 2) pairs for CircularAperture
    print_positions = list(
        zip(*[
            sources[x] for x in [
                'id', 'xcentroid', 'ycentroid', 'sharpness', 'roundness1',
                'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag'
            ]
        ]))  # materialize the rows so np.savetxt can consume them
    header = 'id,xcentroid,ycentroid,sharpness,roundness1,roundness2,npix,sky,peak,flux,mag'
    np.savetxt(file_[:-5] + '_positions.csv',
               print_positions,
               fmt='%.5e',
               header=header)
    # Show image with detected sources circled in blue
    apertures = CircularAperture(positions, r=4.)
    norm = ImageNormalize(stretch=SqrtStretch())
    plt.imshow(data, cmap='Greys', origin='lower', norm=norm)
    apertures.plot(color='blue', lw=1.5, alpha=0.5)
    plt.draw()
    # Scatter plot sharpness vs magnitude
    plt.figure(2)
    sharp, round_, mags = (sources['sharpness'], sources['roundness1'],
                           sources['mag'])
    plt.scatter(mags, sharp)
    plt.title('Sharpness vs Magnitude')
    plt.xlabel('Mag')
    plt.ylabel('Sharp')
    # Scatter plot roundness vs magnitude
    plt.figure(3)
    plt.scatter(mags, round_)
    plt.title('Roundness vs Magnitude')
    plt.xlabel('Mag')
    plt.ylabel('Roundness1')
    plt.show()
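A hedged usage sketch (the file name is a placeholder; read_fits is this module's own helper, assumed to return the image as a numpy array):

find_sources('ngc1234.fits', fwhm=4.0)  # hypothetical target
# writes 'ngc1234_positions.csv' and opens the diagnostic plots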
Example #4
    def centroid(self, x, y, data, radius):
        # sigma and fwhm_daofind are assumed module-level configuration values
        dist = 1024  # larger than any plausible separation; tracks the nearest source
        x0, y0, arr = self.cut_region(x, y, radius, data)
        mean, median, std = sigma_clipped_stats(arr, sigma=sigma)
        daofind = DAOStarFinder(threshold=5.*std, fwhm=fwhm_daofind)
        sources = daofind.find_stars(arr - median)
        cx, cy = x, y
        for row in sources:
            # row[1] and row[2] are xcentroid / ycentroid in the cutout frame
            dist_temp = math.sqrt((row[1] + x0 - x)**2 + (row[2] + y0 - y)**2)
            if dist_temp < dist:
                dist = dist_temp
                cx = row[1] + x0
                cy = row[2] + y0

        print('center: ', cx + 1, cy + 1)  # 1-based coordinates for display
        return (cx, cy)
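Column access by name is less brittle than the positional row[1]/row[2] indexing above; a self-contained sketch (synthetic image, assumed parameters):

import numpy as np
from astropy.stats import sigma_clipped_stats
from photutils.detection import DAOStarFinder

yy, xx = np.mgrid[0:64, 0:64]
arr = np.random.normal(0., 1., (64, 64))
arr += 100. * np.exp(-((xx - 40)**2 + (yy - 30)**2) / (2 * 2.0**2))  # fake star

_, med, std = sigma_clipped_stats(arr, sigma=3.0)
sources = DAOStarFinder(threshold=5.*std, fwhm=4.7).find_stars(arr - med)
if sources is not None:
    for row in sources:
        print(row['xcentroid'], row['ycentroid'])  # name-based access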
Example #5
def begin(index):
    print(index)
    #mgi = int(index.split('-')[1])
    i = int(index.split('-')[0])
    #mgi = int(index.split('-')[1])
    color = index.split('-')[1]
    #print("%f, %f" % (i, mgi))
    #i = int(index)

    line = linecache.getline('Full Data.txt', i)
    #filename = 'Final Data Extract/' + str(i) + '.fit'
    #filename = 'MG II Dataset/' + str(i) + '.fit'


    #f1 = '/data/marvels/billzhu/2175 Dataset/' + str(i) + '-' + str(mgi) + '-g.fit'
    f1 = '/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '.fit'
    #f1 = '/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + str(mgi) + '-' + color + '.fit'


    hdulist = 0
    try:
        hdulist = fits.open(f1)
    except:
        print("FILE DOES NOT EXIST")
        return
    
    prihdr = hdulist[0].header

    #raDegColPix = 0
    #raDegRowPix = 0
    #decDegColPix = 0
    #decDegRowPix = 0


    # If statements for finding out ra and dec pixel specifications

    """
    if 'RA' in prihdr.comments['CD1_1']:
        raDegColPix = prihdr['CD1_1']
        raDegRowPix = prihdr['CD1_2']
        decDegColPix = prihdr['CD2_1']
        decDegRowPix = prihdr['CD2_2']
    else:
        decDegColPix = prihdr['CD1_1']
        decDegRowPix = prihdr['CD1_2']
        raDegColPix = prihdr['CD2_1']
        raDegRowPix = prihdr['CD2_2']

    ra1 = 0
    dec1 = 0
    if 'RA' in prihdr.comments['CRVAL1']:
        ra1 = prihdr['CRVAL1']
        dec1 = prihdr['CRVAL2']
    else:
        ra1 = prihdr['CRVAL2']
        dec1 = prihdr['CRVAL1']

    x1 = 0
    y1 = 0
    if 'X' in prihdr.comments['CRPIX1']:
        x1 = prihdr['CRPIX1']
        y1 = prihdr['CRPIX2']
    else:
        x1 = prihdr['CRPIX2']
        y1 = prihdr['CRPIX1']

    refpix = point(ra1, dec1, x1, y1)
    #print("%f, %f, %f, %f" % (x1, y1, ra1, dec1))
    """

    # Calculates the coordinates given RA and DEC and the data of a reference pixel

    data = line.split()
    qRA = float(data[1])
    qDEC = float(data[2])

    #bigtable = Table.read('final_catalog_full.fit')
    #qRA = bigtable['RA'][0][i]
    #qDEC = bigtable['DEC'][0][i]

    #print("%f, %f, %f" % (i, qRA, qDEC))
    #print("%f, %f, %f, %f" % (raDegColPix, raDegRowPix, decDegColPix, decDegRowPix))


    
    # Package function yields much higher precision than hard-coded function

    print("%f, %f" % (qRA, qDEC))
    wcstransform = wcs.WCS(prihdr)
    x_test, y_test = wcstransform.wcs_world2pix(qRA, qDEC, 0)
    print("%f, %f" % (x_test, y_test))

    


    

    """
    p = qRA - refpix.ra + raDegColPix * refpix.x + raDegRowPix * refpix.y
    q = qDEC - refpix.dec + decDegColPix * refpix.x + decDegRowPix * refpix.y
    qX = int((p * decDegRowPix - q * raDegRowPix) / (raDegColPix * decDegRowPix - raDegRowPix * decDegColPix))
    qY = int((q * raDegColPix - p * decDegColPix) / (raDegColPix * decDegRowPix - raDegRowPix * decDegColPix))


    # Checks for RA and DEC boundary issues
    
    if source.inbounds(qX, qY) == False:
        #print(str(i))
        if qRA < refpix.ra and refpix.ra - qRA > 358:
            qRA += 360
        else:
            if qRA > refpix.ra and qRA - refpix.ra > 358:
                refpix.ra += 360
        if qDEC < refpix.dec and refpix.dec - qDEC > 178:
            qDEC += 90                      
        else:
             if qDEC > refpix.dec and qDEC - refpix.dec > 178:
                refpix.dec += 90
        p = qRA - refpix.ra + raDegColPix * refpix.x + raDegRowPix * refpix.y
        q = qDEC - refpix.dec + decDegColPix * refpix.x + decDegRowPix * refpix.y
        qX = int((p * decDegRowPix - q * raDegRowPix) / (raDegColPix * decDegRowPix - raDegRowPix * decDegColPix))
        qY = int((q * raDegColPix - p * decDegColPix) / (raDegColPix * decDegRowPix - raDegRowPix * decDegColPix))
    
       
    print("%f,  %f" % (qX, qY))
    """

    """
    scidata = hdulist[0].data.astype(float)
    obj_table = Table.open('/data/marvels/billzhu/MG II Obj/0.37 - 0.55/' + str(i) + '.fit')[0]

    pointer = 0
    if color == 'g':
        pointer = 1
    if color == 'r':
        pointer = 2
    if color == 'i':
        pointer = 3
    if color == 'z':
        pointer = 4
    if color == 'u':
        pointer = 0

    quasar = 0
    for j in range(len(obj_table)):
        if abs(obj_table['rowc'][j] - y_test) < 5 and abs(obj_table['colc'][j] - x_test) < 5 and obj_table['nchild'][j] > 0:
            quasar = obj_table[j]
            break

    children = []
    for j in range(len(obj_table)):
        if obj_table['parent'][j] == quasar['id']:
            children.append(obj_table[j])
    """
    
    try:
        qX = int(x_test)
        qY = int(y_test)
    except:
        return

    scidata = hdulist[0].data.astype(float)  # in the original, this assignment existed only inside the commented-out block above, so scidata was undefined below

    half = 15
    yl = qY - half
    yu = qY + half
    xl = qX - half
    xu = qX + half

    if yl < 0:
        yl = 0
    if yu > len(scidata):
        yu = len(scidata)
    if xl < 0:
        xl = 0
    if xu > len(scidata[0]):
        xu = len(scidata[0])
    image = scidata[yl : yu, xl : xu]

    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm = 2., threshold = 5.*bkg_sigma)
    sources = daofind(image)
    sources['xcentroid'] += xl
    sources['ycentroid'] += yl


    print(sources)

    """
    deg_diff = 1000000
    quasar = 0
    ra11 = 0
    dec11 = 0
    for j in range(len(sources)):
        dra_maybe = abs(sources['xcentroid'][j] - refpix.x) * raDegColPix + abs(sources['ycentroid'][j] - refpix.y) * raDegRowPix
        ddec_maybe = abs(sources['xcentroid'][j] - refpix.x) * decDegColPix + abs(sources['ycentroid'][j] - refpix.y) * decDegRowPix
        dra_real = abs(qRA - refpix.ra)
        ddec_real = abs(qDEC - refpix.dec)

        #print(math.sqrt((dra_maybe - dra_real)**2 + (ddec_maybe - ddec_real)**2))
        
        if math.sqrt((dra_maybe - dra_real)**2 + (ddec_maybe - ddec_real)**2) < deg_diff:
            deg_diff = math.sqrt((dra_maybe - dra_real)**2 + (ddec_maybe - ddec_real)**2)
            quasar = sources[j]
    """

    diff = 1000000
    quasar = 0
    for j in range(len(sources)):
        dist1 = distance(sources['xcentroid'][j], sources['ycentroid'][j], qX, qY)
        if dist1 < diff:
            diff = dist1
            quasar = sources[j]

    if quasar == 0:
        #print(deg_diff)
        print("ERROR NO QUASAR")
        return

    print(quasar)
    
    
                
    # Write the right coordinates to fits file header for access later for PSF subtraction
    
    hdulist[0].header.append(('XCOORD', int(quasar['xcentroid']), 'x coordinate of quasar in image'), end = True)
    hdulist[0].header.append(('YCOORD', int(quasar['ycentroid']), 'y coordinate of quasar in image'), end = True)

    
    # Code for shifting the quasar to the actual centroid
    
    chunk_size = 50
    if source.inbounds(quasar['xcentroid'] + chunk_size, quasar['ycentroid'] + chunk_size) and source.inbounds(quasar['xcentroid'] - chunk_size, quasar['ycentroid'] - chunk_size):
        #print("SUCCESS")

        
        """
        #try:
        preshift = scidata[qYmax - int(chunk_size) : qYmax + int(chunk_size) + 1, qXmax - int(chunk_size) : qXmax + int(chunk_size) + 1]

        mean, median, std = sigma_clipped_stats(preshift, sigma=3.0, iters=5)

        qXc, qYc = centroid_1dg(preshift, mask = None)
        #print("%f  %f" % (qXc, qYc))
        qXc += qXmax - chunk_size
        qYc += qYmax - chunk_size
        #print("%f  %f" % (qXc, qYc))
        """        

        qXc = quasar['xcentroid']
        qYc = quasar['ycentroid']

        
        preshift = scidata[int(qYc) - chunk_size - 5: int(qYc) + chunk_size + 6, int(qXc) - chunk_size - 5 : int(qXc) + chunk_size + 6]

        
        """
        plt.imshow(preshift, origin='lower', interpolation='nearest', cmap='viridis')
        marker = '+'
        ms, mew = 30, 2.
        #plt.plot(qXc - qXma + chunk_size, qYc - qYmax + chunk_size, color='#1f77b4', marker=marker, ms=ms, mew=mew)
        plt.show()
        plt.pause(3)
        """
        

        xr = np.arange(int(qXc) - chunk_size - 5, int(qXc) + chunk_size + 6, 1)
        yr = np.arange(int(qYc) - chunk_size - 5, int(qYc) + chunk_size + 6, 1)


        # Shifts the data to center around the centroid using 2d interpolation

        try:
            if(len(xr) == len(preshift[0])):
                #print("SUCCESS 2.0")
                shifted = []

                spline = interpolate.interp2d(xr, yr, preshift)

                xrf = np.arange(qXc - chunk_size, qXc + chunk_size + 1, 1)
                yrf = np.arange(qYc - chunk_size, qYc + chunk_size + 1, 1)

                if len(xrf) > 101:
                    xrf = xrf[:-1].copy()
                if len(yrf) > 101:
                    yrf = yrf[:-1].copy()

                shifted = spline(xrf, yrf)

                """
                mean, median, stddev = sigma_clipped_stats(preshift, sigma=3.0, iters=5)
                preshift -= median
                check_sources = daofind.find_stars(preshift)

                if len(check_sources) > 1:
                    print(len(check_sources))
                    return
                """

                # If the source has a weird shape i.e. due to gravitational lensing, then check if the maximum pixel is within 3 pixels of the center to ensure consistency
                daofind = DAOStarFinder(fwhm = 2, threshold=3.0*bkg_sigma)
                sources = daofind.find_stars(shifted)
                #sources['xcentroid'] += xl
                #sources['ycentroid'] += yl
                cont = checkInner(shifted, sources)

                if cont == True:
                    return
                mean1, median1, std1 = sigma_clipped_stats(shifted, sigma=3.0, maxiters=5)
                #print("%f, %f" % (mean1, std1))
                shifted = checkOutter(shifted, mean1, std1)
                


                """
                max_coords = np.unravel_index(shifted.argmax(), shifted.shape)
                max_coords = list(max_coords)
                print(max_coords)
                for k in range(len(max_coords)//2):
                    yt = max_coords[2 * k]
                    xt = max_coords[2 * k + 1]
                    print("%f, %f" % (xt, yt))
                    if distance(xt, yt, 40, 40) > 3:
                        print('MAX TOO FAR')
                        return
                """

                print('NO FAIL YET')
                #fits.writeto('/data/marvels/billzhu/2175 Quasar Cut/' + str(i) + '-' + str(mgi) + '_DUST.fit', shifted, hdulist[0].header, clobber = True)
                #fits.writeto('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + str(i) + '-' + str(mgi) + '_REF.fit', shifted, hdulist[0].header, clobber = True)
                #fits.writeto('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit', shifted, hdulist[0].header, clobber = True)
                fits.writeto('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + str(mgi) + '-' + color + '_REF.fit', shifted, hdulist[0].header, clobber = True)

        except:
            print(False)
            return

        return
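The coordinate step above leans on astropy's WCS machinery; a minimal sketch of the same world-to-pixel conversion with a hand-built TAN header (all values hypothetical):

from astropy import wcs

w = wcs.WCS(naxis=2)
w.wcs.crpix = [1024.5, 744.5]              # reference pixel
w.wcs.crval = [180.0, 0.5]                 # RA, Dec at the reference pixel (deg)
w.wcs.cdelt = [-0.0001098, 0.0001098]      # deg / pixel
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']

x, y = w.wcs_world2pix(180.01, 0.49, 0)    # 0-based pixel coordinates
print(float(x), float(y))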
Example #6
def begin(index):
    chunk_size = 50
    #i = int(index)
    i = int(index.split('-')[0])
    mgi = int(index.split('-')[1])
    color = index.split('-')[2]
    #mgi = int(index.split('-')[1])
    
    try:
        print(index)
        #filename = 'Test Data Extract/' + str(i) + '.fit'
        #filename = str(i) + '-g.fit'


        #filename = '/data/marvels/billzhu/2175 Dataset/' + str(index) + '-g.fit'
        filename = '/data/marvels/billzhu/Reference Dataset/0.37 - 0.55/' + color + '/' + str(index) + '.fit'
        #print(filename)
        #filename = '/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '.fit'
        hdulist = fits.open(filename)

        #qlist = fits.open('MG II Test Cut/' + str(i) + '_MG.fit')

        #qlist = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + str(index) + '_DUST.fit')
        qlist = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
        #qlist = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
        x = qlist[0].header['XCOORD']
        y = qlist[0].header['YCOORD']
        #print("%f, %f" % (x, y))
        qlist.close()

    except:
        print("No coordinates")
        return

    # Save some frickin time

        
    
    half = 700
    scidata = hdulist[0].data.astype(float)
    mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, maxiters=5)
    
    if x + chunk_size > 2048:
        filler = np.array([float(median)] * len(scidata))
        for j in range(chunk_size):
            scidata = np.insert(scidata, len(scidata[0]), filler, 1)

    if x - chunk_size < 0:
        x += chunk_size
        filler = np.array([float(median)] * len(scidata))
        for j in range(chunk_size):
            scidata = np.insert(scidata, 0, filler, 1)

    if y + chunk_size > 1489:
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(chunk_size):
            scidata = np.insert(scidata, len(scidata), filler, 0)

    if y - chunk_size < 0:
        y += chunk_size
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(chunk_size):
            scidata = np.insert(scidata, 0, filler, 0)


    #if 'SKY' in hdulist[0].header.keys():
    #    scidata -= float(hdulist[0].header['SOFTBIAS'])
    #    scidata -= float(hdulist[0].header['SKY'])
    #else:
    scidata -= median
        
    psfindex = -1
    quasar = 0
    bkg_sigma = mad_std(scidata)

    
    # DAOStarFinder pass that finds all sources greater than 5 sigma above the background value around the quasar position
    
    daofind = DAOStarFinder(fwhm = 2., threshold = 5.*bkg_sigma)
    #print("%f, %f" % (x, y))
    sources = daofind(scidata[y - 10 : y + 10, x - 10 : x + 10])

    
    # Update coordinates of the sources
    
    sources['xcentroid'] += x - 10
    sources['ycentroid'] += y - 10

    
    #print(sources)

    
    # Create new column that contains the FWHM of each source for comparison later
    
    FWHM = np.empty([len(sources)])
    column = Column(FWHM, name = 'FWHM')
    sources.add_column(column)


    
    # Find the quasar and calculate its FWHM
    
    for j in range(len(sources)):
        if abs(round(sources['xcentroid'][j]) - x) < 3.0 and abs(round(sources['ycentroid'][j]) - y) < 3.0:
            quasar = sources[j]
            width = int(np.sqrt(sources['npix'][j]))
            #print("%d   %d   %f   %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
            data = scidata[int(sources['ycentroid'][j] - width - 1) : int(sources['ycentroid'][j] + width) + 2, int(sources['xcentroid'][j] - width - 1) : int(sources['xcentroid'][j] + width) + 2]


            """
            plt.imshow(data, origin='lower', interpolation='nearest', cmap='viridis')
            plt.show()
            plt.pause(3)
            """
            
            gauss = 0
            
            if(np.ma.count(data) >= 7):
                gauss = photutils.fit_2dgaussian(data, mask = None)
            
            fwhm = 0
            if gauss != 0:
                fwhm = 2*np.sqrt(2*np.log(2))*gauss.x_stddev  # FWHM = 2*sqrt(2 ln 2) * sigma_x; the original took sqrt(x_stddev)
                quasar['FWHM'] = fwhm
                qsigma = np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)
                print(quasar['FWHM'])
                break

            
    ztot = 10000
    print(quasar)

    
    # If no quasar is found, the field image is deemed corrupt and not used
    
    if quasar == 0:
        return


    # Define cutout image limits, adjusted to field image boundaries as necessary i.e. x, y < 0 or > max x/y values
    
    yl = y - half
    yu = y + half
    xl = x - half
    xu = x + half

    if yl < 0:
        yl = 0
    if yu > len(scidata):
        yu = len(scidata)
    if xl < 0:
        xl = 0
    if xu > len(scidata[0]):
        xu = len(scidata[0])
    image = scidata[yl : yu, xl : xu]

    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm = quasar['FWHM'], threshold=7.*bkg_sigma, roundlo = -0.20, roundhi = 0.20)
    sources = daofind.find_stars(scidata)
    #print(len(sources))


    #qsocut = fits.open('/data/marvels/billzhu/2175 Quasar Cut/' + str(index) + '_DUST.fit')
    qsocut = fits.open('/data/marvels/billzhu/Reference Quasar Cut/0.37 - 0.55/' + color + '/' + str(index) + '_REF.fit')
    #qsocut = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_MG.fit')
    qsodata = qsocut[0].data.astype(float)


    

    # Shift the source coordinates to the actual image coordinates

    #sources['xcentroid'] += xl
    #sources['ycentroid'] += yl


    # If no sources found, skip iteration
    
    if len(sources) <= 0:
        return


    # Interpolates the PSF sources to their actual centroids, upsampling it 2x, and adds it to large array for PCA

    temp = 1
    largearr = []
    for j in range(len(sources)):
        if abs(sources['xcentroid'][j] - quasar['xcentroid']) < 2 and abs(sources['ycentroid'][j] - quasar['ycentroid']) < 2:
            continue
        
        chunk_size = 50

        pXc = sources['xcentroid'][j]
        pYc = sources['ycentroid'][j]
        #print("%f,  %f" % (pXc, pYc))

        xr = np.arange(int(pXc) - chunk_size - 5, int(pXc) + chunk_size + 6)
        yr = np.arange(int(pYc) - chunk_size - 5, int(pYc) + chunk_size + 6)


        preshift = scidata[int(pYc) - chunk_size - 5 : int(pYc) + chunk_size + 6, int(pXc) - chunk_size - 5: int(pXc) + chunk_size + 6]

        shifted = []
        try:
            spline = interpolate.interp2d(xr, yr, preshift)
            xrf = np.arange(pXc - chunk_size, pXc + chunk_size + 1, 1)
            yrf = np.arange(pYc - chunk_size, pYc + chunk_size + 1, 1)
        except:
            #print("ERROR")
            continue
        
        if len(xrf) > 101:
            xrf = xrf[:-1].copy()
        if len(yrf) > 101:
            yrf = yrf[:-1].copy()

        shifted = spline(xrf, yrf)
        cont = False


        
        
        # Safety that screens out images with multiple sources by checking incremental means
        # CHECK DISCONTINUED DUE TO INCOMPLETENESS / SOME ERRORS

        """
        meanarr = []
        for k in range(0, 5):
            tempcut = list(shifted[20 - 4 * k : 21 + 4 * k, 20 - 4 * k : 21 + 4 * k])
            mean1 = np.mean(tempcut)
            #print(mean1)
            if len(meanarr) > 0 and mean1 > meanarr[len(meanarr) - 1]:
                cont = True
                #print(temp)
                #fits.writeto(str(temp) + '.fit', shifted, clobber = True)
                #temp += 1
                break
            
            meanarr.append(mean1)
        """

        

        # Originally discontinued, but upon closer inspection, the same source finder parameters is used as in the original source search, thus sources found
        # will be the same, i.e. ideal source finder
        
        #check_source = daofind.find_stars(preshift)
        #if len(check_source) > 1:
        #    continue

        
        # If the source has a weird shape i.e. due to gravitational lensing, then check if the maximum pixel is within 2 pixels of the center to ensure consistency

        mean1, median1, std1 = sigma_clipped_stats(shifted, sigma=3.0, maxiters=5)
        daofind = DAOStarFinder(fwhm = 2, threshold=3.0*bkg_sigma)
        sources1 = daofind.find_stars(shifted)
        cont = checkInner(shifted, sources1)

        if cont == True:
            continue
        
        shifted = checkOutter(shifted, mean1, std1)

        """
        max_coords = np.unravel_index(shifted.argmax(), shifted.shape)
        max_coords = list(max_coords)
        #print(max_coords)
        for k in range(len(max_coords)//2):
            yt = max_coords[2 * k]
            xt = max_coords[2 * k + 1]
            #print("%f, %f" % (xt, yt))
            if distance(xt, yt, 20, 20) > 4:
                cont = True
                break
        """

        


        #fits.writeto(str(temp) + '.fit', shifted, clobber = True)
        #print(temp)
        #print(meanarr)
        #shifted /= np.max(shifted)
        #shifted *= np.max(qsodata)
        largearr.append(np.reshape(shifted, 10201))

    largearr = np.array(largearr)
    print(np.shape(largearr))


    # Set number of components in PCA, use incremental PCA (IPCA) due to high efficiency and speed
    
    numcomp = 20


    # Need a healthy number of PSF stars to build the fit and suppress noise;
    # require at least 8 suitable sources
    
    if len(largearr) < 8:
        return

    print(numcomp)
    mean_vector = []

    #print(np.shape(largearr))

    try:
        for j in range(0, 10201):
            mean_vector.append(np.mean(largearr[:, j]))
    except:
        print("NO SOURCE FOUND")
        return

    largearr -= mean_vector
        
    ipca = IncrementalPCA(n_components=numcomp)
    ipca.fit(largearr)
    ipca_comp = ipca.components_
    #print(np.shape(ipca_comp))
    ipca_comp = ipca_comp.T
    #print(ipca_comp)

    #print(np.shape(largearr[0, :]))
    #print(np.shape(ipca_comp))
    total_res = 0
    max_median = 10000000



    """
    # Calculate optimal number of coefficients to be taken

    for p, take in enumerate([12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120]):
        if take > len(largearr)//4 * 3 or take < len(largearr) //2:
            continue
        
        totalres = 0
        for j in range(len(largearr)): 
            coeff = np.dot(largearr[j, :], ipca_comp[:, 0:take])
            fit = np.dot(ipca_comp[:, 0:take], coeff[0:take])
            resfit = largearr[j, :] - fit
            total_res += resfit

        m1, m2, s1 = sigma_clipped_stats(total_res, sigma=3.0, iters=5)

        if m2 < max_median:
            max_median = m1
            take_final = take

            # Lowest mean means lowest residual
            
            
            plt.imshow(np.reshape(fit, (42, 42)), origin='lower', interpolation='nearest', cmap='viridis')
            plt.show()
            plt.pause(2)
            plt.close()
    """


    

    #if 'SKY' in hdulist[0].header.keys():
    #    qsodata -= float(hdulist[0].header['SOFTBIAS'])
    #    qsodata -= float(hdulist[0].header['SKY'])
    #else:
    qsodata -= median


    take_final = 4
    

    # Final fitting of the first n components, as determined by take_final, into the quasar to build a PSF fit
    
    qsodata = np.reshape(qsodata, 10201)
    coeff = np.dot(qsodata, ipca_comp[:, 0:take_final])
    final_fit = np.dot(ipca_comp[:, 0:take_final], coeff[0:take_final])
    final_fit += mean_vector
    final_fit = np.reshape(final_fit, (101, 101))
    #final_fit /= len(largearr)
    qsodata = np.reshape(qsodata, (101, 101))

    """
    qx = np.arange(0, len(qsodata))
    qy = np.arange(0, len(qsodata))
    spline = interpolate.interp2d(qx, qy, qsodata)
    qxf = np.arange(0, len(qsodata), 0.1)
    qyf = np.arange(0, len(qsodata), 0.1)
    qsodata = spline(qxf, qyf)
    spline = interpolate.interp2d(qx, qy, final_fit)
    final_fit = spline(qxf, qyf)
    """

    gauss_fit = photutils.fit_2dgaussian(final_fit[40 : 61, 40 : 61], mask = None)
    fit_fwhm = 2*np.sqrt(2*np.log(2))*gauss_fit.x_stddev  # FWHM from the fitted sigma; the original took sqrt of the stddev
    #print(fit_fwhm)

    
    #print("%f, %f" % (quasar['FWHM'], fit_fwhm))
    ffwhm = max(quasar['FWHM'], fit_fwhm)
    ffphoton_1sig = photonCount(50, 50, 2 * ffwhm, final_fit) 
    #qsophoton_1sig = photonCount(50, 50, 6, qsodata)

    """
    for j in range(len(final_fit)):
        for k in range(len(final_fit)):
            if distance(50, 50, j, k) < 3:
                final_fit[j][k] /= ffphoton_1sig
                final_fit[j][k] *= qsophoton_1sig
    """
    

    
    #final_fit /= ffphoton_1sig
    #final_fit *= qsophoton_1sig
    
    line_data = linecache.getline('Full Data.txt', i).split()

    if color == 'g':
        mag = float(line_data[6])
    if color == 'r':
        mag = float(line_data[8])
    if color == 'i':
        mag = float(line_data[10])
    if color == 'z':
        mag = float(line_data[12])
    if color == 'u':
        mag = float(line_data[4])
        

    
    try:
        multiplier = 10**(mag / (-2.5)) * 10**8 * hdulist[0].header['FLUX20']
        final_fit /= ffphoton_1sig
        final_fit *= multiplier
    except:
        #final_fit *= qsodata[50, 50]
        return
    

        
    """
    header = hdulist[0].header
    mag20 = header['flux20'] - median
    
    plt.imshow(qsodata, origin='lower', interpolation='nearest', cmap='viridis')
    plt.show()

    plt.imshow(totalfit, origin='lower', interpolation='nearest', cmap='viridis')
    plt.show()
    #plt.pause(3)
    """


    print("%f, %f" % (qsodata[50][50], final_fit[50][50]))
    residue = qsodata - final_fit

    """
    residue /= mag20

    f1 = fits.open('/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/4-g.fit')
    h1 = f1[0].header
    mean1, median1, stddev1 = sigma_clipped_stats(f1[0].data.astype(float), sigma=3.0, iters=5)
    mag20_1 = h1['flux20'] - median1
    residue *= mag20_1

    qdata = linecache.readline('Full Data.txt', i)
    c = coord.SkyCoord(ra = float(qdata[1]), dec = float(qdata[2]))

    sfd = SFDQuery()
    residue *= 10**(0.4 * sfd(c))
    """

    #plt.imshow(residue, origin='lower', interpolation='nearest', cmap='viridis')
    #plt.show()



    # Only used for reference quasars

    """
    for j in range(42):
        for k in range(42):
            if shifted[k, j] > threshold:
                #print("Over")
                check = checkNoise(j, k, 21, 21, residue)
                if check == True:
                    #print("Over True")
                    counter += 1

                if counter > 10:
                    return
    """

    try:
        #fits.writeto('/data/marvels/billzhu/2175 PSF Cut/' + str(index) + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        #fits.writeto('/data/marvels/billzhu/2175 PSF Subtract/' + str(index) + '_SUB.fit', residue, hdulist[0].header, clobber = True)

        fits.writeto('/data/marvels/billzhu/Reference PSF Cut/0.37 - 0.55/' + color + '/' + str(index) + '_PSF.fit', final_fit, hdulist[0].header, overwrite = True)
        fits.writeto('/data/marvels/billzhu/Reference PSF Subtract/0.37 - 0.55/' + color + '/' + str(index) + '_SUB.fit', residue, hdulist[0].header, overwrite = True)
        #fits.writeto('/data/marvels/billzhu/MG II PSF Cut/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        #fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' + color + '/' + str(i) + '-' + color + '_SUB.fit', residue, hdulist[0].header, clobber = True)

        #fits.writeto('Reference Subtract/' + str(i) + '_SUB.fit', residue, hdulist[0].header, clobber = True)
        #fits.writeto('Reference PSF Cut/' + str(i) + '_PSF.fit', final_fit, hdulist[0].header, clobber = True)
        print('\n')

        print("DONE TO BOTTOM")
    except:
        print('HEADER IS CORRUPT')
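The core of this example is an empirical PSF built from a stack of star cutouts via incremental PCA; a self-contained sketch of that idea on synthetic data (shapes and counts are assumptions, not the pipeline's values):

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.default_rng(0)
yy, xx = np.mgrid[0:101, 0:101]
psf = np.exp(-((xx - 50)**2 + (yy - 50)**2) / (2 * 3.0**2))

# 20 noisy star cutouts, flattened to rows of length 101*101 = 10201
stack = np.array([psf.ravel() * rng.uniform(0.5, 2.0) +
                  rng.normal(0., 0.01, 10201) for _ in range(20)])

mean_vector = stack.mean(axis=0)
stack -= mean_vector  # center the data, as the example does

ipca = IncrementalPCA(n_components=5)
ipca.fit(stack)
comps = ipca.components_.T  # shape (10201, 5)

# project a target cutout (the quasar, in the example) onto the components
target = psf.ravel() + rng.normal(0., 0.01, 10201) - mean_vector
coeff = target @ comps
model = (comps @ coeff + mean_vector).reshape(101, 101)  # PSF model to subtract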
Example #7
def begin(i):
    chunk_size = 10

    #for j in range(3):
    #    num, x, y = reader.readline().split()

    num, x, y = reader.readline().split()
    x = int(x)
    y = int(y)
    print("%f   %f" % (x, y))
    print(str(i))
    
    filename = 'Test Data Extract/' + str(i) + '.fit'
    hdulist = fits.open(filename)
    half = 300
    scidata = hdulist[0].data.astype(float)
    mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, maxiters=5)
    
    if x + half + chunk_size > 2048:
        filler = np.array([float(median)] * len(scidata))
        for j in range(10):
            scidata = np.insert(scidata, len(scidata[0]), filler, 1)

    if x - half - chunk_size < 0:
        x += chunk_size
        filler = np.array([float(median)] * len(scidata))
        for j in range(10):
            scidata = np.insert(scidata, 0, filler, 1)

    if y + half + chunk_size > 1489:
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(10):
            scidata = np.insert(scidata, len(scidata), filler, 0)

    if y - half - chunk_size < 0:
        y += chunk_size
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(10):
            scidata = np.insert(scidata, 0, filler, 0)

    scidata -= median    
    psfindex = -1
    quasar = 0
    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm = 2., threshold = 3.*bkg_sigma)
    sources = daofind(scidata[y - 10 : y + 10, x - 10 : x + 10])

    sources['xcentroid'] += x - 10
    sources['ycentroid'] += y - 10
    
    FWHM = np.empty([len(sources)])
    column = Column(FWHM, name = 'FWHM')
    sources.add_column(column)

    # Find the quasar and calculate its FWHM
    
    for j in range(len(sources)):
        if abs(round(sources['xcentroid'][j]) - x) < 2 and abs(round(sources['ycentroid'][j]) - y) < 2:
            quasar = sources[j]
            width = int(np.sqrt(sources['npix'][j]))
            #print("%d   %d   %f   %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
            data = scidata[int(sources['ycentroid'][j] - width/2) : int(sources['ycentroid'][j] + width/2), int(sources['xcentroid'][j] - width/2) : int(sources['xcentroid'][j] + width/2)]
            gauss = 0
            
            if(np.ma.count(data) >= 7):
                gauss = photutils.fit_2dgaussian(data, mask = None)
            
            fwhm = 0
            if gauss != 0:
                fwhm = 2*np.sqrt(2*np.log(2))*np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)
                quasar['FWHM'] = fwhm
                #print(quasar['FWHM'])
                break

    ztot = 10000
    print(quasar)

    if quasar == 0:
        return


    yl = y - half
    yu = y + half
    xl = x - half
    xu = x + half

    if yl < 0:
        yl = 0
    if yu > len(scidata):
        yu = len(scidata)
    if xl < 0:
        xl = 0
    if xu > len(scidata[0]):
        xu = len(scidata[0])
    image = scidata[yl : yu, xl : xu]

    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm = quasar['FWHM'], threshold=3.*bkg_sigma, roundlo = -0.15, roundhi = 0.15)
    sources = daofind.find_stars(image)


    # Shift the source coordinates to the actual image coordinates

    sources['xcentroid'] += xl
    sources['ycentroid'] += yl

    #print(sources)

    # If no sources found, go to next iteration with larger dimensions
    if len(sources) <= 0:
        return

    # Calculate the FWHM of each identified source, and append them into a column that is added to the source table
    # Splices the data array for the quasar, with the alleged centroid at the center
    # Fits a 2D Gaussian curve onto the array, and uses the relation between sigma and fwhm

    FWHM = []
    for j in range(len(sources)):
        width = int(np.sqrt(sources['npix'][j]))
        #print("%d   %d   %f   %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
        data = scidata[int(sources['ycentroid'][j] - width/2) - 1 : int(sources['ycentroid'][j] + width/2) + 1, int(sources['xcentroid'][j] - width/2) - 1 : int(sources['xcentroid'][j] + width/2) + 1]
        gauss = 0

        if(np.ma.count(data) >= 7):
            gauss = photutils.fit_2dgaussian(data, mask = None)

        fwhm = 0
        if gauss != 0:
            fwhm = 2*np.sqrt(2*np.log(2))*np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)
        FWHM.append(fwhm)

    column = Column(FWHM, name = 'FWHM')
    sources.add_column(column)


    def distance(x, y):
        return math.sqrt((x - quasar['xcentroid']) ** 2 + (y - quasar['ycentroid']) ** 2)

    def fwhmdiff(fwhm):
        return (fwhm - quasar['FWHM'])

    def lumdiff(flux):
        return (flux - quasar['flux'])

    def xdiff(x):
        return quasar['xcentroid'] - x

    def ydiff(y):
        return quasar['ycentroid'] - y


    davg = 0
    favg = 0
    lavg = 0
    distset = []
    fwhmset = []
    lumset = []

    # Standardize the sources and calculate the best source by combining distance, fwhm difference, and peak flux difference

    for j in range(len(sources)):
        d = distance(sources['xcentroid'][j], sources['ycentroid'][j])
        distset.append(d)
        davg += d
        f = fwhmdiff(sources['FWHM'][j])
        fwhmset.append(f)
        favg += f
        l = lumdiff(sources['flux'][j])
        lumset.append(l)
        lavg += l

    davg /= len(sources)
    favg /= len(sources)
    lavg /= len(sources)
    dstd = np.std(distset)
    fstd = np.std(fwhmset)
    lstd = np.std(lumset)

    # Weight of the three variables places FWHM difference as most important, flux difference as next important, and distance as least important

    psflist = []
    indexlist = []
    zlist = []

    for j in range(len(sources)):
        z = 1/2 * abs(distance(sources['xcentroid'][j], sources['ycentroid'][j])/(dstd)) + 4/3 * abs(fwhmdiff(sources['FWHM'][j])/(fstd)) + 2/3 * abs(lumdiff(sources['flux'][j])/(lstd))
        #print(str(z) + " " + str(abs(sources['peak'][j] - quasar['peak'])))
        if z > 0 and sources['peak'][j] > 0.7 * quasar['peak'] and inbounds(sources['xcentroid'][j], sources['ycentroid'][j]) and math.sqrt((sources['xcentroid'][j] - quasar['xcentroid'])**2 + (sources['ycentroid'][j] - quasar['ycentroid'])**2) > 1:
            #print(str(z))
            ztot = z
            psfindex = j

            if len(psflist) < 5 and z < 4:
                psflist.append(tuplet(j, z))
                indexlist.append(j)
                zlist.append(z)
            else:
                if len(psflist) >= 5 and z < max(zlist) and z < 4:
                    # replace the current worst (largest-z) candidate; the original
                    # list.remove() logic here could not have worked as written
                    worst = zlist.index(max(zlist))
                    psflist.pop(worst)
                    indexlist.pop(worst)
                    zlist.pop(worst)
                    psflist.append(tuplet(j, z))  # tuplet: helper class defined elsewhere; the original had tuple(j, z)
                    indexlist.append(j)
                    zlist.append(z)

                

    if len(psflist) == 0:
        return


    stdev = 10000000
    residue = 0
    cutout = 0

    print(indexlist)
    for j in indexlist:       
        psf = sources[j]
        chunk_size = 10

        # Find the actual centroid of the PSF using 2D Gaussian Fitting, since DAOStarFinder is inaccurate

        print(psf)
        """
        preshift = scidata[int(psf['ycentroid'] - chunk_size) : int(psf['ycentroid'] + chunk_size + 1), int(psf['xcentroid'] - chunk_size) : int(psf['xcentroid'] + chunk_size + 1)]
        mean, med2, std = sigma_clipped_stats(preshift, sigma=3.0, iters=5)
        mask = [[False for x in range(int(chunk_size*2) + 1)] for y in range(int(chunk_size*2) + 1)] 
        for j in range(0, int(chunk_size*2 + 1)):
            for k in range(0, int(chunk_size*2 + 1)):
                if scidata[int(psf['ycentroid'] + k - chunk_size), int(psf['xcentroid'] + j - chunk_size)] < med2:
                    mask[j][k] = True

        #pXc, pYc = centroid_2dg(preshift, mask = mask, error = None)
        #pXc += int(psf['xcentroid']) - chunk_size
        #pYc += int(psf['ycentroid']) - chunk_size
        """
        
        pXc = psf['xcentroid']
        pYc = psf['ycentroid']
        print("%f,  %f" % (pXc, pYc))
        xr = np.arange(int(pXc) - chunk_size, int(pXc) + chunk_size + 1)
        yr = np.arange(int(pYc) - chunk_size, int(pYc) + chunk_size + 1)

        
        preshift = scidata[int(pYc) - chunk_size : int(pYc) + chunk_size + 1, int(pXc) - chunk_size : int(pXc) + chunk_size + 1]

        shifted = []
        spline = interpolate.interp2d(xr, yr, preshift)
        xrf = np.arange(pXc - chunk_size, pXc + chunk_size + 1, 0.5)
        yrf = np.arange(pYc - chunk_size, pYc + chunk_size + 1, 0.5)

        if len(xrf) > 42:
            xrf = xrf[:-1].copy()
        if len(yrf) > 42:
            yrf = yrf[:-1].copy()

        shifted = spline(xrf, yrf)


        qsocut = fits.open('c:/Research Project/Final Quasar Cut/' + str(i) + '_QSO.fit')
        qsodata = qsocut[0].data.astype(float)
        qsodata -= median
        qsodata /= qsodata[21, 21]  #quasar['peak']
        shifted /= shifted[21, 21]  #psf['peak']

        res = qsodata - shifted

        mean, med, std = sigma_clipped_stats(res, sigma=3.0, maxiters=5)

        if std < stdev:
            residue = res
            stdev = std
            cutout = shifted

        
        print(std)
    
    fits.writeto('Test PSF Cut/' + str(i) + '_PSF.fit', cutout, hdulist[0].header, overwrite = True)
    fits.writeto('Test PSF Subtract/' + str(i) + '_1.fit', residue, hdulist[0].header, overwrite = True)
    #print(stdev)
    print('\n')

    

    PSF.append(psf)
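These examples recenter cutouts with scipy.interpolate.interp2d, which has been removed from recent SciPy releases; a sketch of the same subpixel shift with RectBivariateSpline (synthetic star, assumed sizes):

import numpy as np
from scipy.interpolate import RectBivariateSpline

yy, xx = np.mgrid[0:21, 0:21]
star = np.exp(-((xx - 10.3)**2 + (yy - 9.7)**2) / (2 * 2.0**2))  # off-center source

# spline over the integer pixel grid, evaluated on a grid centered on the centroid
spline = RectBivariateSpline(np.arange(21), np.arange(21), star)
yc, xc = 9.7, 10.3
recentred = spline(np.linspace(yc - 8, yc + 8, 17),
                   np.linspace(xc - 8, xc + 8, 17))  # 17x17, source centered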
Example #8
def cube_detect_badfr_ellipticity(array,
                                  fwhm,
                                  crop_size=30,
                                  roundlo=-0.2,
                                  roundhi=0.2,
                                  plot=True,
                                  verbose=True):
    """ Returns the list of bad frames  from a cube by measuring the PSF 
    ellipticity of the central source. Should be applied on a recentered cube.
    
    Parameters
    ----------
    array : numpy ndarray 
        Input 3d array, cube.
    fwhm : float
        FWHM size in pixels.
    crop_size : int, optional
        Size in pixels of the square subframe to be analyzed.
    roundlo, roundhi : float, optional
        Lower and higher bounds for the ellipticity. See ``Notes`` below for
        details.
    plot : bool, optional
        If true it plots the central PSF roundness for each frame.
    verbose : bool, optional
        Whether to print to stdout or not.
        
    Returns
    -------
    good_index_list : numpy ndarray
        1d array of good indices.
    bad_index_list : numpy ndarray
        1d array of bad frames indices.
    
    Notes
    -----
    From photutils.DAOStarFinder documentation:
    DAOFIND calculates the object roundness using two methods. The 'roundlo'
    and 'roundhi' bounds are applied to both measures of roundness. The first
    method ('roundness1'; called 'SROUND' in DAOFIND) is based on the source 
    symmetry and is the ratio of a measure of the object's bilateral (2-fold) 
    to four-fold symmetry. The second roundness statistic ('roundness2'; called 
    'GROUND' in DAOFIND) measures the ratio of the difference in the height of
    the best fitting Gaussian function in x minus the best fitting Gaussian 
    function in y, divided by the average of the best fitting Gaussian 
    functions in x and y. A circular source will have a zero roundness. A source
    extended in x or y will have a negative or positive roundness, respectively.
    
    """
    from .cosmetics import cube_crop_frames

    check_array(array, 3, msg='array')

    if verbose:
        start_time = time_ini()

    array = cube_crop_frames(array, crop_size, verbose=False)
    n = array.shape[0]
    goodfr = []
    badfr = []
    roundness1 = []
    roundness2 = []
    for i in range(n):
        ff_clipped = sigma_clip(array[i], sigma=3, maxiters=None)
        thr = ff_clipped.max()
        DAOFIND = DAOStarFinder(threshold=thr, fwhm=fwhm)
        tbl = DAOFIND.find_stars(array[i])
        table_mask = (tbl['peak'] == tbl['peak'].max())
        tbl = tbl[table_mask]
        roun1 = tbl['roundness1'][0]
        roun2 = tbl['roundness2'][0]
        roundness1.append(roun1)
        roundness2.append(roun2)
        # we check the roundness
        if roundhi > roun1 > roundlo and roundhi > roun2 > roundlo:
            goodfr.append(i)
        else:
            badfr.append(i)

    bad_index_list = np.array(badfr)
    good_index_list = np.array(goodfr)

    if plot:
        _, ax = plt.subplots(figsize=vip_figsize)
        x = range(len(roundness1))
        if n > 5000:
            marker = ','
        else:
            marker = 'o'
        ax.plot(x,
                roundness1,
                '-',
                alpha=0.6,
                color='#1f77b4',
                label='roundness1')
        ax.plot(x, roundness1, marker=marker, alpha=0.4, color='#1f77b4')
        ax.plot(x,
                roundness2,
                '-',
                alpha=0.6,
                color='#9467bd',
                label='roundness2')
        ax.plot(x, roundness2, marker=marker, alpha=0.4, color='#9467bd')
        ax.hlines(roundlo,
                  xmin=-1,
                  xmax=n + 1,
                  lw=2,
                  colors='#ff7f0e',
                  linestyles='dashed',
                  label='roundlo',
                  alpha=0.6)
        ax.hlines(roundhi,
                  xmin=-1,
                  xmax=n + 1,
                  lw=2,
                  colors='#ff7f0e',
                  linestyles='dashdot',
                  label='roundhi',
                  alpha=0.6)
        plt.xlabel('Frame number')
        plt.ylabel('Roundness')
        plt.xlim(xmin=-1, xmax=n + 1)
        plt.legend(fancybox=True, framealpha=0.5, loc='best')
        plt.grid(True, alpha=0.2)

    if verbose:
        bad = len(bad_index_list)
        percent_bad_frames = (bad * 100) / n
        msg1 = "Done detecting bad frames from cube: {} out of {} ({:.3}%)"
        print(msg1.format(bad, n, percent_bad_frames))
        timing(start_time)

    return good_index_list, bad_index_list
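A hedged usage sketch (synthetic cube; in practice this is applied to a recentered cube, e.g. within VIP):

import numpy as np

yy, xx = np.mgrid[0:40, 0:40]
cube = np.array([np.exp(-((xx - 20)**2 + (yy - 20)**2) / (2 * 2.0**2))
                 for _ in range(10)])
cube[7] = np.exp(-((xx - 20)**2 / (2 * 3.5**2) + (yy - 20)**2 / (2 * 1.5**2)))
cube += np.random.normal(0., 1e-3, cube.shape)

good, bad = cube_detect_badfr_ellipticity(cube, fwhm=4.7, crop_size=31,
                                          plot=False, verbose=False)
# frame 7, with its elongated PSF, should land in `bad`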
Example #9
def begin(index):
    chunk_size = 50
    i = int(index.split('-')[0])
    color = index.split('-')[1]

    #line = linecache.getline('Full Data.txt', i)
    #num, x, y = linecache.getline('Pixel Coordinates 50000.txt', i).split()
    #x = int(x)
    #y = int(y)
    #print("%f   %f" % (x, y))

    try:
        #filename = 'Test Data Extract/' + str(i) + '.fit'
        filename = '/data/marvels/billzhu/MG II Dataset/0.37 - 0.55/' + color + '/' + str(
            i) + '-' + color + '.fit'

        #filename = 'Reference Dataset/' + str(i) + '_REF.fit'
        hdulist = fits.open(filename)

        qlist = fits.open(
            '/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' + color +
            '/' + str(i) + '-' + color + '_MG.fit')

        #qlist = fits.open('Reference Quasar Cut/' + str(i) + '_REF.fit')
        x = qlist[0].header['XCOORD']
        y = qlist[0].header['YCOORD']
    except:
        print("No coordinates")
        return

    #half = 500
    scidata = hdulist[0].data.astype(float)
    mean, median, std = sigma_clipped_stats(scidata, sigma=3.0, maxiters=5)
    change_top = False

    if x + chunk_size > 2048:
        change_top = True
        filler = np.array([float(median)] * len(scidata))
        for j in range(10):
            scidata = np.insert(scidata, len(scidata[0]), filler, 1)

    if x - chunk_size < 0:
        x += chunk_size
        filler = np.array([float(median)] * len(scidata))
        for j in range(10):
            scidata = np.insert(scidata, 0, filler, 1)

    if y + chunk_size > 1489:
        change_top = True
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(10):
            scidata = np.insert(scidata, len(scidata), filler, 0)

    if y - chunk_size < 0:
        y += chunk_size
        filler = np.array([float(median)] * len(scidata[0]))
        for j in range(10):
            scidata = np.insert(scidata, 0, filler, 0)

    scidata -= median
    psfindex = -1
    quasar = 0
    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm=2., threshold=5. * bkg_sigma)
    sources = daofind(scidata[int(y - 10):int(y + 10),
                              int(x - 10):int(x + 10)])

    # Update coordinates of the sources

    sources['xcentroid'] += x - 10
    sources['ycentroid'] += y - 10

    # Create new column that contains the FWHM of each source for comparison later

    FWHM = np.empty([len(sources)])
    column = Column(FWHM, name='FWHM')
    sources.add_column(column)

    # Find the quasar and calculate its FWHM

    qsigma = 0

    for j in range(len(sources)):
        if abs(round(sources['xcentroid'][j]) -
               x) < 2 and abs(round(sources['ycentroid'][j]) - y) < 2:
            quasar = sources[j]
            width = int(np.sqrt(sources['npix'][j]))
            #print("%d   %d   %f   %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
            data = scidata[
                int(sources['ycentroid'][j] -
                    width / 2):int(sources['ycentroid'][j] + width / 2) + 1,
                int(sources['xcentroid'][j] -
                    width / 2):int(sources['xcentroid'][j] + width / 2) + 1]
            """
            plt.imshow(data, origin='lower', interpolation='nearest', cmap='viridis')
            plt.show()
            plt.pause(3)
            """

            gauss = 0

            if (np.ma.count(data) >= 7):
                gauss = photutils.fit_2dgaussian(data, mask=None)

            fwhm = 0
            if gauss != 0:
                fwhm = 2 * np.sqrt(2 * np.log(2)) * gauss.x_stddev  # FWHM = 2*sqrt(2 ln 2) * sigma_x
                quasar['FWHM'] = fwhm
                #qsigma = np.sqrt(gauss.x_stddev**2 + gauss.y_stddev**2)
                #print(quasar['FWHM'])
                break

    ztot = 10000
    print(quasar)

    # If no quasar is found, the field image is deemed corrupt and not used

    if quasar == 0:
        return

    # Define cutout image limits, adjusted to field image boundaries as necessary i.e. < 0 or > max x/y values
    """
    yl = y - half
    yu = y + half
    xl = x - half
    xu = x + half

    if yl < 0:
        yl = 0
    if yu > len(scidata):
        yu = len(scidata)
    if xl < 0:
        xl = 0
    if xu > len(scidata[0]):
        xu = len(scidata[0])
    image = scidata[yl : yu, xl : xu]
    """
    bkg_sigma = mad_std(scidata)
    daofind = DAOStarFinder(fwhm=0.8 * quasar['FWHM'],
                            threshold=5. * bkg_sigma,
                            roundlo=-0.15,
                            roundhi=0.15)
    sources = daofind.find_stars(scidata)

    # Shift the source coordinates to the actual image coordinates

    #sources['xcentroid'] += xl
    #sources['ycentroid'] += yl

    # If no sources found, skip iteration

    if len(sources) <= 0:
        return

    print(len(sources))

    # Calculate the FWHM of each identified source, and append them into a column that is added to the source table
    # Splices the data array for the quasar, with the alleged centroid at the center
    # Fits a 2D Gaussian curve onto the array, and uses the relation between sigma and fwhm

    FWHM = []
    stddev_list = []

    for j in range(len(sources)):
        width = int(np.sqrt(sources['npix'][j]))
        #print("%d   %d   %f   %f" % (j, width, sources['xcentroid'][j], sources['ycentroid'][j]))
        data = scidata[
            int(sources['ycentroid'][j] -
                width / 2):int(sources['ycentroid'][j] + width / 2) + 1,
            int(sources['xcentroid'][j] -
                width / 2):int(sources['xcentroid'][j] + width / 2) + 1]
        """
        plt.imshow(data, origin='lower', interpolation='nearest', cmap='viridis')
        plt.show()
        plt.pause(3)
        """

        gauss = 0

        if (np.ma.count(data) >= 7):
            gauss = photutils.fit_2dgaussian(data, mask=None)

        fwhm = 0
        if gauss != 0:
            fwhm = 2 * np.sqrt(2 * np.log(2)) * gauss.x_stddev  # FWHM from sigma_x
        FWHM.append(fwhm)

        if gauss == 0:
            stddev_list.append(0)
        else:
            stddev_list.append(gauss.x_stddev)

    column = Column(FWHM, name='FWHM')
    sources.add_column(column)
    column = Column(stddev_list, name='stddev')
    sources.add_column(column)

    #print(sources)

    # Helper methods for determining differences between PSF source and QSO

    def distance1(x, y):
        return math.sqrt((x - quasar['xcentroid'])**2 +
                         (y - quasar['ycentroid'])**2)

    def fwhmdiff(fwhm):
        return (fwhm - quasar['FWHM'])

    def lumdiff(flux):
        return (flux - quasar['flux'])

    def xdiff(x):
        return quasar['xcentroid'] - x

    def ydiff(y):
        return quasar['ycentroid'] - y

    distset = []
    fwhmset = []
    lumset = []

    # Standardize the sources and calculate the best source by combining distance, fwhm difference, and peak flux difference

    for j in range(len(sources)):
        d = distance1(sources['xcentroid'][j], sources['ycentroid'][j])
        distset.append(d)
        f = fwhmdiff(sources['FWHM'][j])
        fwhmset.append(f)
        l = lumdiff(sources['flux'][j])
        lumset.append(l)

    dstd = np.std(distset)
    fstd = np.std(fwhmset)
    lstd = np.std(lumset)

    # Weight of the three variables places FWHM difference as most important, flux difference as next important, and distance as least important

    psflist = []
    indexlist = []
    zlist = []

    for j in range(len(sources)):
        z = 1 / 3 * abs(
            distance1(sources['xcentroid'][j], sources['ycentroid'][j]) /
            (dstd)) + 2 * abs(fwhmdiff(sources['FWHM'][j]) /
                              (fstd)) + 2 / 3 * abs(
                                  lumdiff(sources['flux'][j]) / (lstd))
        """
        tempcut = scidata[int(sources['ycentroid'][j]) - chunk_size : int(sources['ycentroid'][j]) + chunk_size, int(sources['xcentroid'][j]) - chunk_size : int(sources['xcentroid'][j]) + chunk_size]


        s1 = daofind.find_stars(tempcut)

        if len(s1) > 1:
            print(len(s1))
            continue
        """

        #print(str(z))
        if z > 0 and math.sqrt(
            (sources['xcentroid'][j] - quasar['xcentroid'])**2 +
            (sources['ycentroid'][j] - quasar['ycentroid'])**2
        ) > 1 and sources['xcentroid'][j] - chunk_size >= 0 and sources[
                'ycentroid'][j] - chunk_size >= 0 and sources['xcentroid'][
                    j] + chunk_size < 2048 and sources['ycentroid'][
                        j] + chunk_size < 1489:
            #print(str(z))
            ztot = z
            psfindex = j

            # If the list contains less than 5 suitable sources that satisfy all the conditions, then directly add to list
            # If the list already contains 5 sources, then check if the current source has a lower Z value than the source with the largest Z value

            if len(psflist) < 9 and z < 3:
                #print(True)
                psflist.append(tuplet(j, z))
                indexlist.append(j)
                zlist.append(z)
            else:
                if len(psflist) >= 9 and z < max(zlist) and z < 3:
                    # replace the current worst (largest-z) candidate; the original
                    # list.remove() logic here could not have worked as written
                    worst = zlist.index(max(zlist))
                    psflist.pop(worst)
                    indexlist.pop(worst)
                    zlist.pop(worst)
                    psflist.append(tuplet(j, z))
                    indexlist.append(j)
                    zlist.append(z)

    # If no suitable PSF sources are found that satisfy the boundaries, then the file is not used

    if len(psflist) == 0:
        print("FAILURE")
        return

    PSFlist = []
    FWHMlist = []
    psf = 0

    best_fwhmdiff = 10000000  # renamed: the original reused the name fwhmdiff, shadowing the helper above
    for j in indexlist:
        if abs(sources['FWHM'][j] - quasar['FWHM']) < best_fwhmdiff:
            psf = sources[j]
            best_fwhmdiff = abs(sources['FWHM'][j] - quasar['FWHM'])
    """
        PSFlist.append(sources[j])
        FWHMlist.append(sources['FWHM'][j])


    medFWHM = 0
    if len(FWHMlist) % 2 == 0:
        medFWHM = FWHMlist[len(FWHMlist) // 2 - 1]
    else:
        medFWHM = np.median(FWHMlist)


    for k in PSFlist:
        if k['FWHM'] == medFWHM:
            print(True)
            psf = k
            break

    """

    # Interpolates the PSF source to its actual centroid, upsampling it 2x

    #try:
    chunk_size = 50

    print(psf)

    pXc = psf['xcentroid']
    pYc = psf['ycentroid']
    print("%f,  %f" % (pXc, pYc))
    xr = np.arange(int(pXc) - chunk_size - 5, int(pXc) + chunk_size + 6)
    yr = np.arange(int(pYc) - chunk_size - 5, int(pYc) + chunk_size + 6)

    preshift = scidata[int(pYc) - chunk_size - 5:int(pYc) + chunk_size + 6,
                       int(pXc) - chunk_size - 5:int(pXc) + chunk_size + 6]
    print(np.shape(preshift))

    spline = interpolate.interp2d(xr, yr, preshift)
    xrf = np.arange(pXc - chunk_size, pXc + chunk_size + 1, 1)
    yrf = np.arange(pYc - chunk_size, pYc + chunk_size + 1, 1)

    if len(xrf) > 101:
        xrf = xrf[:-1].copy()
    if len(yrf) > 101:
        yrf = yrf[:-1].copy()

    shifted = spline(xrf, yrf)
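    # Note: scipy.interpolate.interp2d is deprecated and removed in recent
    # SciPy releases; a minimal equivalent sketch with RectBivariateSpline
    # (rows are y, columns are x, matching the preshift cutout above):
    # spline = interpolate.RectBivariateSpline(yr, xr, preshift)
    # shifted = spline(yrf, xrf)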

    mean1, median1, std1 = sigma_clipped_stats(shifted, sigma=3.0, iters=5)
    shifted = checkOutter(shifted, mean1, std1)

    #qsocut = fits.open('c:/Research Project/Final Quasar Cut/' + str(i) + '_QSO.fit')
    qsocut = fits.open('/data/marvels/billzhu/MG II Quasar Cut/0.37 - 0.55/' +
                       color + '/' + str(i) + '-' + color + '_MG.fit')
    #qsocut = fits.open('c:/Research Project/Reference Quasar Cut/' + str(i) + '_REF.fit')
    qsodata = qsocut[0].data.astype(float)
    qsodata -= median

    #qsocount = photoncount(21, 21, qsigma, qsodata)
    #qsodata /= qsocount  #quasar['peak']

    #psfcount = photoncount(21, 21, psf['stddev'], shifted)
    #shifted /= np.max(shifted)  #psf['peak']
    #shifted *= np.max(qsodata)

    # FWHM = 2*sqrt(2*ln 2) * sigma; x_stddev is already a standard
    # deviation, so no extra square root is applied
    gauss_fit = photutils.fit_2dgaussian(shifted[40:61, 40:61], mask=None)
    fit_fwhm = 2 * np.sqrt(2 * np.log(2)) * abs(gauss_fit.x_stddev.value)
    #print(fit_fwhm)

    print("%f, %f" % (quasar['FWHM'], fit_fwhm))
    ffwhm = max(quasar['FWHM'], fit_fwhm)
    ffphoton_5sig = photonCount(50, 50, ffwhm, shifted)
    """
    qsophoton_4sig = photonCount(50, 50, ffwhm, qsodata)
    for j in range(len(shifted)):
        for k in range(len(shifted)):
            if distance(50, 50, j, k) < 4 * ffwhm:
                shifted[j][k] /= ffphoton_4sig
                shifted[j][k] *= qsophoton_4sig

    """

    line_data = linecache.getline('Full Data.txt', i).split()
    gmag = float(line_data[6])
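    # FLUX20 (the counts of a 20th-magnitude object in SDSS frame headers)
    # sets the photometric zero point: 10**(gmag / -2.5) * 10**8 * FLUX20
    # == FLUX20 * 10**((20 - gmag) / 2.5), the expected counts at magnitude gmag.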

    try:
        multiplier = 10**(gmag / (-2.5)) * 10**8 * hdulist[0].header['FLUX20']
        shifted /= ffphoton_5sig
        shifted *= multiplier
    except Exception:
        # FLUX20 may be missing from the header; skip this field if scaling fails
        #final_fit *= qsodata[50, 50]
        return

    residue = qsodata - shifted
    """
    mean, med, std = sigma_clipped_stats(residue, sigma=3.0, iters=5)

    check = False
    print("%f, %f" % (med, std))
    threshold = mean + 3 * std
    for j in range(42):
        for k in range(42):
            if shifted[k, j] > threshold:
                #print("Over")
                check = checkNoise(j, k, 21, 21, residue)
                if check == True:
                    #print("Over True")
                    return

    """

    #fits.writeto('Reference PSF Cut/' + str(i) + '_PSF.fit', shifted, hdulist[0].header, clobber = True)
    #fits.writeto('Test PSF Subtract/' + str(i) + '_1.fit', residue, hdulist[0].header, clobber = True)
    fits.writeto('/data/marvels/billzhu/MG II PSF Cut/0.37 - 0.55/' + color +
                 '/' + str(i) + '-' + color + '_PSF.fit',
                 shifted,
                 hdulist[0].header,
                 overwrite=True)
    fits.writeto('/data/marvels/billzhu/MG II PSF Subtract/0.37 - 0.55/' +
                 color + '/' + str(i) + '-' + color + '_SUB.fit',
                 residue,
                 hdulist[0].header,
                 overwrite=True)
    #fits.writeto('Reference Subtract/' + str(i) + '_SUB.fit', residue, hdulist[0].header, clobber = True)
    print('\n')

    #except:
    #    return

    return
Ejemplo n.º 10
0
def extract_sources(img,
                    dqmask=None,
                    fwhm=3.0,
                    threshold=None,
                    source_box=7,
                    classify=True,
                    centering_mode="starfind",
                    nlargest=None,
                    outroot=None,
                    plot=False,
                    vmax=None,
                    deblend=False):
    """Use photutils to find sources in image based on segmentation.

    Parameters
    ----------
    img : ndarray
        Numpy array of the science extension from the observation's FITS file.
    dqmask : ndarray
        Bitmask which identifies whether a pixel should be used (1) in source
        identification or not (0). If provided, this mask will be applied to the
        input array prior to source identification.
    fwhm : float
        Full-width half-maximum (fwhm) of the PSF in pixels.
    threshold : float or None
        Value from the image which serves as the limit for determining sources.
        If None, compute a default value of (background+5*rms(background)).
        If threshold < 0.0, use absolute value as scaling factor for default value.
    source_box : int
        Size of box (in pixels) which defines the minimum size of a valid source.
    classify : bool
        Specify whether or not to apply classification based on invariant moments
        of each source to determine whether or not a source is likely to be a
        cosmic-ray, and not include those sources in the final catalog.
    centering_mode : str
        "segmentaton" or "starfind"
        Algorithm to use when computing the positions of the detected sources.
        Centering will only take place after `threshold` has been determined, and
        sources are identified using segmentation.  Centering using `segmentation`
        will rely on `photutils.segmentation.source_properties` to generate the
        properties for the source catalog.  Centering using `starfind` will use
        `photutils.IRAFStarFinder` to characterize each source in the catalog.
    nlargest : int, None
        Number of largest (brightest) sources in each chip/array to measure
        when using 'starfind' mode.
    outroot : str, optional
        If specified, write the catalog of sources to a file with this rootname.
    plot : bool, optional
        Specify whether or not to create a plot of the sources on a view of the image.
    vmax : float, optional
        If plotting the sources, scale the image to this maximum value.
    deblend : bool, optional
        Specify whether or not to apply photutils deblending algorithm when
        evaluating each of the identified segments (sources) from the chip.
    """
    # apply any provided dqmask for segmentation only
    if dqmask is not None:
        imgarr = img.copy()
        imgarr[dqmask] = 0
    else:
        imgarr = img

    bkg_estimator = MedianBackground()
    bkg = None

    exclude_percentiles = [10, 25, 50, 75]
    for percentile in exclude_percentiles:
        try:
            bkg = Background2D(imgarr, (50, 50),
                               filter_size=(3, 3),
                               bkg_estimator=bkg_estimator,
                               exclude_percentile=percentile)
        except Exception:
            bkg = None
            continue

        if bkg is not None:
            # If it succeeds, stop and use that value
            bkg_rms = (5. * bkg.background_rms)
            bkg_rms_mean = bkg.background.mean() + 5. * bkg_rms.std()
            default_threshold = bkg.background + bkg_rms
            if threshold is None:
                threshold = default_threshold
            elif threshold < 0:
                threshold = -1 * threshold * default_threshold
                log.info("{} based on {}".format(threshold.max(),
                                                 default_threshold.max()))
                bkg_rms_mean = threshold.max()
            else:
                bkg_rms_mean = 3. * threshold

            if bkg_rms_mean < 0:
                bkg_rms_mean = 0.
            break

    # If Background2D does not work at all, define default scalar values for
    # the background to be used in source identification
    if bkg is None:
        bkg_rms_mean = max(0.01, imgarr.min())
        bkg_rms = bkg_rms_mean * 5

    sigma = fwhm * gaussian_fwhm_to_sigma
    kernel = Gaussian2DKernel(sigma, x_size=source_box, y_size=source_box)
    kernel.normalize()
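    # gaussian_fwhm_to_sigma = 1 / (2 * sqrt(2 * ln 2)) ~= 0.42466, so the
    # smoothing kernel width matches the expected PSF FWHM in pixels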
    segm = detect_sources(imgarr,
                          threshold,
                          npixels=source_box,
                          filter_kernel=kernel)
    # photutils >= 0.7: segm=None; photutils < 0.7: segm.nlabels=0
    if segm is None or segm.nlabels == 0:
        log.info("No detected sources!")
        return None, None

    if deblend:
        segm = deblend_sources(imgarr,
                               segm,
                               npixels=5,
                               filter_kernel=kernel,
                               nlevels=16,
                               contrast=0.01)
    # If classify is turned on, it should modify the segmentation map
    if classify:
        cat = source_properties(imgarr, segm)
        # Remove likely cosmic-rays based on central_moments classification
        bad_srcs = np.where(classify_sources(cat) == 0)[0] + 1

        if LooseVersion(photutils.__version__) >= '0.7':
            segm.remove_labels(bad_srcs)
        else:
            # replicate the photutils >= 0.7 fast label-removal code for older versions
            segm.check_labels(bad_srcs)
            bad_srcs = np.atleast_1d(bad_srcs)
            if len(bad_srcs) != 0:
                idx = np.zeros(segm.max_label + 1, dtype=int)
                idx[segm.labels] = segm.labels
                idx[bad_srcs] = 0
                segm.data = idx[segm.data]

    # convert segm to mask for daofind
    if centering_mode == 'starfind':
        src_table = None
        # daofind = IRAFStarFinder(fwhm=fwhm, threshold=5.*bkg.background_rms_median)
        log.info("Setting up DAOStarFinder with: \n    fwhm={}  threshold={}".
                 format(fwhm, bkg_rms_mean))
        daofind = DAOStarFinder(fwhm=fwhm, threshold=bkg_rms_mean)

        # Identify nbrightest/largest sources
        if nlargest is not None:
            nlargest = min(nlargest, len(segm.labels))
            if LooseVersion(photutils.__version__) >= '0.7':
                large_labels = segm.labels[np.flip(np.argsort(
                    segm.areas))[:nlargest]]
            else:
                # for photutils < 0.7
                areas = np.array([
                    area for area in np.bincount(segm.data.ravel())[1:]
                    if area != 0
                ])
                large_labels = segm.labels[np.flip(
                    np.argsort(areas))[:nlargest]]

        log.info("Looking for sources in {} segments".format(len(segm.labels)))

        for segment in segm.segments:
            # check needed for photutils <= 0.6; it can be removed when
            # the drizzlepac depends on photutils >= 0.7
            if segment is None:
                continue
            if nlargest is not None and segment.label not in large_labels:
                continue  # Move on to the next segment
            # Get slice definition for the segment with this label
            seg_slice = segment.slices
            seg_yoffset = seg_slice[0].start
            seg_xoffset = seg_slice[1].start

            # Define raw data from this slice
            detection_img = img[seg_slice]
            # zero out any pixels which do not have this segments label
            detection_img[segm.data[seg_slice] == 0] = 0

            # Detect sources in this specific segment
            seg_table = daofind.find_stars(detection_img)

            # Pick out brightest source only
            if src_table is None and seg_table:
                # Initialize final master source list catalog
                src_table = Table(
                    names=seg_table.colnames,
                    dtype=[dt[1] for dt in seg_table.dtype.descr])

            if seg_table and seg_table['peak'].max() == detection_img.max():
                max_row = np.where(
                    seg_table['peak'] == seg_table['peak'].max())[0][0]
                # Add row for detected source to master catalog
                # apply offset to slice to convert positions into full-frame coordinates
                seg_table['xcentroid'] += seg_xoffset
                seg_table['ycentroid'] += seg_yoffset
                src_table.add_row(seg_table[max_row])

    else:
        cat = source_properties(img, segm)
        src_table = cat.to_table()
        # Make column names consistent with IRAFStarFinder column names
        src_table.rename_column('source_sum', 'flux')
        src_table.rename_column('source_sum_err', 'flux_err')

    if src_table is not None:
        log.info("Total Number of detected sources: {}".format(len(src_table)))
    else:
        log.info("No detected sources!")
        return None, None

    # Move 'id' column from first to last position
    # Makes it consistent for remainder of code
    cnames = src_table.colnames
    cnames.append(cnames[0])
    del cnames[0]
    tbl = src_table[cnames]

    if outroot:
        tbl['xcentroid'].info.format = '.10f'  # optional format
        tbl['ycentroid'].info.format = '.10f'
        tbl['flux'].info.format = '.10f'
        if not outroot.endswith('.cat'):
            outroot += '.cat'
        tbl.write(outroot, format='ascii.commented_header')
        log.info("Wrote source catalog: {}".format(outroot))

    if plot and plt is not None:
        norm = None
        if vmax is None:
            norm = ImageNormalize(stretch=SqrtStretch())
        fig, ax = plt.subplots(2, 2, figsize=(8, 8))
        ax[0][0].imshow(imgarr,
                        origin='lower',
                        cmap='Greys_r',
                        norm=norm,
                        vmax=vmax)
        ax[0][1].imshow(segm,
                        origin='lower',
                        cmap=segm.cmap(random_state=12345))
        ax[0][1].set_title('Segmentation Map')
        if bkg is not None:
            ax[1][0].imshow(bkg.background, origin='lower')
        if not isinstance(threshold, float):
            ax[1][1].imshow(threshold, origin='lower')
    return tbl, segm
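# A minimal usage sketch (hypothetical `sci` array and file names, not part of
# the original snippet):
#
# from astropy.io import fits
# sci = fits.getdata('image_flt.fits', ext=1)
# src_tbl, segmap = extract_sources(sci, fwhm=2.5, nlargest=100,
#                                   outroot='image_sources', deblend=True)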
Ejemplo n.º 11
0
                                                box=int(im1data.shape[1] / 2) -
                                                10,
                                                returnmed='y',
                                                returncube='y')

    fits.writeto(outdir + '\\' + name + 'NIRCshiftmed.fits',
                 shiftnodsubmed,
                 im1head,
                 overwrite=True)

    # run starfinder algorithm, make pictures
    thresh = 100
    fwhm = 3
    num = 10
    sf = DAOStarFinder(thresh, fwhm, brightest=num)
    table = sf.find_stars(shiftnodsubmed)
    print(table)
    # create apertures
    positions = np.transpose((table['xcentroid'], table['ycentroid']))
    apertures = CircularAperture(positions, r=3.)

    # Plot image with found stars circled
    norm = ImageNormalize(stretch=LogStretch())
    plt.figure()
    plt.imshow(shiftnodsubmed,
               cmap='inferno',
               origin='lower',
               norm=norm,
               interpolation='nearest')
    apertures.plot(color='red', lw=1.5, alpha=0.7)
    try:
Ejemplo n.º 12
0
quadRU = fits.open('quadRU.fits')
bias = 100
fwhm = 5
quadRU_data = quadRU[0].data
if show_images:
    norm = ImageNormalize(quadRU_data, interval=ZScaleInterval())
    plt.imshow(quadRU_data,
               origin='lower',
               norm=norm,
               cmap='BrBG',
               clim=(0, 1000))
    plt.show()
    plt.clf()

DAO_stars = DAOStarFinder(bias, fwhm)
stars = DAO_stars.find_stars(quadRU_data)
print("Identified stars", stars)

## Use circular aperture
star_coords = [(stars['xcentroid'][i], stars['ycentroid'][i])
               for i in range(len(stars['id']))]
apertures = CircularAperture(star_coords, r=3.)
print("Apertures", apertures)
phot_table = aperture_photometry(quadRU_data, apertures, method='exact')
print("Phot_table", phot_table)

if show_images:
    apertures.plot(color='blue', lw=2)
    norm = ImageNormalize(quadRU_data, interval=ZScaleInterval())
    plt.imshow(quadRU_data,
               origin='lower',