Example no. 1
def fits2fits(infile, outfile, verbose=False, fix_idr=False):
    """
    Returns: error string, or None on success.
    """
    if fix_idr:
        from astrometry.util.fix_sdss_idr import fix_sdss_idr

    # Read input file.
    fitsin = pyfits.open(infile)
    # Print out info about input file.
    if verbose:
        fitsin.info()

    for i, hdu in enumerate(fitsin):
        if fix_idr:
            hdu = fitsin[i] = fix_sdss_idr(hdu)
        # verify() fails when a keyword contains invalid characters,
        # so go through the primary header and fix them by converting invalid
        # characters to '_'
        hdr = hdu.header
        logging.info('Header has %i cards' % len(hdr))
        # allowed characters (FITS standard section 5.1.2.1)
        pat = re.compile(r'[^A-Z0-9_\-]')

        newcards = []
        for c in hdr.cards:
            k = c.keyword
            # new keyword:
            knew = pat.sub('_', k)
            if k != knew:
                logging.debug('Replacing illegal keyword %s by %s' % (k, knew))
                # it seems pyfits is not clever enough to notice this...
                if len(knew) > 8:
                    knew = 'HIERARCH ' + knew
            newcards.append(pyfits.Card(keyword=knew, value=c.value,
                                        comment=c.comment))
        hdu.header = pyfits.Header(newcards)
            
        # Fix input header
        hdu.verify('fix')

        # UGH!  Work around stupid pyfits handling of scaled data...
        # (it fails to round-trip scaled data correctly!)
        bzero = hdr.get('BZERO', None)
        bscale = hdr.get('BSCALE', None)
        if (bzero is not None and bscale is not None
            and (bzero != 0. or bscale != 1.)):
            logging.debug('Scaling to bzero=%g, bscale=%g' % (bzero, bscale))
            hdu.scale('int16', '', bscale, bzero)

    # Describe output file we're about to write...
    if verbose:
        print('Outputting:')
        fitsin.info()

    try:
        pyfits_writeto(fitsin, outfile, output_verify='warn')
    except pyfits.VerifyError as ve:
        return ('Verification of output file failed: your FITS file is probably too broken to automatically fix.' +
                '  Error message is: ' + str(ve))
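A minimal usage sketch for the function above (the file names are placeholders, and pyfits plus the pyfits_writeto helper are assumed to be provided by the surrounding module):

# Illustrative call only; 'in.fits' and 'out.fits' are placeholder paths.
err = fits2fits('in.fits', 'out.fits', verbose=True)
if err is not None:
    print('fits2fits failed:', err)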
Example no. 2
def removelines(infile, outfile, xcol='X', ycol='Y', ext=1, cut=None, **kwargs):
    if cut is None:
        cut = 100
    p = pyfits.open(infile)
    xy = p[ext].data
    hdr = p[ext].header
    if xy is None:
        print('removelines.py: Input file contains no sources.')
        pyfits_writeto(p, outfile)
        return 0
    
    x = xy.field(xcol)
    y = xy.field(ycol)

    if len(x) == 0:
        print('removelines.py: Your FITS file contains 0 sources (rows)')
        pyfits_writeto(p, outfile)
        return 0
    
    ix = hist_remove_lines(x, 1, 0.5, logcut=-cut)
    iy = hist_remove_lines(y, 1, 0.5, logcut=-cut)
    I = ix * iy
    xc = x[I]
    yc = y[I]
    print('removelines.py: Removed %i sources' % (len(x) - len(xc)))

    p[ext].header.add_history('This xylist was filtered by the "removelines.py" program')
    p[ext].header.add_history('to remove horizontal and vertical lines of sources')
    p[ext].header['REMLINEN'] = (len(x) - len(xc), 'Number of sources removed by "removelines.py"')

    p[ext].data = p[ext].data[I]
    pyfits_writeto(p, outfile)

    return 0
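A usage sketch for removelines (illustrative only: the paths are placeholders, and hist_remove_lines and pyfits_writeto are assumed to be provided by the surrounding module):

# Drop sources that line up in horizontal or vertical lines; placeholder paths.
status = removelines('xylist.fits', 'xylist-cleaned.fits', xcol='X', ycol='Y', cut=100)
print('removelines returned', status)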
Example no. 3
def removelines(infile, outfile, xcol='X', ycol='Y', ext=1, cut=None, **kwargs):
    if cut is None:
        cut = 100
    p = pyfits.open(infile)
    xy = p[ext].data
    hdr = p[ext].header
    if xy is None:
        print('removelines.py: Input file contains no sources.')
        pyfits_writeto(p, outfile)
        return 0
    
    x = xy.field(xcol)
    y = xy.field(ycol)

    if len(x) == 0:
        print('removelines.py: Your FITS file contains 0 sources (rows)')
        pyfits_writeto(p, outfile)
        return 0
    
    ix = hist_remove_lines(x, 1, 0.5, logcut=-cut)
    iy = hist_remove_lines(y, 1, 0.5, logcut=-cut)
    I = ix * iy
    xc = x[I]
    yc = y[I]
    print('removelines.py: Removed %i sources' % (len(x) - len(xc)))

    p[ext].header.add_history('This xylist was filtered by the "removelines.py" program')
    p[ext].header.add_history('to remove horizontal and vertical lines of sources')
    p[ext].header['REMLINEN'] = (len(x) - len(xc), 'Number of sources removed by "removelines.py"')

    p[ext].data = p[ext].data[I]
    pyfits_writeto(p, outfile)

    return 0
Example no. 4
def removelines(infile, outfile, xcol='X', ycol='Y', plots=False, cut=None, **kwargs):
	p = pyfits.open(infile)
	xy = p[1].data
	hdr = p[1].header
	x = xy.field(xcol)
	y = xy.field(ycol)

	NX = max(x) - min(x)
	NY = max(y) - min(y)
	nangle = int(ceil(sqrt(NX*NY)/4.))

	if cut is None:
		cut = 20

	if plots:
		clf()
		plot(x, y, 'r.')

	I = array([True]*len(x))
	for i,angle in enumerate(0.75 + linspace(0, pi/2., nangle, endpoint=False)):
		cost = cos(angle)
		sint = sin(angle)
		xx = x*cost + y*sint
		yy = x*-sint + y*cost
		xx -= min(xx)
		yy -= min(yy)

		if plots:
			print()
			clf()
			subplot(2,2,1)
			plot(xx, yy, 'r.')
			subplot(2,2,3)


		#ix = hist_remove_lines(xx, 0.5, 0.5, 5, plots=plots)
		ix = hist_remove_lines(xx, 1, 0.5, logcut=-cut)

		if plots:
			subplot(2,2,4)

		#iy = hist_remove_lines(yy, 0.5, 0.5, 5, plots=plots)
		iy = hist_remove_lines(yy, 1, 0.5, logcut=-cut)

		I *= ix * iy

		removed = (ix * iy == False)
		if plots:
			if sum(removed):
				plot([min(x[removed]), max(x[removed])],
					 [min(y[removed]), max(y[removed])], 'k-', alpha=0.5)
			subplot(2,2,1)
			plot(xx[removed], yy[removed], 'b-', alpha=0.5)
			plot(xx[removed], yy[removed], 'b.')
			savefig('rot-%04i.png' % i)

		print('angle', angle, 'removed', (len(x) - sum(ix*iy)))

	xc = x[I]
	yc = y[I]
	print('removelines.py: Removed %i sources' % (len(x) - len(xc)))

	if plots:
		plot(xc, yc, 'o', mec='r', mfc='none')
		# axes('equal')
		savefig('after.png')

	p[1].header.add_history('This xylist was filtered by the "removelines_rotate.py" program')
	p[1].header.add_history('to remove lines of sources')
	p[1].header['REMLINEN'] = (len(x) - len(xc), 'Number of sources removed by "removelines"')

	p[1].data = p[1].data[I]
	pyfits_writeto(p, outfile)

	return 0
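The rotated variant above repeatedly applies a 2-D rotation so that lines at arbitrary angles become axis-aligned before histogramming; a standalone sketch of that single step, under the same convention as the loop above (illustrative names, numpy assumed):

import numpy as np

def rotate_and_shift(x, y, angle):
    # Rotate the points by 'angle', then shift so coordinates start at zero,
    # matching the xx/yy construction used in the loop above.
    cost, sint = np.cos(angle), np.sin(angle)
    xx = x * cost + y * sint
    yy = -x * sint + y * cost
    return xx - xx.min(), yy - yy.min()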
Example no. 5
def removelines_general(infile, outfile, nt=180, nr=180, thresh1=2.,
						thresh2=5., plots=False):
	p = pyfits.open(infile)
	xy = p[1].data
	hdr = p[1].header
	x = xy.field('X').copy()
	y = xy.field('Y').copy()

	imshowargs = { 'interpolation':'nearest', 'origin':'lower' }

	imgw = int(ceil(max(x) - min(x)))
	imgh = int(ceil(max(y) - min(y)))

	x -= min(x)
	y -= min(y)

	Rmax = sqrt(imgw**2 + imgh**2)
	Rmin = -Rmax

	(houghimg, houghnorm, rr, tt, rstep, tstep
	 ) = normalized_hough(x, y, imgw, imgh, Rmin, Rmax, 0, pi, nr, nt)

	hnorm = houghimg / maximum(houghnorm, 1)

	if plots:
		clf()
		plot(x,y,'r.')
		savefig('xy.png')

		clf()
		imshow(houghimg, **imshowargs)
		xlabel('Theta')
		ylabel('Radius')
		colorbar()
		savefig('hough.png')

		clf()
		imshow(houghnorm, **imshowargs)
		xlabel('Theta')
		ylabel('Radius')
		colorbar()
		savefig('norm.png')

		clf()
		imshow(hnorm, **imshowargs)
		xlabel('Theta')
		ylabel('Radius')
		colorbar()
		savefig('hnorm.png')


	I = find(hnorm.ravel() >= thresh1)
	print('%i peaks are above the coarse threshold' % len(I))
	bestri = I // nt
	bestti = I % nt

	if plots:
		a=axis()
		for (ri,ti) in zip(bestri,bestti):
			plot([ti-2, ti-2, ti+2, ti+2, ti-2], [ri-2, ri+2, ri+2, ri-2, ri-2], 'r-')
			axis(a)
			savefig('zooms.png')

		clf()
		plot(x,y,'r.')
		for (ri,ti) in zip(bestri,bestti):
			(x0,x1,y0,y1) = clip_to_image(rr[ri], tt[ti], imgw, imgh)
			plot([x0,x1],[y0,y1], 'b-')
		savefig('xy2.png')

	# how big a search area around each peak?
	boxsize = 1
	# how much more finely to grid.
	finer = 3
	nr2 = (boxsize * 2)*finer + 1
	nt2 = nr2

	bestrt = []
	keep = array([True] * len(x))
	for (ri,ti) in zip(bestri,bestti):
		(subh, subnorm, subrr, subtt, subrstep, subtstep
		 ) = normalized_hough(x, y, imgw, imgh,
							  rr[max(ri-boxsize, 0)], rr[min(ri+boxsize, nr-1)],
							  tt[max(ti-boxsize, 0)], tt[min(ti+boxsize, nt-1)],
							  nr2, nt2)
		#print '  median normalization:', median(subnorm)
		subhnorm = subh / maximum(subnorm,1)
		I = find((subhnorm).ravel() >= thresh2)
		for i in I:
			bestsubri = i // nt2
			bestsubti = i % nt2
			r = subrr[bestsubri]
			t = subtt[bestsubti]
			bestrt.append((r,t))
			#print '  (r=%.1f, t=%.1f): factor %.1f above expected' % (r, t*180/pi, subhnorm.ravel()[i])
			thisr = x * cos(t) + y * sin(t)
			keep *= (abs(thisr - r) > subrstep/2.)

	print('In finer grid: found %i peaks' % len(bestrt))

	if plots:
		clf()
		subplot(1,1,1)
		plot(x,y,'r.')
		for (r,t) in bestrt:
			(x0,x1,y0,y1) =  clip_to_image(r, t, imgw, imgh)
			plot([x0,x1],[y0,y1],'b-')
		savefig('xy3.png')

		clf()
		plot(x,y,'r.')
		plot(x[keep == False], y[keep == False], 'b.')
		savefig('xy4.png')

	p[1].data = p[1].data[keep]
	pyfits_writeto(p, outfile)
	return 0
Example no. 6
def removelines_general(infile,
                        outfile,
                        nt=180,
                        nr=180,
                        thresh1=2.,
                        thresh2=5.,
                        plots=False):
    p = pyfits.open(infile)
    xy = p[1].data
    hdr = p[1].header
    x = xy.field('X').copy()
    y = xy.field('Y').copy()

    imshowargs = {'interpolation': 'nearest', 'origin': 'lower'}

    imgw = int(ceil(max(x) - min(x)))
    imgh = int(ceil(max(y) - min(y)))

    x -= min(x)
    y -= min(y)

    Rmax = sqrt(imgw**2 + imgh**2)
    Rmin = -Rmax

    (houghimg, houghnorm, rr, tt, rstep,
     tstep) = normalized_hough(x, y, imgw, imgh, Rmin, Rmax, 0, pi, nr, nt)

    hnorm = houghimg / maximum(houghnorm, 1)

    if plots:
        clf()
        plot(x, y, 'r.')
        savefig('xy.png')

        clf()
        imshow(houghimg, **imshowargs)
        xlabel('Theta')
        ylabel('Radius')
        colorbar()
        savefig('hough.png')

        clf()
        imshow(houghnorm, **imshowargs)
        xlabel('Theta')
        ylabel('Radius')
        colorbar()
        savefig('norm.png')

        clf()
        imshow(hnorm, **imshowargs)
        xlabel('Theta')
        ylabel('Radius')
        colorbar()
        savefig('hnorm.png')

    I = find(hnorm.ravel() >= thresh1)
    print('%i peaks are above the coarse threshold' % len(I))
    bestri = I // nt
    bestti = I % nt

    if plots:
        a = axis()
        for (ri, ti) in zip(bestri, bestti):
            plot([ti - 2, ti - 2, ti + 2, ti + 2, ti - 2],
                 [ri - 2, ri + 2, ri + 2, ri - 2, ri - 2], 'r-')
            axis(a)
            savefig('zooms.png')

        clf()
        plot(x, y, 'r.')
        for (ri, ti) in zip(bestri, bestti):
            (x0, x1, y0, y1) = clip_to_image(rr[ri], tt[ti], imgw, imgh)
            plot([x0, x1], [y0, y1], 'b-')
        savefig('xy2.png')

    # how big a search area around each peak?
    boxsize = 1
    # how much more finely to grid.
    finer = 3
    nr2 = (boxsize * 2) * finer + 1
    nt2 = nr2

    bestrt = []
    keep = array([True] * len(x))
    for (ri, ti) in zip(bestri, bestti):
        (subh, subnorm, subrr, subtt, subrstep, subtstep) = normalized_hough(
            x, y, imgw, imgh,
            rr[max(ri - boxsize, 0)], rr[min(ri + boxsize, nr - 1)],
            tt[max(ti - boxsize, 0)], tt[min(ti + boxsize, nt - 1)],
            nr2, nt2)
        #print '  median normalization:', median(subnorm)
        subhnorm = subh / maximum(subnorm, 1)
        I = find((subhnorm).ravel() >= thresh2)
        for i in I:
            bestsubri = i // nt2
            bestsubti = i % nt2
            r = subrr[bestsubri]
            t = subtt[bestsubti]
            bestrt.append((r, t))
            #print '  (r=%.1f, t=%.1f): factor %.1f above expected' % (r, t*180/pi, subhnorm.ravel()[i])
            thisr = x * cos(t) + y * sin(t)
            keep *= (abs(thisr - r) > subrstep / 2.)

    print('In finer grid: found %i peaks' % len(bestrt))

    if plots:
        clf()
        subplot(1, 1, 1)
        plot(x, y, 'r.')
        for (r, t) in bestrt:
            (x0, x1, y0, y1) = clip_to_image(r, t, imgw, imgh)
            plot([x0, x1], [y0, y1], 'b-')
        savefig('xy3.png')

        clf()
        plot(x, y, 'r.')
        plot(x[keep == False], y[keep == False], 'b.')
        savefig('xy4.png')

    p[1].data = p[1].data[keep]
    pyfits_writeto(p, outfile)
    return 0
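A standalone sketch of the rejection test used in the loop above: a source (x, y) lies on the Hough line (r, t) in normal form when x*cos(t) + y*sin(t) equals r, so anything within half a radial bin of r is dropped (illustrative names; numpy assumed):

import numpy as np

def line_keep_mask(x, y, r, t, rstep):
    # True for sources farther than half a radial bin from the line (r, t).
    thisr = x * np.cos(t) + y * np.sin(t)
    return np.abs(thisr - r) > rstep / 2.0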
Example no. 7
def uniformize(infile, outfile, n, xcol='X', ycol='Y', ext=1, **kwargs):
    p = pyfits.open(infile)
    xy = p[ext].data
    if xy is None:
        print('No sources')
        pyfits_writeto(p, outfile)
        return
    hdr = p[ext].header
    x = xy.field(xcol)
    y = xy.field(ycol)
    if len(x) == 0:
        print('Empty xylist')
        pyfits_writeto(p, outfile)
        return

    # use IMAGEW,H, or compute bounds?
    # Guard against NaN source positions (seen in LSST xylists).
    I = np.logical_and(np.isfinite(x), np.isfinite(y))
    if not all(I):
        print('%i source positions are not finite.' %
              np.sum(np.logical_not(I)))
        x = x[I]
        y = y[I]

    W = max(x) - min(x)
    H = max(y) - min(y)
    if W == 0 or H == 0:
        print('Area of the rectangle enclosing all image sources: %i x %i' %
              (W, H))
        pyfits_writeto(p, outfile)
        return
    NX = int(max(1, np.round(W / np.sqrt(W * H / float(n)))))
    NY = int(max(1, np.round(n / float(NX))))
    print('Uniformizing into %i x %i bins' % (NX, NY))
    print('Image bounds: x [%g,%g], y [%g,%g]' %
          (min(x), max(x), min(y), max(y)))

    ix = (np.clip(np.floor((x - min(x)) / float(W) * NX), 0,
                  NX - 1)).astype(int)
    iy = (np.clip(np.floor((y - min(y)) / float(H) * NY), 0,
                  NY - 1)).astype(int)
    assert (np.all(ix >= 0))
    assert (np.all(ix < NX))
    assert (np.all(iy >= 0))
    assert (np.all(iy < NY))
    I = iy * NX + ix
    assert (np.all(I >= 0))
    assert (np.all(I < NX * NY))
    bins = [[] for i in range(NX * NY)]
    for j, i in enumerate(I):
        bins[int(i)].append(j)
    maxlen = max([len(b) for b in bins])
    J = []
    for i in range(maxlen):
        thisrow = []
        for b in bins:
            if i >= len(b):
                continue
            thisrow.append(b[i])
        thisrow.sort()
        J += thisrow
    J = np.array(J)
    p[ext].header.add_history(
        'This xylist was filtered by the "uniformize.py" program')
    p[ext].data = p[ext].data[J]
    pyfits_writeto(p, outfile)
    return 0
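A usage sketch for uniformize (illustrative only: the paths are placeholders, n is approximately the total number of spatial bins used for the re-ordering, and pyfits_writeto is assumed to be provided by the surrounding module):

# Reorder the xylist so that sources are drawn uniformly across the image;
# placeholder paths and an illustrative bin count.
uniformize('xylist.fits', 'xylist-uniform.fits', n=10, xcol='X', ycol='Y')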