def register_reproject_with_errors(inputImages, outputImages, intErrorImages, outputErrorImages, refImage, processDir, headerName="header.hdr"):
	cmds.mGetHdr(refImage,headerName)
	mw.reproject(inputImages,outputImages,header=headerName,north_aligned=True,system='EQUJ',exact_size=True,common=True,silent_cleanup=True)
	for i in range(len(inputImages)):
		idenList=inputImages[i].split('/')[2].split('-')
		run=int(idenList[0])
		camcol=int(idenList[1])
		field=int(idenList[2])
		band=idenList[3].split('.')[0]
		fitsFile=fits.open(inputImages[i])
		fitsImage=fitsFile[0].data
		errorData=fitsFile[1].data
		errorImg=[]
		for j in range(1489):
			errorImg.append(errorData)
		errorImage=np.asarray(errorImg)
		skyImageInit=fitsFile[2].data[0][0]
		xs=np.fromfunction(lambda k: k, (skyImageInit.shape[0],), dtype=int)
		ys=np.fromfunction(lambda k: k, (skyImageInit.shape[1],), dtype=int)
		interpolator=interp2d(xs, ys, skyImageInit, kind='cubic')
		for j in range(errorImage.shape[1]):
			for k in range(errorImage.shape[0]):
				skyImageValue=interpolator.__call__(j*skyImageInit.shape[0]/errorImage.shape[0],k*skyImageInit.shape[1]/errorImage.shape[1])
				errorImage[k][j]=return_sdss_pixelError(band, camcol, run, fitsImage[k][j], skyImageValue, errorImg[k][j])
		fitsFile[0].data=errorImage
		fitsFile.writeto(intErrorImages[i])
	mw.reproject(intErrorImages,outputErrorImages,header=headerName,north_aligned=True,system='EQUJ',exact_size=True,common=True,silent_cleanup=True)
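# Note: scipy.interpolate.interp2d (used above for the sky image) is deprecated and has
# been removed in recent SciPy releases. A minimal sketch of the same interpolation step
# using RegularGridInterpolator, assuming the same xs/ys/skyImageInit arrays as above
# (the exact axis/scaling convention should be checked against the SDSS sky HDU):
#
#   from scipy.interpolate import RegularGridInterpolator
#   interpolator = RegularGridInterpolator((xs, ys), skyImageInit, method='cubic')
#   skyImageValue = interpolator((x_scaled, y_scaled))  # x_scaled/y_scaled are illustrative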
Example #2
def align_images(images, frame_dir="temp", registered_dir="temp"):
    '''
    Reproject the u, g, r, i, z frames associated with each r-band image onto the
    r-band frame's WCS using montage_wrapper, writing results to `registered_dir`.
    '''

    if not os.path.exists(registered_dir):
        os.makedirs(registered_dir)

    for image in images:

        #print("Processing {}...".format(image))

        frame_path = [
            os.path.join(frame_dir,
                         image.replace("frame-r-", "frame-{}-").format(b))
            for b in "ugriz"
        ]
        registered_path = [
            os.path.join(registered_dir,
                         image.replace("frame-r-", "registered-{}-").format(b))
            for b in "ugriz"
        ]

        header = os.path.join(
            registered_dir,
            image.replace("frame", "header").replace(".fits", ".hdr"))

        mw.commands.mGetHdr(os.path.join(frame_dir, image), header)
        mw.reproject(frame_path,
                     registered_path,
                     header=header,
                     exact_size=True,
                     silent_cleanup=True,
                     common=True)

    return None
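# Usage sketch (illustrative SDSS-style filename; the directory names are assumptions):
#   align_images(['frame-r-001740-1-0100.fits'], frame_dir='frames', registered_dir='registered')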
Example #3
def rep():
	for filename in os.listdir(conv_out):
#	for filename in os.listdir(hdrFiles):
		if filename.endswith('.fits'):
#		if filename.endswith('.txt'):


			inFile = conv_out+filename
			p = filename.split('_')
			if p[1] == 'rep':
				continue
			l = 'L2.0'
			for i in range(0,len(L25list)):
				now = L25list[i]
				if p[0] == now:
					l = 'L2.5'
					break
		#	print p[0],'	',l
			# build the reprojected output name by dropping the trailing '_conv*' suffix
			if filename.endswith('conv.fits'):
				hdr = hdrFiles+p[0]+'_PMW_'+l+'.txt'
				outPut = conv_out+filename[:-len('_conv.fits')]+'_rep_1.fits'
			elif filename.endswith('_conv_2.fits'):
				hdr = hdrFiles+p[0]+'_PMW_'+l+'_2.txt'
				outPut = conv_out+filename[:-len('_conv_2.fits')]+'_rep_2.fits'
			elif filename.endswith('_conv_3.fits'):
				hdr = hdrFiles+p[0]+'_PMW_'+l+'_3.txt'
				outPut = conv_out+filename[:-len('_conv_3.fits')]+'_rep_3.fits'
			else:
				print(filename)
				print('AAAA EVERYBODY PANIC THERE IS AN ERROR IN THE CODE')
			montage.reproject(inFile,outPut,header=hdr,exact_size=True)
Example #4
def align_images(images, frame_dir="temp", registered_dir="temp", used_bands="ugriz"):
    '''
    Reproject the frames in `used_bands` associated with each r-band image onto the
    r-band frame's WCS using montage_wrapper, writing results to `registered_dir`.
    '''

    if not os.path.exists(registered_dir):
        os.makedirs(registered_dir)
    
    for image in images:
        
        #print("Processing {}...".format(image))
    
        frame_path = [
            os.path.join(frame_dir, image.replace("frame-r-", "frame-{}-").format(b))
            for b in "ugriz"
            ]
        registered_path = [
            os.path.join(registered_dir, image.replace("frame-r-", "registered-{}-").format(b))
            for b in "ugriz"
            ]

        header = os.path.join(
            registered_dir,
            image.replace("frame", "header").replace(".fits", ".hdr")
            )

        mw.commands.mGetHdr(os.path.join(frame_dir, image), header)
        mw.reproject(
            frame_path, registered_path,
            header=header, exact_size=True, silent_cleanup=True, common=True
            )

    return None
Example #5
def project_extension(fn, ext, alignment, size):
    """Project extension `extname` in file `fn`.

    alignment:
      'vangle': Projected velocity--->+x-axis.
      'sangle': Projected comet-Sun vector--->+x-axis.

    Image distortions should be removed.

    """

    if alignment not in ('vangle', 'sangle'):
        raise ValueError(
            'Alignment must be vangle or sangle: {}'.format(alignment))

    h0 = fits.getheader(fn)
    if alignment not in h0:
        raise ValueError('Alignment vector not in FITS header')

    radec = (h0['tgtra'], h0['tgtdec'])
    temp_header = make_header(radec, 90 + h0[alignment], size)
    bitpix = fits.getheader(fn, ext=ext)['BITPIX']

    # could not reproject diff images with hdu keyword, instead copy
    # all extensions to their own file, and reproject that
    fd_in, inf = mkstemp()
    with fits.open(fn) as original:
        # astype(float) to convert integers
        newhdu = fits.PrimaryHDU(original[ext].data.astype(float),
                                 original[ext].header)
        newhdu.writeto(inf)

    fd_out, outf = mkstemp()
    try:
        m.reproject(inf,
                    outf,
                    header=temp_header,
                    exact_size=True,
                    silent_cleanup=True)
        im, h = fits.getdata(outf, header=True)
        if bitpix == 16:
            im = im.round().astype(int)
            im[im < 0] = 0
        projected = fits.ImageHDU(im, h)
    except m.MontageError as e:
        raise
    finally:
        # temp file clean up
        os.fdopen(fd_in).close()
        os.unlink(inf)
        os.fdopen(fd_out).close()
        os.unlink(outf)
        os.unlink(temp_header)

    return projected
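# Usage sketch (illustrative; the FITS filename and keyword values are assumptions, and
# `make_header` must be defined elsewhere, as in the function above):
#   hdu = project_extension('comet_obs.fits', ext=1, alignment='vangle', size=300)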
Example #6
def MontageWrapperWrapper(in_fitsdata, in_hdr, montage_path=None, temp_path=None, exten=None):

    # Handle Montage path, if kwargs provided
    if montage_path != None:
        os.environ['PATH'] += ':'+montage_path
    import montage_wrapper

    # Produce on-the-fly temporary file
    timestamp = str(time.time()).replace('.','-')
    if isinstance(temp_path, str):
        temp_dir = os.path.join(temp_path, timestamp)
        os.mkdir(temp_dir)
    else:
        temp_dir = tempfile.mkdtemp()

    # If the FITS data provided is a path to a file, record that path; else if it is an astropy PrimaryHDU or ImageHDU object, write it to the temporary directory
    if isinstance(in_fitsdata, basestring):
        if os.path.exists(in_fitsdata):
            in_fits_path = in_fitsdata
        else:
            raise Exception('No FITS file at path provided')
    elif isinstance(in_fitsdata, astropy.io.fits.hdu.image.PrimaryHDU) or isinstance(in_fitsdata, astropy.io.fits.hdu.image.ImageHDU):
        in_fits_path = os.path.join(temp_dir,'temp_in_'+timestamp+'.fits')
        in_fitsdata = astropy.io.fits.HDUList([in_fitsdata])
        in_fitsdata.writeto(in_fits_path)

    # If the header provided is a path to a file, record that path; else if it is an astropy Header object, write it to the temporary directory
    if isinstance(in_hdr, basestring):
        if os.path.exists(in_hdr):
            hdr_path = in_hdr
        else:
            raise Exception('No header file at path provided')
    elif isinstance(in_hdr, astropy.io.fits.Header):
        hdr_path = os.path.join(temp_dir,'temp_header_'+timestamp+'.hdr')
        in_hdr.totextfile(hdr_path)

    # Reproject data with montage_wrapper
    out_fits_path = os.path.join(temp_dir,'temp_out_'+timestamp+'.fits')
    montage_wrapper.reproject(in_fits_path, out_fits_path, hdr_path, exact_size=True, hdu=exten)
    out_img = astropy.io.fits.getdata(out_fits_path)

    # If temporary files were placed inside user-supplied temporary directory, delete those files individually
    if isinstance(temp_path, str):
        os.remove(hdr_path)
        os.remove(out_fits_path)

    # Else if using temporary directory produced with tempfile, delete it wholesale
    else:
        RemoveCrawl(temp_dir)

    # Return output array
    return out_img
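# Usage sketch (illustrative; `my_hdu` is assumed to be an astropy PrimaryHDU and
# `my_header` an astropy.io.fits.Header):
#   reprojected_array = MontageWrapperWrapper(my_hdu, my_header, temp_path='/tmp', exten=0)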
Example #7
def register_reproject_with_errors(inputImages,
                                   outputImages,
                                   intErrorImages,
                                   outputErrorImages,
                                   refImage,
                                   processDir,
                                   headerName="header.hdr"):
    cmds.mGetHdr(refImage, headerName)
    mw.reproject(inputImages,
                 outputImages,
                 header=headerName,
                 north_aligned=True,
                 system='EQUJ',
                 exact_size=True,
                 common=True,
                 silent_cleanup=True)
    for i in range(len(inputImages)):
        idenList = inputImages[i].split('/')[2].split('-')
        run = int(idenList[0])
        camcol = int(idenList[1])
        field = int(idenList[2])
        band = idenList[3].split('.')[0]
        fitsFile = fits.open(inputImages[i])
        fitsImage = fitsFile[0].data
        errorData = fitsFile[1].data
        errorImg = []
        for j in range(1489):
            errorImg.append(errorData)
        errorImage = np.asarray(errorImg)
        skyImageInit = fitsFile[2].data[0][0]
        xs = np.fromfunction(lambda k: k, (skyImageInit.shape[0], ), dtype=int)
        ys = np.fromfunction(lambda k: k, (skyImageInit.shape[1], ), dtype=int)
        interpolator = interp2d(xs, ys, skyImageInit, kind='cubic')
        for j in range(errorImage.shape[1]):
            for k in range(errorImage.shape[0]):
                skyImageValue = interpolator.__call__(
                    j * skyImageInit.shape[0] / errorImage.shape[0],
                    k * skyImageInit.shape[1] / errorImage.shape[1])
                errorImage[k][j] = return_sdss_pixelError(
                    band, camcol, run, fitsImage[k][j], skyImageValue,
                    errorImg[k][j])
        fitsFile[0].data = errorImage
        fitsFile.writeto(intErrorImages[i])
    mw.reproject(intErrorImages,
                 outputErrorImages,
                 header=headerName,
                 north_aligned=True,
                 system='EQUJ',
                 exact_size=True,
                 common=True,
                 silent_cleanup=True)
Example #8
def register_reproject(inputImages,
                       outputImages,
                       refImage,
                       processDir,
                       headerName="header.hdr"):
    cmds.mGetHdr(refImage, headerName)
    mw.reproject(inputImages,
                 outputImages,
                 header=headerName,
                 north_aligned=True,
                 system='EQUJ',
                 exact_size=True,
                 common=True,
                 silent_cleanup=True)
def do_primarybeam_correction(pbname, imagename):
    print(' Preparing to apply the primary beam correction...')
    image = fits.open(imagename)[0]
    pb = fits.open(pbname)[0]
    wcs = WCS(pb.header)
    # cutout pb field of view to match image field of view
    x_size = image.header['NAXIS1']
    x_pixel_deg = image.header[
        'CDELT2']  # CDELT1 is negative, so take positive one
    size = (
        x_size * x_pixel_deg * u.degree, x_size * x_pixel_deg * u.degree
    )  # angular size of cutout, using astropy coord. approx 32768*0.6 arcseconds.
    position = SkyCoord(pb.header['CRVAL1'] * u.degree, pb.header['CRVAL2'] *
                        u.degree)  # RA and DEC of beam PB pointing
    print(' Cutting out image FOV from primary beam image...')
    cutout = Cutout2D(pb.data[0, 0, :, :],
                      position=position,
                      size=size,
                      mode='trim',
                      wcs=wcs.celestial,
                      copy=True)
    pb.data = cutout.data  # Put the cutout image in the FITS HDU
    pb.header.update(
        cutout.wcs.to_header())  # Update the FITS header with the cutout WCS
    pb.writeto(pbname[:-5] + '_PBCOR.fits',
               overwrite=True)  # Write the cutout to a new FITS file

    # regrid PB image cutout to match pixel scale of the image FOV
    print(' Regridding image...')
    # get header of image to match PB to
    montage.mGetHdr(imagename, 'hdu_tmp.hdr')
    # regrid pb image (270 pixels) to size of ref image (32k pixels)
    montage.reproject(in_images=pbname[:-5] + '_PBCOR.fits',
                      out_images=pbname[:-5] + '_PBCOR_regrid.fits',
                      header='hdu_tmp.hdr',
                      exact_size=True)
    os.remove('hdu_tmp.hdr')  # get rid of header text file saved to disk

    # do pb correction
    pb = fits.open(pbname[:-5] + '_PBCOR_regrid.fits')[0]
    # fix nans introduced in primary beam by montage at edges
    print(
        ' A small buffer of NaNs is introduced around the image by Montage when regridding to match the size, \n these have been set to the value of their nearest neighbours to maintain the same image dimensions'
    )
    mask = np.isnan(pb.data)
    pb.data[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask),
                              pb.data[~mask])
    image.data = image.data / pb.data
    image.writeto(imagename[:-5] + '_PBCOR.fits', overwrite=True)
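# Usage sketch (illustrative filenames): divide an image by its regridded primary beam.
#   do_primarybeam_correction('primary_beam.fits', 'field_image.fits')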
Example #10
def register_reproject_direc(direc, ref='r'):
    os.system("rm -rf " + direc + "/" + ref + "_header.hdr " + direc +
              "/*_area* " + direc + "/*_reg*")
    cmds.mGetHdr(direc + "/" + ref + ".fits",
                 direc + "/header_" + ref + ".hdr")
    list_in = [
        direc + '/g.fits', direc + '/r.fits', direc + '/i.fits',
        direc + '/u.fits', direc + '/z.fits'
    ]
    list_out = [
        direc + '/g_reg.fits', direc + '/r_reg.fits', direc + '/i_reg.fits',
        direc + '/u_reg.fits', direc + '/z_reg.fits'
    ]
    mw.reproject(list_in,
                 list_out,
                 header=direc + "/header_" + ref + ".hdr",
                 north_aligned=True,
                 system='EQUJ',
                 exact_size=True,
                 common=True)
Example #11
def align_images(r, tmp_dir):

    frame_path = [
        os.path.join(tmp_dir, get_fits_filename(r, b)) for b in BANDS
    ]
    registered_path = [
        os.path.join(tmp_dir, get_fits_filename(r, b, 'registered'))
        for b in BANDS
    ]

    header = os.path.join(tmp_dir, get_fits_filename(r, 'r', 'header', '.hdr'))

    mw.commands.mGetHdr(os.path.join(tmp_dir, get_fits_filename(r, 'r')),
                        header)
    mw.reproject(frame_path,
                 registered_path,
                 header=header,
                 exact_size=True,
                 silent_cleanup=True,
                 common=True)
def convolve_regrid(imagename, ref_imagename):
    # first need to convolve to match low freq resolution
    print(' Convolving image...')
    hdu1400 = fits.open(imagename)[0]
    hdu560 = fits.open(ref_imagename)[0]
    # degrees per pixel
    cdelt2_1400 = hdu1400.header['CDELT2']
    cdelt2_560 = hdu560.header['CDELT2']
    # how many pixels across the fwhm of 1400 MHz beam in 1400 MHz image:
    fwhm1400_pix_in1400 = hdu1400.header['BMAJ'] / cdelt2_1400  # = 2.48
    # how many pixels across the fwhm of 560 MHz beam in 1400 MHz image:
    fwhm560_pix_in1400 = hdu560.header['BMAJ'] / cdelt2_1400  # = 6.21
    # convert fwhm to sigma
    sigma1400_orig = fwhm1400_pix_in1400 / np.sqrt(8 * np.log(2))
    sigma1400_target560 = fwhm560_pix_in1400 / np.sqrt(8 * np.log(2))
    # calculate gaussian kernels (only need the 560 MHz one to convolve with)
    # By default, the Gaussian kernel will go to 8 sigma in each direction. Go much larger to get better sampling (odd number required).
    psf1400_orig = Gaussian2DKernel(sigma1400_orig,
                                    x_size=29,
                                    y_size=29,
                                    mode='oversample',
                                    factor=10)
    psf1400_target560 = Gaussian2DKernel(sigma1400_target560,
                                         x_size=29,
                                         y_size=29,
                                         mode='oversample',
                                         factor=10)

    # work out convolution kernel required to achieve 560 MHz psf in final image
    # 1400 MHz psf convolved with Y = 560 MHz psf
    # convolution is multiplication in frequency space, so:
    ft1400 = fft.fft2(psf1400_orig)
    ft560 = fft.fft2(psf1400_target560)
    ft_kernel = (ft560 / ft1400)
    kernel = fft.ifft2(ft_kernel)
    kernel = fft.fftshift(kernel)  # centre the kernel

    # convolve input beam with kernel to check output beam is correct, and make plot
    outbeam = convolve(
        psf1400_orig, kernel.real,
        boundary='extend')  # normalising kernel is on by default.
    f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20, 3))
    #im = plt.imshow()
    im1 = ax1.imshow(psf1400_orig)
    im2 = ax2.imshow(psf1400_target560)
    im3 = ax3.imshow(kernel.real)
    im4 = ax4.imshow(outbeam)
    plt.colorbar(im1, ax=ax1)
    plt.colorbar(im2, ax=ax2)
    plt.colorbar(im3, ax=ax3)
    plt.colorbar(im4, ax=ax4)
    plt.subplots_adjust(wspace=0.3)
    ax1.set_title('1400 MHz PSF')
    ax2.set_title('560 MHz PSF')
    ax3.set_title('Kernel')
    ax4.set_title('1400 convolved with kernel')
    plt.savefig('convolution-kernel-' + imagename[:-5] + '.png',
                bbox_inches='tight')

    # Convolve 1400 MHz image with new kernel to get 560 MHz resolution
    hdu1400_data_convolved = convolve(
        hdu1400.data, kernel.real,
        boundary='extend')  # normalising kernel is on by default.
    # update data and correct for Jy/beam scale change (proportional to change in beam area)
    hdu1400.data = hdu1400_data_convolved * ((hdu560.header['BMAJ']**2) /
                                             (hdu1400.header['BMAJ']**2))

    # save convolved image
    hdu1400.writeto(imagename[:-5] + '_convolved.fits', overwrite=True)
    # use montage to regrid image so they both have same pixel dimensions in prep for making cube
    print(' Regridding image...')
    # get header of 560 MHz image to match to
    montage.mGetHdr(ref_imagename, 'hdu560_tmp.hdr')
    # regrid 1400 MHz cropped image to 560 MHz image ref
    montage.reproject(in_images=imagename[:-5] + '_convolved.fits',
                      out_images=imagename[:-5] + '_convolved_regrid.fits',
                      header='hdu560_tmp.hdr',
                      exact_size=True)
    os.remove('hdu560_tmp.hdr')  # get rid of header text file saved to disk
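# Usage sketch (illustrative filenames): match a 1400 MHz image to the resolution and
# pixel grid of a 560 MHz reference image.
#   convolve_regrid('image_1400MHz.fits', 'image_560MHz.fits')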
Example #13
def feather(lowres, highres, exportpsf, sdfactor, regrid):
    # Import high-res
    original = fits.open(lowres)
    hdu_highres = fits.open(highres)
    if regrid is False:
        nx1_highres = hdu_highres[0].header["NAXIS1"]
        nx2_highres = hdu_highres[0].header["NAXIS2"]
        nx1_original = original[0].header["NAXIS1"]
        nx2_original = original[0].header["NAXIS2"]
        if nx1_highres != nx1_original or nx2_highres != nx2_original:
            print("Images are not the same size, and --regrid was not selected")
        else:
            # Single -> Double, remove redundant axes
            hr = np.squeeze(np.squeeze(hdu_highres[0].data.astype(float)))
            # Replace NaNs with zeros
            replace = np.isnan(hr)
            hr[replace]=0.0

            try:
                bmaj_highres = hdu_highres[0].header["BMAJ"]
            except KeyError:
                print("No valid beam in header of {0}".format(highres))
                sys.exit(1)
            bmin_highres = hdu_highres[0].header["BMIN"]
            bpa_highres = hdu_highres[0].header["BPA"]

        # Montage copies ALL the fits keys across, including the beam values! So we need to replace those with the original values
        # Test whether the file has a beam
            try:
                bmaj_lowres = original[0].header["BMAJ"]
            except KeyError:
               print("No valid beam in header of {0}".format(lowres))
               sys.exit(1)

            # Regrid low-res
            if regrid:
                lowres_rg = lowres.replace(".fits", "_montaged.fits")
                if not os.path.exists(lowres_rg):
                    montage.mGetHdr(highres,"temp.txt")
                    montage.reproject(lowres,lowres_rg,header="temp.txt",exact_size=True)
                else:
                    print("Will not overwrite existing regridded image {0}".format(lowres_rg))
            else:
                print("Not regridding; expecting image co-ordinates to match exactly.")
                lowres_rg = lowres
        # TODO: add a test for image co-ordinate match rather than letting it get to the FT then fail

            hdu_lowres = fits.open(lowres_rg)
            newhdr = hdu_lowres[0].header
            for fitskey in ["BMAJ", "BMIN", "BPA"]:
                newhdr[fitskey] = original[0].header[fitskey]
        #        print fitskey, original[0].header[fitskey]
            try:
                naxis4 = newhdr["NAXIS4"]
            except KeyError:
                naxis4 = None
            if naxis4:
                newhdr["NAXIS"] = 4

            hdu_lowres.writeto(lowres_rg, overwrite = True)

            # Import regridded low-res
            hdu_lowres = fits.open(lowres_rg)
            # Single -> Double, remove redundant axes
            lr = np.squeeze(np.squeeze(hdu_lowres[0].data.astype(float)))
            # Replace NaNs with zeros
            replace = np.isnan(lr)
            lr[replace]=0.0

            bmaj_lowres = hdu_lowres[0].header["BMAJ"]
            bmin_lowres = hdu_lowres[0].header["BMIN"]
            bpa_lowres = hdu_lowres[0].header["BPA"]


        # TODO: enable diagnostic plots
        # if plots
        #    py.figure(1)
        #    py.clf()
        #    py.imshow(np.log10(highres))
        #    py.savefig("highres.png")

            hr_fft = fft(hr)
            lr_fft = fft(lr)

            # According to https://casa.nrao.edu/docs/taskref/feather-task.html
            # Scale the low-resolution image by the ratio of the volumes of the two clean beams
            # (high-res / low-res)

            ratio = (sdfactor*bmaj_highres*bmin_highres) / (bmaj_lowres*bmin_lowres)

            #Add to this, the uv-grid of the high-resolution image, scaled by  
            #                   (1-wt) where "wt" is the Fourier transform of the "clean beam"
            #                   defined in the low-resolution image.  

            # Make a model image of low-resolution psf
            xmax = hdu_lowres[0].header["NAXIS1"]
            ymax = hdu_lowres[0].header["NAXIS2"]
            try:
                pix2deg = hdu_lowres[0].header["CDELT2"]
            except KeyError:
                pix2deg = hdu_lowres[0].header["CD2_2"]

            x, y = np.meshgrid(np.linspace(0,xmax,xmax), np.linspace(0,ymax,ymax))

            sigmax = bmaj_lowres / (pix2deg * sig2fwhm)
            sigmay = bmin_lowres / (pix2deg * sig2fwhm)
            mux = xmax/2 + 0.5
            muy = ymax/2 + 0.5

            g = gaussian2d(x, y, mux, muy, sigmax, sigmay, np.deg2rad(bpa_lowres))
            g_fft = fft(g)

            if exportpsf:
                exportfits(g, hdu_highres[0].header, lowres.replace(".fits","_psf.fits"))
                exportfits(np.real(g_fft), hdu_highres[0].header, lowres.replace(".fits","_psf_fft_real.fits"))
                try:
                    exportfits(np.imag(g_fft), hdu_highres[0].header, lowres.replace(".fits","_psf_fft_imag.fits"))
                except:
        # I get some weird errors when I try to export an incredibly tiny imaginary part, but this works:
                    exportfits(np.zeros(g_fft.shape), hdu_highres[0].header, lowres.replace(".fits","_psf_fft_imag.fits"))

            # Add together
            comb_fft = ratio * lr_fft + (1 - (g_fft/np.nanmax(g_fft))) * hr_fft

            # Inverse FFT
            comb = ifft(comb_fft)
            exportfits(comb, hdu_highres[0].header, highres.replace(".fits","+")+lowres)
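# Usage sketch (illustrative filenames): feather a low-resolution single-dish image into
# a high-resolution interferometric image that already shares its pixel grid.
#   feather('lowres.fits', 'highres.fits', exportpsf=False, sdfactor=1.0, regrid=False)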
Example #14
    out_image_hdr =  datadir + 'hdr/%s_cutout.hdr' % (iauname)
    out_nsa_diff_name = datadir + 'diff/%s.fits' % (iauname)
    out_nsa_diff_re_name = datadir + 'diffre/%s.fits' % (iauname)
    out_nsa_pimage_re_name = datadir + 'pimagere/%s_nsa_pimage_re.fits' % (iauname)

    log.info("Process %s", iauname)
    if not os.path.isfile(ivar_name): save_ivar(filter, run, camcol, field)

    log.info("Cutout field image: %s", out_image_name)
    montage.mSubimage(field_name, out_image_name, ra, dec, size)
    log.info("Making header file: %s", out_image_hdr)
    hdr = montage.mGetHdr(out_image_name, out_image_hdr)

    # reproject everything to cutout field image
    log.info("Cutout ivar image: %s", out_ivar_name)
    montage.reproject(ivar_name, out_ivar_name, header=out_image_hdr, exact_size=True,
                        silent_cleanup=montage_silent)
    
    log.info("Save parent - child: %s", out_nsa_diff_name)
    hdu_child = fits.open(nsa_image_name)
    hdu_parent = fits.open(nsa_parent_name)
    fits.writeto(out_nsa_diff_name, data=hdu_parent[child_ext*2].data - hdu_child[child_ext].data,
                    header=hdu_child[2].header, overwrite=True)
    log.info("Reprojecting diff: %s", out_nsa_diff_re_name)
    montage.reproject(out_nsa_diff_name, out_nsa_diff_re_name, header=out_image_hdr, exact_size=True,
                        silent_cleanup=montage_silent)


    log.info("WCS reproject pimage: %s", out_nsa_pimage_re_name)
    img = fits.open(out_image_name, mode='update')
    wimg = wcs.WCS(img[0].header)
    imnaxis1, imnaxis2 = wimg.naxis1, wimg.naxis2
Example #15
def reproject_snrs(snrs, clobber):
    padding = 4.0
    #    colors = ["072-080MHz", "080-088MHz", "088-095MHz", "095-103MHz", "103-111MHz", "111-118MHz", "118-126MHz", "126-134MHz", "139-147MHz", "147-154MHz", "154-162MHz", "162-170MHz", "170-177MHz", "177-185MHz", "185-193MHz", "193-200MHz", "200-208MHz", "208-216MHz", "216-223MHz", "223-231MHz"]
    colors = [
        "white", "red", "green", "blue", "072-080MHz", "080-088MHz",
        "088-095MHz", "095-103MHz", "103-111MHz", "111-118MHz", "118-126MHz",
        "126-134MHz", "139-147MHz", "147-154MHz", "154-162MHz", "162-170MHz",
        "170-177MHz", "177-185MHz", "185-193MHz", "193-200MHz", "200-208MHz",
        "208-216MHz", "216-223MHz", "223-231MHz"
    ]
    fitsdir = "/home/tash/data/MWA/GLEAM/GP/"

    for color in colors:
        print "Reprojecting " + color + " image"
        if not os.path.exists(color):
            os.makedirs(color)
        if color != "white":
            if not os.path.exists(color + "/rpj"):
                os.makedirs(color + "/rpj")

#        fitsfile = fitsdir+color+"_MOL.fits"
#
#        hdu = fits.open(fitsfile)
#        w = wcs.WCS(hdu[0].header)
#        try:
#            pix2deg = hdu[0].header["CDELT2"]
#        except KeyError:
#            pix2deg = hdu[0].header["CD2_2"]

# Using the astropy cut-out method

        for snr in snrs:
            print "Reprojecting " + snr.name
            # No point doing the ones not in my search region
            l = snr.loc.galactic.l.value
            if (((l > 180) and (l < 240)) or (l > 300) or (l < 60)):
                name = snr.name + ".fits"
                if clobber or not os.path.exists(color + "/" +
                                                 name) or not os.path.exists(
                                                     color + "/rpj/" + name):
                    # Week4 for GC SNR; Week2 for anticentre SNR
                    if ((l > 180) and (l < 240)):
                        psf = fits.open(fitsdir + "Week2/Week2_" + color +
                                        "_lownoise_comp_psf.fits")
                        hdu = fits.open(fitsdir + "Week2/Week2_" + color +
                                        "_lownoise_ddmod_rescaled.fits")
# HACK
#                        orig_hdu = fits.open(fitsdir+"Week2_"+color+"_lownoise_ddmod_rescaled.fits")
#                        hdu = fits.open("/home/tash/data/MWA/GLEAM/allmosaics/SNR_G189.6+3.3/"+color+"/"+snr.name+".fits")
                    else:
                        psf = fits.open(fitsdir + "Week4/Week4_" + color +
                                        "_lownoise_comp_psf.fits")
                        hdu = fits.open(fitsdir + "Week4/Week4_" + color +
                                        "_lownoise_ddmod_rescaled.fits")
                    try:
                        pix2deg = hdu[0].header["CDELT2"]
                    except KeyError:
                        pix2deg = hdu[0].header["CD2_2"]
                    w = wcs.WCS(hdu[0].header)
                    # Set a minimum cutout size; otherwise I can't properly measure the remnant against the background
                    if snr.min * 60 > 20:
                        framesize = u.Quantity(2 * padding * snr.maj, u.deg)
                    else:
                        framesize = u.Quantity(1, u.deg)
# Set a maximum cutout size to avoid getting weird results for some snr
                    if framesize.value > 4.0:
                        framesize = u.Quantity(4, u.deg)

                    print(framesize)
                    cutout = Cutout2D(hdu[0].data,
                                      snr.loc.fk5,
                                      framesize,
                                      wcs=w)
                    # Read these from the correct PSF image and then put them in the cutout
                    wpsf = wcs.WCS(psf[0].header)
                    cp = np.squeeze(
                        wpsf.wcs_world2pix(
                            [[snr.loc.fk5.ra.value, snr.loc.fk5.dec.value, 1]],
                            0))
                    xp, yp = int(cp[0]), int(cp[1])
                    bmaj = psf[0].data[0, yp, xp]
                    bmin = psf[0].data[1, yp, xp]
                    bpa = psf[0].data[2, yp, xp]
                    #    beamvolume = (1.1331 * bmaj * bmin) # gaussian beam conversion
                    header_new = cutout.wcs.to_header()
                    # Edit the header so that the CD values are copied from the PC values -- makes it DS9-readable
                    header_new["CD1_1"] = header_new["PC1_1"]
                    header_new["CD2_2"] = header_new["PC2_2"]
                    header_new["BMAJ"] = bmaj
                    header_new["BMIN"] = bmin
                    header_new["BPA"] = bpa
                    #HACK
                    header_new["FREQ"] = hdu[0].header["FREQ"]
                    #                    header_new["FREQ"] = orig_hdu[0].header["FREQ"]
                    new = fits.PrimaryHDU(cutout.data,
                                          header=header_new)  #create new hdu
                    newlist = fits.HDUList([new])  #create new hdulist
                    try:
                        newlist.writeto(color + "/" + name, overwrite=True)
# For some reason I get:
# NameError: global name 'VerifyError' is not defined
                    except:
                        print "Invalid fits keys for {0} at {1}".format(
                            snr.name, color)

                    print(snr.name, "here")
                    # Reproject the other colours
                    if color != "white":
                        montage.mGetHdr("white/" + name, "temp.txt")
                        oldfile = color + "/" + name
                        newfile = color + "/rpj/" + name
                        try:
                            montage.reproject(oldfile,
                                              newfile,
                                              header="temp.txt",
                                              exact_size=True)
                            oldhdr = fits.open(oldfile)[0].header
                            newhdu = fits.open(newfile)
                            newhdr = newhdu[0].header
                            # This copies ALL the fits keys across, including the beam values! So we need to replace those with the original values
                            for fitskey in ["BMAJ", "BMIN", "BPA", "FREQ"]:
                                newhdr[fitskey] = oldhdr[fitskey]
                            newhdu.writeto(newfile, overwrite=True)


# For some reason I get:
# NameError: global name 'MontageError' is not defined
#                        except MontageError:
                        except:
                            print "Montage reprojection failed for {0} at {1}".format(
                                snr.name, color)
Example #16
            hdr = hdr_in + p[0] + '_PSW_' + l + '.txt'
            outFile = BC_out + filename.strip('_B_C_1.fits') + (
                '_rep_BC_1.fits')
            AFile = A_in + p[0] + '_test_NH2.fits'
            A_rep = A_out + p[0] + '_rep_1.fits'
        elif filename.endswith('2.fits'):
            hdr = hdr_in + p[0] + '_PSW_' + l + '_2.txt'
            outFile = BC_out + filename.strip('_B_C_2.fits') + (
                '_rep_BC_2.fits')
            AFile = A_in + p[0] + '_2_test_NH2.fits'
            A_rep = A_out + p[0] + '_rep_2.fits'
        else:
            print('!!!!!!!!!!!!!!!!!!!!!!!!!')
            print(filename)
            print('AAAAA PANIC')
            print('!!!!!!!!!!!!!!!!!!!!!!!!!')

    #	print filename,'\n',hdr
    #	print AFile,'\n',A_rep
    #	print 10*'*'
        montage.reproject(inFile, outFile, header=hdr, exact_size=True)
        montage.reproject(AFile, A_rep, header=hdr, exact_size=True)

for filename in os.listdir(BC_out):
    if filename.endswith('.fits'):
        check(BC_out, filename)

for filename in os.listdir(A_out):
    if filename.endswith('.fits'):
        check(A_out, filename)
Example #17
def make_rgb_cube(files, output, north=False, system=None, equinox=None):
    '''
    Make an RGB data cube from a list of three FITS images.

    This method can read in three FITS files with different
    projections/sizes/resolutions and uses Montage to reproject
    them all to the same projection.

    Two files are produced by this function. The first is a three-dimensional
    FITS cube with a filename given by `output`, where the third dimension
    contains the different channels. The second is a two-dimensional FITS
    image with a filename given by `output` with a `_2d` suffix. This file
    contains the mean of the different channels, and is required as input to
    FITSFigure if show_rgb is subsequently used to show a color image
    generated from the FITS cube (to provide the correct WCS information to
    FITSFigure).

    Parameters
    ----------

    files : tuple or list
       A list of the filenames of the three FITS files to reproject.
       The order is red, green, blue.

    output : str
       The filename of the output RGB FITS cube.

    north : bool, optional
       By default, the FITS header generated by Montage represents the
       best fit to the images, often resulting in a slight rotation. If
       you want north to be straight up in your final mosaic, you should
       use this option.

    system : str, optional
       Specifies the system for the header (default is EQUJ).
       Possible values are: EQUJ EQUB ECLJ ECLB GAL SGAL

    equinox : str, optional
       If a coordinate system is specified, the equinox can also be given
       in the form YYYY. Default is J2000.
    '''

    # Check whether the Python montage module is installed. The Python module
    # checks itself whether the Montage command-line tools are available, and
    # if they are not then importing the Python module will fail.
    try:
        import montage_wrapper as montage
    except ImportError:
        raise Exception("Both the Montage command-line tools and the"
                        " montage-wrapper Python module are required"
                        " for this function")

    # Check that input files exist
    for f in files:
        if not os.path.exists(f):
            raise Exception("File does not exist : " + f)

    # Create work directory
    work_dir = tempfile.mkdtemp()

    raw_dir = '%s/raw' % work_dir
    final_dir = '%s/final' % work_dir

    images_raw_tbl = '%s/images_raw.tbl' % work_dir
    header_hdr = '%s/header.hdr' % work_dir

    # Create raw and final directory in work directory
    os.mkdir(raw_dir)
    os.mkdir(final_dir)

    # Create symbolic links to input files
    for i, f in enumerate(files):
        os.symlink(os.path.abspath(f), '%s/image_%i.fits' % (raw_dir, i))

    # List files and create optimal header
    montage.mImgtbl(raw_dir, images_raw_tbl, corners=True)
    montage.mMakeHdr(images_raw_tbl,
                     header_hdr,
                     north_aligned=north,
                     system=system,
                     equinox=equinox)

    # Read header in with astropy.io.fits
    header = fits.Header.fromtextfile(header_hdr)

    # Find image dimensions
    nx = int(header['NAXIS1'])
    ny = int(header['NAXIS2'])

    # Generate empty datacube
    image_cube = np.zeros((len(files), ny, nx), dtype=np.float32)

    # Loop through files
    for i in range(len(files)):

        # Reproject channel to optimal header
        montage.reproject('%s/image_%i.fits' % (raw_dir, i),
                          '%s/image_%i.fits' % (final_dir, i),
                          header=header_hdr,
                          exact_size=True,
                          bitpix=-32)

        # Read in and add to datacube
        image_cube[i, :, :] = fits.getdata('%s/image_%i.fits' % (final_dir, i))

    # Write out final cube
    fits.writeto(output, image_cube, header, overwrite=True)

    # Write out collapsed version of cube
    fits.writeto(output.replace('.fits', '_2d.fits'),
                 np.mean(image_cube, axis=0), header, overwrite=True)

    # Remove work directory
    shutil.rmtree(work_dir)
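# Usage sketch (illustrative filenames): build a north-aligned RGB cube from three bands.
#   make_rgb_cube(['m101_red.fits', 'm101_green.fits', 'm101_blue.fits'], 'm101_rgb.fits', north=True)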
Example #18
from astropy.io import fits
from os import listdir
import reproject 
import montage_wrapper as montage
names_i = listdir('./data_corr/i')
names_v = listdir('./data_corr/v')

base_i = names_i[0]
base_v = names_v[0]

ins_i = ['./data_corr/i/'+i for i in names_i]
ins_v = ['./data_corr/v/'+i for i in names_v]

outs_i = ['./data_align/i/'+i for i in names_i]
outs_v = ['./data_align/v/'+i for i in names_v]

montage.reproject(ins_i,outs_i,header=0)
Example #19
def register_reproject(inputImages, outputImages, refImage, processDir, headerName="header.hdr"):
	cmds.mGetHdr(refImage,headerName)
	mw.reproject(inputImages,outputImages,header=headerName,north_aligned=True,system='EQUJ',exact_size=True,common=True,silent_cleanup=True)
Example #20
def rep():
    for filename in os.listdir(dir_in):
        #	for filename in os.listdir(hdrFiles):
        if filename.endswith('.fits'):
            #		if filename.endswith('.txt'):

            if filename.endswith('NH2.fits'):
                continue  #You already did these
            if filename.endswith('BETA.fits'):  #Not necessary
                continue

            inFile = dir_in + filename
            p = filename.split('_')
            if p[1] == 'rep':
                continue
            l = 'L2.0'
            for i in range(0, len(L25list)):
                now = L25list[i]
                if p[0] == now:
                    l = 'L2.5'
                    break

            field = p[0]
            if p[1] == 'test':
                num = '1'
                typ = p[2]
                hdr = hdrFiles + field + '_PMW_' + l + '.txt'
                if typ == 'tau.fits':
                    end = 't' + typ.strip('.fits') + '_rep_' + num + '.fits'
                else:
                    end = typ.strip('.fits') + '_rep_' + num + '.fits'
            else:
                num = p[1]
                hdr = hdrFiles + p[0] + '_PMW_' + l + '_' + num + '.txt'
                typ = p[3]
                if typ == 'tau.fits':
                    end = 't' + typ.strip('.fits') + '_rep_' + num + '.fits'
                else:
                    end = typ.strip('.fits') + '_rep_' + num + '.fits'

            outPut = conv_out + field + '_' + end
            outNAME = field + '_' + end

            print(filename, '\n', outNAME, '\n', hdr)
            #	print p[0],'	',l

            #	if filename.endswith('_2_test_NH2.fits'):
            #		hdr = hdrFiles+p[0]+'_PMW_'+l+'_2.txt'
            #		outPut = conv_out+filename.strip('_2_test_NH2.fits')+'_rep_2.fits'
            #	elif filename.endswith('_3_test_NH2.fits'):
            #		hdr = hdrFiles+p[0]+'_PMW_'+l+'_3.txt'
            #		outPut = conv_out+filename.strip('_3_test_NH2.fits.fits')+'_rep_3.fits'
            #	elif filename.endswith('NH2.fits'):
            #		hdr = hdrFiles+p[0]+'_PMW_'+l+'.txt'
            #		outPut = conv_out+filename.strip('_test_NH2.fits')+'_rep_1.fits'
            #	else:
            #		print filename
            #		print 'AAAA EVERYBODY PANIC THERE IS AN ERROR IN THE CODE'
            #	print filename, '	',hdr
            print(5 * '*')
            montage.reproject(inFile, outPut, header=hdr, exact_size=True)
            print(10 * '*')
Example #21
        G[0].data[-3:, :] = 0.0
        G[0].data[:, 0:3] = 0.0
        G[0].data[:, -3:] = 0.0
        if (1):  # rotate the input image
            rot = 30 * DEGREE_TO_RADIAN
            G[0].header['CD1_1'] = G[0].header['CDELT1'] * cos(rot)
            G[0].header['CD1_2'] = -G[0].header['CDELT2'] * sin(rot)
            G[0].header['CD2_1'] = G[0].header['CDELT1'] * sin(rot)
            G[0].header['CD2_2'] = G[0].header['CDELT2'] * cos(rot)
        G.writeto('g.fits', overwrite=True)

        # Run montage.reproject
        A[0].header.totextfile('ref.header', overwrite=True)
        montage.reproject('g.fits',
                          'A.fits',
                          'ref.header',
                          exact_size=True,
                          factor=factor)
        A = pyfits.open('A.fits')

        # Run the OpenCL routine
        Reproject(G, B, GPU=GPU, cstep=cstep, shrink=1.0)

        # ignore the borders .... note that pixel values may be very small for the last pixels
        #  on the edges of the area covered by the input data
        MASK = ones((N, M), float32)
        MASK[nonzero((A[0].data < 0.5)
                     | (~isfinite(A[0].data)))] = 0.0  # all map data >=1.0
        MASK[0:3, :] = 0.0
        MASK[-3:, :] = 0.0
        MASK[:, 0:3] = 0.0
Example #22
from astropy.io import fits
from os import listdir
import reproject
import montage_wrapper as montage
names_i = listdir('./data_corr/i')
names_v = listdir('./data_corr/v')

base_i = names_i[0]
base_v = names_v[0]

ins_i = ['./data_corr/i/' + i for i in names_i]
ins_v = ['./data_corr/v/' + i for i in names_v]

outs_i = ['./data_align/i/' + i for i in names_i]
outs_v = ['./data_align/v/' + i for i in names_v]

montage.reproject(ins_i, outs_i, header=0)
Example #23
def freproj2D_EQ_GAL(filedir_in,filedir_out):

    '''
    Reprojects a two-dimensional FITS image from equatorial to Galactic coordinates using Montage.

    Input
    filedir_in   : input file in equatorial coordinates
    filedir_out  : output file in Galactic coordinates

    Output
    saves the reprojected FITS image to the input path filedir_out
    '''

    # extract data and headers
    data_EQ,header_EQ                   = fits.getdata(filedir_in,header=True)
    w_EQ                                = wcs.WCS(fits.open(filedir_in)[0].header)
    header_EQ_NAXIS1,header_EQ_NAXIS2   = header_EQ["NAXIS1"],header_EQ["NAXIS2"]

    # change WCS from equatorial to Galactic
    header_GAL_CTYPE1,header_GAL_CTYPE2 = ("GLON-TAN","GLAT-TAN")
    header_GAL_CUNIT1,header_GAL_CUNIT2 = ("deg","deg")
    header_GAL_CROTA1,header_GAL_CROTA2 = (0,0)

    ############################## make Galactic footprint larger ##############################
    #header_GAL_NAXIS1,header_GAL_NAXIS2 = (6000,6000)                                       # N1
    #header_GAL_NAXIS1,header_GAL_NAXIS2 = (3000,6500)                                       # N2
    #header_GAL_NAXIS1,header_GAL_NAXIS2 = (4000,7500)                                       # N3
    #header_GAL_NAXIS1,header_GAL_NAXIS2 = (6000,5500)                                       # N4
    #header_GAL_NAXIS1,header_GAL_NAXIS2 = (6000,6500)                                       # S1
    #header_GAL_NAXIS1,header_GAL_NAXIS2 = (8000,4000)                                       # S2
    #header_GAL_NAXIS1,header_GAL_NAXIS2 = (6000,7000)                                       # S3
    #header_GAL_NAXIS1,header_GAL_NAXIS2 = (8000,4000)                                       # S4
    ############################################################################################

    header_GAL_CRPIX1,header_GAL_CRPIX2 = header_GAL_NAXIS1/2.,header_GAL_NAXIS2/2.
    crpix1_GAL,crpix2_GAL               = (header_GAL_NAXIS1*0.5,header_GAL_NAXIS2*0.5)

    crpix1_EQ,crpix2_EQ                 = header_EQ_NAXIS1/2.,header_EQ_NAXIS2/2.
    crpix1_crpix2_radec                 = w_EQ.all_pix2world(crpix1_EQ,crpix2_EQ,0)
    crpix1_ra,crpix2_dec                = float(crpix1_crpix2_radec[0]),float(crpix1_crpix2_radec[1])

    # transform center pixel values from (ra,dec) to (l,b)
    coords_EQ                           = SkyCoord(ra=crpix1_ra*u.degree, dec=crpix2_dec*u.degree, frame="fk5")
    header_GAL_CRVAL1,header_GAL_CRVAL2 = (coords_EQ.galactic.l.deg,coords_EQ.galactic.b.deg)

    header_GAL_CDELT1     = header_EQ["CDELT1"]
    header_GAL_CDELT2     = header_EQ["CDELT2"]

    # write GAL header
    data_GAL              = np.zeros(shape=(header_GAL_NAXIS2,header_GAL_NAXIS1))
    header_GAL            = fits.PrimaryHDU(data=data_GAL).header
    header_GAL["NAXIS"]   = 2
    header_GAL["NAXIS1"]  = header_GAL_NAXIS1
    header_GAL["NAXIS2"]  = header_GAL_NAXIS2
    # NAXIS1
    header_GAL["CTYPE1"]  = header_GAL_CTYPE1
    header_GAL["CRPIX1"]  = header_GAL_CRPIX1
    header_GAL["CRVAL1"]  = header_GAL_CRVAL1
    header_GAL["CDELT1"]  = header_GAL_CDELT1
    header_GAL["CROTA1"]  = header_GAL_CROTA1
    # NAXIS2
    header_GAL["CTYPE2"]  = header_GAL_CTYPE2
    header_GAL["CRPIX2"]  = header_GAL_CRPIX2
    header_GAL["CRVAL2"]  = header_GAL_CRVAL2
    header_GAL["CDELT2"]  = header_GAL_CDELT2
    header_GAL["CROTA2"]  = header_GAL_CROTA2
    # other
    header_GAL["EQUINOX"] = 2000.
    header_GAL["CUNIT1"]  = header_GAL_CUNIT1
    header_GAL["CUNIT2"]  = header_GAL_CUNIT2

    # perform reprojection with Montage
    header_file  = "/Users/campbell/Documents/PhD/data/GALFACTS/N1/GAL/header_GAL.fits"
    mheader_file = "/Users/campbell/Documents/PhD/data/GALFACTS/N1/GAL/mheader_GAL.txt"
    fits.writeto(header_file,data_GAL,header_GAL,overwrite=True)
    montage.mGetHdr(header_file,mheader_file)
    os.remove(header_file)
    montage.reproject(filedir_in,filedir_out,header=mheader_file)
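# Usage sketch (illustrative paths). Note that one of the commented-out
# header_GAL_NAXIS1/header_GAL_NAXIS2 pairs above must be enabled before calling,
# since the Galactic footprint size is otherwise undefined:
#   freproj2D_EQ_GAL('N1_equatorial.fits', 'N1_galactic.fits')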
Example #24
def Run(ra,
        dec,
        width,
        name=None,
        out_dir=None,
        temp_dir=None,
        replace=False,
        flux=True,
        thumbnails=False,
        gzip=True,
        montage_path=None,
        swarp_path=None):
    """
    Function to generate standardised cutouts of Herschel observations.

    Arguments
        ra: {float, sequence of float}
                A sequence of right ascension values, in decimal degrees, of the targets to be processed. Alternatively,
                if you're only interested in one target, a single RA value can be given here.
        dec: {float, sequence of float}
                A sequence of declination values, in decimal degrees, of the targets to be processed. Alternatively, if
                you're only interested in one target, a single Dec value can be given here.
        width: {float, sequence of float}
                A sequence giving the desired width of the cutout square for each target, in decimal degrees.
                Alternatively, if you're only interested in one target, a single width value can be given here.

    Keyword arguments
        name: {str, sequence of str}, optional
                A sequence giving the name of each target; if you're only interested in one target, a
                single name can be given here. If not provided, a name is constructed automatically from the target
                coordinates, according to the IAU catalogue convention.
        out_dir: str, optional
                A string giving the path to the directory where the output FITS files will be placed. If not provided,
                files will simply be written to the current working directory.
        temp_dir: str, optional
                A string giving the path to be used as a temporary working directory by Herschel_Button. If not provided,
                a temporary directory will be created inside the output directory.
        replace: bool, optional
                If False, Herschel_Button will search the output directory for any pre-existing output FITS files from
                previous runs of the function, and will not bother re-creating these maps (making it easy to resume
                processing a large number of targets after an interruption). If True, Herschel_Button will produce maps for
                all input targets, regardless of whether maps for these targets already exist in the output directory.
        flux: bool, optional
                If True, output maps will be in flux density units of Jy/pix. If False, output maps will be in surface
                brightness units of MJy/sr.
        thumbnails: bool, optional
                If True, JPG thumbnail images of the generated maps will also be produced and placed in out_dir.
        montage_path: str, optional
                Path to directory that contains the Montage commands (mProject, etc); useful if this directory is not in $PATH
        swarp_path: str: optional
                Path to directory that contains the SWarp command; useful if this directory is not in $PATH
    """

    # Handle Montage and SWarp paths, if kwargs provided
    if montage_path != None:
        os.environ['PATH'] += ':' + montage_path
    if swarp_path != None:
        os.environ['PATH'] += ':' + swarp_path
    import montage_wrapper

    # Make sure input values are in list format, and sort out variable names for rest of function
    if not hasattr(ra, '__iter__'):
        ra = [ra]
    ra_list = np.array(ra)
    del (ra)
    if not hasattr(dec, '__iter__'):
        dec = [dec]
    dec_list = np.array(dec)
    del (dec)

    # Check that ra and declists all have same lengths
    if np.std([float(len(ra_list)), float(len(dec_list))]) > 0:
        raise Exception(
            'Input sequences of ra and dec all need to be the same length')

    # If single width provided, but multiple coordinates, create width array of same value repeated required number of times
    if not hasattr(width, '__iter__'):
        if len(ra_list) > 1:
            width_list = [width] * len(ra_list)

        # Else, if only one RA and one width given, stick width value into list, too
        elif len(ra_list) == 1:
            width_list = [width]
    width_list = np.array(width_list)
    del (width)

    # If no names provided, use coordinates to generate standardised names as per IAU catalogue convention
    if not hasattr(name, '__iter__'):
        if (name == None):
            name = []
            for i in range(len(ra_list)):
                coord = astropy.coordinates.SkyCoord(
                    str(ra_list[i]) + 'd ' + str(dec_list[i]) + 'd')
                name_coord = re.sub('[hmsdms. ]', ' ',
                                    coord.to_string('hmsdms'))
                name_coord = name_coord.split(' ')
                name_coord[3] = name_coord[3][:min(2, len(name_coord[3]))]
                name_coord[8] = name_coord[8][:min(2, len(name_coord[8]))]
                name_coord = 'J' + ''.join(name_coord)
                name.append(name_coord)

        # If only one name provided, stick it into an array
        name_list = np.array([name])

    # If a sequence of names is provided, make sure it's in array format (and stop single names becoming zero-dim array)
    else:
        name_list = np.array(copy.deepcopy(name))
        if name_list.shape == ():
            name_list = np.array([name_list.tolist()])
    del (name)

    # Do final check that all input sequences are the right length
    if np.std([
            float(ra_list.size),
            float(dec_list.size),
            float(width_list.size),
            float(name_list.size)
    ]) > 0:
        raise Exception(
            'Input sequences of ra, dec, width, and name all need to be the same length'
        )

    # If no output directory specified, set to current working directory
    if out_dir == None:
        out_dir = os.getcwd()

    # Check that output directory exists
    if not os.path.exists(out_dir):
        raise Exception('Specified output directory does not exist')

    # Create temporary directory
    if temp_dir == None:
        temp_dir = os.path.join(out_dir, 'Temp')

    # Check that temp directory exists, if it does, warn user that contents may be overwritten
    if os.path.exists(temp_dir):
        print(
            'Specified temporary directory already exists; note that any existing contents may be overwritten'
        )

    # Else, if temp directory doesn't already exist, create it
    else:
        os.mkdir(temp_dir)

    # State band information
    bands_dict = {
        '70': {
            'band': '70',
            'instrument': 'PACS',
            'wavelength': '70um',
            'filter': 'PHOTBLUE',
            'pix_size': 2,
            'hdr_inst_card_kwrd': 'CAMERA',
            'hdr_inst_card_entry': 'PHOTBLUE',
            'hdr_blueband_kwrd': 'blue1',
            'hdr_err_ext_name': 'stDev'
        },
        '100': {
            'band': '100',
            'instrument': 'PACS',
            'wavelength': '100um',
            'filter': 'PHOTGREEN',
            'pix_size': 3,
            'hdr_inst_card_kwrd': 'CAMERA',
            'hdr_inst_card_entry': 'PHOTBLUE',
            'hdr_blueband_kwrd': 'blue2',
            'hdr_err_ext_name': 'stDev'
        },
        '160': {
            'band': '160',
            'instrument': 'PACS',
            'wavelength': '160um',
            'filter': 'PHOTRED',
            'pix_size': 4,
            'hdr_inst_card_kwrd': 'CAMERA',
            'hdr_inst_card_entry': 'PHOTRED',
            'hdr_blueband_kwrd': False,
            'hdr_err_ext_name': 'stDev'
        },
        '250': {
            'band': '250',
            'instrument': 'SPIRE',
            'wavelength': '250um',
            'filter': 'PSW',
            'pix_size': 6,
            'hdr_inst_card_kwrd': 'DETECTOR',
            'hdr_inst_card_entry': 'PSW',
            'hdr_blueband_kwrd': False,
            'hdr_err_ext_name': 'error'
        },
        '350': {
            'band': '350',
            'instrument': 'SPIRE',
            'wavelength': '350um',
            'filter': 'PMW',
            'pix_size': 8,
            'hdr_inst_card_kwrd': 'DETECTOR',
            'hdr_inst_card_entry': 'PMW',
            'hdr_blueband_kwrd': False,
            'hdr_err_ext_name': 'error'
        },
        '500': {
            'band': '500',
            'instrument': 'SPIRE',
            'wavelength': '500um',
            'filter': 'PLW',
            'pix_size': 12,
            'hdr_inst_card_kwrd': 'DETECTOR',
            'hdr_inst_card_entry': 'PLW',
            'hdr_blueband_kwrd': False,
            'hdr_err_ext_name': 'error'
        }
    }

    # State map mode prefixes we care about
    req_obs_modes = [
        'SpirePhotoLargeScan', 'SpirePhotoSmallScan', 'PacsPhoto',
        'SpirePacsParallel'
    ]

    # Record time taken
    time_list = [time.time()]

    # Loop over each target
    for i in np.random.permutation(range(name_list.shape[0])):
        name = name_list[i].replace(' ', '_')
        ra = ra_list[i]
        dec = dec_list[i]
        width = width_list[i]

        # If we're not repeating already-processed targets, check if this target has already been completed
        if not replace:
            bands_done = 0
            for band in bands_dict.keys():
                if os.path.exists(
                        os.path.join(
                            out_dir, name + '_Herschel_' +
                            bands_dict[band]['wavelength'] + '.fits.gz')):
                    bands_done += 1

                # Also check for null files, which indicate that data is not available for a given band
                elif os.path.exists(
                        os.path.join(
                            out_dir, '.' + name + '_Herschel_' +
                            bands_dict[band]['wavelength'] + '.null')):
                    bands_done += 1

            # If this source has already been processed in all bands, skip it
            if bands_done == len(bands_dict.keys()):
                print(
                    'Herschel data for ' + name +
                    ' already processed (if available); continuing to next target'
                )
                time_list.append(time.time())
                continue
        print('Processing Herschel data for target ' + name)

        # Create field processing directories (deleting any prior)
        gal_dir = os.path.join(temp_dir, str(name)) + '/'
        if os.path.exists(gal_dir):
            ChrisFuncs.RemoveCrawl(gal_dir)
        if not os.path.exists(os.path.join(gal_dir, 'Raw')):
            os.makedirs(os.path.join(gal_dir, 'Raw'))
        os.chdir(os.path.join(gal_dir, 'Raw'))

        # Create band-specific directories
        for band in bands_dict.keys():
            if not os.path.exists(os.path.join(gal_dir, 'Raw', band)):
                os.makedirs(os.path.join(gal_dir, 'Raw', band))

        # Perform query, with error handling
        print('Querying HSA')
        query_success = False
        query_fail_count = 0
        while query_success == False:
            if query_fail_count >= 10:
                raise Exception(
                    'HSA query failing consistently; maybe HSA is down, or something else has gone wrong'
                )
            try:
                query_url = 'http://archives.esac.esa.int/hsa/aio/jsp/siap.jsp?POS=' + str(
                    ra) + ',' + str(dec) + '&SIZE=' + str(
                        width) + '&INTERSECT=OVERLAPS'
                query_filename = os.path.join(temp_dir, name,
                                              str(name) + '.vot')
                if os.path.exists(query_filename):
                    os.remove(query_filename)
                urllib.request.urlretrieve(query_url, query_filename)
                query_success = True
            except:
                print('HSA query failed; reattempting')
                query_fail_count += 1
                time.sleep(60)
        if not os.path.exists(query_filename):
            query_success = False

        # Read query result VOTable
        query_output = astropy.io.votable.parse_single_table(query_filename)
        query_table = query_output.array

        # Check if query returned any results; if not, create null files for all bands, and continue to next target
        if len(query_table) == 0:
            print('No Herschel coverage for ' + name +
                  '; continuing to next target')
            for band in bands_dict.keys():
                os.system('touch ' +
                          os.path.join(temp_dir, '.' + name + '_Herschel_' +
                                       band + '.null'))
            continue

        # Record which URLs correspond to data in the desired observing modes (handling the case where the query returns only a single entry)
        hsa_urls = []
        if query_table.size == 1:
            if query_table['OBS_MODE'] in req_obs_modes:
                hsa_urls.append(query_table['DATA_ACCESS'])
        else:
            for j in range(0, query_table.size):
                if query_table['OBS_MODE'][j].decode('utf-8') in req_obs_modes:
                    hsa_urls.append(
                        query_table['DATA_LINK'][j].decode('utf-8'))

        # Download and extract files (a multiprocessing pool is set up, but the parallel call is commented out, so downloads currently run serially)
        os.chdir(os.path.join(gal_dir, 'Raw'))
        dl_pool = mp.Pool(processes=20)
        for j in range(0, len(hsa_urls)):
            data_url = hsa_urls[j]
            data_filename = os.path.join(gal_dir, 'Raw',
                                         name + '_' + str(j) + '_HSA.fits')
            #dl_pool.apply_async( Herschel_Download, args=(data_url, data_filename,) )
            Herschel_Download(data_url, data_filename)
        dl_pool.close()
        dl_pool.join()

        # Loop over bands and downloaded files (skipping folders), sorting files into separate per-band directories
        for band in bands_dict.keys():
            prev_hdr_filenames = []
            for listfile in os.listdir(os.path.join(gal_dir, 'Raw')):
                if '.tmp' in listfile:
                    os.remove(os.path.join(gal_dir, 'Raw', listfile))
                    continue
                if '.fits' not in listfile:
                    continue

                # Determine what band this is
                try:
                    list_hdr = astropy.io.fits.getheader(os.path.join(
                        gal_dir, 'Raw', listfile),
                                                         ext=0)
                except:
                    pdb.set_trace()
                if list_hdr['INSTRUME'] == bands_dict[band]['instrument']:
                    if list_hdr[bands_dict[band]
                                ['hdr_inst_card_kwrd']] == bands_dict[band][
                                    'hdr_inst_card_entry']:

                        # Handle the fact that 70um and 100um are hard to tell apart in headers
                        if bands_dict[band]['hdr_blueband_kwrd'] != False:
                            if bands_dict[band][
                                    'hdr_blueband_kwrd'] not in list_hdr[
                                        'BLUEBAND']:
                                continue

                        # Skip dud PACS calibration(?) maps
                        if list_hdr['OBSERVER'][-4:].lower() == 'pacs':
                            os.remove(os.path.join(gal_dir, 'Raw', listfile))
                            continue

                        # Check that we haven't already grabbed a duplicate of this map; if not, move it to band-specific directory
                        if 'FILENAME' in list_hdr.keys():
                            if list_hdr['FILENAME'] in prev_hdr_filenames:
                                os.remove(
                                    os.path.join(gal_dir, 'Raw', listfile))
                                continue
                            else:
                                prev_hdr_filenames.append(list_hdr['FILENAME'])
                        shutil.copy2(os.path.join(gal_dir, 'Raw', listfile),
                                     os.path.join(gal_dir, 'Raw', band))
                        os.remove(os.path.join(gal_dir, 'Raw', listfile))

        # Loop over PACS bands and files to delete dud PACS calibration(?) maps
        for band in bands_dict.keys():
            if bands_dict[band]['instrument'] == 'PACS':
                for listfile in os.listdir(os.path.join(gal_dir, 'Raw', band)):
                    if astropy.io.fits.getheader(
                            os.path.join(gal_dir, 'Raw', band, listfile),
                            ext=0)['OBSERVER'][-4:].lower() == 'pacs':
                        os.remove(os.path.join(gal_dir, 'Raw', band, listfile))

        # Loop over each band's files, to save image map to separate FITS files
        for band in bands_dict.keys():
            for listfile in os.listdir(os.path.join(gal_dir, 'Raw', band)):
                print('Extracting components from ' + band + ' um map ' +
                      listfile)
                if '.tmp' in listfile:
                    pdb.set_trace()

                # Check map has error and coverage data; open if so, skip forward if not
                with astropy.io.fits.open(
                        os.path.join(gal_dir, 'Raw', band,
                                     listfile)) as listfile_hdulist:
                    if len(listfile_hdulist) < 4:
                        print('Some FITS extensions missing from ' + band +
                              ' um map ' + listfile + '; skipping')
                        continue
                img_map, img_header = astropy.io.fits.getdata(os.path.join(
                    gal_dir, 'Raw', band, listfile),
                                                              header=True,
                                                              extname='image')

                # Record which image pixels are zeros, and convert to NaNs
                where_zero = np.where(img_map == 0)
                img_map[where_zero] = np.NaN
                astropy.io.fits.writeto(os.path.join(
                    gal_dir, 'Raw', band,
                    listfile.replace('.fits', '_Img.fits')),
                                        img_map,
                                        header=img_header)

                # Now save coverage and error maps to separate files, with zeros similarly converted to NaNs
                cov_map, cov_header = astropy.io.fits.getdata(
                    os.path.join(gal_dir, 'Raw', band, listfile),
                    header=True,
                    extname='coverage')
                cov_map[where_zero] = np.NaN
                astropy.io.fits.writeto(os.path.join(
                    gal_dir, 'Raw', band,
                    listfile.replace('.fits', '_Cov.fits')),
                                        cov_map,
                                        header=cov_header)
                err_map, err_header = astropy.io.fits.getdata(
                    os.path.join(gal_dir, 'Raw', band, listfile),
                    header=True,
                    extname=bands_dict[band]['hdr_err_ext_name'])
                err_map[where_zero] = np.NaN
                astropy.io.fits.writeto(os.path.join(
                    gal_dir, 'Raw', band,
                    listfile.replace('.fits', '_Error.fits')),
                                        err_map,
                                        header=err_header)

        # Loop over each band for coaddition
        for band in bands_dict.keys():
            if not os.path.exists(os.path.join(gal_dir, 'Raw', band)):
                continue
            if len(os.listdir(os.path.join(gal_dir, 'Raw', band))) == 0:
                continue
            print('Commencing processing of ' + name + '_Herschel_' + band)

            # Create processing directories
            os.chdir(os.path.join(gal_dir, 'Raw', band))
            os.mkdir(os.path.join(gal_dir, 'Raw', band, 'Img_Maps'))
            os.mkdir(os.path.join(gal_dir, 'Raw', band, 'Cov_Maps'))
            os.mkdir(os.path.join(gal_dir, 'Raw', band, 'Err_Maps'))
            os.mkdir(os.path.join(gal_dir, 'Raw', band, 'Exp_Maps'))
            os.mkdir(os.path.join(gal_dir, 'Raw', band, 'Wgt_Temp'))
            os.mkdir(os.path.join(gal_dir, 'Raw', band, 'Pff_Temp'))
            os.mkdir(os.path.join(gal_dir, 'Raw', band, 'Backsub_Temp'))
            os.mkdir(os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp'))

            # Create Montage FITS header
            location_string = str(ra) + ' ' + str(dec)
            pix_size = bands_dict[band]['pix_size']
            montage_wrapper.mHdr(location_string,
                                 width,
                                 os.path.join(gal_dir, 'Raw', band,
                                              str(name) + '.hdr'),
                                 pix_size=pix_size)

            # Use Montage wrapper to reproject all fits files to common projection, skipping if none actually overlap
            print('Performing reprojections for ' + name + '_Herschel_' +
                  band + ' maps')
            target_files = [
                target_file for target_file in os.listdir(
                    os.path.join(gal_dir, 'Raw', band))
                if '.fits' in target_file
            ]
            proj_fail = 0
            for target_file in target_files:
                try:
                    montage_wrapper.reproject(
                        os.path.join(
                            os.path.join(gal_dir, 'Raw', band, target_file)),
                        os.path.join(
                            os.path.join(gal_dir, 'Raw', band, target_file)),
                        header=os.path.join(gal_dir, 'Raw', band,
                                            str(name) + '.hdr'),
                        exact_size=True)
                except:
                    os.remove(
                        os.path.join(
                            os.path.join(gal_dir, 'Raw', band, target_file)))
                    proj_fail += 1
            if proj_fail == len(target_files):
                print('No Herschel coverage for ' + name + ' at ' + band)
                os.system('touch ' + os.path.join(
                    temp_dir, '.' + name + '_Herschel_' + band + '.null'))
                continue

            # Move reprojected maps to relevant locations
            for listfile in os.listdir(os.path.join(gal_dir, 'Raw', band)):
                if '_Img.fits' in os.path.join(gal_dir, 'Raw', band, listfile):
                    shutil.move(os.path.join(gal_dir, 'Raw', band, listfile),
                                os.path.join(gal_dir, 'Raw', band, 'Img_Maps'))
                elif '_Cov.fits' in os.path.join(gal_dir, 'Raw', band,
                                                 listfile):
                    shutil.move(os.path.join(gal_dir, 'Raw', band, listfile),
                                os.path.join(gal_dir, 'Raw', band, 'Cov_Maps'))
                elif '_Error.fits' in os.path.join(gal_dir, 'Raw', band,
                                                   listfile):
                    shutil.move(os.path.join(gal_dir, 'Raw', band, listfile),
                                os.path.join(gal_dir, 'Raw', band, 'Err_Maps'))

            # If only one image file, proceed straight to co-adding; otherwise, commence background-matching
            mosaic_count = 0
            for listfile in os.listdir(
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps')):
                if '_Img.fits' in listfile:
                    mosaic_count += 1
            if mosaic_count == 1:
                for listfile in os.listdir(
                        os.path.join(gal_dir, 'Raw', band, 'Img_Maps')):
                    if '.fits' in listfile:
                        shutil.move(
                            os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                         listfile),
                            os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp'))
                mBgExec_uberfail = False
            if mosaic_count > 1:

                # Use Montage wrapper to determine appropriate corrections for background matching
                print('Determining background corrections for ' + name +
                      '_Herschel_' + band + ' maps')
                os.chdir(os.path.join(gal_dir, 'Raw', band, 'Img_Maps'))
                montage_wrapper.mImgtbl(
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps'),
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Metadata_Table.dat'),
                    corners=True)
                montage_wrapper.mOverlaps(
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Metadata_Table.dat'),
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Diffs_Table.dat'))
                montage_wrapper.mDiffExec(
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Diffs_Table.dat'),
                    os.path.join(gal_dir, 'Raw', band,
                                 str(name) + '.hdr'),
                    os.path.join(gal_dir, 'Raw', band, 'Pff_Temp'),
                    no_area=True,
                    proj_dir=os.path.join(gal_dir, 'Raw', band, 'Img_Maps'))
                montage_wrapper.mFitExec(
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Diffs_Table.dat'),
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Fitting_Table.dat'),
                    os.path.join(gal_dir, 'Raw', band, 'Pff_Temp'))
                montage_wrapper.mBgModel(
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Metadata_Table.dat'),
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Fitting_Table.dat'),
                    os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                 band + '_Image_Corrections_Table.dat'),
                    level_only=True,
                    n_iter=16384)

                # Apply background corrections using Montage subprocess, with timeout handling
                print('Applying background corrections to ' + name +
                      '_Herschel_' + band + ' maps')
                mBgExec_fail_count = 0
                mBgExec_success = False
                mBgExec_uberfail = False
                while mBgExec_success == False:

                    # Attempt background-matching
                    mBgExec_sp = subprocess.Popen([
                        'mBgExec', '-n', '-p',
                        os.path.join(gal_dir, 'Raw', band, 'Img_Maps'),
                        os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                     band + '_Image_Metadata_Table.dat'),
                        os.path.join(gal_dir, 'Raw', band, 'Img_Maps',
                                     band + '_Image_Corrections_Table.dat'),
                        os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp')
                    ],
                                                  preexec_fn=os.setsid,
                                                  stdout=subprocess.PIPE)
                    mBgExec_fail = False
                    seconds = 0
                    minutes_max = 45
                    while mBgExec_fail == False:
                        time.sleep(1)
                        mBgExec_stdout = mBgExec_sp.stdout.readline().decode()
                        if mBgExec_sp.poll() == None:
                            seconds += 1
                        if 'Table has no data records' in mBgExec_stdout:
                            mBgExec_fail = True
                            mBgExec_fail_count += 1
                            break
                        if seconds >= (60 * minutes_max):
                            mBgExec_fail = True
                            mBgExec_fail_count += 1
                            break
                        if mBgExec_sp.poll() != None:
                            mBgExec_success = True
                            break

                    # Handle timeouts and other failures
                    if mBgExec_fail_count > 1:
                        print('Background matching with Montage has failed ' +
                              str(mBgExec_fail_count) +
                              ' time(s); reattempting')
                    if mBgExec_fail == True and mBgExec_success == False and mBgExec_fail_count >= 5:
                        mBgExec_uberfail = True
                        print(
                            'Background matching with Montage has failed 5 times; proceeding directly to co-addition'
                        )
                        try:
                            os.killpg(os.getpgid(mBgExec_sp.pid), 15)
                        except:
                            print(
                                'Background matching subprocess appears to have imploded; no task to kill'
                            )
                        break
            if mBgExec_uberfail:
                raise Exception(
                    'Background matching with Montage has failed utterly')
                """for listfile in os.listdir(os.path.join(gal_dir,'Raw',band,'Img_Maps')):
                    if '_HSA_Img.fits' in listfile:
                        shutil.move(listfile, os.path.join(gal_dir,'Raw',band,'SWarp_Temp'))"""

            # Create weight maps, and copy to SWarp directory
            for listfile in os.listdir(
                    os.path.join(gal_dir, 'Raw', band, 'Cov_Maps')):
                if '.fits' in listfile:
                    shutil.copy2(
                        os.path.join(gal_dir, 'Raw', band, 'Cov_Maps',
                                     listfile),
                        os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp'))
                    wgt_image, wgt_header = astropy.io.fits.getdata(
                        os.path.join(gal_dir, 'Raw', band, 'Cov_Maps',
                                     listfile),
                        header=True)
                    wgt_image = wgt_image**0.5
                    astropy.io.fits.writeto(os.path.join(
                        gal_dir, 'Raw', band, 'SWarp_Temp',
                        listfile.replace('_Cov.fits', '_Wgt.fits')),
                                            wgt_image,
                                            header=wgt_header)

            # Sort out daft filename differences between image maps and error maps
            for listfile in os.listdir(
                    os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp')):
                os.rename(
                    os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp', listfile),
                    os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp',
                                 listfile.replace('_Img.fits', '.fits')))

            # Perform least-squares plane fitting to match image levels
            ChrisFuncs.Coadd.LevelFITS(os.path.join(gal_dir, 'Raw', band,
                                                    'SWarp_Temp'),
                                       'Img.fits',
                                       convfile_dir=False)

            # Use SWarp to co-add images weighted by their coverage maps
            print('Co-adding ' + name + '_Herschel_' + band + ' maps')
            os.chdir(os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp'))
            os.system(
                'swarp *HSA.fits -IMAGEOUT_NAME ' + name + '_Herschel_' +
                band +
                '_SWarp.fits -WEIGHT_SUFFIX _Wgt.fits -COMBINE_TYPE WEIGHTED -COMBINE_BUFSIZE 2048 -GAIN_KEYWORD DIESPIZERDIE -RESCALE_WEIGHTS N -SUBTRACT_BACK N -RESAMPLE N -VMEM_MAX 4095 -MEM_MAX 4096 -WEIGHT_TYPE MAP_WEIGHT -NTHREADS 4 -VERBOSE_TYPE QUIET'
            )
            Herschel_SWarp_NaN(name + '_Herschel_' + band + '_SWarp.fits')

            # Check that the final map provides actual coverage of the point in question
            coadd_image, coadd_header = astropy.io.fits.getdata(os.path.join(
                gal_dir, 'Raw', band, 'SWarp_Temp',
                name + '_Herschel_' + band + '_SWarp.fits'),
                                                                header=True)
            coadd_wcs = astropy.wcs.WCS(coadd_header)
            coords_xy = np.round(
                coadd_wcs.all_world2pix(np.array([[ra, dec]]), 0)).astype(int)
            coord_i, coord_j = coords_xy[0, 1], coords_xy[0, 0]
            if np.isnan(
                    np.nanmax(coadd_image[coord_i - 2:coord_i + 2 + 1,
                                          coord_j - 2:coord_j + 2 + 1])):
                print('No Herschel coverage for ' + name + ' at ' + band)
                os.system('touch ' + os.path.join(
                    temp_dir, '.' + name + '_Herschel_' + band + '.null'))
                continue

            # Re-project finalised image map using Montage
            montage_wrapper.reproject(
                os.path.join(gal_dir, 'Raw', band, 'SWarp_Temp',
                             name + '_Herschel_' + band + '_SWarp.fits'),
                os.path.join(gal_dir, name + '_Herschel_' + band + '.fits'),
                header=os.path.join(gal_dir, 'Raw', band,
                                    str(name) + '.hdr'),
                exact_size=True)

            # Compress finalised FITS file
            os.chdir(gal_dir)
            if gzip:
                os.system('gzip ' +
                          os.path.join(gal_dir, name + '_Herschel_' + band +
                                       '.fits'))
            print('Completed processing ' + name + '_Herschel_' + band +
                  ' image map')

            # Turn error maps into exposure time maps
            for listfile in os.listdir(
                    os.path.join(gal_dir, 'Raw', band, 'Err_Maps')):
                if '_Error.fits' in listfile:
                    err_image, err_header = astropy.io.fits.getdata(
                        os.path.join(gal_dir, 'Raw', band, 'Err_Maps',
                                     listfile),
                        header=True)
                    err_image = err_image**-2.0
                    astropy.io.fits.writeto(os.path.join(
                        gal_dir, 'Raw', band, 'Exp_Maps',
                        listfile.replace('_Error.fits', '_Exp.fits')),
                                            err_image,
                                            header=err_header)

            # Add the exposure-time maps together, pixel-by-pixel
            print('Processing ' + name + '_Herschel_' + band +
                  ' uncertainty map')
            target_files = [
                dir_file for dir_file in os.listdir(
                    os.path.join(gal_dir, 'Raw', band, 'Exp_Maps'))
                if '_Exp.fits' in dir_file
            ]
            for i in range(0, len(target_files)):
                exp_image, exp_header = astropy.io.fits.getdata(os.path.join(
                    gal_dir, 'Raw', band, 'Exp_Maps', target_files[i]),
                                                                header=True)
                if i == 0:
                    add_image = np.zeros(
                        [exp_image.shape[0], exp_image.shape[1]])
                    add_header = exp_header.copy()
                exp_good = np.where(np.isnan(exp_image) == False)
                add_image[exp_good] += exp_image[exp_good]
            astropy.io.fits.writeto(os.path.join(
                gal_dir, 'Raw', band, 'Exp_Maps',
                name + '_Herschel_' + band + '_Exp_Add.fits'),
                                    add_image,
                                    header=add_header,
                                    clobber=True)

            # Re-project final exposure map using Montage
            montage_wrapper.reproject(
                os.path.join(gal_dir, 'Raw', band, 'Exp_Maps',
                             name + '_Herschel_' + band + '_Exp_Add.fits'),
                os.path.join(gal_dir, 'Raw', band, 'Exp_Maps',
                             name + '_Herschel_' + band + '_Exp.fits'),
                header=os.path.join(gal_dir, 'Raw', band,
                                    str(name) + '.hdr'),
                exact_size=True)

            # Convert final exposure time map into error map
            err_image, err_header = astropy.io.fits.getdata(os.path.join(
                gal_dir, 'Raw', band, 'Exp_Maps',
                name + '_Herschel_' + band + '_Exp.fits'),
                                                            header=True)
            err_image[np.where(err_image < 0)] = np.NaN
            err_image = err_image**-0.5
            err_image[np.where(err_image == np.inf)] = np.NaN
            astropy.io.fits.writeto(os.path.join(
                gal_dir, name + '_Herschel_' + band + '_Error.fits'),
                                    err_image,
                                    header=err_header,
                                    clobber=True)

            # Compress finalised error map
            os.chdir(out_dir)
            if gzip:
                os.system('gzip ' + os.path.join(
                    gal_dir, name + '_Herschel_' + band + '_Error.fits'))
            print('Completed processing ' + name + '_Herschel_' + band +
                  ' uncertainty map')

        # Generate final standardised maps for each band (a multiprocessing pool is set up, but the parallel call is commented out, so this currently runs serially)
        pool = mp.Pool(processes=9)
        for key in bands_dict.keys():
            band_dict = bands_dict[key]
            #pool.apply_async( Herschel_Generator, args=(name, ra, dec, temp_dir, out_dir, band_dict, flux, thumbnails, gzip=gzip,) )
            Herschel_Generator(name,
                               ra,
                               dec,
                               temp_dir,
                               out_dir,
                               band_dict,
                               flux,
                               thumbnails,
                               gzip=gzip)
        pool.close()
        pool.join()

        # Clean memory, and report a timing estimate (if more than one target is being processed)
        gc.collect()
        time_list.append(time.time())
        time_est = ChrisFuncs.TimeEst(time_list, len(name_list))
        if len(name_list) > 1:
            print(
                'Estimated time until Herschel data completed for all targets: '
                + time_est)

        # Tidy up (best as we can)
        gc.collect()
        try:
            shutil.rmtree(temp_dir)
        except:
            ChrisFuncs.RemoveCrawl(temp_dir)
            print(
                'Unable to fully tidy up temporary directory; probably due to NFS locks on network drive'
            )

    # Report completion
    print('Total time elapsed: ' + str((time.time() - time_list[0]) / 3600.0) +
          ' hours')
    print('All available Herschel imagery acquired for all targets')
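# Illustrative standalone sketch (not part of the example above): the error-map
# handling in this example converts each reprojected error map to an
# exposure-time-like map (err**-2), sums those pixel-by-pixel, and converts the
# sum back to an error map (sum**-0.5), i.e. inverse-variance combination.
# A minimal version of that arithmetic, using fake arrays:

import numpy as np

def combine_error_maps(err_maps):
    """Inverse-variance combination of co-spatial error maps (NaNs ignored)."""
    weights = np.zeros_like(err_maps[0])
    for err in err_maps:
        good = np.isfinite(err) & (err > 0)
        weights[good] += err[good] ** -2.0        # accumulate 1 / sigma^2
    combined = np.full_like(weights, np.nan)
    nonzero = weights > 0
    combined[nonzero] = weights[nonzero] ** -0.5  # convert back to sigma
    return combined

err_a = np.full((3, 3), 2.0)
err_b = np.full((3, 3), 2.0)
print(combine_error_maps([err_a, err_b]))         # ~1.414 everywhere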
Example #25
def main(arguments=None):
    """ main() function, encapsulated in a method to allow for easy invokation.

    This method follows Guido van Rossum's suggestions on how to write Python
    main() functions in order to make them more flexible. By encapsulating the
    main code of the script in a function and making it take an optional
    argument the script can be called not only from other modules, but also
    from the interactive Python prompt.

    Guido van Rossum - Python main() functions:
    http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    Keyword arguments:
    arguments - the list of command line arguments passed to the script.

    """

    if arguments is None:
        arguments = sys.argv[1:]  # ignore argv[0], the script name
    (options, args) = parser.parse_args(args=arguments)

    # Print the help and abort the execution if there are fewer than three
    # positional arguments left, as the user must specify at least two FITS
    # images and the output mosaic into which they are assembled.
    if len(args) < 3:
        parser.print_help()
        return 2  # used for command line syntax errors
    else:
        assert len(args) >= 3
        input_paths = set(args[:-1])
        output_path = args[-1]

    # Refuse to overwrite the output FITS file unless explicitly instructed to
    # do so. Note that, if the --overwrite option is given, we do not need to
    # delete the existing file: it will be silently overwritten when the output
    # of montage.mosaic() is shutil.move()'d to the output path.

    if os.path.exists(output_path):
        if not options.overwrite:
            msg = "%sError. The output file '%s' already exists."
            print msg % (style.prefix, output_path)
            print style.error_exit_message
            return 1

    # Workaround for a bug in montage.mosaic() that raises an error ('mpirun
    # has exited due to process rank [...] without calling "finalize"...') if
    # mpi = True and background_match = True. Until this is fixed, we can only
    # use one core if the --background-match option is given by the user.

    if options.background_match and options.ncores > 1:
        options.ncores = 1
        for msg in (
                "{0}Warning: --background-match is incompatible with --cores > 1.",
                "{0}Setting the --cores option to a value of one.",
                "{0}This is a workaround for a known bug in montage-wrapper:",
                "{0}https://github.com/astropy/montage-wrapper/issues/18"):
            print msg.format(style.prefix)
        print

    # Map each filter to a list of FITSImage objects
    files = fitsimage.InputFITSFiles()

    msg = "%sMaking sure the %d input paths are FITS images..."
    print msg % (style.prefix, len(input_paths))

    util.show_progress(0.0)
    for index, path in enumerate(input_paths):
        # fitsimage.FITSImage.__init__() raises fitsimage.NonStandardFITS if
        # one of the paths is not a standard-conforming FITS file.
        try:
            img = fitsimage.FITSImage(path)

            # If we do not need to know the photometric filter (because the
            # --filter was not given) do not read it from the FITS header.
            # Instead, use None. This means that 'files', a dictionary, will
            # only have a key, None, mapping to all the input FITS images.

            if options.filter:
                pfilter = img.pfilter(options.filterk)
            else:
                pfilter = None

            files[pfilter].append(img)

        except fitsimage.NonStandardFITS:
            print
            msg = "'%s' is not a standard FITS file"
            raise fitsimage.NonStandardFITS(msg % path)

        percentage = (index + 1) / len(input_paths) * 100
        util.show_progress(percentage)
    print  # progress bar doesn't include newline

    # The --filter option allows the user to specify which FITS files, among
    # all those received as input, must be combined: only those images taken
    # in the options.filter photometric filter.
    if options.filter:

        msg = "%s%d different photometric filters were detected:"
        print msg % (style.prefix, len(files.keys()))

        for pfilter, images in sorted(files.iteritems()):
            msg = "%s %s: %d files (%.2f %%)"
            percentage = len(images) / len(files) * 100
            print msg % (style.prefix, pfilter, len(images), percentage)

        msg = "%sIgnoring images not taken in the '%s' photometric filter..."
        print msg % (style.prefix, options.filter),
        sys.stdout.flush()

        discarded = 0
        for pfilter, images in files.items():
            if pfilter != options.filter:
                discarded += len(images)
                del files[pfilter]

        if not files:
            print
            msg = "%sError. No image was taken in the '%s' filter."
            print msg % (style.prefix, options.filter)
            print style.error_exit_message
            return 1

        else:
            print 'done.'
            msg = "%s%d images taken in the '%s' filter, %d were discarded."
            print msg % (style.prefix, len(files), options.filter, discarded)

    # montage.mosaic() silently ignores those FITS images that have no WCS
    # information in their headers, and also raises a rather cryptic exception
    # (mMakeHdr: Invalid table file) if none of them has been astrometrically
    # solved. Instead of ignoring some images without warning or showing a
    # confusing error message that makes it almost impossible to understand
    # what may be failing, use FITSImage.center_wcs() to make sure that all the
    # images have WCS information, raising NoWCSInformationError otherwise.

    for img in files:
        # May raise NoWCSInformationError
        img.center_wcs()

    # montage.mosaic() requires as first argument the directory containing the
    # input FITS images but, in order to maintain the same syntax across all
    # LEMON commands, we receive them as command-line arguments. Thus, create a
    # temporary directory and symlink from it the input images. Hard links are
    # not an option because os.link() will raise "OSError: [Errno 18] Invalid
    # cross-device link" if the temporary directory is created in a different
    # partition.

    pid = os.getpid()
    suffix = "_LEMON_%d_mosaic" % pid
    kwargs = dict(suffix=suffix + '_input')
    input_dir = tempfile.mkdtemp(**kwargs)
    atexit.register(util.clean_tmp_files, input_dir)

    for img in files:
        path = img.path
        source = os.path.abspath(path)
        basename = os.path.basename(path)
        link_name = os.path.join(input_dir, basename)
        os.symlink(source, link_name)

    # The output of montage.mosaic() is another directory, to which several
    # files are written, so we need the path to a second temporary directory.
    # Delete it before calling mosaic(), as otherwise it will raise IOError
    # ("Output directory already exists").

    kwargs = dict(suffix=suffix + '_output')
    output_dir = tempfile.mkdtemp(**kwargs)
    atexit.register(util.clean_tmp_files, output_dir)
    os.rmdir(output_dir)

    kwargs = dict(
        background_match=options.background_match,
        combine=options.combine,
        bitpix=-64,
    )

    if options.ncores > 1:
        kwargs['mpi'] = True  # use MPI whenever possible
        kwargs['n_proc'] = options.ncores  # number of MPI processes
    montage.mosaic(input_dir, output_dir, **kwargs)

    # montage.mosaic() writes several files to the output directory, but we are
    # only interested in one of them: 'mosaic.fits', the mosaic FITS image.

    MOSAIC_OUTPUT = 'mosaic.fits'
    src = os.path.join(output_dir, MOSAIC_OUTPUT)

    if options.reproject:
        print "%sReproject mosaic to point North..." % style.prefix,
        sys.stdout.flush()
        kwargs = dict(north_aligned=True, silent_cleanup=True)
        montage.reproject(src, output_path, **kwargs)
        print 'done.'
    else:
        # No reprojection, move mosaic to the output path
        shutil.move(src, output_path)

    print "%sYou're done ^_^" % style.prefix
    return 0
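# Illustrative usage sketch (hypothetical script and file names, not taken from
# the example above): the command expects two or more input FITS images followed
# by the output mosaic path, e.g.
#
#   python mosaic.py field_001.fits field_002.fits field_003.fits mosaic.fits --cores 4
#
# where --cores, --filter and --background-match are the options referenced in
# the code above; the run is refused if the output file already exists and no
# overwrite option is given.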
Example #26
def main(arguments = None):
    """ main() function, encapsulated in a method to allow for easy invokation.

    This method follows Guido van Rossum's suggestions on how to write Python
    main() functions in order to make them more flexible. By encapsulating the
    main code of the script in a function and making it take an optional
    argument the script can be called not only from other modules, but also
    from the interactive Python prompt.

    Guido van Rossum - Python main() functions:
    http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    Keyword arguments:
    arguments - the list of command line arguments passed to the script.

    """

    if arguments is None:
        arguments = sys.argv[1:] # ignore argv[0], the script name
    (options, args) = parser.parse_args(args = arguments)

    # Print the help and abort the execution if there are fewer than three
    # positional arguments left, as the user must specify at least two FITS
    # images and the output mosaic into which they are assembled.
    if len(args) < 3:
        parser.print_help()
        return 2 # used for command line syntax errors
    else:
        assert len(args) >= 3
        input_paths = set(args[:-1])
        output_path = args[-1]

    # Refuse to overwrite the output FITS file unless explicitly instructed to
    # do so. Note that, if the --overwrite option is given, we do not need to
    # delete the existing file: it will be silently overwritten when the output
    # of montage.mosaic() is shutil.move()'d to the output path.

    if os.path.exists(output_path):
        if not options.overwrite:
            msg = "%sError. The output file '%s' already exists."
            print msg % (style.prefix, output_path)
            print style.error_exit_message
            return 1

    # Workaround for a bug in montage.mosaic() that raises an error ('mpirun
    # has exited due to process rank [...] without calling "finalize"...') if
    # mpi = True and background_match = True. Until this is fixed, we can only
    # use one core if the --background-match option is given by the user.

    if options.background_match and options.ncores > 1:
        options.ncores = 1
        for msg in (
            "{0}Warning: --background-match is incompatible with --cores > 1.",
            "{0}Setting the --cores option to a value of one.",
            "{0}This is a workaround for a known bug in montage-wrapper:",
            "{0}https://github.com/astropy/montage-wrapper/issues/18"):
            print msg.format(style.prefix)
        print

    # Map each filter to a list of FITSImage objects
    files = fitsimage.InputFITSFiles()

    msg = "%sMaking sure the %d input paths are FITS images..."
    print msg % (style.prefix, len(input_paths))

    methods.show_progress(0.0)
    for index, path in enumerate(input_paths):
        # fitsimage.FITSImage.__init__() raises fitsimage.NonStandardFITS if
        # one of the paths is not a standard-conforming FITS file.
        try:
            img = fitsimage.FITSImage(path)

            # If we do not need to know the photometric filter (because the
            # --filter was not given) do not read it from the FITS header.
            # Instead, use None. This means that 'files', a dictionary, will
            # only have a key, None, mapping to all the input FITS images.

            if options.filter:
                pfilter = img.pfilter(options.filterk)
            else:
                pfilter = None

            files[pfilter].append(img)

        except fitsimage.NonStandardFITS:
            print
            msg = "'%s' is not a standard FITS file"
            raise fitsimage.NonStandardFITS(msg % path)

        percentage = (index + 1) / len(input_paths) * 100
        methods.show_progress(percentage)
    print # progress bar doesn't include newline

    # The --filter option allows the user to specify which FITS files, among
    # all those received as input, must be combined: only those images taken
    # in the options.filter photometric filter.
    if options.filter:

        msg = "%s%d different photometric filters were detected:"
        print msg % (style.prefix, len(files.keys()))

        for pfilter, images in sorted(files.iteritems()):
            msg = "%s %s: %d files (%.2f %%)"
            percentage = len(images) / len(files) * 100
            print msg % (style.prefix, pfilter, len(images), percentage)

        msg = "%sIgnoring images not taken in the '%s' photometric filter..."
        print msg % (style.prefix, options.filter) ,
        sys.stdout.flush()

        discarded = 0
        for pfilter, images in files.items():
            if pfilter != options.filter:
                discarded += len(images)
                del files[pfilter]

        if not files:
            print
            msg = "%sError. No image was taken in the '%s' filter."
            print msg % (style.prefix, options.filter)
            print style.error_exit_message
            return 1

        else:
            print 'done.'
            msg = "%s%d images taken in the '%s' filter, %d were discarded."
            print msg % (style.prefix, len(files), options.filter, discarded)

    # montage.mosaic() silently ignores those FITS images that have no WCS
    # information in their headers, and also raises a rather cryptic exception
    # (mMakeHdr: Invalid table file) if none of them has been astrometrically
    # solved. Instead of ignoring some images without warning or showing a
    # confusing error message that makes it almost impossible to understand
    # what may be failing, use FITSImage.center_wcs() to make sure that all the
    # images have WCS information, raising NoWCSInformationError otherwise.

    for img in files:
        # May raise NoWCSInformationError
        img.center_wcs()

    # montage.mosaic() requires as first argument the directory containing the
    # input FITS images but, in order to maintain the same syntax across all
    # LEMON commands, we receive them as command-line arguments. Thus, create a
    # temporary directory and symlink from it the input images. Hard links are
    # not an option because os.link() will raise "OSError: [Errno 18] Invalid
    # cross-device link" if the temporary directory is created in a different
    # partition.

    pid = os.getpid()
    suffix = "_LEMON_%d_mosaic" % pid
    kwargs = dict(suffix = suffix + '_input')
    input_dir = tempfile.mkdtemp(**kwargs)
    atexit.register(methods.clean_tmp_files, input_dir)

    for img in files:
        path = img.path
        source = os.path.abspath(path)
        basename = os.path.basename(path)
        link_name = os.path.join(input_dir, basename)
        os.symlink(source, link_name)

    # The output of montage.mosaic() is another directory, to which several
    # files are written, so we need the path to a second temporary directory.
    # Delete it before calling mosaic(), as otherwise it will raise IOError
    # ("Output directory already exists").

    kwargs = dict(suffix = suffix + '_output')
    output_dir = tempfile.mkdtemp(**kwargs)
    atexit.register(methods.clean_tmp_files, output_dir)
    os.rmdir(output_dir)

    kwargs = dict(background_match = options.background_match,
                  combine = options.combine,
                  bitpix=-64,
                  )

    if options.ncores > 1:
        kwargs['mpi'] = True              # use MPI whenever possible
        kwargs['n_proc'] = options.ncores # number of MPI processes
    montage.mosaic(input_dir, output_dir, **kwargs)

    # montage.mosaic() writes several files to the output directory, but we are
    # only interested in one of them: 'mosaic.fits', the mosaic FITS image.

    MOSAIC_OUTPUT = 'mosaic.fits'
    src = os.path.join(output_dir, MOSAIC_OUTPUT)

    if options.reproject:
        print "%sReproject mosaic to point North..." % style.prefix ,
        sys.stdout.flush()
        kwargs = dict(north_aligned = True, silent_cleanup = True)
        montage.reproject(src, output_path, **kwargs)
        print 'done.'
    else:
        # No reprojection, move mosaic to the output path
        shutil.move(src, output_path)

    print "%sYou're done ^_^" % style.prefix
    return 0
Example #27
def register_reproject_direc(direc, ref='r'):
	os.system("rm -rf "+direc+"/"+ref+"_header.hdr "+direc+"/*_area* "+direc+"/*_reg*")
	cmds.mGetHdr(direc+"/"+ref+".fits",direc+"/header_"+ref+".hdr")
	list_in=[direc+'/g.fits',direc+'/r.fits',direc+'/i.fits',direc+'/u.fits',direc+'/z.fits']
	list_out=[direc+'/g_reg.fits',direc+'/r_reg.fits',direc+'/i_reg.fits',direc+'/u_reg.fits',direc+'/z_reg.fits']
	mw.reproject(list_in,list_out,header=direc+"/header_"+ref+".hdr",north_aligned=True,system='EQUJ',exact_size=True,common=True)
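# Illustrative usage sketch (hypothetical directory name): `direc` is assumed to
# already contain SDSS cutouts named u.fits, g.fits, r.fits, i.fits and z.fits;
# the call below would write *_reg.fits versions of all five bands, reprojected
# onto the header of the chosen reference band.
#
#   register_reproject_direc('cutouts/NGC0001', ref='r')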
Example #28
File: rgb.py Project: EDrabek/aplpy
def make_rgb_cube(files, output, north=False, system=None, equinox=None):
    '''
    Make an RGB data cube from a list of three FITS images.

    This method can read in three FITS files with different
    projections/sizes/resolutions and uses Montage to reproject
    them all to the same projection.

    Two files are produced by this function. The first is a three-dimensional
    FITS cube with a filename given by `output`, where the third dimension
    contains the different channels. The second is a two-dimensional FITS
    image with a filename given by `output` with a `_2d` suffix. This file
    contains the mean of the different channels, and is required as input to
    FITSFigure if show_rgb is subsequently used to show a color image
    generated from the FITS cube (to provide the correct WCS information to
    FITSFigure).

    Parameters
    ----------

    files : tuple or list
       A list of the filenames of the three FITS files to reproject.
       The order is red, green, blue.

    output : str
       The filename of the output RGB FITS cube.

    north : bool, optional
       By default, the FITS header generated by Montage represents the
       best fit to the images, often resulting in a slight rotation. If
       you want north to be straight up in your final mosaic, you should
       use this option.

    system : str, optional
       Specifies the system for the header (default is EQUJ).
       Possible values are: EQUJ EQUB ECLJ ECLB GAL SGAL

    equinox : str, optional
       If a coordinate system is specified, the equinox can also be given
       in the form YYYY. Default is J2000.
    '''

    # Check whether the Python montage module is installed. The Python module
    # checks itself whether the Montage command-line tools are available, and
    # if they are not then importing the Python module will fail.
    try:
        import montage_wrapper as montage
    except ImportError:
        raise Exception("Both the Montage command-line tools and the"
                        " montage-wrapper Python module are required"
                        " for this function")

    # Check that input files exist
    for f in files:
        if not os.path.exists(f):
            raise Exception("File does not exist : " + f)

    # Create work directory
    work_dir = tempfile.mkdtemp()

    raw_dir = '%s/raw' % work_dir
    final_dir = '%s/final' % work_dir

    images_raw_tbl = '%s/images_raw.tbl' % work_dir
    header_hdr = '%s/header.hdr' % work_dir

    # Create raw and final directory in work directory
    os.mkdir(raw_dir)
    os.mkdir(final_dir)

    # Create symbolic links to input files
    for i, f in enumerate(files):
        os.symlink(os.path.abspath(f), '%s/image_%i.fits' % (raw_dir, i))

    # List files and create optimal header
    montage.mImgtbl(raw_dir, images_raw_tbl, corners=True)
    montage.mMakeHdr(images_raw_tbl, header_hdr, north_aligned=north, system=system, equinox=equinox)

    # Read header in with astropy.io.fits
    header = fits.Header.fromtextfile(header_hdr)

    # Find image dimensions
    nx = int(header['NAXIS1'])
    ny = int(header['NAXIS2'])

    # Generate empty datacube
    image_cube = np.zeros((len(files), ny, nx), dtype=np.float32)

    # Loop through files
    for i in range(len(files)):

        # Reproject channel to optimal header
        montage.reproject('%s/image_%i.fits' % (raw_dir, i),
                          '%s/image_%i.fits' % (final_dir, i),
                          header=header_hdr, exact_size=True, bitpix=-32)

        # Read in and add to datacube
        image_cube[i, :, :] = fits.getdata('%s/image_%i.fits' % (final_dir, i))

    # Write out final cube
    fits.writeto(output, image_cube, header, clobber=True)

    # Write out collapsed version of cube
    fits.writeto(output.replace('.fits', '_2d.fits'), \
                   np.mean(image_cube, axis=0), header, clobber=True)

    # Remove work directory
    shutil.rmtree(work_dir)
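# Illustrative usage sketch (hypothetical file names, not part of the function
# above): build the cube, turn it into a colour image, and display it on the
# WCS carried by the collapsed *_2d.fits companion file.
#
#   import aplpy
#   make_rgb_cube(['i_band.fits', 'r_band.fits', 'g_band.fits'], 'rgb_cube.fits', north=True)
#   aplpy.make_rgb_image('rgb_cube.fits', 'rgb_image.png')
#   fig = aplpy.FITSFigure('rgb_cube_2d.fits')
#   fig.show_rgb('rgb_image.png')
#   fig.save('rgb_figure.png')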
Example #29
from   astropy.io import fits
import montage_wrapper as montage

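# Open the multi-extension drizzled HST file; the science image lives in
# extension 1, whose header carries the WCS.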
hstfile        = 'ibs401010_drz.fits'
hstimage       = fits.open(hstfile)[1]
hstheader      = hstimage.header

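# Write the science extension out as a simple single-extension FITS file,
# presumably so that Montage can operate on the primary HDU directly.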
fits.writeto('output_file.fits', hstimage.data, hstheader, overwrite=True)

hstfile        = 'output_file.fits'
hstimage       = fits.open(hstfile)[0]
hstheader      = hstimage.header

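# Reproject the single-extension image to a north-aligned orientation,
# writing the result to hstnew.fits.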
montage.reproject('output_file.fits','hstnew.fits', north_aligned=True,exact_size=True)