Example No. 1
def scopy():
    if not os.path.exists(Path + '/test/'):
        os.mkdir(Path + '/test/')
    fs = glob(Path + '/sci*c?.fits')
    print(fs)
    for i, f in enumerate(fs):
        iraf.scopy(f + '[*,1,1]', Path + '/test/' + 'sci' + str(i) + '.fits')
Example No. 2
def split1d(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d????.fits')
    if len(fs) == 0:
        print "WARNING: No extracted spectra to split."
        iraf.cd('..')
        return

    for f in fs:
        hdu = pyfits.open(f.replace('x1d', 'fix'))
        chipgaps = get_chipgaps(hdu)
        # Throw away the first pixel as it is almost always bad
        chipedges = [[1, chipgaps[0][0]], [chipgaps[0][1] + 1, chipgaps[1][0]],
                     [chipgaps[1][1] + 1, chipgaps[2][0]]]

        w = WCS(f)
        # Copy each of the chips out separately. Note that IRAF is 1-indexed
        # unlike Python, so we add 1
        for i in range(3):
            # get the wavelengths that correspond to each chip
            lam, _apnum, _bandnum = w.all_pix2world(chipedges[i], 0, 0, 0)
            iraf.scopy(f, f[:-5] + 'c%i' % (i + 1), w1=lam[0], w2=lam[1],
                       format='multispec', rebin='no',clobber='yes')
        hdu.close()
    iraf.cd('..')
Example No. 3
def scopy(spec_in, spec_out, wstart, wend):
    """
    Cut a spectrum on given wavelengths.

    Parameters
    ==========

    spec_in: str;
        Name of input spectrum.

    spec_out: str;
        Name of output spectrum.

    wstart: int;
        Beginning wavelength.

    wend: int;
        Ending wavelength.

    """
    iraf.onedspec(_doprint=0)
    iraf.scopy.w1 = wstart
    iraf.scopy.w2 = wend
    iraf.scopy.input = spec_in
    iraf.scopy.output = spec_out
    iraf.scopy(mode='h')
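A minimal usage sketch for the wrapper above (not part of the original example), assuming PyRAF is installed and that spec_in.fits is a hypothetical 1-D spectrum in the working directory:

from pyraf import iraf  # the wrapper expects iraf to be importable in its module

# Hypothetical file names; the wavelength limits are in the spectrum's dispersion units.
scopy('spec_in.fits', 'spec_cut.fits', 4000, 7000)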
Example No. 4
def split1d(fs=None):
    iraf.cd('work')
    if fs is None:
        fs = glob('x1d/sci*x1d????.fits')
    if len(fs) == 0:
        print "WARNING: No extracted spectra to split."
        iraf.cd('..')
        return

    for f in fs:
        hdu = pyfits.open(f.replace('x1d', 'fix'))
        chipgaps = get_chipgaps(hdu)
        # Throw away the first pixel as it is almost always bad
        chipedges = [[1, chipgaps[0][0]], [chipgaps[0][1] + 1, chipgaps[1][0]],
                     [chipgaps[1][1] + 1, chipgaps[2][0]]]

        w = WCS(f)
        # Copy each of the chips out separately. Note that IRAF is 1-indexed
        # unlike Python, so we add 1
        for i in range(3):
            # get the wavelengths that correspond to each chip
            lam, _apnum, _bandnum = w.all_pix2world(chipedges[i], 0, 0, 0)
            iraf.scopy(f, f[:-5] + 'c%i' % (i + 1), w1=lam[0], w2=lam[1],
                       format='multispec', rebin='no',clobber='yes')
        hdu.close()
    iraf.cd('..')
Example No. 5
    def ScopyTask(self, InputFile, OutputFile, Fits_Folder, Wmin = 'INDEF', Wmax = 'INDEF', Suffix = 't'):

        if OutputFile == None:
            OutputFile = self.outputNameGenerator(InputFile, Suffix)

        scopy_conf = self.ScopyAttributes(InputFile, OutputFile, Fits_Folder, w1 = Wmin, w2 = Wmax)

        iraf.scopy(**scopy_conf)

        return OutputFile
Example No. 6
def split1d(filename):

    hdu = pyfits.open(filename)
    chipedges = get_chipedges(hdu['SCI'].data[0])
    lam = fitshdr_to_wave(hdu['SCI'].header)
    # Copy each of the chips out separately. Note that IRAF is 1-indexed
    for i in range(3):
        # get the wavelengths that correspond to each chip
        w1 = lam[chipedges[i][0]]
        w2 = lam[chipedges[i][1]]
        iraf.scopy(filename + '[SCI]', output=filename[:-5] + 'c%i.fits' % (i + 1),
                   w1=w1, w2=w2, rebin='no')
    hdu.close()
Example No. 7
def split1d(filename):

    hdu = fits.open(filename)
    chipedges = get_chipedges(hdu['SCI'].data[0])
    lam = fitshdr_to_wave(hdu['SCI'].header)
    # Copy each of the chips out separately. Note that IRAF is 1-indexed
    for i in range(3):
        # get the wavelengths that correspond to each chip
        w1 = lam[chipedges[i][0]]
        w2 = lam[chipedges[i][1]]
        iraf.scopy(filename + '[SCI]', output=filename[:-5] + 'c%i.fits' % (i + 1),
                   w1=w1, w2=w2, rebin='no')
    hdu.close()
Example No. 8
def barycor(filelist_new):
    iraf.reset(obsdb='home$obsdb.dat')
    for i in range(len(filelist_new)):
        hdulist = fits.open(filelist_new[i])
        header_time_of_observation = hdulist[0].header['DATE-OBS']
        year_of_observation = int(header_time_of_observation[:4])
        month_of_observation = int(header_time_of_observation[5:7])
        day_of_observation = int(header_time_of_observation[8:10])
        right_ascension = hdulist[0].header['RA']
        declination = hdulist[0].header['DEC']

        try:
            ut_of_observation = hdulist[0].header['UT']
        except KeyError:
            ut_of_observation = int(header_time_of_observation[11:13]) + int(
                header_time_of_observation[14:16]) / 60 + int(
                    header_time_of_observation[17:19]) / 3600
        exposure_time = hdulist[0].header['EXP_TIME']

        output_filename_dummy = filelist_new[i].replace("norm.", "norm.dummy.")
        output_filename_dummyI = output_filename_dummy.replace(
            "norm-1", "norm-1.dummy.")
        output_filename = output_filename_dummyI.replace(
            "merged", "merged.dummy.")

        iraf.scopy(filelist_new[i], output_filename)
        iraf.hedit(images=output_filename,
                   fields="UT",
                   value=ut_of_observation)
        iraf.hedit(images=output_filename, fields="EPOCH", value="2000")
        iraf.hedit(images=output_filename,
                   fields="EXP-TIME",
                   value=exposure_time)
        iraf.rvcorrect(images=output_filename,
                       year=year_of_observation,
                       month=month_of_observation,
                       day=day_of_observation,
                       ut=ut_of_observation,
                       ra=right_ascension,
                       dec=declination)

        output_filename_final = output_filename.replace("dummy.", "rvcorrect.")
        print(output_filename, output_filename_final)
        iraf.dopcor(output_filename,
                    output_filename_final,
                    redshift="-VHELIO",
                    isvelocity="yes")

        os.remove(output_filename)
        hdulist.close()
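A hedged usage sketch for barycor above, assuming the module already imports iraf, fits and os; the file names are hypothetical and must follow the 'norm.' pattern expected by the replace() calls, with DATE-OBS, RA, DEC and EXP_TIME keywords in their primary headers:

# Hypothetical continuum-normalized spectra.
barycor(['target1.norm.fits', 'target2.norm.fits'])
# For each input this writes a temporary *.norm.dummy.* copy, runs rvcorrect,
# applies the -VHELIO shift with dopcor into *.norm.rvcorrect.*, and removes
# the temporary copy.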
Example No. 9
def trim_spectra(spectrum_name):
    iraf.scopy(
        input = spectrum_name,\
        output = spectrum_name,\
        w1 = region_w1,\
        w2 = region_w2,\
        apertures = "",\
        bands = "",\
        beams = "",\
        apmodulus = "0",\
        format = "multispec",\
        renumber = 0,\
        offset = "0",\
        clobber = 1,\
        rebin = 1,\
        verbose = 1,\
        mode = "ql")
Example No. 10
def extract(packnam):
    obs = packnam[-6]
    pack = pyfits.open(packnam)
    table = pack[1].data
    head = pack[0].header
    setup = head['HIERARCH ESO INS EXP MODE']
    fib = []
    obj = []
    valid_obj = []
    for i in range(len(table)):
        fib.append(table[i][0])
        obj.append(table[i][7])
    iraf.noao()
    iraf.onedspec()
    os.makedirs(setup + '/' + obs)
    for k in range(len(fib)):
        if (obj[k] != 'CALSIM') and (obj[k][0:4] != 'Grid'):
            valid_obj.append(setup + '/' + obs + '/' + obj[k])
            iraf.scopy(packnam + '[' + str(fib[k]) + ',*]',
                       setup + '/' + obs + '/' + obj[k])
    return valid_obj, setup
Example No. 11
def do_all_science(object,
                   blueimage,
                   bluepath,
                   bluestd,
                   redimage,
                   redflats,
                   redpath,
                   redstd,
                   btrimsec1=BLUETRIM1,
                   btrimsec2=BLUETRIM2,
                   rtrimsec=REDTRIM):

    if not os.path.exists("combine"):
        os.mkdir("combine")

    pyraf.iraffunctions.chdir('blue')
    blue_science(blueimage,
                 '../%s' % bluepath,
                 object=object,
                 smooth="%s.smooth.fits" % bluestd,
                 sens="%s.sens" % bluestd,
                 trimsec1=btrimsec1,
                 trimsec2=btrimsec2)
    iraf.scopy("%s.f" % object, "../combine/blue", w1=3500, w2=5500)

    pyraf.iraffunctions.chdir("../red")
    red_science(redimage,
                redflats,
                "../%s" % redpath,
                object=object,
                smooth="%s.smooth.fits" % redstd,
                sens="%s.sens" % redstd,
                trimsec=rtrimsec)
    iraf.scopy("%s.f" % object, "../combine/red", w1=5400, w2=10000)

    pyraf.iraffunctions.chdir("../combine")
    spec_combine("red", "blue", object)

    pyraf.iraffunctions.chdir("../")
    return
Example No. 12
def stitch():
    # Find the mean between 3050 and 3100 of the blue and uv spectra
    uvhdu = pyfits.open('13dh_uv.fits')
    uvlam = fitshdr_to_wave(uvhdu[0].header)
    w = np.logical_and(uvlam > 3050, uvlam < 3100)
    uvmean = np.median(uvhdu[0].data[1,0,w])
    uvhdu.close()
    print("uv mean %e"%uvmean)


    bluehdu = pyfits.open('13dh_blue.fits')
    bluelam = fitshdr_to_wave(bluehdu[0].header)
    w = np.logical_and(bluelam > 3050, bluelam < 3100)
    blueuvmean = np.median(bluehdu[0].data[1,0,w])
    print("blue mean uv %e"%blueuvmean)

    # Find mean of red and blue between 5375 and 5600
    w = np.logical_and(bluelam > 5375, bluelam < 5600)
    blueredmean = bluehdu[0].data[1,0,w].mean()
    bluehdu.close()
    print("blue mean red %e"%blueredmean)

    redhdu = pyfits.open('13dh_red.fits')
    redlam = fitshdr_to_wave(redhdu[0].header)
    w = np.logical_and(redlam > 5375, redlam < 5600)
    redmean = redhdu[0].data[1,0,w].mean()
    redhdu.close()
    print("red mean %e"%redmean)

    # trim uv at 3140
    iraf.unlearn(iraf.scopy)
    iraf.scopy('13dh_uv.fits', '13dh_uv_trim.fits', w1='INDEF', w2=3140, rebin='no')
    
    # trim blue at 3130 and 5600
    iraf.unlearn(iraf.scopy)
    iraf.scopy('13dh_blue.fits', '13dh_blue_trim.fits', w1=3130, w2=5600, rebin='no')
    
    # Trim red at 5375
    iraf.unlearn(iraf.scopy)
    iraf.scopy('13dh_red.fits', '13dh_red_trim.fits', w1=5375, w2='INDEF', rebin='no')

    # Copy the spectra from the second extraction to the first
    for im in ['13dh_uv_trim.fits', '13dh_blue_trim.fits', '13dh_red_trim.fits']:
        hdu = pyfits.open(im, mode='update')
        hdu[0].data[0, 0, :] = hdu[0].data[1, 0, :]
        hdu.flush()
        hdu.close()

    # Write out the scale factors
    lines = ['%f\n' % (blueuvmean / uvmean)**-1,'1.0\n','%f\n' % (blueredmean / redmean)**-1]

    f = open('scales.dat','w')
    f.writelines(lines)
    f.close()
    #Scombine with the correct scalings using average
    iraf.unlearn(iraf.scombine)
    iraf.scombine('13dh_uv_trim, 13dh_blue_trim, 13dh_red_trim', '13dh_hst.fits', scale='@scales.dat')
    return
Example No. 13
def do_all_science(object, blueimage, bluepath, bluestd, redimage, redflats, redpath,
                   redstd, btrimsec1=BLUETRIM1, btrimsec2=BLUETRIM2, rtrimsec=REDTRIM):

    if not os.path.exists("combine"):
        os.mkdir("combine")

    pyraf.iraffunctions.chdir('blue')
    blue_science(blueimage, '../%s' % bluepath, object=object, smooth="%s.smooth.fits" %
                 bluestd, sens="%s.sens" % bluestd, trimsec1=btrimsec1, 
                 trimsec2=btrimsec2)
    iraf.scopy("%s.f" % object, "../combine/blue", w1=3500, w2=5500)

    pyraf.iraffunctions.chdir("../red")
    red_science(redimage, redflats, "../%s" % redpath, object=object, 
                smooth="%s.smooth.fits" % redstd, sens="%s.sens" % redstd,
                trimsec=rtrimsec)
    iraf.scopy("%s.f" % object, "../combine/red", w1=5400, w2=10000)

    pyraf.iraffunctions.chdir("../combine")
    spec_combine("red", "blue", object)

    pyraf.iraffunctions.chdir("../")
    return
Example No. 14
def scopy_cmp(fn, w1='INDEF', w2='INDEF'):
    """
    Copy part of a spectrum and generate a new spectrum FITS file.
    If a file with the same name already exists, this function will delete the old one.
    If the FITS file fn has more than one aperture,
    this function will extract only the last aperture.
    fn : fits name
    type : string
    w1 : start wavelength, default='INDEF'
    type : float or a string='INDEF'
    w2 : end wavelength, default='INDEF'
    type : float or a string='INDEF'
    return : output file name
    type : string
    """
    nax2 = pyfits.getval(fn, keyword='NAXIS2')
    outname = str(w1) + '_' + str(w2) + '_' + fn
    if os.path.isfile(outname):
        print('remove file ' + outname)
        os.remove(outname)
    iraf.scopy(input=fn,
               output=outname,
               w1=w1,
               w2=w2,
               apertures=nax2,
               bands=1,
               beams='',
               apmodulus=0,
               format='multispec',
               renumber='Yes',
               offset=0,
               clobber='No',
               merge='No',
               rebin='No',
               verbose='Yes')
    return outname
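A hedged usage sketch for scopy_cmp above, assuming PyRAF and pyfits are importable and that obj.fits is a hypothetical multispec file with a NAXIS2 keyword:

# Copy the 4000-5000 Angstrom region of the last aperture; following the
# naming scheme above, the output file is '4000_5000_obj.fits'.
out = scopy_cmp('obj.fits', w1=4000, w2=5000)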
Example No. 15
def scopy_flux(flux_sci, flux_scopy_fits, flux_scopy_range, flux_scopy_file):
    """
    Combine (average) all spectra (one per bin) in the image over a given spectral range and calculate the mean flux.
    This is used for 1 mag contours when plotting the velocity fields.
    INPUT: FLUX_SCI, FLUX_SCOPY_FITS, FLUX_SCOPY_RANGE
    OUTPUT: FLUX_SCOPY_FILE
    """

    if os.path.exists(flux_scopy_file):
        print('File {} already exists'.format(flux_scopy_file))
        return

    files_in_dir = glob.glob(flux_sci.format('*'))
    assert len(files_in_dir) > 0, 'No files match {}'.format(flux_sci.format('*'))

    from pyraf import iraf

    iraf.noao()
    iraf.onedspec()

    flux_scopy_fits_i_data_mean = []

    for i in range(len(files_in_dir)):

        flux_sci_i = flux_sci.format(i)
        flux_scopy_fits_i = flux_scopy_fits.format(i)

        if not os.path.exists(flux_scopy_fits_i):
            iraf.scopy(flux_sci_i, flux_scopy_fits_i, w1=flux_scopy_range[0], w2=flux_scopy_range[1])

        flux_scopy_fits_i_data = fits.getdata(flux_scopy_fits_i, 0)
        assert flux_scopy_fits_i_data.ndim != 0, "Scrop'd array is empty"

        flux_scopy_fits_i_data_mean.append(flux_scopy_fits_i_data.mean())

    np.array(flux_scopy_fits_i_data_mean).tofile(flux_scopy_file, sep='\n')
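A hedged usage sketch for scopy_flux above, with hypothetical name templates (the '{}' is the bin index) and a hypothetical wavelength window; it relies on the module-level imports the function assumes (os, glob, numpy as np, astropy.io.fits as fits):

scopy_flux(flux_sci='bin_sci_{}.fits',          # per-bin input spectra
           flux_scopy_fits='bin_flux_{}.fits',  # per-bin scopy output
           flux_scopy_range=(4800, 5100),       # wavelength window
           flux_scopy_file='mean_flux.dat')     # one mean flux per bin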
Example No. 16
def normalize_and_merge(reduced_science_files):
    current_directory = os.getcwd()

    for k in range(len(reduced_science_files)):

        remove_file(current_directory, "norm.dummyI.fits")
        remove_file(current_directory, "norm.dummyIa.fits")
        remove_file(current_directory, "norm.dummyII.fits")
        remove_file(current_directory, "norm.dummyIIa.fits")
        remove_file(current_directory, "norm.dummyIII.fits")
        remove_file(current_directory, "norm.dummyIV.fits")
        remove_file(current_directory, "norm.dummyV.fits")
        remove_file(current_directory, "norm.dummy1.fits")
        remove_file(current_directory, "norm.dummy2.fits")
        remove_file(current_directory, "norm.dummy3.fits")
        remove_file(current_directory, "norm.dummy4.fits")
        remove_file(current_directory, "norm.dummy5.fits")
        remove_file(current_directory, "norm.dummy6.fits")
        remove_file(current_directory, "norm.dummy7.fits")
        remove_file(current_directory, "norm.dummy8.fits")

        ranges = np.loadtxt("ranges.lis", delimiter=",")
        aperturlist = open("test.norm.apertures.lis", "w")

        merge = reduced_science_files[k].replace(".extracted.fits",
                                                 ".norm.fits")

        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummyI.fits"),
                   apertures="5:14",
                   format="multispec")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummyII.fits"),
                   apertures="35:42",
                   format="multispec")

        iraf.fit1d(place_here("norm.dummyI.fits"),
                   place_here("norm.dummyIa.fits"),
                   naverage=1,
                   axis=2,
                   type="fit",
                   low_rej=1.0,
                   high_rej=2.0,
                   order=2,
                   niterate=2,
                   func="spline3",
                   sample="*")
        iraf.fit1d(place_here("norm.dummyII.fits"),
                   place_here("norm.dummyIIa.fits"),
                   naverage=1,
                   axis=2,
                   type="fit",
                   low_rej=1.0,
                   high_rej=2.0,
                   order=2,
                   niterate=2,
                   func="spline3",
                   sample="*")

        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy1.fits"),
                   apertures="1",
                   format="multispec",
                   w1="4544")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy2.fits"),
                   apertures="2",
                   format="multispec",
                   w1="4569")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy3.fits"),
                   apertures="3:7",
                   format="multispec")
        iraf.scopy(place_here("norm.dummyIa.fits"),
                   place_here("norm.dummy4.fits"),
                   apertures="8:10",
                   format="multispec")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy5.fits"),
                   apertures="11:37",
                   format="multispec")
        iraf.scopy(place_here("norm.dummyIIa.fits"),
                   place_here("norm.dummy6.fits"),
                   apertures="38:40",
                   format="multispec")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy7.fits"),
                   apertures="41:50",
                   format="multispec")
        iraf.scopy(reduced_science_files[k],
                   place_here("norm.dummy8.fits"),
                   apertures="51",
                   format="multispec",
                   w2="7591")

        iraf.scombine("@normalization.list.lis",
                      place_here("norm.dummyIII.fits"),
                      group='apertures')

        iraf.fit1d(place_here("norm.dummyIII.fits"),
                   place_here("norm.dummyIV.fits"),
                   naverage=1,
                   axis=1,
                   type="fit",
                   low_rej=0.8,
                   high_rej=2.0,
                   order=7,
                   niterate=4,
                   func="spline3",
                   sample="*")

        iraf.sarith(reduced_science_files[k], "/",
                    place_here("norm.dummyIV.fits"),
                    place_here("norm.dummyV.fits"))

        iraf.fit1d(place_here("norm.dummyV.fits"),
                   merge,
                   naverage=1,
                   axis=1,
                   type="ratio",
                   low_rej=0.2,
                   high_rej=2.0,
                   order=1,
                   niterate=4,
                   func="chebyshev",
                   sample="*")

        iraf.hedit(merge, "INSTRUME", 'TLS-echelle')

        for i in range(len(ranges)):
            apertures = int(ranges[i][0])

            new_name = merge.replace(".fits", "." + str(apertures) + ".fits")

            input1 = os.path.join(current_directory, merge)
            output = os.path.join(current_directory, new_name)

            iraf.scopy(input1,
                       output,
                       w1=ranges[i][1],
                       w2=ranges[i][2],
                       apertur=apertures,
                       format="multispec")

            aperturlist.write(new_name + "\n")

        aperturlist.close()

        new_name_merged = reduced_science_files[k].replace(
            ".extracted.fits", ".merged.fits")

        iraf.scombine("@test.norm.apertures.lis", new_name_merged, group='all')

        cut_for_ston = new_name_merged.replace("merged", "cut")
        iraf.scopy(new_name_merged,
                   cut_for_ston,
                   w1=5603,
                   w2=5612,
                   format="multispec",
                   apertures="")
        stddev = iraf.imstat(cut_for_ston,
                             Stdout=1,
                             fields="stddev",
                             format="no")
        ston = 1 / float(stddev[0])

        iraf.hedit(new_name_merged, "STON", ston)

        for i in range(len(ranges)):
            apertures = int(ranges[i][0])
            os.remove(
                os.path.join(
                    merge.replace(".fits", "." + str(apertures) + ".fits")))

        os.remove(os.path.join(current_directory, "test.norm.apertures.lis"))

        remove_file(current_directory, "norm.dummyI.fits")
        remove_file(current_directory, "norm.dummyIa.fits")
        remove_file(current_directory, "norm.dummyII.fits")
        remove_file(current_directory, "norm.dummyIIa.fits")
        remove_file(current_directory, "norm.dummyIII.fits")
        remove_file(current_directory, "norm.dummyIV.fits")
        remove_file(current_directory, "norm.dummyV.fits")
        remove_file(current_directory, "norm.dummy1.fits")
        remove_file(current_directory, "norm.dummy2.fits")
        remove_file(current_directory, "norm.dummy3.fits")
        remove_file(current_directory, "norm.dummy4.fits")
        remove_file(current_directory, "norm.dummy5.fits")
        remove_file(current_directory, "norm.dummy6.fits")
        remove_file(current_directory, "norm.dummy7.fits")
        remove_file(current_directory, "norm.dummy8.fits")
    print("Success! Produced files *merged.fits, *norm.fits")
Example No. 17
def stitch_flats(outputnames, pivots, outstring):
    """Take a list of multispec files and a list of pivots and stitch together a master flat.

    The pivot values are inclusive, so if pivots = [72] then the master flat will contain fibers 1 - 72 from flat #1 and 73 - 109 from flat #2.

    Parameters
    ----------
    
    outputnames : list of str
        Names of the multispec files to stitch together
    
    pivots : list of int
        The fibers that form the borders of the stitch. If outputnames is length N, pivots must be length N - 1
    
    outstring : str
        The special, IRAF scrunch string used to identify intermediate files associated with a **dohydra** run

    Returns
    -------
    
    mastername : str
        The name of the stitched master multispec flat

    Notes
    -----
    The specifics of outstring depend on system and IRAF distribution. See :meth:`GradPak_flatfu.get_scrunch`

    """
    pivots = [0] + pivots + [109]

    tmpfiles = []
    print 'Extracting flat apertures...'
    for i, flat in enumerate(outputnames):
        print '\ttaking {} from {} to {}'.format(flat, pivots[i] + 1,
                                                 pivots[i + 1])
        name = 'tmp{}'.format(flat)
        iraf.scopy(flat,
                   name,
                   apertur='{}-{}'.format(pivots[i] + 1, pivots[i + 1]),
                   w1='INDEF',
                   w2='INDEF',
                   format='multispec',
                   verbose=False)
        tmpfiles.append(name)

    mastername = 'dFlat_master{}.ms.fits'.format(outstring)
    print 'Stitching together master flat {}'.format(mastername)

    iraf.scombine(','.join(tmpfiles),
                  mastername,
                  apertur='',
                  group='apertures',
                  first=True,
                  w1='INDEF',
                  w2='INDEF',
                  dw='INDEF',
                  nw='INDEF',
                  log=False,
                  scale='none',
                  zero='none',
                  weight='none',
                  logfile='flatfu.log')

    for tmp in tmpfiles:
        os.remove(tmp)
    return mastername
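A hedged usage sketch matching the docstring above; the flat names and scrunch string are hypothetical:

# Fibers 1-72 are taken from the first flat and 73-109 from the second;
# the stitched flat is written as 'dFlat_master_red.ms.fits'.
master = stitch_flats(['dFlat1.ms.fits', 'dFlat2.ms.fits'],
                      pivots=[72],
                      outstring='_red')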
Example No. 18
combine_list_norm = ""
combine_list_flux = ""

for im_slice in image_slices:
    image_name = im_slice + "_" + file_name
    if os.path.exists("norm_" + image_name):
        combine_list_norm = combine_list_norm + "norm_" +image_name + ","

        iraf.scopy(
            input = "norm_" + image_name,\
            output = "norm_" + image_name,\
            w1 = spectrum_w1,\
            w2 = spectrum_w2,\
            apertures = "",\
            bands = "",\
            beams = "",\
            apmodulus = 0,\
            format = "multispec",\
            renumber = 0,\
            offset = 0,\
            clobber = 1,\
            merge = 0,\
            rebin= 1,\
            verbose = 1)

    if os.path.exists("cray_" + image_name):
        combine_list_flux = combine_list_flux + "cray_" +image_name + ","   
        iraf.scopy(
            input = "cray_" + image_name,\
            output = "cray_" + image_name,\
            w1 = spectrum_w1,\
            w2 = spectrum_w2,\
Example No. 19
def runMultispec(fpath, skyFile=''):
    '''
    Identifies and measures the wavelengths, fluxes, and equivalent widths of
    emission lines in 1D or 2D spectra using Pyraf and ALFA.

    Prompts the user to measure and identify one line, via Pyraf's/Iraf's splot,
    in order to derive a redshift to be used in ALFA.

    BSC todo: update this to reflect the new structure of the whole WRALF

    Args:
    fpath       absolute path to a spectral .fits file (1D or 2D)
    skyFile     absolute path to a sky spectrum .fits file

    Returns:
    objDF           Pandas dataframe storing global data about each object.
                    One entry per object.
    lineDF          Pandas dataframe storing data on emission lines in each
                    spectrum. One entry per emission line for each spectrum.

    Output Files:
    /1dspectra      directory for 1D spectra generated from the input 2D spectrum
    1dspec.*.fits   a single 1D spectrum generated from the input 2D spectrum;
    globalData.txt  csv output file containing global data about each object for
                    each spectra that the user does not skip
    lineData.txt    same as above, but contains line data for each object
    '''

    datapath, basename = split(fpath)
    fname, __ = splitext(basename)
    outPath_WRALF = join(datapath, 'WRALFoutput')

    #prepare for sky line adjustments if desired
    if skyFile != '':
        try:
            print('\n*****Running a sky line correction. Displaying sky spectrum. ' \
                    'Follow the prompts and measure a sky line.')
            userW, restW, __ = promptLine(skyFile, 'sky')
            deltaW = userW - restW
        except:
            deltaW = 0
    else:
        deltaW = 0
    print('Sky line shift: ', round(deltaW, 6), 'Angstroms')

    ########################################################
    from astropy.io import fits

    #get the name of the data's field from imhead

    hdul = fits.open(fpath)
    header = hdul[0].header

    fieldName = header['OBJECT']

    numAxes = header['NAXIS']

    ########################################################

    #DANGER: clear out old output files before writing anything
    outFile1 = join(outPath_WRALF, fieldName + '_globalData.txt')
    outFile2 = join(outPath_WRALF, fieldName + '_lineData.txt')
    outFile3 = join(outPath_WRALF, fieldName + '_globalData_MR.txt')
    if exists(outFile1):
        remove(outFile1)
    if exists(outFile2):
        remove(outFile2)
    if exists(outFile3):
        remove(outFile3)

    #directories for output files (create if nonexistent)
    specPath = join(outPath_WRALF, fieldName + '_1dspectra')
    if not exists(specPath):
        makedirs(specPath)

    ##Process as a one-dimensional spectrum if there's only one spectrum.
    if numAxes == 1:
        objID = fieldName
        objDF, lineDF = run1Dspec(fpath, objID, fieldName, deltaW)
        return objDF, lineDF

    ##Otherwise, process as a two-dimensional spectrum.

    # assumes that the 2D spectrum shape is [number of spectra, wavelength],
    # so NAXIS1 is the number of pixels and NAXIS2 is the number of spectra
    numSpec = header['NAXIS2']

    apIDs = np.empty(numSpec, dtype=object)
    apNums = np.empty(numSpec, dtype=object)
    apFlags = np.empty(numSpec)
    for i in range(1, numSpec + 1):

        curApIDKey = 'APID' + str(i)
        curApNumKey = 'APNUM' + str(i)

        curApIDstr = header[curApIDKey]
        curApNumstr = header[curApNumKey]

        curApID = curApIDstr.split()[0]
        curApNum = curApNumstr.split()[0]
        curApFlag = curApNumstr.split()[1]

        apIDs[i - 1] = curApID
        apNums[i - 1] = curApNum
        apFlags[i - 1] = curApFlag

    #change the aperture flags to a boolean array
    apFlags = apFlags.astype(bool)

    #sort all the Imhead keys according to ascending aperture number; this is the
    #right way to handle a disordered Imheader _iff_ the multispec has correct ordering
    apNums = list(map(int, apNums))  #first, convert apNums to ints
    keys = list(zip(apNums, apIDs, apFlags))
    keys.sort()  #sorts along the first entry, the aperture number
    apNums = np.array([x for x, y, z in keys])
    apIDs = np.array([y for x, y, z in keys])
    apFlags = np.array([z for x, y, z in keys], dtype=bool)
    #again, this sorting will only work downstream _iff_ the multispec has correct ordering.
    #Otherwise, we will have big, unseen problems.

    goodNums = apNums[apFlags]
    goodNames = apIDs[apFlags]

    nSpec = len(goodNums)
    #prompt user for which spectrum to start on
    while (True):
        try:
            startSpec = eval(
                input(
                    '\nThere are ' + str(nSpec) +
                    ' spectra; press enter to start '
                    'at the first spectrum, or enter a number to start at a later spectrum:\n'
                ))
            startSpec = int(
                startSpec)  #trigger a Name/ValueError if not an int
        except (NameError, ValueError):
            print('Invalid input - please enter an integer between ' + str(1) +
                  ' and ' + str(nSpec) + ':\n')
            continue
        except SyntaxError:
            startSpec = 1
        if startSpec > nSpec or startSpec < 1:
            print('Invalid input - please enter an integer between ' + str(1) +
                  ' and ' + str(nSpec) + ':\n')
            continue
        break

    for i in range(startSpec - 1, nSpec):

        ##Run scopy to make a file for the current spectrum.

        objID = goodNames[i]
        #if different objects have the same objID, add 'a' to the latter duplicates
        #e.g. three entries of 2018 will become: '2018', '2018a', '2018aa'
        #BSC todo: this is redundantly called every loop, which could be cleaned up.. but it's fast
        if (goodNames == objID).sum() > 1:
            nameIdx = np.argwhere(goodNames == objID)
            numDups = len(nameIdx)
            for j in range(numDups):
                goodNames[nameIdx[j]] = objID + 'a' * j

        curAp = goodNums[i]

        #run scopy with a default file name - the scopy command then saves a
        #file using this default name to be 1dspec.00##.fits' for ap number ##
        scopyName = '1dspec'
        scopyFile = join(specPath, scopyName)
        #        iraf.scopy(input=fpath, output=specFile, apertures=curAp, clobber='yes')
        iraf.scopy(input=fpath,
                   output=scopyFile,
                   apertures=curAp,
                   format='onedspec',
                   clobber='yes')
        #and can then use the specFile var as before -- though this may be redundant

        #need to pause to let the file write
        sleep(1)

        ##Process the 1D spectrum.
        #pass the path for the 1Dspectrum created by scopy: '1dspec.00##.fits':
        pad_ap = format(curAp, '04d')
        specName = '1dspec.' + pad_ap + '.fits'
        specFile = join(specPath, specName)

        cur_objDF, cur_lineDF = run1Dspec(specFile, objID, fieldName, deltaW)

    import completion
    completion.thanksForPlaying()

    #old/outdated return statements (what were they for??)
    objDF = pd.DataFrame()
    lineDF = pd.DataFrame()
    return objDF, lineDF
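A hedged usage sketch for runMultispec above, with a hypothetical input path; it assumes the module's own imports and helpers (iraf, numpy, pandas, run1Dspec, promptLine, etc.):

# Passing skyFile='' skips the sky-line correction branch.
objDF, lineDF = runMultispec('/data/field1/field1_ms.fits', skyFile='')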
Example No. 20
    mclip = 0,\
    lsigma = 0,\
    hsigma = 0,\
    rdnoise = 0,\
    gain = 1,\
    snoise = 0,\
    pclip = -0.5)

iraf.scopy(
    input = "master_smooth.fits",\
    output = "master_smooth.fits",\
    w1 = wave1,\
    w2 = wave2,\
    apertures = "",\
    bands = "",\
    beams = "",\
    apmodulus = 0,\
    format = "multispec",\
    renumber = 0,\
    clobber = 1,\
    merge = 0,\
    rebin = 1,\
    verbose = 1)


# ### Normalise the smooth spectrum
# smooth_min = iraf.imstat(
#     images = "master_smooth.fits[*,1,1]",\
#     fields = "min",\
#     lower = "INDEF",\
#     upper = "INDEF",\
Example No. 21
def cut_apertures(args, infile, outroot, Naps, path='../../spectra'):
    for i in range(Naps):
        iraf.scopy('{0}[sci,1]'.format(infile), '{0}{1}'.format(outroot, i),
                   apertures=i)
    return
Example No. 22
	if ebvh == 0.0:
		# galaxy and host reddening correction
		print '\033[31m'+'*** correcting spectrum for galactic reddening ***\033[0m'
		ebv = ebvg+ebvh
		print '\033[31m Total E(B-V) = galactic E(B-V) = \033[0m',ebv
		try:
			iraf.unlearn("deredden")
			iraf.dered('sn_dez.fits',"sn_dez_dered.fits", value=ebv, R=3.1, type='E(B-V)',overrid='yes',uncorre='no')
			iraf.dered('sn_dez_err1.fits',"sn_dez_dered_err1.fits", value=ebv, R=3.1, type='E(B-V)',overrid='yes',uncorre='no')
			iraf.dered('sn_dez_err2.fits',"sn_dez_dered_err2.fits", value=ebv, R=3.1, type='E(B-V)',overrid='yes',uncorre='no')
		except:
				print 'WARNING: it is not possible to correct the spectrum for galactic reddening'
				try:
					iraf.unlearn("scopy")
					iraf.scopy('sn_dez.fits',"sn_dez_dered.fits", w1='INDEF', w2='INDEF',format='multispec')
					iraf.scopy('sn_dez_err1.fits',"sn_dez_dered_err1.fits", w1='INDEF', w2='INDEF',format='multispec')
					iraf.scopy('sn_dez_err2.fits',"sn_dez_dered_err2.fits", w1='INDEF', w2='INDEF',format='multispec')
				except:
					print 'WARNING: problem copying the spectrum or with the spectrum FITS format'
	else:
		#Galaxy reddening correction
		print '\033[31m'+'*** correcting spectrum for galactic reddening ***\033[0m'
		try:
			iraf.dered(sn + '[*,1,1]',"sn_galdered.fits", value=ebvg, R=3.1, type='E(B-V)')
		except:
			print ' WARNING: it is not possible to correct the spectrum for galactic reddening'
			try:
				iraf.scopy(sn + '[*,1,1]',"sn_galdered.fits", w1='INDEF', w2='INDEF',format='multispec')
			except:
				print ' WARNING: a problem appeared while copying the spectrum, or there is a problem with the spectrum FITS format'
Example No. 23
def stitch():
    # Find the mean between 3050 and 3100 of the blue and uv spectra
    uvhdu = pyfits.open('13dh_uv.fits')
    uvlam = fitshdr_to_wave(uvhdu[0].header)
    w = np.logical_and(uvlam > 3050, uvlam < 3100)
    uvmean = np.median(uvhdu[0].data[1, 0, w])
    uvhdu.close()
    print("uv mean %e" % uvmean)

    bluehdu = pyfits.open('13dh_blue.fits')
    bluelam = fitshdr_to_wave(bluehdu[0].header)
    w = np.logical_and(bluelam > 3050, bluelam < 3100)
    blueuvmean = np.median(bluehdu[0].data[1, 0, w])
    print("blue mean uv %e" % blueuvmean)

    # Find mean of red and blue between 5375 and 5600
    w = np.logical_and(bluelam > 5375, bluelam < 5600)
    blueredmean = bluehdu[0].data[1, 0, w].mean()
    bluehdu.close()
    print("blue mean red %e" % blueredmean)

    redhdu = pyfits.open('13dh_red.fits')
    redlam = fitshdr_to_wave(redhdu[0].header)
    w = np.logical_and(redlam > 5375, redlam < 5600)
    redmean = redhdu[0].data[1, 0, w].mean()
    redhdu.close()
    print("red mean %e" % redmean)

    # trim uv at 3140
    iraf.unlearn(iraf.scopy)
    iraf.scopy('13dh_uv.fits',
               '13dh_uv_trim.fits',
               w1='INDEF',
               w2=3140,
               rebin='no')

    # trim blue at 3130 and 5600
    iraf.unlearn(iraf.scopy)
    iraf.scopy('13dh_blue.fits',
               '13dh_blue_trim.fits',
               w1=3130,
               w2=5600,
               rebin='no')

    # Trim red at 5375
    iraf.unlearn(iraf.scopy)
    iraf.scopy('13dh_red.fits',
               '13dh_red_trim.fits',
               w1=5375,
               w2='INDEF',
               rebin='no')

    # Copy the spectra from the second extraction to the first
    for im in [
            '13dh_uv_trim.fits', '13dh_blue_trim.fits', '13dh_red_trim.fits'
    ]:
        hdu = pyfits.open(im, mode='update')
        hdu[0].data[0, 0, :] = hdu[0].data[1, 0, :]
        hdu.flush()
        hdu.close()

    # Write out the scale factors
    lines = [
        '%f\n' % (blueuvmean / uvmean)**-1, '1.0\n',
        '%f\n' % (blueredmean / redmean)**-1
    ]

    f = open('scales.dat', 'w')
    f.writelines(lines)
    f.close()
    #Scombine with the correct scalings using average
    iraf.unlearn(iraf.scombine)
    iraf.scombine('13dh_uv_trim, 13dh_blue_trim, 13dh_red_trim',
                  '13dh_hst.fits',
                  scale='@scales.dat')
    return
Example No. 24
    print('\n' + 'Applying flux calibration to sensitivity function...')
    iraf.calibrate(
        '@speclist',
        'allspeccal',
        obs='lapalma',
        sens=sensfile,
        extinct='no',
        ignoreaps='yes'
    )  ## If extinct='yes', try extinction ='onedstds$ctioextinct.dat'

    print('\n' +
          'Extracting the 1D spectra from the calibrated multispec file...')
    iraf.scopy('allspeccal[0]',
               'allspeccal2',
               format='onedspec',
               w1=4000,
               w2=8000,
               rebin='no')

    print('\n' + '1D spectrum output...')
    iraf.wspectext('allspeccal2.0001', obj + '_' + date + '.txt', header='no')

    print('\n' + 'Tidying up...')
    a = glob.glob('./*')
    for f in a:
        if 'allspec' in f:
            os.remove(f)

if apply_flux_correction != 'n':

    s = np.loadtxt('./ManualExtractionFluxCorrections.txt',
Example No. 25
    if len(file_list) > 0:
        for f in file_list:
            if (date in f) and (SN in f):
                justplot = True

    if justplot == False:

        print 'Extracting spectrum'

        np.savetxt('.' + input_location + 'allspec', [spectrum + '[4]'],
                   fmt="%s")

        ## begin IRAFing

        ### Extract the 1D spectrum
        iraf.scopy(spectrum + '[4]', '.' + input_location + 'allspeccal2')

        iraf.wspectext('.' + input_location + 'allspeccal2',
                       '.' + input_location + '' + SN + '_' + date + '.txt',
                       header='no')
        print 'Removing allspeccal2.fits'
        os.remove('.' + input_location + 'allspeccal2.fits')
    else:
        print 'Skipping spectrum extraction'

    ### Correct the spectrum

    corrections = glob.glob('./CorrectionFiles/*.txt')
    clist = [float(os.path.basename(c)[:-4]) for c in corrections]
    correction_index = np.argmin(abs(np.array(clist) - airmass))
    corloc = './CorrectionFiles/' + str(clist[correction_index]) + '.txt'
Example No. 26
 file_name = []
 for k in tqdm(np.arange(len(analyse['NAME_S1D'])),
               desc="Reducing " + name_star):
     hdu = fits.open(star_folders[i] + "/untar/" +
                     analyse['NAME_S1D'][k])
     file_name.append(
         analyse['NAME_S1D'][k][0:len(analyse['NAME_S1D'][k]) -
                                5])
     print k, star_folders[i] + "/reduction/" + file_name[k]
     scidata = hdu[0].data
     length = len(scidata) - 1
     w_zero = np.where(scidata == 0)
     low_gap = min(w_zero[0] - 1)
     up_gap = max(w_zero[0] + 1)
     iraf.scopy(
         star_folders[i] + "/untar/" + analyse['NAME_S1D'][k] +
         "[0:" + str(low_gap) + "]", star_folders[i] +
         "/reduction/" + file_name[k] + "_blue.fits")
     iraf.scopy(
         star_folders[i] + "/untar/" + analyse['NAME_S1D'][k] +
         "[" + str(up_gap) + ":" + str(length) + "]",
         star_folders[i] + "/reduction/" + file_name[k] +
         "_red.fits")
     iraf.dopcor(star_folders[i] + "/reduction/" +
                 file_name[k] + "_blue.fits",
                 star_folders[i] + "/reduction/" +
                 file_name[k] + "_blue_d.fits",
                 analyse['CCF_RVC'][k],
                 isveloc="yes")
     iraf.dopcor(star_folders[i] + "/reduction/" +
                 file_name[k] + "_red.fits",
                 star_folders[i] + "/reduction/" +
Example No. 27
    print fits_file

    RV_val = entry[3]

    if os.path.exists(fits_file):
        order = 1
        while order < 5:
            try:
                iraf.scopy(
                    input = fits_file,\
                    output = outdir+"temp/"+str(count)+".fits",\
                    w1 = "INDEF",\
                    w2 = "INDEF",\
                    apertures = order,\
                    bands = "*",\
                    beams = "*",\
                    format = "multispec",\
                    renumber = 1,\
                    offset = 0,\
                    clobber = 1,\
                    merge = 0,\
                    rebin = 1,\
                    verbose = 1)

                iraf.dopcor(
                    input = outdir+"temp/"+str(count)+".fits",\
                    output = outdir+"temp/"+str(count)+".fits",\
                    redshift = RV_val,\
                    isvelocity=1,\
                    add = 1,\
                    dispersion = 1,\
Example No. 28
def find_flux_weights(file_name):
    os.chdir(program_dir)

    if functions.read_config_file("COMBINE_APERTURES") == "false":

        no_apertures = eval(functions.read_config_file("NO_APERTURES"))

        multispec_name = "spec_"+file_name

        os.chdir(file_path_reduced)

        ### Load spectrum
        print "Finding best aperture"

        aperture_flux = []
        i = 1
        while i <= no_apertures:
            try:
                os.system("rm -f " + file_path_reduced + "aperture.fits")
                iraf.scopy(
                    input = file_path_reduced + multispec_name,\
                    output = file_path_reduced + "aperture.fits",\
                    w1 = region_w1,\
                    w2 = region_w2,\
                    apertures = i,\
                    bands = "",\
                    beams = "",\
                    apmodulus = 0,\
                    format = "multispec",\
                    renumber = 1,\
                    offset = 0,\
                    clobber = 1,\
                    merge = 1,\
                    rebin = 1,\
                    verbose = 1)

                spectrum_maximum = iraf.imstat(
                    images = file_path_reduced + "aperture.fits",\
                    fields = "midpt",\
                    lower = "INDEF",\
                    upper = "INDEF",\
                    nclip = 1,\
                    lsigma = 5.0,\
                    usigma = 5.0,\
                    binwidth = 0.1,\
                    format = 1,\
                    cache = 1,\
                    mode = "al",\
                    Stdout = 1)

                spectrum_maximum = float(spectrum_maximum[1])
                aperture_flux.append(spectrum_maximum)
                os.system("rm -f " + file_path_reduced + "aperture.fits")

                i = i + 1

            except (IrafError,ValueError):
                print "Aperture " + str(i) + " not found"
                aperture_flux.append(0)
                i = i + 1

        aperture_flux = array(aperture_flux)
        aperture_flux = aperture_flux / sum(aperture_flux)
        os.chdir(program_dir)
    else:
        aperture_flux = array([1.])
    return aperture_flux
Example No. 29
###########################

### Chop out section norm_w1 - norm_w2 (according to param_file)
### This avoids the balmer jump and all the Ca lines
### Which are hard to normalise

os.system("rm trim_" + file_name)

iraf.scopy(
    input = "fluxcal_" + file_name,\
    output = "trim_" + file_name,\
    w1 = norm_w1,\
    w2 = norm_w2,\
    apertures = "*",\
    bands = "",\
    beams = "",\
    apmodulus = 0,\
    format = "multispec",\
    renumber = 0,\
    offset = 0,\
    clobber = 1,\
    merge = 0,\
    rebin = 1,\
    verbose = 1)

### Apply continuum to normalise
os.system("rm norm_" + file_name)

iraf.continuum(
    input = "trim_" + file_name,\
    output = "norm_" + file_name,\
    lines = "*",\
Example No. 30
def stitch_flats(outputnames,pivots,outstring):
    """Take a list of multispec files and a list of pivots and stitch together a master flat.

    The pivot values are inclusive, so if pivots = [72] then the master flat will contain fibers 1 - 72 from flat #1 and 73 - 109 from flat #2.

    Parameters
    ----------
    
    outputnames : list of str
        Names of the multispec files to stitch together
    
    pivots : list of int
        The fibers that form the borders of the stitch. If outputnames is length N, pivots must be length N - 1
    
    outstring : str
        The special, IRAF scrunch string used to identify intermediate files associated with a **dohydra** run

    Returns
    -------
    
    mastername : str
        The name of the stitched master multispec flat

    Notes
    -----
    The specifics of outstring depend on system and IRAF distribution. See :meth:`GradPak_flatfu.get_scrunch`

    """
    pivots = [0] + pivots + [109]

    tmpfiles = []
    print 'Extracting flat apertures...'
    for i, flat in enumerate(outputnames):
        print '\ttaking {} from {} to {}'.format(flat,pivots[i]+1,pivots[i+1])
        name = 'tmp{}'.format(flat)
        iraf.scopy(flat,name,
                   apertur='{}-{}'.format(pivots[i]+1,pivots[i+1]),
                   w1='INDEF',
                   w2='INDEF',
                   format='multispec',
                   verbose=False)
        tmpfiles.append(name)


    mastername = 'dFlat_master{}.ms.fits'.format(outstring)
    print 'Stitching together master flat {}'.format(mastername)    
        
    iraf.scombine(','.join(tmpfiles),mastername,
                  apertur='',
                  group='apertures',
                  first=True,
                  w1='INDEF',
                  w2='INDEF',
                  dw='INDEF',
                  nw='INDEF',
                  log=False,
                  scale='none',
                  zero='none',
                  weight='none',
                  logfile='flatfu.log')

    for tmp in tmpfiles:
        os.remove(tmp)
    return mastername
Example No. 31
    if no_apertures > 1:
        RV_Standards_templist = ""
        RV_Standards = string.split(RV_Standards, ",")
        for star in RV_Standards:
            if not star == "":
                for i in range(1, no_apertures + 1):
                    iraf.scopy(
                        input=star,
                        output=str(i) + "_" + star,
                        w1=0.0,
                        w2=20000,
                        apertures=i,
                        bands="",
                        beams="",
                        apmodulus=0,
                        format="multispec",
                        renumber=1,
                        offset=0,
                        clobber=1,
                        merge=0,
                        rebin=0,
                        verbose=1,
                    )
                    if os.path.exists(str(i) + "_" + star):
                        RV_Standards_templist = RV_Standards_templist + str(i) + "_" + star + ","
            RV_Standards = RV_Standards_templist

    #################
    ### Run fxcor ###
    #################
Example No. 32
            spectrum = im.replace(".fits", ".ms.fits")
            if not os.path.exists(spectrum):
                continue
            print "working spectrum: ", spectrum
            mspec = MultiSpec(im, spectrum)
            objfibers = mspec.hydra_fibselect(ftype=spectype)
            for i, f in enumerate(objfibers):
                finfo = pf.getval(mspec.image, "SLFIB%s" % f)
                name = finfo.split()[4].lower()
                outspec_name = "%s_%s" % (mspec.image.replace(".fits", ""), name)
                obj = pf.getval(mspec.image, "OBJECT").strip().replace(" ", "_")
                log = "{0:25s}{1:15s}{2} {3}".format(outspec_name, night, finfo, obj)
                coords.append(log)
                outspec = os.path.join(outdir, outspec_name)
                if not os.path.exists(outspec):
                    iraf.scopy(
                        input=spectrum, output=outspec, w1="INDEF", w2="INDEF", apertures=objfibers[i], clobber="yes"
                    )
                chdir(outdir)
                data = pf.getdata(outspec_name + ".fits")
                h = pf.getheader(outspec_name + ".fits")
                h["FINFO"] = (finfo, "Coordinates info")
                pf.writeto(outspec_name + ".fits", data, h, clobber=True)
                chdir(wdir)

    coordfile = os.path.join(tables_dir, "coords.dat")
    with open(coordfile, "w") as f:
        f.write("\n".join(coords))
    print "End"
Example No. 33
def flux_cal_new(objname, stdobjnames):
    print 'run flux_cal...'
    iraf.imred()
    iraf.kpnoslit()
    stdname, stdmag, stdmagband = get_std_name(stdobjnames[0])
    print 'the standard star is ' + stdname
    stdmag = float(stdmag)
    stdairmass, stdexptime = get_fits_airmass_exptime(stdobjnames[0])
    scripath = sys.argv[0]
    tempindex = scripath.rfind(os.sep)
    scripath = scripath[:tempindex]
    extpath = scripath + os.sep + 'LJextinct.dat'
    calpath = scripath + os.sep + 'standarddir' + os.sep
    outname1 = 'std' + stdobjnames[0]
    stdobjname = ''
    for tempstdname in stdobjnames:
        stdobjname = stdobjname + tempstdname + ','
    stdobjname = stdobjname[:-1]
    if os.path.isfile(outname1):
        print 'file %s already exists' % outname1
    else:
        print 'run standard...'
        print 'make file %s ...' % outname1
        iraf.standard(input = stdobjname
                , output = outname1, samestar = True, beam_switch = False
                , apertures = '', bandwidth = 30.0, bandsep = 20.0
                , fnuzero = 3.6800000000000E-20, extinction = extpath
                , caldir = calpath, observatory = ')_.observatory'
                , interact = True, graphics = 'stdgraph', cursor = ''
                , star_name = stdname, airmass = stdairmass, exptime = stdexptime
                , mag = stdmag, magband = stdmagband, teff = '', answer = 'yes')
    outname2 = 'sens' + stdobjname
    if os.path.isfile(outname2):
        print 'file %s already exists' % outname2
    else:
        print 'run sensfunc...'
        print 'make file %s ...' % outname2
        iraf.sensfunc(standards = outname1
                , sensitivity = outname2, apertures = '', ignoreaps = True
                , logfile = 'logfile', extinction = ')_.extinction'
                , newextinction = extpath, observatory = 'bao', function = 'spline3'
                , order = 6, interactive = True, graphs = 'sr'
                , marks = 'plus cross box', colors = '2 1 3 4', cursor =''
                , device = 'stdgraph', answer = 'yes')
    outname3 = 'c' + objname
    if os.path.isfile(outname3):
        print 'file %s already exists' % outname3
    else:
        print 'run calibrate...'
        print 'make file %s ...' % outname3
        iraf.calibrate(input = objname
                , output = outname3, extinct = True, flux = True
                , extinction = extpath, observatory = 'bao', ignoreaps = True
                , sensitivity = outname2, fnu = False)
    final_outname = 'mark_' + objname
    if os.path.isfile(final_outname):
        print 'file %s already exists' % final_outname
    else:
        print 'run scopy'
        print 'make file %s ...' % final_outname
        iraf.scopy(input = outname3
                , output = final_outname, w1 = 'INDEF', w2 = 'INDEF'
                , apertures = '', bands = 1, beams = '', apmodulus = 0
                , format = 'multispec', renumber = False, offset = 0
                , clobber = False, merge = False, rebin = True, verbose = False)
    print 'splot %s' % final_outname
    iraf.splot(images = final_outname)
    return final_outname
Example No. 34
        iraf.calibrate(file, 'f'+file)

#combine spectra
finalcomb = []
for file in os.listdir(os.getcwd()):
    if file.startswith('fds'):
        finalcomb.append(file)


file1 = open('listascombine', 'w')
file1.writelines(["%s\n" % item  for item in finalcomb])
file1.close()

iraf.scombine('@listascombine', 'temp_quick.fits')

iraf.scopy('temp_quick.fits',target+'_quick.fits',w1='4000',w2='9000')

#mv final file in a dedicated folder
shutil.copy(target+'_quick.fits','./output/.')

#remove temp FILES
os.remove('lapalmaextinct.dat')
for file in os.listdir(os.getcwd()):
    if file.endswith('fits'):
        os.remove(file)
    elif file.startswith('lista'):
        os.remove(file)
    elif file.startswith('log'):
        os.remove(file)

Example No. 35
    # ### so that doesn't have to be separated.

    if no_apertures > 1:
        RV_Standards_templist = ""
        RV_Standards = string.split(RV_Standards,",")
        for star in RV_Standards:
            if not star == "":
                for i in range(1,no_apertures+1):
                    iraf.scopy(
                        input = star,\
                        output = str(i)+"_"+star,\
                        w1 = 0.0,\
                        w2 = 20000,\
                        apertures = i,\
                        bands = "",\
                        beams = "",\
                        apmodulus = 0,\
                        format = "multispec",\
                        renumber = 1,\
                        offset = 0,\
                        clobber = 1,\
                        merge = 0,\
                        rebin = 0,\
                        verbose = 1)
                    if os.path.exists(str(i)+"_"+star):
                        RV_Standards_templist = RV_Standards_templist + str(i) + "_" + star + ","
            RV_Standards = RV_Standards_templist

    #################
    ### Run fxcor ###
    #################