Example #1
    def test_compressed_image_data_float32(self):
        n = np.arange(100, dtype='float32')
        hdu = fits.ImageHDU(n)
        comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
        comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
        hdu.writeto(self.temp('uncomp.fits'), checksum=True)
        with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
            assert np.all(hdul[1].data == comp_hdu.data)
            assert np.all(hdul[1].data == hdu.data)
            assert 'CHECKSUM' in hdul[0].header
            assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
            assert 'DATASUM' in hdul[0].header
            assert hdul[0].header['DATASUM'] == '0'

            assert 'CHECKSUM' in hdul[1].header
            assert 'DATASUM' in hdul[1].header

            if not sys.platform.startswith('win32'):
                # The checksum ends up being different on Windows, possibly
                # due to slight floating point differences (see Example #3)
                assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH'
                assert hdul[1]._header['DATASUM'] == '1277667818'

            with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
                header_comp = hdul[1]._header
                header_uncomp = hdul2[1].header
                assert 'ZHECKSUM' in header_comp
                assert 'CHECKSUM' in header_uncomp
                assert header_uncomp['CHECKSUM'] == 'Cgr5FZo2Cdo2CZo2'
                assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
                assert 'ZDATASUM' in header_comp
                assert 'DATASUM' in header_uncomp
                assert header_uncomp['DATASUM'] == '2393636889'
                assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
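A standalone sketch of the behavior these assertions pin down, assuming fits is astropy.io.fits as in the test and 'comp.fits' is a hypothetical path: writing with checksum=True adds CHECKSUM/DATASUM cards to every HDU, and a CompImageHDU additionally carries ZHECKSUM/ZDATASUM, which preserve the checksum of the original, uncompressed image:

import numpy as np
from astropy.io import fits

data = np.arange(100, dtype='float32')
fits.CompImageHDU(data).writeto('comp.fits', checksum=True, overwrite=True)

with fits.open('comp.fits', checksum=True) as hdul:
    hdr = hdul[1]._header  # the real compressed bintable header (private, as in the tests)
    # CHECKSUM/DATASUM cover the compressed bintable itself...
    print(hdr['CHECKSUM'], hdr['DATASUM'])
    # ...while ZHECKSUM/ZDATASUM record the checksum of the uncompressed image
    print(hdr['ZHECKSUM'], hdr['ZDATASUM'])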
Example #2
    def test_compressed_image_data_int16(self):
        n = np.arange(100, dtype='int16')
        hdu = fits.ImageHDU(n)
        comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
        comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
        hdu.writeto(self.temp('uncomp.fits'), checksum=True)
        with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
            assert np.all(hdul[1].data == comp_hdu.data)
            assert np.all(hdul[1].data == hdu.data)
            assert 'CHECKSUM' in hdul[0].header
            assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
            assert 'DATASUM' in hdul[0].header
            assert hdul[0].header['DATASUM'] == '0'

            assert 'CHECKSUM' in hdul[1].header
            assert hdul[1]._header['CHECKSUM'] == 'J5cCJ5c9J5cAJ5c9'
            assert 'DATASUM' in hdul[1].header
            assert hdul[1]._header['DATASUM'] == '2453673070'

            with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
                header_comp = hdul[1]._header
                header_uncomp = hdul2[1].header
                assert 'ZHECKSUM' in header_comp
                assert 'CHECKSUM' in header_uncomp
                assert header_uncomp['CHECKSUM'] == 'ZE94eE91ZE91bE91'
                assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
                assert 'ZDATASUM' in header_comp
                assert 'DATASUM' in header_uncomp
                assert header_uncomp['DATASUM'] == '160565700'
                assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
Example #3
    def test_compressed_image_data_float32(self):
        n = np.arange(100, dtype='float32')
        hdu = fits.ImageHDU(n)
        comp_hdu = fits.CompImageHDU(hdu.data, hdu.header)
        comp_hdu.writeto(self.temp('tmp.fits'), checksum=True)
        hdu.writeto(self.temp('uncomp.fits'), checksum=True)
        with fits.open(self.temp('tmp.fits'), checksum=True) as hdul:
            assert np.all(hdul[1].data == comp_hdu.data)
            assert np.all(hdul[1].data == hdu.data)
            assert 'CHECKSUM' in hdul[0].header
            assert hdul[0].header['CHECKSUM'] == 'D8iBD6ZAD6fAD6ZA'
            assert 'DATASUM' in hdul[0].header
            assert hdul[0].header['DATASUM'] == '0'

            assert 'CHECKSUM' in hdul[1].header
            assert 'DATASUM' in hdul[1].header
            if sys.platform != 'win32':
                # The checksum ends up being different on Windows, possibly due
                # to slight floating point differences
                # TODO: In Astropy mark these properly as known fail
                assert hdul[1]._header['CHECKSUM'] == 'eATIf3SHe9SHe9SH'
                assert hdul[1]._header['DATASUM'] == '1277667818'

            with fits.open(self.temp('uncomp.fits'), checksum=True) as hdul2:
                header_comp = hdul[1]._header
                header_uncomp = hdul2[1].header
                assert 'ZHECKSUM' in header_comp
                assert 'CHECKSUM' in header_uncomp
                assert header_uncomp['CHECKSUM'] == 'Cgr5FZo2Cdo2CZo2'
                assert header_comp['ZHECKSUM'] == header_uncomp['CHECKSUM']
                assert 'ZDATASUM' in header_comp
                assert 'DATASUM' in header_uncomp
                assert header_uncomp['DATASUM'] == '2393636889'
                assert header_comp['ZDATASUM'] == header_uncomp['DATASUM']
Example #4
    def _test_uint(self, utype, compressed):
        bits = 8 * int(utype[1])
        if platform.architecture()[0] == '64bit' or bits != 64:
            if compressed:
                hdu = fits.CompImageHDU(np.array([-3, -2, -1, 0, 1, 2, 3]))
                hdu_number = 1
            else:
                hdu = fits.PrimaryHDU(np.array([-3, -2, -1, 0, 1, 2, 3]))
                hdu_number = 0
            hdu.scale('int%s' % bits, '', bzero=2**(bits - 1))

            with ignore_warnings():
                hdu.writeto(self.temp('tempfile.fits'), clobber=True)

            with fits.open(self.temp('tempfile.fits'), uint=True) as hdul:
                assert hdul[hdu_number].data.dtype == self.utype_map[utype]
                assert (hdul[hdu_number].data == np.array(
                    [(2**bits) - 3, (2**bits) - 2, (2**bits) - 1, 0, 1, 2, 3],
                    dtype=self.utype_map[utype])).all()

                with ignore_warnings():
                    hdul.writeto(self.temp('tempfile1.fits'), clobber=True)

                with fits.open(self.temp('tempfile1.fits'),
                               uint=True) as hdul1:
                    d1 = hdul[hdu_number].data
                    d2 = hdul1[hdu_number].data
                    assert (d1 == d2).all()
                    if not compressed:
                        # TODO: Enable these lines if CompImageHDUs ever grow
                        # .section support
                        sec = hdul[hdu_number].section[:1]
                        assert sec.dtype.name == 'uint%s' % bits
                        assert (sec == d1[:1]).all()
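The bzero=2**(bits - 1) scaling above is the standard FITS convention for unsigned integers: the file stores signed values offset by BZERO, and uint=True (the default in recent astropy versions) undoes the offset on read. A minimal round-trip sketch, assuming astropy.io.fits and a hypothetical 'uint.fits' path:

import numpy as np
from astropy.io import fits

data = np.array([0, 1, 65535], dtype=np.uint16)
# on disk the values are int16 offset by BZERO = 2**15 = 32768
fits.PrimaryHDU(data).writeto('uint.fits', overwrite=True)

with fits.open('uint.fits', uint=True) as hdul:
    assert hdul[0].data.dtype == np.uint16
    assert (hdul[0].data == data).all()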
Example #5
def _add_hdu(hdus, data, pyfits_compress):
    import pyfits
    if pyfits_compress:
        if len(hdus) == 0:
            hdus.append(pyfits.PrimaryHDU())  # Need a blank PrimaryHDU
        hdu = pyfits.CompImageHDU(data, compressionType=pyfits_compress)
    else:
        if len(hdus) == 0:
            hdu = pyfits.PrimaryHDU(data)
        else:
            hdu = pyfits.ImageHDU(data)
    hdus.append(hdu)
    return hdu
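The len(hdus) == 0 special case encodes the FITS rule that a tile-compressed image lives in a BINTABLE extension and so cannot be the primary HDU; a blank PrimaryHDU must come first. A sketch of the same layout in modern astropy, where the old pyfits compressionType keyword is spelled compression_type ('compressed.fits' is a hypothetical path):

import numpy as np
from astropy.io import fits

data = np.arange(100.0).reshape(10, 10)
hdul = fits.HDUList([
    fits.PrimaryHDU(),  # blank primary: a compressed image cannot be the first HDU
    fits.CompImageHDU(data, compression_type='RICE_1'),
])
hdul.writeto('compressed.fits', overwrite=True)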
Example #6
    def test_identical_comp_image_hdus(self):
        """Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/189

        For this test we mostly just care that comparing two compressed images
        does not crash, and returns the correct results.  Two compressed images
        will be considered identical if the decompressed data is the same.
        Obviously we test whether or not the same compression was used by
        looking for (or ignoring) header differences.
        """

        data = np.arange(100.0).reshape((10, 10))
        hdu = fits.CompImageHDU(data=data)
        hdu.writeto(self.temp('test.fits'))
        with fits.open(self.temp('test.fits')) as hdula, \
                fits.open(self.temp('test.fits')) as hdulb:
            diff = FITSDiff(hdula, hdulb)
            assert diff.identical
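A hedged sketch of the semantics the docstring describes, assuming astropy and hypothetical file names: two files whose compressed HDUs decompress to the same data compare as identical, and compression-specific keywords can be excluded explicitly via FITSDiff's ignore_keywords option if the files were written with different settings:

import numpy as np
from astropy.io import fits
from astropy.io.fits import FITSDiff

data = np.arange(100.0).reshape((10, 10))
fits.CompImageHDU(data=data).writeto('a.fits', overwrite=True)
fits.CompImageHDU(data=data).writeto('b.fits', overwrite=True)

with fits.open('a.fits') as hdula, fits.open('b.fits') as hdulb:
    # identical because the decompressed data match
    assert FITSDiff(hdula, hdulb).identical
    # compression keywords could be skipped explicitly if they differed
    diff = FITSDiff(hdula, hdulb, ignore_keywords=['ZCMPTYPE'])
    print(diff.report())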
Example #7
def esi_reduce(date):

    #Get edge masks from file
    orders_mask = pickle.load(
        open(str(date) + '/Calibs/orders_mask_' + str(date) + '.p', 'rb'))
    background_mask = pickle.load(
        open(str(date) + '/Calibs/background_mask_' + str(date) + '.p', 'rb'))

    #Bias
    bias = pyfits.getdata(str(date) + '/Calibs/bias_' + str(date) +
                          '.fits')[:, 25:2070]

    #Normalized Flat
    flat = pyfits.getdata(
        str(date) + '/Calibs/norm_flat_' + str(date) + '.fits')

    #READ LOG
    im1 = open(str(date) + '/Logs/esi_info_' + str(date) + '.dat', 'r')
    data1 = im1.readlines()
    im1.close()

    filename = []
    dateobs = []
    objname = []
    imgtype = []
    ra = []
    dec = []
    exptime = []
    usable = []

    for line in data1:
        p = line.split()
        filename.append(p[0])
        dateobs.append(p[1])
        objname.append(p[2])
        imgtype.append(p[3])
        ra.append(p[4])
        dec.append(p[5])
        exptime.append(p[6])
        usable.append(p[7])

    #Rewrite in a more convenient array with format array[line][element]
    alldata = []

    for line in range(len(usable)):
        alldata.append([
            filename[line], dateobs[line], objname[line], imgtype[line],
            ra[line], dec[line], exptime[line], usable[line]
        ])

    #Find good files:
    good = []
    for line in range(len(alldata)):
        if "yes" in alldata[line][7]:
            good.append(alldata[line])

    #Find list of objects
    names = []
    for line in range(len(alldata)):
        if ("Object" in alldata[line][3] and float(alldata[line][6]) > 600):
            names.append(alldata[line][2])
    objects = np.array(list(set(names)))
    objects.sort()  #ascending order, modify in place

    #Make directory to hold reduced files
    if not os.path.exists(str(date) + '/Calibs/reduced/'):
        os.makedirs(str(date) + '/Calibs/reduced/')

    #Reduce for each object
    for obj_id in objects:

        print "reducing " + str(obj_id) + '...'

        #Find files related to obj_id:
        aobj_id = []
        for line in range(len(good)):
            if str(obj_id) in good[line][2]:
                aobj_id.append(good[line])
        print(str(len(aobj_id)), "files for " + str(obj_id))

        if len(aobj_id) == 0:
            continue

        #Write Path to each object's files
        obj_locs = [
            str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
            str(line + 1) + 'decos.fits' for line in range(len(aobj_id))
        ]

        #Read in Data
        all_sci = []
        for line in obj_locs:
            sci = pyfits.getdata(line)[:, 25:2070]
            all_sci.append(sci)

        #Get a mask for remaining cosmic rays; mean combine
        print "averaging with rejection..."
        filtered = sigma_clip(all_sci,
                              axis=0,
                              copy=False,
                              varfunc=np.median,  # older astropy.stats keywords;
                              sig=15)  # current releases spell these stdfunc=/sigma=
        filtered = (filtered - bias) / flat

        mean = np.mean(filtered, axis=0)
        mean.data[background_mask] = 0

        hdu = pyfits.CompImageHDU(mean.data)
        hdu.writeto(str(date) + '/Calibs/reduced/' + str(obj_id) +
                    '_mean.fits',
                    clobber=True)
        #pdb.set_trace()

    #Reduce lines
    names = []
    for line in range(len(alldata)):
        if ("Line" in alldata[line][3]) or ("*" in alldata[line][2]):
            names.append(alldata[line][2])
    objects = np.array(list(set(names)))
    objects.sort()  #ascending order, modify in place

    #Reduce for each line or star (because not decosmicified)
    for obj_id in objects:

        print "reducing " + str(obj_id) + '...'

        #Find files related to obj_id:
        aobj_id = []
        for line in range(len(good)):
            if str(obj_id) in good[line][2]:
                aobj_id.append(good[line])
        print(str(len(aobj_id)), "files for " + str(obj_id))

        if len(aobj_id) == 0:
            continue

        #Write Path to each object's files
        obj_locs = [
            str(date) + '/Raw/' + str(aobj_id[line][0])
            for line in range(len(aobj_id))
        ]

        #Read in Data
        all_sci = []
        for line in obj_locs:
            sci = pyfits.getdata(line)[:, 25:2070]
            sci = (sci - bias) / flat
            all_sci.append(sci)

        #median, not mean, for lines
        mean = np.median(all_sci, axis=0)
        mean[background_mask] = 0
        hdu = pyfits.CompImageHDU(mean)
        hdu.writeto(str(date) + '/Calibs/reduced/' + str(obj_id) +
                    '_mean.fits',
                    clobber=True)

    #Make variance image for each science image:
    #The break in the amplifiers is at 1022-1023. Everything increases at 1023.

    names = []
    for line in range(len(alldata)):
        if ("Object" in alldata[line][3]
                and float(alldata[line][6]) > 600) or ("*"
                                                       in alldata[line][2]):
            names.append(alldata[line][2])
    objects = np.array(list(set(names)))
    objects.sort()  #ascending order, modify in place

    for obj_id in objects:

        print "making variance for " + str(obj_id) + '...'

        #Find files related to obj_id:
        aobj_id = []
        for line in range(len(good)):
            if str(obj_id) in good[line][2]:
                aobj_id.append(good[line])
        print(str(len(aobj_id)), "files for " + str(obj_id))

        if len(aobj_id) == 0:
            continue

        #Write Path to each object's files
        obj_locs = [
            str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
            str(line + 1) + 'decos.fits' for line in range(len(aobj_id))
        ]

        #CALCULATE EACH NOISE CONTRIBUTION SEPARATELY

        #Read in Data
        all_var = []
        for line in range(len(obj_locs)):
            im = pyfits.getdata(obj_locs[line])[:, 25:2070]
            mask = bias > im  #to avoid taking sqrt(negative)
            im = im - bias

            rn = np.zeros((4096, 2045))
            #make gain mask (the amplifier break is between columns 1022/1023,
            #so initialize with zeros to avoid leaving column 1022 undefined)
            right_mask = np.zeros(flat.shape, dtype=bool)
            right_mask[:, 1023:] = True
            left_mask = ~right_mask  # '~', not '-': unary minus on bool arrays errors in modern numpy

            #MAKE A READ NOISE FRAME
            #random bias image

            biases = []
            for b in range(len(good)):  # 'b', not 'line': avoid shadowing the outer loop variable
                if "Bias" in good[b][3] and "Bias" in good[b][2]:
                    biases.append(good[b])
            rand_path = str(date) + '/Raw/' + str(
                biases[0][0])  #pick the first bias.
            rand_bias = pyfits.getdata(rand_path)[:, 25:2070]

            read_noise = rand_bias - bias

            #Big enough square to be representative
            left_rn = np.std(read_noise[2000:2100, 920:1020])
            right_rn = np.std(read_noise[2000:2100, 1030:1130])

            rn[left_mask] = left_rn
            rn[right_mask] = right_rn
            #as expected, the read noise is a bit higher on the right side.

            #GAIN AND POISSON NOISE
            #gain on right side == 1.29 e-/DN
            poisson = np.zeros((4096, 2045))
            left_bias = np.mean(bias[2000:2100, 920:1020])
            right_bias = np.mean(bias[2000:2100, 1030:1130])

            left_gain = 1.29

            right_gain = left_gain * left_bias / right_bias
            #as expected, gain a little lower on right side

            poisson[left_mask] = np.sqrt(im[left_mask] / left_gain)
            poisson[right_mask] = np.sqrt(im[right_mask] / right_gain)
            poisson[mask] = np.median(poisson)

            noise = np.sqrt(rn**2 + poisson**2)
            noise = noise / flat

            #variance = 1/noise**2

            #noise = (noise - bias)/flat
            #noise[background_mask] = 0
            all_var.append(noise)
            #write to file
            #fits = pyfits.PrimaryHDU(variance)
            #fits.writeto('Calibs/variance/'+str(obj_id)+'_'+str(line + 1)+'_var.fits', clobber = True)

        #Make directory to hold variance files
        if not os.path.exists(str(date) + '/Calibs/variance/'):
            os.makedirs(str(date) + '/Calibs/variance/')

        #Combine variance images for each object
        tot_noise = np.sqrt(
            np.sum([(sigma / len(all_var))**2 for sigma in all_var], axis=0))
        tot_noise[background_mask] = 0

        fits = pyfits.CompImageHDU(tot_noise)
        fits.writeto(str(date) + '/Calibs/variance/' + str(obj_id) +
                     '_noise.fits',
                     clobber=True)
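The tot_noise combination above is standard error propagation for the mean of N independent frames: sigma_mean = sqrt(sum_i (sigma_i / N)**2), which reduces to sigma / sqrt(N) when every frame has the same noise. A quick numpy check of that identity (frame count and noise level are illustrative):

import numpy as np

n_frames, sigma = 4, 2.0
all_var = [np.full((8, 8), sigma) for _ in range(n_frames)]

# error propagation for a mean of independent frames:
# sigma_mean = sqrt(sum_i (sigma_i / N)**2)
tot_noise = np.sqrt(
    np.sum([(s / len(all_var))**2 for s in all_var], axis=0))

# with equal per-frame noise this is just sigma / sqrt(N)
assert np.allclose(tot_noise, sigma / np.sqrt(n_frames))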
Example #8
    def generate_fits(self, out_dir='', filename=None, ldcoeffs=None):
        """ Saves the exposure as a HST style fits file.

        :param out_dir: director to save output
        :type out_dir: str
        :param filename: filename to save the fits
        :type filename: str

        :return:
        """

        assert (len(self.reads) == (self.exp_info['NSAMP'])), \
            'Reads {} != NSAMP {}'.format(len(self.reads),
                                          self.exp_info['NSAMP'])

        if filename is None:
            filename = self.exp_info['filename']

        out_path = os.path.join(out_dir, filename)

        science_header = self.generate_science_header(ldcoeffs=ldcoeffs)

        hdulist = fits.HDUList([science_header])

        compression = 'RICE_1'

        for i, (data, header) in enumerate(reversed(self.reads)):
            # compression of the science read is currently disabled as it's
            # producing stripey data, so a plain ImageHDU is used here
            header.set('SAMPNUM', len(self.reads) - 1 - i)
            read_HDU = fits.ImageHDU(data, header, name='SCI')

            error_array = fits.CompImageHDU(compression_type=compression,
                                            name='ERR')
            """ This array contains 16 independent flags indicating various
            status and problem conditions associated with each corresponding
            pixel in the science image. Each flag has a true (set) or false
            (unset) state and is encoded as a bit in a 16-bit integer word.
            Users are advised that this word should not be interpreted as a
            simple integer, but must be converted to base-2 and each bit
            interpreted as a flag. Table 2.5 lists the WFC3 data quality flags.
            """
            data_quality_array = fits.CompImageHDU(
                compression_type=compression, name='DQ')
            """ This array is present only for IR data. It is a 16-bit integer
            array and contains the number of samples used to derive the
            corresponding pixel values in the science image. For raw and
            intermediate data files, the sample values are set to the number of
            readouts that contributed to the science image. For calibrated
            files, the SAMP array contains the total number of valid samples
            used to compute the final science image pixel value, obtained by
            combining the data from all the readouts and rejecting cosmic
            ray hits and saturated pixels. Similarly, when multiple
            exposures (i.e., REPEAT-OBS) are combined to produce a single
            image, the SAMP array contains the total number of samples
            retained at each pixel for all the exposures.
            """
            samples_HDU = fits.CompImageHDU(compression_type=compression,
                                            name='SAMP')
            """ This array is present only for IR data. This is a
            floating-point array that contains the effective integration
            time associated with each corresponding science image pixel value.
            For raw and intermediate data files, the time value is the total
            integration time of data that contributed to the science image.
            For calibrated datasets, the TIME array contains the combined
            exposure time of the valid readouts or exposures that were used
            to compute the final science image pixel value, after rejection of
            cosmic rays and saturated pixels from the intermediate data.
            """
            integration_time_HDU = fits.CompImageHDU(
                compression_type=compression, name='TIME')

            hdulist.extend([
                read_HDU, error_array, data_quality_array, samples_HDU,
                integration_time_HDU
            ])

        if os.path.isfile(out_path):
            os.remove(out_path)  # portable replacement for shelling out to rm

        hdulist.writeto(out_path)
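The DQ docstring above describes a bit-mask convention: each of the 16 flags occupies one bit of the integer word, so individual conditions are tested with bitwise AND rather than by treating the value as a plain number. A minimal sketch with hypothetical flag values (the real WFC3 assignments are listed in Table 2.5 of the instrument data handbook):

import numpy as np

# hypothetical flag bits for illustration only
BAD_DETECTOR_PIXEL = 1 << 2
COSMIC_RAY = 1 << 13

dq = np.array([0, BAD_DETECTOR_PIXEL, BAD_DETECTOR_PIXEL | COSMIC_RAY],
              dtype=np.uint16)

# a bitwise AND tests one flag independently of any others that are set
is_cosmic = (dq & COSMIC_RAY) != 0
print(is_cosmic)  # [False False  True]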
Example #9
def esi_cosmic(date):

    #Get edge masks from file
    orders_mask = pickle.load(
        open(str(date) + '/Calibs/orders_mask_' + str(date) + '.p', 'rb'))
    background_mask = pickle.load(
        open(str(date) + '/Calibs/background_mask_' + str(date) + '.p', 'rb'))

    #Bias
    bias = pyfits.getdata(str(date) + '/Calibs/bias_' + str(date) + '.fits')

    #Normalized Flat
    flat = pyfits.getdata(
        str(date) + '/Calibs/norm_flat_' + str(date) + '.fits')

    #READ LOG
    im1 = open(str(date) + '/Logs/esi_info_' + str(date) + '.dat', 'r')
    data1 = im1.readlines()
    im1.close()

    filename = []
    dateobs = []
    objname = []
    imgtype = []
    ra = []
    dec = []
    exptime = []
    usable = []

    for line in data1:
        p = line.split()
        filename.append(p[0])
        dateobs.append(p[1])
        objname.append(p[2])
        imgtype.append(p[3])
        ra.append(p[4])
        dec.append(p[5])
        exptime.append(p[6])
        usable.append(p[7])

    #Rewrite in a more convenient array with format array[line][element]
    alldata = []

    for line in range(len(usable)):
        alldata.append([
            filename[line], dateobs[line], objname[line], imgtype[line],
            ra[line], dec[line], exptime[line], usable[line]
        ])

    #Find good files:
    good = []
    for line in range(len(alldata)):
        if "yes" in alldata[line][7]:
            good.append(alldata[line])

    #Find list of objects:
    names = []
    for line in range(len(alldata)):
        if ("Object" in alldata[line][3]
                and float(alldata[line][6]) > 600) or "*" in alldata[line][2]:
            names.append(alldata[line][2])
    objects = np.array(list(set(names)))
    objects.sort()  #ascending order, modify in place

    #Find list of lines
    names = []
    for line in range(len(alldata)):
        if "Line" in alldata[line][3]:
            names.append(alldata[line][2])
    lines = np.array(list(set(names)))
    lines.sort()  #ascending order, modify in place

    #Find list of stars:
    names = []
    for line in range(len(alldata)):
        if "*" in alldata[line][2]:
            names.append(alldata[line][2])
    stars = np.array(list(set(names)))
    stars.sort()  #ascending order, modify in place

    #Make directory to hold decosmicified files
    if not os.path.exists(str(date) + '/Calibs/cosmicless/'):
        os.makedirs(str(date) + '/Calibs/cosmicless/')

    #Remove cosmics from each lamp
    for obj_id in lines:
        print "reducing " + str(obj_id) + '...'

        #Find files related to this object
        aobj_id = []
        for line in range(len(good)):
            if str(obj_id) in good[line][2]:
                aobj_id.append(good[line])
        print(str(len(aobj_id)), "files for " + str(obj_id))

        #Write path to each objects files:
        obj_locs = [
            str(date) + '/Raw/' + str(aobj_id[line][0])
            for line in range(len(aobj_id))
        ]

        #Read in and de-cosmify
        for line in range(len(obj_locs)):

            array, header = cosmics.fromfits(obj_locs[line])
            #array = array - bias #backwards for some reason
            c = cosmics.cosmicsimage(array,
                                     gain=1.29,
                                     readnoise=2.2,
                                     sigclip=6,
                                     objlim=5.0,
                                     sigfrac=0.7,
                                     satlevel=1e4)
            c.run(maxiter=3)  # can increase up to 4 to improve precision, but takes longer
            cosmics.tofits(
                str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                str(line + 1) + 'decos.fits', c.cleanarray, header)

            #now zip it (to save space)
            f = pyfits.getdata(
                str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                str(line + 1) + 'decos.fits')
            hdu = pyfits.CompImageHDU(f)
            hdu.writeto(str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                        str(line + 1) + 'decos.fits',
                        clobber=True)

    #Remove cosmics from each object
    for obj_id in objects:
        print "reducing " + str(obj_id) + '...'

        #Find files related to this object
        aobj_id = []
        for line in range(len(good)):
            if str(obj_id) in good[line][2]:
                aobj_id.append(good[line])
        print(str(len(aobj_id)), "files for " + str(obj_id))

        #Write path to each objects files:
        obj_locs = [
            str(date) + '/Raw/' + str(aobj_id[line][0])
            for line in range(len(aobj_id))
        ]

        #Read in and de-cosmify
        for line in range(len(obj_locs)):

            array, header = cosmics.fromfits(obj_locs[line])
            #array = array - bias #backwards for some reason
            c = cosmics.cosmicsimage(array,
                                     gain=1.29,
                                     readnoise=2.2,
                                     sigclip=3.8,
                                     objlim=3.0,
                                     sigfrac=0.7,
                                     satlevel=-1)
            c.run(maxiter=3)  # can increase up to 4 to improve precision, but takes longer
            cosmics.tofits(
                str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                str(line + 1) + 'decos.fits', c.cleanarray, header)

            #now zip it (to save space)
            f = pyfits.getdata(
                str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                str(line + 1) + 'decos.fits')
            hdu = pyfits.CompImageHDU(f)
            hdu.writeto(str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                        str(line + 1) + 'decos.fits',
                        clobber=True)

    #Remove cosmics from each star
    for obj_id in stars:
        print "reducing " + str(obj_id) + '...'

        #Find files related to this object
        aobj_id = []
        for line in range(len(good)):
            if str(obj_id) == good[line][2]:
                aobj_id.append(good[line])
        print(str(len(aobj_id)), "files for " + str(obj_id))

        #Write path to each objects files:
        obj_locs = [
            str(date) + '/Raw/' + str(aobj_id[line][0])
            for line in range(len(aobj_id))
        ]

        #Read in and de-cosmify
        for line in range(len(obj_locs)):

            array, header = cosmics.fromfits(obj_locs[line])
            #array = array - bias #backwards for some reason
            c = cosmics.cosmicsimage(array,
                                     gain=1.29,
                                     readnoise=2.2,
                                     sigclip=10,
                                     objlim=7.0,
                                     sigfrac=0.7,
                                     satlevel=15000)
            c.run(maxiter=3)  # can increase up to 4 to improve precision, but takes longer
            cosmics.tofits(
                str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                str(line + 1) + 'decos.fits', c.cleanarray, header)

            #now zip it (to save space)
            f = pyfits.getdata(
                str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                str(line + 1) + 'decos.fits')
            hdu = pyfits.CompImageHDU(f)
            hdu.writeto(str(date) + '/Calibs/cosmicless/' + str(obj_id) + '_' +
                        str(line + 1) + 'decos.fits',
                        clobber=True)