Example #1
def darkmaker():
    # Build a master dark: split the input list into chunks of 50,
    # median-combine each chunk, then average the chunk medians and
    # write the result to disk.

    with open_fits_file(biasname) as hdulist:
        bias = hdulist[0].data
    position = 0
    i = 1
    # Distribute the input filenames into numbered chunk files of 50
    for line in open(inlist):
        fname = outdir+'dsorted'+"{0:03d}".format(position)
        with open(fname, 'a') as f:
            f.write(line)
        if i == 50:
            i = 0
            position += 1
        i += 1

    os.system('ls '+outdir+'dsorted* >removeindexlist.dat')


    i = 1
    mastermatrix = []  # collects the master dark of every chunk

    # Median-combine each chunk of dark frames
    for line in open('removeindexlist.dat'):

        datamatrix = []
        call = line.strip('\n')
        for frame in open(call):
            frame = frame.strip()
            with open_fits_file(frame) as hdulist:
                overscan = extract_overscan(hdulist)
                data = hdulist[0].data[0:2048,20:2068]
                exposure = hdulist[0].header['exposure']
            corrected = (data-np.median(overscan)-bias)/exposure
            datamatrix.append(corrected)
        print(np.shape(datamatrix))
        master = np.median(datamatrix, axis=0)
        print(i)
        mastermatrix.append(master)
        i += 1

    print('averaging')
    print(np.shape(mastermatrix))
    dark = np.mean(mastermatrix, axis=0)
    
    phdu = pyfits.PrimaryHDU(dark)

    outname = outdir+darkname
    command = 'rm -f '+outname
    os.system(command)
    phdu.writeto(outname)

    os.system('rm -f removeindexlist.dat '+outdir+'dsorted*')
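All of these examples rely on an open_fits_file helper that is not shown. A minimal sketch of what such a context manager could look like, assuming it simply wraps pyfits.open (the helper name is from the source; the body is an assumption, and the real helper may also decompress the .bz2 frames hinted at in Examples #5 and #8):

from contextlib import contextmanager

import pyfits


@contextmanager
def open_fits_file(filename):
    # Open the FITS file and make sure it is closed again, even if
    # the caller raises inside the with-block
    hdulist = pyfits.open(filename)
    try:
        yield hdulist
    finally:
        hdulist.close()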
Example #2
def reducer():
    with open_fits_file(biasname) as hdulist:
        bias = hdulist[0].data
    with open_fits_file(darkname) as hdulist:
        dark = hdulist[0].data
    with open_fits_file(flatname) as hdulist:
        flat = hdulist[0].data

    pool = ThreadPool()
    fn = partial(reduce_file, bias=bias, dark=dark, flat=flat)
    with open(inlist) as infile:
        filenames = [line.strip() for line in infile]

    pool.map(fn, filenames)
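Example #2 assumes ThreadPool and partial are already imported at module level; the standard-library imports it needs are:

from functools import partial
from multiprocessing.pool import ThreadPool

pool.map then applies reduce_file, with the calibration frames baked in via partial, to every filename in parallel and blocks until all frames are processed.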
Example #3
def extract_from_file(fname):
    logger.info('Analysing file %s', fname)
    with open_fits_file(fname) as infile:
        header = infile[0].header
        image = infile[0].data

    # sx, sy = parse_overscan_region(header['biassec'])

    mjd = header['mjd']
    # Overscan strips at the left and right edges of the frame
    left = image[4:, 1:20]
    right = image[4:, -15:]
    exposure = header['exposure']

    airmass = header.get('airmass', 0)
    chstemp = header.get('chstemp', 0)
    ccdtemp = header.get('ccdtemp', 0)
    image_id = header['image_id']
    roof_status_value = header['roofstat']
    roof_open = roof_status_value.lower() == 'full open'

    return {
            'mjd': mjd,
            'exposure': float(exposure),
            'image_id': image_id,
            'roof_open': roof_open,
            'left': sigma_clipped_mean(left).astype(float),
            'right': sigma_clipped_mean(right).astype(float),
            'airmass': airmass,
            'ccdtemp': ccdtemp,
            'chstemp': chstemp,
            }
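Examples #3 and #6 also depend on a sigma_clipped_mean helper that is not defined in these snippets. A plausible sketch built on astropy's sigma clipping (the function name comes from the source; this implementation is an assumption):

import numpy as np
from astropy.stats import sigma_clip


def sigma_clipped_mean(data, sigma=3.0):
    # Mask pixels further than `sigma` standard deviations from the
    # mean, then average whatever survives the clipping
    clipped = sigma_clip(np.asarray(data, dtype=float), sigma=sigma)
    return clipped.mean()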
Example #4
def sort_scilist(liste):
    # Group the input frames by the field recorded in their headers
    fields = []
    scilists = []
    for item in liste:
        with open_fits_file(item) as hdulist:
            try:
                field = hdulist[0].header['OBJECT']
            except KeyError:
                field = hdulist[0].header['FIELD']
        if field not in fields:
            fields.append(field)
            scilists.append([])
        idx = fields.index(field)
        scilists[idx].append(item)
    return fields, scilists
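A hypothetical call, showing how the two returned lists stay parallel (the file names are invented for illustration):

fields, scilists = sort_scilist(['frame001.fits', 'frame002.fits'])
for field, frames in zip(fields, scilists):
    print(field, len(frames))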
Example #5
def reduce_file(filename, bias, dark, flat):
    # Apply overscan, bias, dark and flat corrections to one frame
    with open_fits_file(filename) as hdulist:
        overscan = extract_overscan(hdulist)
        data = hdulist[0].data[0:2048,20:2068]
        exposure = hdulist[0].header['exposure']
        corrected = (data-np.median(overscan)-bias-(dark*exposure))/flat
        path, fname = os.path.split(filename)
        outname = outdir+'proc'+fname.replace('.bz2', '')
        print(outname)
        hdulist[0].data = corrected
        hdulist[0].header.add_history('Overscan of '+str(np.median(overscan))+' subtracted')
        hdulist[0].header.add_history('Bias subtracted using '+str(biasname))
        hdulist[0].header.add_history('Dark subtracted using '+str(darkname))
        hdulist[0].header.add_history('Flat corrected using '+str(flatname))

        command = 'rm -f '+outname
        os.system(command)
        hdulist.writeto(outname)
        # Append the output name to the running log of processed files
        dfile = outdir+'processed.dat'
        with open(dfile, 'a') as f:
            f.write(outname+'\n')
Example #6
def extract_from_file(fname):
    with open_fits_file(fname) as infile:
        header = infile[0].header
        image = infile[0].data

    if header['imgtype'].strip() != 'DARK':
        return None

    mjd = header['mjd']
    left = image[:, 1:20]
    right = image[:, -20:]

    gain = header['gainfact']
    exptime = header['exposure']

    left_overscan = sigma_clipped_mean(left)
    right_overscan = sigma_clipped_mean(right)

    airmass = header['airmass']
    chstemp = header['chstemp']
    ccdtemp = header['ccdtemp']

    logger.debug('Analysing file %s, gain: %s, exptime: %s, ccdtemp: %s',
                 fname, gain, exptime, ccdtemp)

    central = image[:, 20:-20]
    bias_signal = compute_bias_signal(central, left_overscan, right_overscan)
    dark_current = central - bias_signal
    dark_current_electrons_per_second = dark_current * gain / exptime

    return {
            'mjd': mjd,
            'left': left_overscan.astype(float),
            'right': right_overscan.astype(float),
            'airmass': airmass,
            'ccdtemp': ccdtemp,
            'chstemp': chstemp,
            'dark': sigma_clipped_mean(dark_current).astype(float),
            }
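Example #6 calls a compute_bias_signal helper that is not shown. One plausible sketch, assuming the bias is modelled as a linear ramp between the left and right overscan levels (the name is from the source; the model itself is an assumption):

import numpy as np


def compute_bias_signal(image, left_level, right_level):
    # Interpolate linearly from the left overscan level to the right
    # one across the columns, then broadcast down every row
    ramp = np.linspace(left_level, right_level, image.shape[1])
    return np.tile(ramp, (image.shape[0], 1))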
Example #7
def classify_file(filename, logroot):
    out = defaultdict(list)
    with open_fits_file(filename) as hdulist:
        imtype = hdulist[0].header['IMGTYPE']
        action = hdulist[0].header['ACTION']
        try:
            dither = hdulist[0].header['DITHER']
        except KeyError:
            dither = 'DISABLED'
    string = "%20s %10s %30s\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), imtype, filename)
    result = write_log(logroot, runnumber, string, 2)
    if imtype == 'IMAGE':
        if dither == 'ENABLED':
            out['dithered'].append(filename)
        else:
            out['science'].append(filename)
    elif imtype == 'DARK':
        out['dark'].append(filename)
    elif imtype == 'BIAS':
        out['bias'].append(filename)
    elif imtype == 'FLAT':
        out['flat'].append(filename)

    return out
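A hypothetical driver that merges the per-file classifications into a single mapping (the file list and log root are invented; only classify_file comes from the source):

from collections import defaultdict

all_frames = defaultdict(list)
for fname in ['frame001.fits', 'frame002.fits']:
    for key, values in classify_file(fname, '/tmp/logs').items():
        all_frames[key].extend(values)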
Example #8
def reducer():
    os.system('mkdir '+outdir+'flats')
    with open_fits_file(biasname) as hdulist:
        bias = hdulist[0].data
    with open_fits_file(darkname) as hdulist:
        dark = hdulist[0].data    
    with open_fits_file(smname) as hdulist:
        sm = hdulist[0].data    
    os.system('rm -f '+outdir+'datafile.dat')
    os.system('rm -f '+outdir+'variance.fits')
    os.system('rm -f '+outdir+flatname)
    os.system('rm -f '+outdir+'std.fits')
    os.system('rm -f '+outdir+'expdata.dat')
    frameno = 1

    nflat_files = 0
    flat_total = np.zeros(dark.shape)
    datamatrix = []
    expfile = outdir+'expdata.dat'
    # Loop over the flat frames listed in `inlist`
    for line in open(inlist):
        stripped = line.strip()
        with open_fits_file(stripped) as hdulist:
            header = hdulist[0].header
            overscan = extract_overscan(hdulist)
            data = hdulist[0].data[0:2048,20:2068]
            exposure = header['exposure']
            mjd = header['mjd']
        median_data = np.median(data[:, 20:-20])

        # Record every exposure time, even for rejected frames
        with open(expfile, 'a') as f:
            f.write(str(exposure)+'\n')

        # Reject short exposures and frames close to saturation
        to_include = exposure >= 3 and median_data < 40000
        if not to_include:
            print("Skipping file {fname}, exptime={exptime}, med_data={med}"
                  .format(fname=stripped, exptime=exposure, med=median_data),
                  file=sys.stderr)
        else:

            # Overscan-, bias- and dark-correct the frame
            corrected1 = (data-np.median(overscan)-bias-(dark*exposure))
            flat_total += corrected1
            nflat_files += 1

            # corrected2 = corrected1/(1-(sm/exposure))

            fmean = np.mean(corrected1)
            fstd = np.std(corrected1)

            normalised = corrected1/fmean
            # normalised = corrected1
            path, fname = os.path.split(stripped)
            outname = outdir+'flats/'+'proc'+fname.replace('.bz2', '')

            # Log per-frame statistics; np.loadtxt below expects one
            # row per line, so the trailing newline matters
            dfile = outdir+'datafile.dat'
            with open(dfile, 'a') as f:
                f.write(str(frameno)+" "+str(fmean)+" "+str(fstd)+" "
                        +str(exposure)+" "+outname+'\n')

            datamatrix.append(normalised)

            phdu = pyfits.PrimaryHDU(normalised)
            phdu.header['exposure'] = exposure
            phdu.header['mjd'] = mjd
            phdu.writeto(outname, clobber=True)

            tfile = outdir+'processed.dat'
            with open(tfile, 'a') as f:
                f.write(outname+'\n')

            frameno += 1

    try:
        frame, means, stds = np.loadtxt(dfile, usecols=(0, 1, 2), unpack=True)
    except UnboundLocalError as err:
        if 'dfile' in str(err):
            raise RuntimeError("All flats invalid. Pipeline cannot continue"
                               ", original error: {}".format(str(err)))
        # Re-raise anything other than the missing-dfile case
        raise


    wholestd = np.std(datamatrix, axis=0)

    print(np.size(wholestd))
    
    outname = outdir+'std.fits'
    pyfits.PrimaryHDU(wholestd).writeto(outname, clobber=True)
    print('std done')
    variance = 1/(wholestd*wholestd)

    outname = outdir+'variance.fits'
    pyfits.PrimaryHDU(variance).writeto(outname, clobber=True)
    print('var done')
    flat = np.median(datamatrix, axis=0)


    outname = outdir+flatname
    pyfits.PrimaryHDU(flat).writeto(outname, clobber=True)
    print('flat done')

    render_total_file(flat_total, totalname, nflat_files)