# Imports required by the code below. "pyfits" is the (legacy) FITS
# library; DateTime is assumed to come from mx.DateTime, whose instances
# provide the .mjd attribute used at the end of betalight().
from collections import defaultdict
from math import sqrt

import numpy
import pyfits
from mx.DateTime import DateTime
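# "scstdev" is used throughout but is not defined in this section; it is
# presumably imported from a companion module. As a placeholder, here is a
# minimal sketch of what such a sigma-clipped statistics routine could look
# like, assuming the (stdev, mean, median, nrejects) return order that the
# unpacking below relies on. The 3-sigma threshold and single-pass clipping
# are assumptions, not the original implementation.
def scstdev(frame, nsigma=3.0):
    data = numpy.asarray(frame, dtype=float).ravel()
    # Keep only values within nsigma standard deviations of the mean.
    keep = abs(data - data.mean()) <= nsigma * data.std()
    clipped = data[keep]
    nrejects = data.size - clipped.size
    return (clipped.std(), clipped.mean(),
            float(numpy.median(clipped)), nrejects)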
def betalight(names, verbose=False):
    # Read images and their headers, and reorder them in case they were
    # taken with some other BIAS script than the correct one.
    nimages = len(names)
    if nimages != 4:
        print "I need four images. You provided me with %d" % nimages
        return

    if verbose:
        print 'start reading of MEF images'

    # files is a list of (FITS file, name) tuples; the FITS files are
    # already open for use.
    files = [(pyfits.open(x), x) for x in names]

    # We want to sort the files according to their mean value, in
    # decreasing order. To do this, we prepare a list of "tuples". Each
    # tuple will contain three values: the mean value of the file, a
    # reference to the file itself, and its name. It seems redundant to
    # have the files listed in two places, but it is not. Python copies
    # references, not whole values, so it's cheap to do it like this.
    #
    # You can use a FITS object like a sequence. Each element of the
    # sequence is a FITS HDU. In ALFOSC files, file[0] is the Primary HDU
    # and file[1] is the first image (the only one).
    l = [(fitsfile[1].data.mean(), fitsfile, name)
         for (fitsfile, name) in files]

    # And now we get the list of sorted files. How? Well, when you sort
    # a list of tuples, what Python does is: sort using the first element,
    # and if there's a coincidence, use the second element, and if... you
    # get the idea. "l" is a list of tuples having the mean value of the
    # file as the first element, and thus "sorted(l)" will return the
    # tuples of "l" sorted by mean value.
    # Then we discard that mean value to create sortedfits.
    sl = sorted(l, reverse=True)
    sortednames = [x[2] for x in sl]
    sortedfits = [x[1] for x in sl]

    # Then we produce a list of the first image (fitsfile[1]) data for
    # every one of those sorted FITS files, and also a list of their
    # headers. We assign them also to discrete variables (f1, f2, ...
    # hf1, hf2, ...) for later use.
    datalist = [fitsfile[1].data for fitsfile in sortedfits]
    f1, f2, b1, b2 = datalist
    headerlist = [fitsfile[1].header for fitsfile in sortedfits]
    hf1, hf2, hb1, hb2 = headerlist

    if verbose:
        print 'end reading of MEF images'

    # Test that the images are of the same size.
    # We could do it, for example, by comparing the shape of the first
    # data with the second one, and then with the third one, and then
    # with the fourth one.
    # That's a direct but somewhat cumbersome way to do it. Instead, we
    # use "set". set is a native Python type that behaves as... a
    # set ;). That means it will contain only one copy of a value. E.g.:
    #
    # >>> set([1, 2, 3, 1, 2, 4, 1, 2, 5])
    # set([1, 2, 3, 4, 5])
    #
    # So... if this set of image shapes has more than one element...
    # at least one of them is different from the others.
    if len(set(x.shape for x in datalist)) > 1:
        print 'Images not of same size! Aborted!'
        return

    if verbose:
        print 'Images are the same size'

    # Cut the edges off the images.
    #
    # Images should be 101 x 101 pixels, since that is the size of the
    # image of the betalight on ALFOSC.
    bsize = 16
    nareas = int(float(f1.shape[1]) / bsize)
    ysize, xsize = f1.shape
    c1 = c3 = nareas - 1
    c2 = xsize
    c4 = ysize
    if xsize < 200 or ysize < 200:
        cf1 = f1[c3:c4, c1:c2]
        cf2 = f2[c3:c4, c1:c2]
        cb1 = b1[c3:c4, c1:c2]
        cb2 = b2[c3:c4, c1:c2]
    else:
        cf1 = f1[50:ysize-50, 50:xsize-50]
        cf2 = f2[50:ysize-50, 50:xsize-50]
        cb1 = b1[50:ysize-50, 50:xsize-50]
        cb2 = b2[50:ysize-50, 50:xsize-50]
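    # A worked example of the crop arithmetic above, for the nominal
    # 101 x 101 betalight image:
    #
    # >>> int(101.0 / 16)   # nareas
    # 6
    # >>> 6 - 1             # c1 == c3, the first pixel kept
    # 5
    #
    # so the crop is f1[5:101, 5:101], a 96 x 96 frame that divides
    # evenly into 6 x 6 areas of 16 x 16 pixels each.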
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # ; Measure some easy statistical properties for the user to see ;
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

    # This empty class is just for convenience. That way we can set
    # arbitrary attributes on its instances. See below.
    class Dummy(object):
        pass

    stats = []
    if verbose:
        print ("%14s" + "%13s"*5) % ("Name", "Min", "Max", "StDev",
                                     "Mean", "Median")
    frmt = "%-14s%13.2f%13.2f%13.2f%13.2f%13.2f"
    for img, name in zip((cf1, cf2, cb1, cb2), sortednames):
        st = Dummy()
        st.min, st.max = img.min(), img.max()
        st.stdev, st.mean, st.median, st.nrejects = scstdev(img)
        stats.append(st)
        if verbose:
            print frmt % (name, st.min, st.max, st.stdev, st.mean,
                          st.median)
    if verbose:
        print

    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # ; Check difference of bias frames; should be smaller than the stdev ;
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    biasdiff = abs(stats[2].mean - stats[3].mean)
    if biasdiff > stats[2].stdev or biasdiff > stats[3].stdev:
        print
        print " Difference of averages of bias frames", biasdiff
        print " is larger than the standard deviation"
        print " of either of the bias images ", stats[2].stdev, stats[3].stdev
        print " Aborted! "
        return

    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # ; Divide image into areas (subimages) of 16x16 pix   ;
    # ; and calculate statistics of the individual areas   ;
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    class ImgPiece(object):
        def __init__(self, frame):
            st, ma, me, nrej = scstdev(frame)
            self.frame = frame
            self.min = frame.min()
            self.max = frame.max()
            self.std = st
            self.mean = ma
            self.median = me

    # The original script did this in three loops, which of course is
    # the obvious way. Python is not lightning fast when it comes to
    # long loops, but for this small 101x101 image (36 16x16 squares)
    # the overhead is negligible.
    # The original also creates a 3D array shaped (nareas*nareas, 5, 4).
    # Instead, I create a dict structured like this:
    #
    #   pieces[n] -> [ImgPiece(flat1[n]), ..., ImgPiece(bias2[n])]
    #
    # where "n" is the number of a 16x16 area.
    pieces = defaultdict(list)
    for img in (cf1, cf2, cb1, cb2):
        for ycoord in range(0, nareas):
            vert = ycoord * bsize
            row = img[vert:vert + bsize]
            base = ycoord * nareas
            for xcoord in range(0, nareas):
                horiz = xcoord * bsize
                pieces[base + xcoord].append(
                    ImgPiece(row[:, horiz:horiz + bsize]))

    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # ; Calculate COUNTS, BIAS, RON and GAIN for individual areas     ;
    # ;                                                               ;
    # ; gain = ( ( flat1 + flat2 ) - ( bias1 + bias2 ) ) /            ;
    # ;        ( STDEV( flat1 - flat2 )^2 - STDEV( bias1 - bias2 )^2 );
    # ;                                                               ;
    # ; ron  = gain * STDEV( bias1 - bias2 ) / SQRT( 2 )              ;
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    gaintbl = []
    rontbl = []
    sqrt2 = sqrt(2)
    for l in range(0, nareas * nareas):
        pf1, pf2, pb1, pb2 = pieces[l]
        stdFlats = scstdev(pf1.frame - pf2.frame)[0]
        stdBias = scstdev(pb1.frame - pb2.frame)[0]
        gaintbl.append(((pf1.mean + pf2.mean) - (pb1.mean + pb2.mean)) /
                       (stdFlats**2 - stdBias**2))
        rontbl.append(stdBias / sqrt2)
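    # A worked example of the formulas above, with illustrative numbers:
    # flat means of 20000 ADU each, bias means of 300 ADU each,
    # STDEV(flat1 - flat2) = 200 and STDEV(bias1 - bias2) = 7 give
    #
    #   gain = (40000 - 600) / (200**2 - 7**2)  ~= 0.99 e-/ADU
    #   ron  = 0.99 * 7 / sqrt(2)               ~= 4.9 e-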
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # ; Take the individual measurements of the subimages and ;
    # ; do sigma clipping on them                             ;
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    std, gain, _, nr1 = scstdev(gaintbl)
    gainerr = std / sqrt(numpy.array(gaintbl).size - nr1)
    std, mean, _, nr2 = scstdev(rontbl)
    ron = gain * mean
    ronerr = gain * std / sqrt(numpy.array(rontbl).size - nr2)

    fltmean = numpy.array([(x[0].mean, x[1].mean) for x in pieces.values()])
    std, counts, _, nr3 = scstdev(fltmean)
    counterr = std / sqrt(fltmean.size - nr3)

    biasmean = numpy.array([(x[2].mean, x[3].mean) for x in pieces.values()])
    std, bias, _, nr4 = scstdev(biasmean)
    biaserr = std / sqrt(biasmean.size - nr4)

    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # ; Print results to screen; these values are the ones going to DB ;
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    if verbose:
        print " COUNTS:%7.1f +/- %6.2f" % (counts, counterr)
        print " BIAS:  %7.1f +/- %6.2f" % (bias, biaserr)
        print " GAIN:  %7.4f +/- %6.4f" % (gain, gainerr)
        print " RON:   %7.4f +/- %6.4f" % (ron, ronerr)
        print

    results = Dummy()
    results.counts = counts
    results.counterr = counterr
    results.bias = bias
    results.biaserr = biaserr
    results.gain = gain
    results.gainerr = gainerr
    results.ron = ron
    results.ronerr = ronerr

    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # ; Extract required keywords from the FITS header ;
    # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    # Obtain the primary HDU header from the first file.
    hf0 = files[0][0][0].header
    #                      012345678901234567890
    # Format for DATE-AVG: 2008-01-22T14:53:12.5
    date_avg = hf0['DATE-AVG']
    results.date = DateTime(int(date_avg[0:4]),    # year
                            int(date_avg[5:7]),    # month
                            int(date_avg[8:10])    # day
                            ).mjd
    results.time = hf0['UT']
    results.amp = hf0['AMPLMODE']
    results.gmode = hf0['GAINM']

    if verbose:
        print "amp   ", results.amp
        print "gmode ", results.gmode

    return results
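# Example use (a minimal sketch; the file names are illustrative and must
# refer to two betalight flat exposures and two bias frames, in any order):
#
# >>> res = betalight(['flat1.fits', 'flat2.fits',
# ...                  'bias1.fits', 'bias2.fits'], verbose=True)
# >>> print "%.3f +/- %.3f" % (res.gain, res.gainerr)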