def ImageIter(imagelist, **kargs):
    """
    Iterator: given a list containing the image names, it will return the
    images one by one. Useful to process all the images, one at a time,
    without needing to load them all into memory at the same time.

    Ex:
        ilist = ['image1.fits', 'image2.fits', ... 'imageN.fits']
        images = ImageIter(ilist)
        for image in images:
            # some processing
    """
    # default extension is 0
    ext = kargs.get('ext', 0)

    # make sure all elements in the image list are strings
    if all([isinstance(i, str) for i in imagelist]):
        for i in imagelist:
            yield Image(i, ext)
    # or make sure all elements in the image list are Paths
    elif all([isinstance(i, Path) for i in imagelist]):
        for i in imagelist:
            yield Image(i, ext)

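# Hedged usage sketch (not part of the original module): iterate over a list of
# frames and print per-image statistics without holding them all in memory.
# The file names below are hypothetical; Image.filename and Image.get_data()
# are assumed to behave as they are used elsewhere in this module.
def _example_imageiter():
    ilist = ['image1.fits', 'image2.fits', 'image3.fits']  # hypothetical files
    for im in ImageIter(ilist, ext=0):
        data = im.get_data()
        print(im.filename, data.mean(), data.std())
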
def extractchannel(filelist, channel):
    """
    extractchannel is a utility to read an extension from a multi-extension
    FITS file and save it as a FITS file. It's useful if we want to extract
    only one channel from, for example, a list of MUSE files.

    Usage:
    1) single file example
        extractchannel('MUSE_WFM_FLAT303_0031.fits', 'CHAN04')
        extracts the CHAN04 extension and creates MUSE_WFM_FLAT303_0031_CHAN04.fits

    2) multi file example
        extractchannel('listing.txt', 'CHAN04')
        listing.txt is a text file with the listing of all files from which we
        want to extract the channel; it can be generated with the 'ls' command, like
            ls MUSE*WFM*fits > listing.txt

    3) filelist is a python list with file names
        ex: flist = ['OMEGACAM_100.fits', 'OMEGACAM_102.fits', 'OMEGACAM_103.fits']
            extractchannel(flist, 'CCD_78')

    TODO: add OUTPATH which points to the directory where the extracted
    channels will be saved
    """
    # check if filelist is a single FITS file name
    if isinstance(filelist, str) and filelist.upper().endswith('.FITS'):
        try:
            im = Image(filelist, channel)
            im.save(filelist[:-5] + '_' + channel + '.fits')
        except:
            print('Bad channel designator')

    # check if filelist is a text file with a listing of FITS files
    elif os.path.isfile(filelist):
        files = open(filelist, 'r').readlines()
        for line in files:
            try:
                # need to remove '\n'; split() generates a list, so take the
                # first element
                name = line.split()[0]
                im = Image(name, channel)
                im.save(name[:-5] + '_' + channel + '.fits')
            except:
                print('Bad channel designator')

    # check if filelist is a python list of strings ending in .fits
    elif isinstance(filelist, list) and all(
            [isinstance(x, str) for x in filelist]):
        for name in filelist:
            try:
                im = Image(name, channel)
                im.save(name[:-5] + '_' + channel + '.fits')
            except:
                print('Bad channel designator')
    else:
        print("Not a valid name or file name list")

def medianstack(filelist, ext=0, **options):
    """
    Compute the median for a list of images.
    Useful to eliminate cosmic rays from dark images.

    Syntax:
        medianstack(filelist)
    returns an image which is the median stack of the files in the list.

    example:
        med = medianstack([dk1, dk2, dk3], 0)
    computes the median for images dk1, dk2 and dk3 using extension 0

    TODO: Check if filelist is a list of images or a list of strings and then
    perform the computation accordingly
    """
    # check that filelist is a list with at least two elements
    if not (isinstance(filelist, list) and len(filelist) > 1):
        print("Not a list or len < 2")
        return None

    # elements in filelist are Images
    if all([isinstance(i, Image) for i in filelist]):
        imagesdata = [i.get_data() for i in filelist]
        im = filelist[0].copy()
        im.filename = 'medianstack'
        im.data = np.median(imagesdata, axis=0)
        return im

    # elements are file names or Paths: copy first image on list to get same dim
    if all([isinstance(i, (str, Path)) for i in filelist]):
        imagesdata = [Image(i, ext).get_data() for i in filelist]
        im = Image(filelist[0], ext)
        im.filename = 'medianstack'
        im.data = np.median(imagesdata, axis=0)
        return im

    return None

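# Hedged usage sketch (not part of the original module): build a master dark by
# median-stacking a few dark frames, which suppresses cosmic-ray hits as noted
# in the docstring above. File names are hypothetical; Image.save() is assumed
# to work as it is used in extractchannel().
def _example_master_dark():
    darks = ['dark_001.fits', 'dark_002.fits', 'dark_003.fits']  # hypothetical files
    master = medianstack(darks, ext=0)
    if master is not None:
        master.save('master_dark.fits')
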
def stdstack(filelist, ext=0, **options):
    """
    Compute the standard deviation in the z direction for a list of images.

    Syntax:
        stdstack(filelist)
    returns an image which is the pixel-by-pixel standard deviation of the
    files in the list.

    example:
        stdmap = stdstack([dk1, dk2, dk3, ..., dkn], 0)
    computes the standard deviation for images dk1, dk2, dk3 to dkn using
    extension 0
    """
    # check that filelist is a list with at least two elements
    if not (isinstance(filelist, list) and len(filelist) > 1):
        print("Not a list or len < 2")
        return None

    # elements in filelist are Images
    if all([isinstance(i, Image) for i in filelist]):
        imagesdata = [i.get_data() for i in filelist]
        im = filelist[0].copy()
        im.filename = 'stdstack'
        im.data = np.std(imagesdata, axis=0)
        return im

    # elements are file names or Paths: copy first image on list to get same dim
    if all([isinstance(i, (str, Path)) for i in filelist]):
        imagesdata = [Image(i, ext).get_data() for i in filelist]
        im = Image(filelist[0], ext)
        im.filename = 'stdstack'
        im.data = np.std(imagesdata, axis=0)
        return im

    return None

def meanstack(filelist, ext=0, **options):
    """
    Compute the mean for a list of images.

    Syntax:
        meanstack(filelist)
    returns an image which is the mean stack of the files in the list.

    example:
        mean = meanstack([dk1, dk2, dk3], 0)
    computes the mean for images dk1, dk2 and dk3 using extension 0

    TODO: Check if filelist is a list of images or a list of strings and then
    perform the computation accordingly
    """
    # check that filelist is a list with at least two elements
    if not (isinstance(filelist, list) and len(filelist) > 1):
        print("Not a list or len < 2")
        return None

    # elements in filelist are Images
    if all([isinstance(i, Image) for i in filelist]):
        imagesdata = [i.get_data() for i in filelist]
        im = filelist[0].copy()
        im.filename = 'meanstack'
        im.data = np.mean(imagesdata, axis=0)
        return im

    # elements are file names (or Paths, for consistency with medianstack/stdstack)
    if all([isinstance(i, (str, Path)) for i in filelist]):
        # copy first image on list to get same dim
        imagesdata = [Image(i, ext).get_data() for i in filelist]
        im = Image(filelist[0], ext)
        im.filename = 'meanstack'
        im.data = np.mean(imagesdata, axis=0)
        return im

    return None

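# Hedged usage sketch (not part of the original module): combine meanstack and
# stdstack to build a master bias and a per-pixel noise map from the same
# frames. File names are hypothetical; Image.save() is assumed to work as it
# is used in extractchannel().
def _example_master_bias():
    biases = ['bias_001.fits', 'bias_002.fits', 'bias_003.fits']  # hypothetical files
    master = meanstack(biases, ext=0)
    noise = stdstack(biases, ext=0)
    if master is not None and noise is not None:
        master.save('master_bias.fits')
        noise.save('bias_noise_map.fits')
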
def linearity_residual(imagelist, *coor, **kargs):
    """
    Compute the linearity residual using an image list starting with 2 bias
    frames followed by pairs of FF at different levels.

    LR = 100*(1 - (Sm/Tm)/(S/t))

    TODO: needs to be completed!!
    """
    MAXSIGNAL = kargs.get('MAXSIGNAL', 65535.0)
    VERBOSE = kargs.get('VERBOSE', False)
    ext = kargs.get('ext', 0)

    # read coordinates of first image
    x1, x2, y1, y2 = Image(imagelist[0], ext).get_windowcoor(*coor)

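# Hedged sketch (not part of the original module): how the LR formula quoted in
# the linearity_residual docstring could be evaluated once mean signals and
# exposure times are measured. Picking the mid-range exposure as the reference
# (Sm, Tm) is an assumption here; the unfinished function above may define the
# reference rate differently. The signal and exposure values are hypothetical.
def _example_linearity_residual():
    signal = np.array([1000., 5000., 10000., 20000., 40000.])  # hypothetical mean signals [ADU]
    exptime = np.array([1.0, 5.0, 10.0, 20.0, 40.0])           # hypothetical exposure times [s]
    ref = len(signal) // 2                                      # mid-range reference point
    rate_ref = signal[ref] / exptime[ref]                       # Sm/Tm
    lr = 100.0 * (1.0 - rate_ref / (signal / exptime))          # LR = 100*(1 - (Sm/Tm)/(S/t))
    print(lr)
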
def ptc_2ff2bias(bias1, bias2, ff1, ff2, *coor, **kargs):
    """
    Perform a PTC plot with two bias and two flat-field images.

    ex:
        ptc_2ff2bias(b1, b2, ff1, ff2, 50, 2000, 100, 170)
            compute the CF using 2 bias and 2 FF in an area defined by
            [50:2000,100:170], plot the ptc curve and compute the CF using the
            fitted polynomial (default order 2)
        ptc_2ff2bias(b1, b2, ff1, ff2, 50, 2000, 100, 170, RETURN=True)
            compute the CF using 2 bias and 2 FF in an area defined by
            [50:2000,100:170] and return the vectors and the CF
        ptc_2ff2bias(b1, b2, ff1, ff2, 50, 2000, 100, 170, ORDER=2)
            compute the CF using 2 bias and 2 FF in an area defined by
            [50:2000,100:170], plot the ptc curve, and use a polynomial of order 2

    The 2 FF images should have a slope in flux to compute the ptc.
    To eliminate the FPN, the 'shotnoise' image is computed as the subtraction
    of the two debiased flat field images.

    optional kargs arguments:
        SIGMA (default = 3)    number of std deviations used to eliminate outliers
        VERBOSE (default = False)

    This method can be used on the TestBench
    """
    nstd = kargs.get('SIGMA', 3)  # factor to eliminate outliers
    order = kargs.get('ORDER', 2)
    axis = kargs.get('AXIS', 1)  # make computation along columns
    ext = kargs.get('ext', 0)

    x1 = coor[0]
    x2 = coor[1]
    y1 = coor[2]
    y2 = coor[3]

    ff1w = Image(ff1, ext=ext).crop(x1, x2, y1, y2).astype(float)
    ff2w = Image(ff2, ext=ext).crop(x1, x2, y1, y2).astype(float)
    bias1w = Image(bias1, ext=ext).crop(x1, x2, y1, y2).astype(float)
    bias2w = Image(bias2, ext=ext).crop(x1, x2, y1, y2).astype(float)

    bias_mean = sigma_clip(bias1w + bias2w, axis=axis).mean(axis=axis) / 2.0
    bias_var = 0.5 * sigma_clip(bias1w - bias2w, axis=axis).std(axis=axis)**2

    signal_mean = sigma_clip(ff1w + ff2w, axis=axis).mean(axis=axis) / 2.0 - bias_mean
    ratioffs = sigma_clip(ff1w, axis=axis).mean(axis=axis) / sigma_clip(
        ff2w, axis=axis).mean(axis=axis)
    total_var = sigma_clip(ff1w - ratioffs * ff2w, axis=axis).std(axis=axis)**2
    signal_var = (total_var - 2 * bias_var) / 2.0

    coefts = np.polyfit(signal_mean, signal_var, order)
    polycoef = np.poly1d(coefts)
    var_fitted = np.polyval(polycoef, signal_mean[:])

    # if RETURN is True, return the masked signal, the variance, the fitted
    # variance and the polynomial coefficients
    if kargs.get('RETURN', False):
        return signal_mean, signal_var, var_fitted, coefts

    fig = plt.figure()  # create a figure object
    ax = fig.add_subplot(1, 1, 1)  # create an axes object in the figure
    ax.plot(signal_mean, signal_var, '.b', signal_mean, var_fitted, 'r')
    ax.grid()
    title = 'PTC CF=%f'
    if order == 2:
        title = title % (1 / coefts[1])
    else:
        title = title % (1 / coefts[0])
    ax.set_title(title)
    ax.set_ylabel('Variance [ADU]**2')
    ax.set_xlabel('Signal [ADU]')

def ptc_ffpairs_mw(imagelist, *coor, **kargs):
    """
    Perform a PTC plot for pairs of FF at the same level in multiple windows.
    The pairs of FF should have the same light level. The first 2 images in
    the list must be bias frames.
    To eliminate the FPN, the 'shotnoise' image is computed as the subtraction
    of two debiased flat field images.

    optional kargs arguments:
        FACTOR (default = 2.0)
        MAXSIGNAL (default 65535) => compute PTC only for signal values less than MAXSIGNAL
        VERBOSE (default = False) => print out table with signal and variance
        CLIP (default = True)     => use clipped statistics on images before computing CF
    """
    order = kargs.get('ORDER', 2)  # order of polynomial regression
    if order > 2:
        order = 2
    MAXSIGNAL = kargs.get('MAXSIGNAL', 65535.0)
    sigma = kargs.get('SIGMA', 3)
    ext = kargs.get('ext', 0)

    x1 = coor[0]
    x2 = coor[1]
    y1 = coor[2]
    y2 = coor[3]

    oddimageindex = list(range(3, len(imagelist), 2))
    evenimageindex = list(range(2, len(imagelist), 2))

    # Read in bias1 and bias2
    b1 = Image(imagelist[0], ext).crop(x1, x2, y1, y2).get_data()
    b2 = Image(imagelist[1], ext).crop(x1, x2, y1, y2).get_data()
    biasRON = sigma_clip(b1 - b2).std() / np.sqrt(2.0)

    # Separate images in even and odd (and crop them)
    ff1list = [
        Image(imagelist[i], ext).crop(x1, x2, y1, y2).get_data()
        for i in oddimageindex
    ]
    ff2list = [
        Image(imagelist[i], ext).crop(x1, x2, y1, y2).get_data()
        for i in evenimageindex
    ]

    # NHA new implementation with multi windows START
    # accumulate the per-window mean signal and signal variance
    meanff = []
    signalvar = []
    for ff1, ff2 in zip(ff1list, ff2list):
        # windows is a generator of subwindows
        windows = subwindowcoor(0, ff1.shape[0], 0, ff2.shape[0], **kargs)
        for i, j, xi, xf, yi, yf in windows:
            win = slice(xi, xf), slice(yi, yf)
            # compute the mean signal on each window (bias subtracted)
            meanff.append(
                sigma_clip((ff1[win] + ff2[win]) / 2.0, sigma=sigma).mean() -
                sigma_clip((b1[win] + b2[win]) / 2.0, sigma=sigma).mean())
            # compute the FF difference on each window, optionally normalizing
            # FF2 to the level of FF1
            if kargs.get('NORMFF2', True):
                ff_diff = ff1[win] - ff2[win] * (
                    sigma_clip(ff1[win], sigma=sigma).mean() /
                    sigma_clip(ff2[win]).mean())
            else:
                ff_diff = ff1[win] - ff2[win]
            var_ff_diff = sigma_clip(ff_diff, sigma=sigma).std()**2
            # Measure the RON variance from the difference of the two bias
            var_ron = (sigma_clip(b1[win] - b2[win], sigma=sigma).std()**2) / 2.0
            signalvar.append(0.5 * (var_ff_diff - 2 * var_ron))

    # compute polynomial coefficients
    meanff = np.array(meanff)
    signalvar = np.array(signalvar)
    coefts = np.polyfit(meanff, signalvar, order)
    polyts = np.poly1d(coefts)
    variance_fitted = np.polyval(polyts, meanff)

    fig = plt.figure()  # create a figure object
    ax = fig.add_subplot(1, 1, 1)  # create an axes object in the figure
    ax.set_ylabel('Variance')
    ax.set_xlabel('Signal')
    ax.grid(True)
    ax.set_title('Photon Transfer Curve')
    # plot variance v/s signal
    ax.plot(meanff, signalvar, 'b.')
    ax.plot(meanff, variance_fitted, 'r-')
    plt.show()

    if order == 1:
        cf = 1 / coefts[0]
        print(f'Extension: {ext}  CF = {cf:2.3f} -e/ADU  RON = {cf * biasRON:2.3f} -e')
    elif order == 2:
        cf = 1 / coefts[1]
        print(f'Extension: {ext}  CF = {cf:2.3f} -e/ADU  RON = {cf * biasRON:2.3f} -e')

    if kargs.get('RETURN', True):
        return meanff, signalvar

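# Hedged usage sketch (not part of the original module): run the multi-window
# PTC on a list that starts with two bias frames followed by flat-field pairs
# of increasing level, as required by the docstring above. The file names, the
# window [100:1900,100:1900] and the NWX/NWY values are hypothetical.
def _example_ptc_ffpairs_mw():
    imagelist = ['bias_1.fits', 'bias_2.fits',
                 'ff_l1_a.fits', 'ff_l1_b.fits',
                 'ff_l2_a.fits', 'ff_l2_b.fits',
                 'ff_l3_a.fits', 'ff_l3_b.fits']  # hypothetical files
    mean, var = ptc_ffpairs_mw(imagelist, 100, 1900, 100, 1900,
                               ORDER=2, NWX=8, NWY=8, RETURN=True)
    print(len(mean), len(var))
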
def gain(imagelist, *coor, **kargs):
    """
    Compute the gain of the system using 2 bias and 2 FF images.
    The procedure divides the window in NWX*NWY subwindows, computes the gain
    for each one of them, then computes the mean value and displays a
    histogram. If the window coordinates are not given, the full CCD is used.

    Syntax:
        gain(imagelist[,xi,xf,yi,yf][,NWX=10][,NWY=10][,VERBOSE=True/False][,SAVE=True][,TITLE='Graph Title'][,RETURN=True/False][,MEDIAN=True/False])

    Note: the image list must contain 2 bias and 2 ff in that order!
    imagelist can be a list of names, a list of Paths or a list of Images
        b1,b2 = bias images
        f1,f2 = ff images
    *coor = [xi,xf,yi,yf] = coordinates of the window to analyze (should be a flat region)

    kargs
    -------
    VERBOSE=True        => print intermediate values
    TITLE='Graph Title' => put a title in the graph
    SAVE=True/False     => if True, save the graph in png format
    RETURN=True/False   => if True, return only ConFac without plots
    MEDIAN=True/False   => default False, compute median instead of mean
    NORMFF2=True/False  => default False, normalize FF2 level to set its mean level equal to FF1
    RATIO=True/False    => default True, just changes the way the FPN is eliminated.
                           Both methods give almost the same results
    NWX = number of windows in X direction (default 10)
    NWY = number of windows in Y direction (default 10)
    ext = image extension to load, default 0
    SIGMA = std deviation used by sigma_clip, default 3
    """
    ext = kargs.get('ext', 0)
    sigma = kargs.get('SIGMA', 3)

    if len(imagelist) != 4:
        print('imagelist len different from 4')
        return None

    # if imagelist contains only image names or Paths, load them
    if all([isinstance(i, str) for i in imagelist]):
        images = [Image(i, ext) for i in imagelist]
        print(f'Extension={ext}')
        b1 = images[0]
        b2 = images[1]
        ff1 = images[2]
        ff2 = images[3]
    elif all([isinstance(i, Path) for i in imagelist]):
        images = [Image(i, ext) for i in imagelist]
        b1 = images[0]
        b2 = images[1]
        ff1 = images[2]
        ff2 = images[3]
    # elif all are Images, just assign them
    elif all([isinstance(i, Image) for i in imagelist]):
        b1 = imagelist[0]
        b2 = imagelist[1]
        ff1 = imagelist[2]
        ff2 = imagelist[3]
    else:
        print("Not all objects in image list are Images or filenames")
        return None

    x1 = coor[0]
    x2 = coor[1]
    y1 = coor[2]
    y2 = coor[3]

    b1 = b1.get_data()
    b2 = b2.get_data()
    ff1 = ff1.get_data()
    ff2 = ff2.get_data()

    nwx = kargs.get('NWX', 10)  # set number of windows in x direction
    nwy = kargs.get('NWY', 10)  # set number of windows in y direction

    if kargs.get('VERBOSE', False):
        print(f'format images X={b1.shape[0]} pix  Y={b1.shape[1]} pix')
        print(f'Nx:{nwx} Ny:{nwy} X1:{x1} X2:{x2} Y1:{y1} Y2:{y2} WX:{(x2-x1)//nwx} WY:{(y2-y1)//nwy}')
        print('')

    # generate auxiliary arrays of nwx * nwy elements and initialize to zero
    meansig = np.zeros((nwx, nwy))
    stdbias = np.zeros((nwx, nwy))
    cf = np.zeros((nwx, nwy))
    stdsig = np.zeros((nwx, nwy))

    # windows is a generator of subwindows
    windows = subwindowcoor(x1, x2, y1, y2, **kargs)
    for i, j, xi, xf, yi, yf in windows:
        win = slice(xi, xf), slice(yi, yf)
        # compute the mean signal on each window (bias subtracted)
        meansig[i, j] = sigma_clip(
            (ff1[win] + ff2[win]) / 2.0).mean() - sigma_clip(
                (b1[win] + b2[win]) / 2.0).mean()
        # compute the FF difference on each window, optionally normalizing FF2
        if kargs.get('NORMFF2', False):
            ff_diff = ff1[win] - ff2[win] * (sigma_clip(ff1[win]).mean() /
                                             sigma_clip(ff2[win]).mean())
        else:
            ff_diff = ff1[win] - ff2[win]
        var_ff_diff = sigma_clip(ff_diff).std()**2
        # Measure the RON variance from the difference of the two bias
        var_ron = (sigma_clip(b1[win] - b2[win]).std()**2) / 2.0
        stdsig[i, j] = (var_ff_diff - 2 * var_ron)
        cf[i, j] = 2 * meansig[i, j] / stdsig[i, j]  # compute CF for each window
        # compute standard deviation for each window of the bias difference
        stdbias[i, j] = np.sqrt(var_ron)
        if kargs.get('VERBOSE', False):
            print(
                f"X({xi+x1},{xf+x1}) Y({yi+y1},{yf+y1}) Mean:{meansig[i, j]:.2f} stdff:{stdsig[i, j]:.2f} CF:{cf[i, j]:.2f}"
            )

    if kargs.get('MEDIAN', False):
        ConFac = np.median(cf, axis=None)
        RON = np.median(stdbias, axis=None)
    else:
        ConFac = sigma_clip(cf, sigma=sigma).mean()
        RON = sigma_clip(stdbias, sigma=sigma).mean()  # RON in ADUs

    RONe = RON * ConFac  # RON in electrons
    # Error in CF estimation is the std/sqrt(number of windows)
    # CFstd = np.std(cf, axis=None)/np.sqrt(nwx*nwy)
    CFstd = sigma_clip(cf).std()

    # If run as a ROUTINE, return only the conversion factor and related values
    # and don't continue with the plotting
    if kargs.get('RETURN', False):
        return ConFac, RONe, np.mean(meansig, axis=None), np.median(meansig, axis=None), \
            np.mean(stdsig, axis=None)**2, np.median(stdsig, axis=None)**2, x1, x2, y1, y2
    else:
        plt.figure()
        print("*******************************************")
        print(f"*CF  ={ConFac:.2f} +/-{CFstd:.2f} e/ADU")
        print(f"*RON ={RONe:.3f} -e")
        print(f"*RON ={RON:.3f} ADUs")
        print("*******************************************")
        # change the shape of the cf array to later compute the standard
        # deviation and make the histogram
        cf.shape = (nwx * nwy, )
        cfstd = np.std(cf, axis=None)
        plt.clf()
        plt.hist(cf, range=(ConFac - 3 * cfstd, ConFac + 3 * cfstd), bins=20)
        plt.figtext(0.15, 0.80, "CF mean=%5.3f +/-%5.3f e/ADU" % (ConFac, CFstd),
                    fontsize=11, bbox=dict(facecolor='yellow', alpha=0.5))
        plt.figtext(0.15, 0.75, "RON =%6.3f -e" % (RONe),
                    fontsize=11, bbox=dict(facecolor='yellow', alpha=0.5))
        plt.figtext(0.15, 0.70, "Computed @ %6.3f ADUs" % (np.mean(meansig)),
                    fontsize=11, bbox=dict(facecolor='yellow', alpha=0.5))
        Title = kargs.get('TITLE', '')
        plt.title(Title)
        filetitle = Title.replace(' ', '_')
        plt.show()
        if kargs.get('SAVE', False):
            plt.savefig('ConFac_' + filetitle + '.png')

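# Hedged usage sketch (not part of the original module): compute the conversion
# factor and read-out noise from two bias and two flat-field frames over a
# hypothetical flat window [200:1800,200:1800]; the file names are hypothetical too.
def _example_gain():
    frames = ['bias_1.fits', 'bias_2.fits', 'ff_1.fits', 'ff_2.fits']  # 2 bias + 2 FF, in that order
    result = gain(frames, 200, 1800, 200, 1800, NWX=10, NWY=10, RETURN=True)
    if result is not None:
        confac, ron_e = result[0], result[1]
        print(f'CF = {confac:.2f} e/ADU, RON = {ron_e:.2f} e-')
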
def ptc_ffpairs(imagelist, *coor, **kargs):
    """
    TODO: Needs to be finished !! NHA
    Perform a PTC plot for pairs of FF at the same level.
    The pairs of FF should have the same light level. The first 2 images in
    the list must be bias frames.
    To eliminate the FPN, the 'shotnoise' image is computed as the subtraction
    of two debiased flat field images.

    optional kargs arguments:
        FACTOR (default = 2.0)
        MAXSIGNAL (default 65535) => compute PTC only for signal values less than MAXSIGNAL
        VERBOSE (default = False) => print out table with signal and variance
        CLIP (default = True)     => use clipped statistics on images before computing CF
    """
    order = kargs.get('ORDER', 1)  # order of polynomial regression
    if order > 2:
        order = 2
    MAXSIGNAL = kargs.get('MAXSIGNAL', 65535.0)
    sigma = kargs.get('SIGMA', 3)
    ext = kargs.get('ext', 0)

    x1 = coor[0]
    x2 = coor[1]
    y1 = coor[2]
    y2 = coor[3]

    oddimageindex = list(range(3, len(imagelist), 2))
    evenimageindex = list(range(2, len(imagelist), 2))

    # Read in bias1 and bias2
    bias1 = Image(imagelist[0], ext).crop(x1, x2, y1, y2).get_data()
    bias2 = Image(imagelist[1], ext).crop(x1, x2, y1, y2).get_data()
    biasRON = (sigma_clip(bias2 - bias1).std()) / np.sqrt(2)
    if kargs.get('DEBUG', False):
        print(f'biasRON {biasRON}')
    bias_mean = sigma_clip((bias1 + bias2) / 2.0).mean()
    if kargs.get('DEBUG', False):
        print(f'Bias mean: {bias_mean}')

    # Separate images in even and odd (and crop them)
    ff1 = [
        Image(imagelist[i], ext).crop(x1, x2, y1, y2).get_data()
        for i in oddimageindex
    ]
    ff2 = [
        Image(imagelist[i], ext).crop(x1, x2, y1, y2).get_data()
        for i in evenimageindex
    ]

    # normalize each FF2 to the level of its FF1 partner
    factor = [
        sigma_clip(image1 / image2).mean() for image1, image2 in zip(ff1, ff2)
    ]
    if kargs.get('DEBUG', False):
        print(f'Factors: {factor}')
    ff2 = [image2 * f for f, image2 in zip(factor, ff2)]

    signal_mean = [
        sigma_clip((image1 + image2) / 2.0).mean() - bias_mean
        for image1, image2 in zip(ff1, ff2)
    ]
    variance = [
        sigma_clip(image1 - image2, sigma=sigma).std()**2
        for image1, image2 in zip(ff1, ff2)
    ]
    Truevariance = [0.5 * (var - 2 * biasRON**2) for var in variance]

    # Sort both signal and variance according to the mean signal
    zipped_sorted = sorted(zip(signal_mean, Truevariance))
    # remove signal,variance pairs where the signal is above MAXSIGNAL
    zipped_sorted = [x for x in zipped_sorted if x[0] <= MAXSIGNAL]
    # unpack to get back signal and variance, now sorted
    signal, variance = zip(*zipped_sorted)

    if kargs.get('VERBOSE', False):
        print('Mean signal\tVariance\tCF')
        for s, v in zip(signal, variance):
            print(f'{s:6.1f}\t\t{v:6.1f}\t\t{s/v:2.3f}')

    # compute polynomial coefficients
    coefts = np.polyfit(signal, variance, order)
    polyts = np.poly1d(coefts)
    # compute the fitted values for the variance
    variance_fitted = np.polyval(polyts, signal)
    # print('Intercept = {}'.format(polyts(0)))

    fig = plt.figure()  # create a figure object
    ax = fig.add_subplot(1, 1, 1)  # create an axes object in the figure
    ax.set_ylabel('Variance')
    ax.set_xlabel('Signal')
    ax.grid(True)
    ax.set_title('Photon Transfer Curve')
    # plot variance v/s signal
    ax.plot(signal, variance, 'b.')
    ax.plot(signal, variance_fitted, 'r-')
    plt.show()

    cf = 1 / coefts[0]
    if order == 1:
        cf = 1 / coefts[0]
        print(f'Extension: {ext}  CF = {cf:2.3f} -e/ADU  RON = {cf * biasRON:2.3f} -e')
    elif order == 2:
        cf = 1 / coefts[1]
        print(f'Extension: {ext}  CF = {cf:2.3f} -e/ADU  RON = {cf * biasRON:2.3f} -e')

    if kargs.get('RETURN', True):
        return cf, cf * biasRON, coefts

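# Hedged usage sketch (not part of the original module): PTC from two bias
# frames followed by flat-field pairs taken at increasing light levels, as
# required by ptc_ffpairs above. The file names and the analysis window are
# hypothetical; the return value is checked before unpacking because not every
# variant of ptc_ffpairs in this module returns values.
def _example_ptc_ffpairs():
    imagelist = ['bias_1.fits', 'bias_2.fits',
                 'ff_l1_a.fits', 'ff_l1_b.fits',
                 'ff_l2_a.fits', 'ff_l2_b.fits']  # hypothetical files
    result = ptc_ffpairs(imagelist, 100, 1900, 100, 1900,
                         ORDER=1, MAXSIGNAL=60000, VERBOSE=True)
    if result is not None:
        cf, ron_e, coefts = result
        print(f'CF = {cf:.3f} e/ADU, RON = {ron_e:.3f} e-')
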
def gain2(imagelist, *coor, **kargs):
    """
    Variation of the CCD gain computation.
    Compute the gain of the system using 2 bias and 2 FF images.
    The procedure divides the window in NWX*NWY subwindows, computes the gain
    for each one of them, then computes the mean value and displays a
    histogram. If the window coordinates are not given, the full CCD is used.

    Syntax:
        gain2(imagelist[,xi,xf,yi,yf][,NWX=10][,NWY=10][,VERBOSE=True/False][,SAVE=True][,TITLE='Graph Title'][,RETURN=True/False][,MEDIAN=True/False])

    Note: the image list must contain 2 bias and 2 ff in that order!
    imagelist can be a list of names, a list of Paths or a list of Images
        b1,b2 = bias images
        f1,f2 = ff images
    *coor = [xi,xf,yi,yf] = coordinates of the window to analyze (should be a flat region)

    kargs
    -------
    VERBOSE=True        => print intermediate values
    TITLE='Graph Title' => put a title in the graph
    SAVE=True/False     => if True, save the graph in png format
    RETURN=True/False   => if True, return only ConFac without plots
    MEDIAN=True/False   => default True, compute median instead of mean
    RATIO=True/False    => default True, just changes the way the FPN is eliminated.
                           Both methods give almost the same results
    NWX = number of windows in X direction (default 10)
    NWY = number of windows in Y direction (default 10)
    ext = image extension to load, default 0
    """
    ext = kargs.get('ext', 0)

    if len(imagelist) != 4:
        print('imagelist len different from 4')
        return None

    # if imagelist contains only image names or Paths, load them
    if all([isinstance(i, str) for i in imagelist]):
        images = [Image(i, ext) for i in imagelist]
        print(f'Extension={ext}')
        b1 = images[0]
        b2 = images[1]
        ff1 = images[2]
        ff2 = images[3]
    elif all([isinstance(i, Path) for i in imagelist]):
        images = [Image(i, ext) for i in imagelist]
        print(f'Extension={ext}')
        b1 = images[0]
        b2 = images[1]
        ff1 = images[2]
        ff2 = images[3]
    # elif all are Images, just assign them
    elif all([isinstance(i, Image) for i in imagelist]):
        b1 = imagelist[0]
        b2 = imagelist[1]
        ff1 = imagelist[2]
        ff2 = imagelist[3]
    else:
        print("Not all objects in image list are Images or filenames")
        return None

    # check that the bias have EXPTIME == 0.0 and the FF have EXPTIME > 0
    testlist = []
    testlist.append(b1.header['EXPTIME'] == 0.0)
    testlist.append(b2.header['EXPTIME'] == 0.0)
    testlist.append(ff1.header['EXPTIME'] > 0.0)
    testlist.append(ff2.header['EXPTIME'] > 0.0)
    print(testlist)
    if not all(testlist):
        print('Exposure times for at least one file are not correct')
        return None

    nwx = kargs.get('NWX', 10)  # set number of windows in x direction
    nwy = kargs.get('NWY', 10)  # set number of windows in y direction

    x1, x2, y1, y2 = b1.get_windowcoor(*coor)

    # now work with cropped images, where the signal is more or less flat...
    b1 = b1.crop(x1, x2, y1, y2)
    b2 = b2.crop(x1, x2, y1, y2)
    ff1 = ff1.crop(x1, x2, y1, y2)
    ff2 = ff2.crop(x1, x2, y1, y2)

    dbiasff1 = ff1 - b1  # debiased FF1
    dbiasff2 = ff2 - b2  # debiased FF2
    meanff2 = dbiasff2.mean()  # mean signal on debiased FF2
    meanff1 = dbiasff1.mean()  # mean signal on debiased FF1
    # ratio = meanff1/meanff2

    if kargs.get('VERBOSE', False):
        print(f'format images X={b1.shape[0]} pix  Y={b1.shape[1]} pix')
        print(f'Nx:{nwx} Ny:{nwy} X1:{x1} X2:{x2} Y1:{y1} Y2:{y2} WX:{(x2-x1)//nwx} WY:{(y2-y1)//nwy}')
        print('')
        print(f'meanff2 ={meanff2}')

    # dbiasff2 = dbiasff2*ratio
    # dbias_ff_diff = dbiasff1 - dbiasff2
    # dbias_ff_sig = (dbiasff1 + dbiasff2)/2.0
    # from Fabrice Chisthen
    dbias_ff_sig = (dbiasff1 / dbiasff2) * meanff2

    # compute the difference of the 2 bias to get the RON
    dbias = b1 - b2

    # generate auxiliary arrays of nwx * nwy elements and initialize to zero
    meansig = np.zeros((nwx, nwy))
    stdff = np.zeros((nwx, nwy))
    stdbias = np.zeros((nwx, nwy))
    cf = np.zeros((nwx, nwy))
    signal = (dbiasff1 / dbiasff2)
    stdsig = np.zeros((nwx, nwy))

    # windows is a generator of subwindows
    windows = subwindowcoor(0, b1.shape[0], 0, b1.shape[1], **kargs)
    for i, j, xi, xf, yi, yf in windows:
        # compute mean value on each window for the normalized ff
        meansig[i, j] = np.mean(dbias_ff_sig[xi:xf, yi:yf])
        # compute standard deviation on each window for the normalized ff
        stdsig[i, j] = np.std(dbias_ff_sig[xi:xf, yi:yf]) / np.sqrt(2.0)
        cf[i, j] = meansig[i, j] / (stdsig[i, j]**2)  # compute CF for each window
        # compute standard deviation for each window of the bias difference
        stdbias[i, j] = np.std(dbias[xi:xf, yi:yf]) / np.sqrt(2.0)
        if kargs.get('VERBOSE', False):
            print(
                f"X({xi+x1},{xf+x1}) Y({yi+y1},{yf+y1}) Mean:{meansig[i, j]:.2f} stdff:{stdsig[i, j]:.2f} CF:{cf[i, j]:.2f}"
            )

    if kargs.get('MEDIAN', True):
        ConFac = np.median(cf, axis=None)
        RON = np.median(stdbias, axis=None)
    else:
        ConFac = np.mean(cf, axis=None)
        RON = np.mean(stdbias, axis=None)  # RON in ADUs

    RONe = RON * ConFac  # RON in electrons
    # Error in CF estimation is the std/sqrt(number of windows)
    CFstd = np.std(cf, axis=None) / np.sqrt(nwx * nwy)

    # If run as a ROUTINE, return only the conversion factor and related values
    # and don't continue with the plotting
    if kargs.get('RETURN', False):
        return x1, x2, y1, y2, ConFac, RONe, meanff2
    else:
        plt.figure()
        print("*******************************************")
        print(f"*CF  ={ConFac:.2f} +/-{CFstd:.2f} e/ADU")
        print(f"*RON ={RONe:.3f} -e")
        print(f"*RON ={RON:.3f} ADUs")
        print("*******************************************")
        # change the shape of the cf array to later compute the standard
        # deviation and make the histogram
        cf.shape = (nwx * nwy, )
        cfstd = np.std(cf, axis=None)
        plt.clf()
        plt.hist(cf, range=(ConFac - 3 * cfstd, ConFac + 3 * cfstd), bins=20)
        plt.figtext(0.15, 0.80, "CF mean=%5.3f +/-%5.3f e/ADU" % (ConFac, CFstd),
                    fontsize=11, bbox=dict(facecolor='yellow', alpha=0.5))
        plt.figtext(0.15, 0.75, "RON =%6.3f -e" % (RONe),
                    fontsize=11, bbox=dict(facecolor='yellow', alpha=0.5))
        plt.figtext(0.15, 0.70, "Computed @ %6.3f ADUs" % (np.mean(meansig)),
                    fontsize=11, bbox=dict(facecolor='yellow', alpha=0.5))
        Title = kargs.get('TITLE', '')
        plt.title(Title)
        filetitle = Title.replace(' ', '_')
        plt.show()
        if kargs.get('SAVE', False):
            plt.savefig('ConFac_' + filetitle + '.png')

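# Hedged usage sketch (not part of the original module): programmatic use of
# gain2 with RETURN=True, which gives back the analysis window, the conversion
# factor, the read-out noise in electrons and the mean FF2 level. The file
# names and window are hypothetical; the frames need valid EXPTIME keywords.
def _example_gain2():
    frames = ['bias_1.fits', 'bias_2.fits', 'ff_1.fits', 'ff_2.fits']  # 2 bias + 2 FF, in that order
    result = gain2(frames, 200, 1800, 200, 1800, MEDIAN=True, RETURN=True)
    if result is not None:
        x1, x2, y1, y2, confac, ron_e, level = result
        print(f'window [{x1}:{x2},{y1}:{y2}]  CF={confac:.2f} e/ADU  RON={ron_e:.2f} e-  @ {level:.0f} ADU')
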
def ptc_pixels(biaslist, fflist, ext=0, *coor, **kargs):
    """
    Perform the PTC computation to get the gain and RON from a list of bias
    names and a list of FF image names. The FF images should be the same scene
    at all possible light levels; an example would be a grism FF on FORS or
    MUSE. To eliminate the FPN, the analysis is done pixel by pixel.

    optional kargs:
        LOW: low level in ADUs to compute the ptc
        HIGH: high level in ADUs to compute the ptc
        STEP: step in ADUs to compute the variance (small steps will slow down the computation)
        ORDER (default = 1): polynomial order to fit the ptc
        RETURN (default = False): return signal, variance, fitted variance and CF

    Ex:
        ptc_pixels(biaslist, fflist, 0, 0, 2000, 300, 600, LOW=100, HIGH=50000, STEP=10)

        signal, var, var_fitted, cf = ptc_pixels(biaslist, fflist, 0, 100, 200, 10, 2000,
                                                 LOW=100, HIGH=50000, STEP=100,
                                                 OLAYERS=0.4, RETURN=True)
        Compute the PTC in the window [100:200,10:2000] from 100 ADUs up to
        50000 ADUs, every 100 ADUs.
    """
    low = kargs.get('LOW', 0)  # minimum signal level to explore
    high = kargs.get('HIGH', 60000)  # maximum signal level to explore
    step = kargs.get('STEP', 100)  # step size, minimum is 1
    nwx = kargs.get('NWX', 10)  # size of windows in X to compute RON
    nwy = kargs.get('NWY', 10)  # size of windows in Y to compute RON

    order = kargs.get('ORDER', 1)  # order of polynomial regression
    if order > 2:
        order = 2

    # read the bias and ff lists
    biasimages = [Image(i, ext) for i in biaslist]
    ffimages = [Image(i, ext) for i in fflist]

    x1, x2, y1, y2 = biasimages[0].get_windowcoor(*coor)

    # crop the bias images and compute the bias mean
    biascroped = [i.crop(*coor) for i in biasimages]
    biasmean = meanstack(biascroped)

    # estimate the single-frame bias noise in small windows around the centre
    stdsig = np.zeros((nwx, nwy))
    windows = subwindowcoor((x2 - x1) // 2 - 5 * nwx, (x2 - x1) // 2 + 5 * nwx,
                            (y2 - y1) // 2 - 5 * nwy, (y2 - y1) // 2 + 5 * nwy,
                            **kargs)
    for i, j, xi, xf, yi, yf in windows:
        stdsig[i, j] = biasmean[xi:xf, yi:yf].std() * np.sqrt(len(biascroped))

    # crop the ff images and remove the bias
    ffcroped = [i.crop(*coor) for i in ffimages]
    ffcropdb = [(i - biasmean) for i in ffcroped]
    ffcropdb_data = [i.get_data() for i in ffcropdb]

    # compute the signal (mean of the debiased FFs), using only the data arrays
    ffsignal = meanstack(ffcropdb)
    ffsignal_data = ffsignal.get_data()

    # compute the variance of all debiased FF images along axis 0
    ffcropdb_stacked = np.stack(ffcropdb_data)
    ffvar = ffcropdb_stacked.var(ddof=1, axis=0)

    # flatten the resulting arrays and convert them to integer ADUs
    ffsignal_flatten = ffsignal_data.flatten().astype(int)
    ffvar_flatten = ffvar.flatten().astype(int)

    # get the unique signal values and keep those between LOW and HIGH
    ffsignal_unique = np.unique(ffsignal_flatten)
    ffsig_unique = [i for i in ffsignal_unique if i >= low and i <= high]

    # generate the sampling values and create a subset of unique values using them
    sampling = list(range(low, len(ffsig_unique), step))
    ffsampled = [ffsig_unique[i] for i in sampling]

    # by default use the mean to compute the variance at each sampled signal level
    if kargs.get('MEDIAN', False):
        variance = [
            np.median(ffvar_flatten[ffsignal_flatten == i]) for i in ffsampled
        ]
    else:
        variance = [
            np.mean(ffvar_flatten[ffsignal_flatten == i]) for i in ffsampled
        ]

    # filter out the pixels with variance == 0 or greater than 200000
    variance = np.array(variance)
    ffsampled = np.array(ffsampled)
    vfiltered_index = np.where((variance > 0) & (variance < 200000))
    ffsampled = ffsampled[vfiltered_index]
    variance = variance[vfiltered_index]

    plt.scatter(ffsampled, variance)
    plt.grid()
    plt.show()

    # compute the polynomial without filtering outliers
    coefts_nf = np.polyfit(ffsampled, variance, order)
    polyts_nf = np.poly1d(coefts_nf)
    var_fitted = polyts_nf(ffsampled)

    if order == 2:
        gain = 1 / coefts_nf[1]
    else:
        gain = 1 / coefts_nf[0]
    ron = gain * np.median(stdsig)
    print(f"GAIN = {gain} -e/ADU  RON = {ron} -e")

    # return signal, variance, fitted variance and CF if requested
    # (as documented in the docstring above)
    if kargs.get('RETURN', False):
        return ffsampled, variance, var_fitted, gain

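# Hedged usage sketch (not part of the original module): build the bias and FF
# lists with glob and run the pixel-based PTC, relying on the RETURN=True
# behaviour documented in the docstring above. The file patterns, window and
# sampling parameters are hypothetical.
def _example_ptc_pixels():
    import glob
    biaslist = sorted(glob.glob('bias_*.fits'))    # hypothetical bias frames
    fflist = sorted(glob.glob('grism_ff_*.fits'))  # hypothetical FF frames, same scene, varying level
    signal, var, var_fitted, cf = ptc_pixels(biaslist, fflist, 0,
                                             100, 1900, 100, 1900,
                                             LOW=200, HIGH=50000, STEP=200,
                                             RETURN=True)
    print(f'CF = {cf:.3f} e/ADU over {len(signal)} sampled levels')
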
def ptc_ffpairs(imagelist, *coor, **kargs):
    """
    TODO: Needs to be finished !! NHA
    Perform a PTC plot for pairs of FF at the same level.
    The pairs of FF should have the same light level. The first 2 images in
    the list must be bias frames.
    To eliminate the FPN, the 'shotnoise' image is computed as the subtraction
    of two debiased flat field images.

    optional kargs arguments:
        FACTOR (default = 2.0)
        MAXSIGNAL (default 65535) => compute PTC only for signal values less than MAXSIGNAL
        VERBOSE (default = False) => print out table with signal and variance
    """
    order = kargs.get('ORDER', 1)  # order of polynomial regression
    if order > 2:
        order = 2
    MAXSIGNAL = kargs.get('MAXSIGNAL', 65535.0)
    VERBOSE = kargs.get('VERBOSE', False)
    ext = kargs.get('EXT', 0)

    # read coordinates of first image
    x1, x2, y1, y2 = Image(imagelist[0], ext).get_windowcoor(*coor)

    oddimageindex = list(range(3, len(imagelist), 2))
    evenimageindex = list(range(2, len(imagelist), 2))

    # Read in bias1 and bias2
    bias1 = Image(imagelist[0], ext).crop(x1, x2, y1, y2)
    bias2 = Image(imagelist[1], ext).crop(x1, x2, y1, y2)
    bias_dif = bias2 - bias1
    # mask out all pixels with values greater or lower than 3*std
    bias_dif.mask()

    # Separate images in even and odd (and crop them)
    ff1 = [Image(imagelist[i], ext).crop(x1, x2, y1, y2) for i in oddimageindex]
    ff2 = [Image(imagelist[i], ext).crop(x1, x2, y1, y2) for i in evenimageindex]

    # remove the bias from both ff lists
    ff1d = [(image - bias1) for image in ff1]
    ff2d = [(image - bias2) for image in ff2]

    if kargs.get('USE_FFMEAN', False):
        ffmean = [(image1 / image2) * image2.mean()
                  for image1, image2 in zip(ff1d, ff2d)]
    else:
        ffmean = [(image1 + image2) / 2.0
                  for image1, image2 in zip(ff1d, ff2d)]

    shotnoise = [(image1 - image2) for image1, image2 in zip(ff1d, ff2d)]

    signal = [image.mean() for image in ff1d]  # ffmean]
    variance = [image.var() / 2.0 for image in shotnoise]

    # Sort both signal and variance according to the mean signal
    zipped_sorted = sorted(zip(signal, variance))
    # remove signal,variance pairs where the signal is above MAXSIGNAL
    zipped_sorted = [x for x in zipped_sorted if x[0] <= MAXSIGNAL]
    # unpack to get back signal and variance, now sorted
    signal, variance = zip(*zipped_sorted)

    if kargs.get('VERBOSE', False):
        print('Mean signal   Variance')
        for s, v in zip(signal, variance):
            print(' {:6.1f}   {:6.1f}'.format(s, v))

    # compute polynomial coefficients
    coefts = np.polyfit(signal, variance, order)
    polyts = np.poly1d(coefts)
    # compute the fitted values for the variance
    variance_fitted = np.polyval(polyts, signal)
    # print('Intercept = {}'.format(polyts(0)))

    fig = plt.figure()  # create a figure object
    ax = fig.add_subplot(1, 1, 1)  # create an axes object in the figure
    ax.set_ylabel('Variance')
    ax.set_xlabel('Signal')
    ax.grid(True)
    ax.set_title('Photon Transfer Curve')
    # plot variance v/s signal
    ax.plot(signal, variance, 'b.')
    ax.plot(signal, variance_fitted, 'r-')

    cf = 1 / coefts[0]
    if order == 1:
        cf = 1 / coefts[0]
        print('Extension: {}  CF = {:2.3f} -e/ADU  RON = {:2.3f} -e'.format(
            ext, cf, cf * bias_dif.std() / np.sqrt(2.0)))
    elif order == 2:
        cf = 1 / coefts[1]
        print('Extension: {}  CF = {:2.3f} -e/ADU  RON = {:2.3f} -e'.format(
            ext, cf, cf * bias_dif.std() / np.sqrt(2.0)))
