def make_library_image(amp_image, header, outname, fits_list, for_bias=True):
    a, b = amp_image.shape
    overscan = []
    trimsec = []
    # Parse BIASSEC and TRIMSEC keywords into zero-indexed pixel bounds
    bias = re.split('[\[ \] \: \,]', header['BIASSEC'])[1:-1]
    biassec = [int(t) - ((i + 1) % 2) for i, t in enumerate(bias)]
    trim = re.split('[\[ \] \: \,]', header['TRIMSEC'])[1:-1]
    trimsec = [int(t) - ((i + 1) % 2) for i, t in enumerate(trim)]
    # Measure the overscan level of each frame from its BIASSEC region
    for F in fits_list:
        overscan.append(biweight_location(F[0].data[biassec[2]:biassec[3],
                                                    biassec[0]:biassec[1]]))
    # Combine the overscan-subtracted frames into the library image
    A = []
    for j, hdu in enumerate(fits_list):
        if for_bias:
            A.append(biweight_filter2d(hdu[0].data, (25, 5), (5, 1)) -
                     overscan[j])
        else:
            A.append(hdu[0].data - overscan[j])
    amp_image[:, :] = biweight_location(A, axis=(0,))
    # Interpolate over any non-finite pixels, column by column
    for i in xrange(b):
        good = np.isfinite(amp_image[:, i])
        amp_image[:, i] = np.interp(np.arange(a), np.arange(a)[good],
                                    amp_image[good, i])
    # Free memory and close the input files
    for hdu in fits_list:
        del hdu[0].data
        hdu.close()
    hdu = fits.PrimaryHDU(np.array(amp_image[trimsec[2]:trimsec[3],
                                             trimsec[0]:trimsec[1]],
                                   dtype='float32'), header=header)
    hdu.header.remove('BIASSEC')
    hdu.header.remove('TRIMSEC')
    hdu.header['DATASEC'] = '[%i:%i,%i:%i]' % (1, trimsec[1] - trimsec[0],
                                               1, a)
    hdu.writeto(outname, overwrite=True)
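
# Usage sketch (illustrative only; file names and shapes below are assumptions,
# not from the source). make_library_image expects already-opened HDU lists
# whose headers still carry BIASSEC/TRIMSEC, fills amp_image in place, and
# writes the trimmed result:
#
#     from astropy.io import fits
#     import numpy as np
#     fits_list = [fits.open(fn) for fn in ('bias_0001.fits', 'bias_0002.fits')]
#     header = fits_list[0][0].header
#     amp_image = np.zeros(fits_list[0][0].data.shape)
#     make_library_image(amp_image, header, 'masterbias_LL.fits', fits_list,
#                        for_bias=True)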
def check_darks(args, amp, folder, masterbias, edge=3, width=10):
    log = logging.getLogger('characterize')
    # Create an empty list for the measured dark count rates
    dark_counts = []
    # Select only the dark frames that match the input amp, e.g., "RU"
    sel = [i for i, v in enumerate(args.drk_list) if v.amp == amp]
    if len(sel) <= 2 or args.quick:
        func = np.median
    else:
        func = biweight_location
    log.info('Writing masterdark_%s.fits' % (amp))
    big_array = np.array([v.image - masterbias
                          for v in itemgetter(*sel)(args.drk_list)])
    masterdark = func(big_array, axis=(0,))
    a, b = masterdark.shape
    hdu = fits.PrimaryHDU(np.array(masterdark, dtype='float32'))
    hdu.writeto(op.join(folder, 'masterdark_%s.fits' % amp), overwrite=True)
    # Loop through the dark list and measure the count rate of each frame
    for am in itemgetter(*sel)(args.drk_list):
        a, b = am.image.shape
        dark_counts.append(func(am.image - masterbias) / am.exptime)
    s = biweight_location(dark_counts)
    log.info('Average Dark counts/s: %0.5f' % s)
    return s, masterdark
def imstat(image1, image2, Fiber1, Fiber2, outname, fbins=50, fmax=8.):
    a, b = image1.shape
    images = [image1, image2]
    Fibers = [Fiber1, Fiber2]
    totstat = np.zeros((2 * a, b))
    totdist = np.zeros((2 * a, b))
    for i, image in enumerate(images):
        dist = np.zeros((a, b))
        trace_array = np.array([fiber.trace for fiber in Fibers[i]])
        for y in xrange(a):
            for x in xrange(b):
                dist[y, x] = np.min(np.abs(trace_array[:, x] - y))
        if i == 0:
            totdist[:a, :] = dist
            totstat[:a, :] = image
        else:
            totdist[a:, :] = dist
            totstat[a:, :] = image
    frange = np.linspace(0, fmax, fbins + 1)
    totdist = totdist.ravel()
    totstat = totstat.ravel()
    stats = np.zeros((fbins,))
    for i in xrange(fbins):
        sel = np.where((totdist >= frange[i]) * (totdist < frange[i + 1]))[0]
        stats[i] = biweight_location(totstat[sel])
    plt.figure(figsize=(6, 5))
    plt.plot(frange[:-1] + np.diff(frange) / 2., stats,
             color=[1.0, 0.2, 0.2], lw=3)
    plt.xlim([0, fmax])
    plt.xlabel('Fiber Distance', fontsize=14)
    plt.ylabel('Average Value', fontsize=14)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.savefig(outname, dpi=150)
    plt.close()
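
# The nested y/x loops in imstat compute, for every pixel, the distance to the
# nearest fiber trace in that column. A vectorized equivalent is sketched
# below (an assumed helper, not part of the source); trace_array has shape
# (nfibers, b) and the result has shape (a, b):
import numpy as np


def fiber_distance_map(trace_array, a, b):
    yy = np.arange(a)[None, :, None]      # (1, a, 1) pixel row indices
    tr = trace_array[:, None, :]          # (nfib, 1, b) trace positions
    return np.abs(tr - yy).min(axis=0)    # (a, b) distance to nearest trace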
def check_bias(args, amp, folder, edge=3, width=10):
    log = logging.getLogger('characterize')
    # Placeholders for the left edge jump, right edge jump, and structure
    left_edge, right_edge, structure, overscan = [], [], [], []
    # Select only the bias frames that match the input amp, e.g., "RU"
    sel = [i for i, v in enumerate(args.bia_list) if v.amp == amp]
    overscan_list = [v.overscan_value for i, v in enumerate(args.bia_list)
                     if v.amp == amp]
    overscan = biweight_location(overscan_list)
    log.info('Overscan value for %s: %0.3f' % (amp, overscan))
    # Stack the selected bias frames and build the master bias
    big_array = np.array([v.image for v in itemgetter(*sel)(args.bia_list)])
    if args.quick:
        func = np.median
    else:
        func = biweight_location
    masterbias = func(big_array, axis=(0,))
    a, b = masterbias.shape
    # masterbias = biweight_filter2d(masterbias, (25, 5), (3, 1))
    hdu = fits.PrimaryHDU(np.array(masterbias, dtype='float32'))
    log.info('Writing masterbias_%s.fits' % (amp))
    hdu.writeto(op.join(folder, 'masterbias_%s.fits' % amp), overwrite=True)
    # Measure the edge jumps and the column structure of the master bias
    left_edge = func(masterbias[:, edge:edge + width])
    right_edge = func(masterbias[:, (b - width - edge):(b - edge)])
    structure = func(masterbias[:, edge:(b - edge)], axis=(0,))
    log.info('Left edge - Overscan, Right edge - Overscan: %0.3f, %0.3f'
             % (left_edge, right_edge))
    return left_edge, right_edge, structure, overscan, masterbias
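
# Usage sketch (assumed attributes, not from the source): each entry of
# args.bia_list is taken to be an amplifier object exposing .amp (e.g. "RU"),
# .image (2D bias frame), and .overscan_value; args.quick switches from the
# biweight to a plain median when stacking.
#
#     left, right, structure, overscan, masterbias = check_bias(
#         args, 'RU', 'characterization_output')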
def overscansub(frames, amp):
    for f in frames:
        overscan_data = f.fits[amp][f.biassec[amp][2]:f.biassec[amp][3],
                                    f.biassec[amp][0]:f.biassec[amp][1]]
        overscan_value = biweight_location(overscan_data.flatten())
        # print("Subtracting %0.3f from %s" % (overscan_value,
        #                                      op.basename(f.indname[amp])))
        subtractfits(f, amp, value=overscan_value)
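
# A minimal, numpy-only sketch of the same overscan correction (np.median as a
# robust stand-in for biweight_location); biassec is assumed to hold
# zero-indexed [xlow, xhigh, ylow, yhigh] bounds, as in the frame objects
# above.
import numpy as np


def subtract_overscan(image, biassec):
    xlow, xhigh, ylow, yhigh = biassec
    level = np.median(image[ylow:yhigh, xlow:xhigh])  # robust overscan level
    return image - level, level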
def throughput_fiberextract(Felist, args):
    nifu = len(Felist)
    nw = len(Felist[0][0].data[0, :])
    xp = np.linspace(0, 1, num=nw)
    nbspline = 12
    a = np.linspace(0, 1, nbspline)
    knots = np.hstack([0, 0, np.vstack([a, a]).T.ravel(), 1, 1])
    b = Bspline(knots, 3)
    basis = np.array([b(xi) for xi in xp])
    B = np.zeros((nifu, nw))
    for i in xrange(nifu):
        spec = biweight_location(Felist[i][0].data, axis=(0,))
        mask = np.where((~np.isnan(spec)) * (~np.isinf(spec)) *
                        (spec != 0))[0]
        sol = np.linalg.lstsq(basis[mask, :], spec[mask])[0]
        B[i, :] = np.dot(basis, sol)
        # if args.plot:
        #     pltfile = op.join(args.outfolder, 'spectrum_%i.pdf' % i)
        #     fig = plt.figure(figsize=(8, 6))
        #     plt.plot(xp, spec)
        #     plt.plot(xp, B[i, :], 'r--')
        #     plt.xticks([])
        #     plt.xlabel('Wavelength')
        #     plt.ylabel('Arbitrary Units')
        #     plt.xlim([0, 1])
        #     fig.savefig(pltfile, dpi=150)
        #     plt.close()
    # Compute the average spectrum outside the plotting block so that it is
    # always defined for the return statement below.
    avgB = biweight_location(B, axis=(0,))
    if args.plot:
        norm = plt.Normalize()
        colors = plt.cm.viridis(norm(np.arange(nifu) + 1))
        pltfile = op.join(args.outfolder, 'IFU_average_spectra.pdf')
        fig = plt.figure(figsize=(8, 6))
        for i in xrange(nifu):
            with np.errstate(divide='ignore'):
                plt.plot(xp, B[i, :] / avgB, color=colors[i, 0:3], alpha=0.9)
        plt.xticks([])
        plt.xlabel('Wavelength')
        plt.ylabel('Normalized Units')
        plt.xlim([0, 1])
        plt.ylim([0.5, 1.5])
        fig.savefig(pltfile, dpi=150)
        plt.close()
    return B, avgB
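
# The project's Bspline(knots, 3) helper is assumed to return a callable cubic
# B-spline basis. The sketch below reproduces the same smoothing idea with
# scipy.interpolate.BSpline as an assumed stand-in (not the source's class):
import numpy as np
from scipy.interpolate import BSpline


def bspline_basis(xp, nbspline=12, degree=3):
    # Clamped knot vector on [0, 1] with nbspline break points
    knots = np.concatenate([[0.] * degree, np.linspace(0, 1, nbspline),
                            [1.] * degree])
    nbasis = len(knots) - degree - 1
    basis = np.zeros((len(xp), nbasis))
    for j in range(nbasis):
        coeff = np.zeros(nbasis)
        coeff[j] = 1.0
        basis[:, j] = BSpline(knots, coeff, degree, extrapolate=False)(xp)
    return np.nan_to_num(basis)


# Example least-squares projection of a noisy spectrum onto the basis:
#     xp = np.linspace(0, 1, 1000)
#     spec = np.sin(4 * np.pi * xp) + 0.05 * np.random.randn(len(xp))
#     basis = bspline_basis(xp)
#     smooth = basis.dot(np.linalg.lstsq(basis, spec, rcond=None)[0])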
def biweight_filter_2d(image, width):
    a, b = image.shape
    # width is (xw, yw); a single value is used for both axes
    if len(width) > 1:
        xw = width[0]
        yw = width[1]
    else:
        xw = width[0]
        yw = width[0]
    smooth = np.zeros(image.shape)
    for i in xrange(b):
        for j in xrange(a):
            # Window bounds, clipped at the image edges
            xl = np.max([0, i - xw])
            yl = np.max([0, j - yw])
            xh = np.min([b - 1, i + xw])
            yh = np.min([a - 1, j + yw])
            # Flattened index of the central pixel within the window,
            # removed before taking the biweight location
            loc = (j - yl) * (xh + 1 - xl) + (i - xl)
            smooth[j, i] = biweight_location(
                np.delete(image[yl:yh + 1, xl:xh + 1], (loc)))
    return smooth
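
# Usage sketch (illustrative values): biweight_filter_2d returns a robust
# local background, where each output pixel is the biweight location of the
# surrounding (2*yw+1) x (2*xw+1) window with the central pixel excluded.
#
#     img = np.random.randn(100, 100) + 50.0
#     background = biweight_filter_2d(img, (5, 3))   # xw = 5, yw = 3
#     residual = img - background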
def make_cube_file(args, filename, ifucen, scale, side):
    if args.instr.lower() == "lrs2":
        outname = op.join(op.dirname(filename), 'Cu' + op.basename(filename))
        outname2 = op.join(op.dirname(filename), 'Co' + op.basename(filename))
        print("Making Cube image for %s" % op.basename(outname))
        try:
            F = fits.open(filename)
        except IOError:
            print("Could not open %s" % filename)
            return None
        data = F[0].data
        a, b = data.shape
        x = np.arange(ifucen[:, 1].min() - scale,
                      ifucen[:, 1].max() + scale, scale)
        y = np.arange(ifucen[:, 2].min() - scale,
                      ifucen[:, 2].max() + scale, scale)
        xgrid, ygrid = np.meshgrid(x, y)
        zgrid = np.zeros((b,) + xgrid.shape)
        for k in xrange(b):
            for i in xrange(len(x)):
                for j in xrange(len(y)):
                    d = np.sqrt((ifucen[:, 1] - xgrid[j, i])**2 +
                                (ifucen[:, 2] - ygrid[j, i])**2)
                    w = np.exp(-1. / 2. * (d / 1.)**2)
                    sel = w > 1e-3
                    zgrid[k, j, i] = (np.sum(data[sel, k] * w[sel]) /
                                      np.sum(w[sel]))
        hdu = fits.PrimaryHDU(np.array(zgrid, dtype='float32'))
        zcol = biweight_location(zgrid[int(b / 3):int(2 * b / 3), :, :],
                                 axis=(0,))
        hdu.header['CDELT3'] = F[0].header['CDELT1']
        hdu.header['CRVAL3'] = F[0].header['CRVAL1']
        hdu.header['CRPIX3'] = F[0].header['CRPIX1']
        write_to_fits(hdu, outname)
        hdu = fits.PrimaryHDU(np.array(zcol, dtype='float32'))
        write_to_fits(hdu, outname2)
    if args.instr.lower() == "virus":
        if side == "R":
            file2 = filename
            file1 = filename[:-6] + "L.fits"
        else:
            return None
        if not op.exists(file2):
            print("Could not open %s" % file2)
            return None
        if not op.exists(file1):
            print("Could not open %s" % file1)
            return None
        outname = op.join(op.dirname(filename),
                          'Cu' + op.basename(filename)[:-7] + '.fits')
        outname2 = op.join(op.dirname(filename),
                           'Co' + op.basename(filename)[:-7] + '.fits')
        print("Making Cube image for %s" % op.basename(outname))
        F2 = fits.open(file2)
        F1 = fits.open(file1)
        data1 = F1[0].data
        data2 = F2[0].data
        a, b = data1.shape
        data = np.vstack([data1, data2])
        if len(data[:, 0]) != len(ifucen[:, 1]):
            print("Length of IFUcen file not the same as Fe. Skipping Cube")
            return None
        x = np.arange(ifucen[:, 1].min() - scale,
                      ifucen[:, 1].max() + scale, scale)
        y = np.arange(ifucen[:, 2].min() - scale,
                      ifucen[:, 2].max() + scale, scale)
        xgrid, ygrid = np.meshgrid(x, y)
        zgrid = np.zeros((b,) + xgrid.shape)
        for k in xrange(b):
            for i in xrange(len(x)):
                for j in xrange(len(y)):
                    d = np.sqrt((ifucen[:, 1] - xgrid[j, i])**2 +
                                (ifucen[:, 2] - ygrid[j, i])**2)
                    w = np.exp(-1. / 2. * (d / (2. / 2.35 * 2.))**2)
                    sel = w > 1e-4
                    zgrid[k, j, i] = (np.sum(data[sel, k] * w[sel]) /
                                      np.sum(w[sel]))
        hdu = fits.PrimaryHDU(np.array(zgrid, dtype='float32'))
        zcol = biweight_location(zgrid[:, :, :], axis=(0,))
        hdu.header['CDELT3'] = F1[0].header['CDELT1']
        hdu.header['CRVAL3'] = F1[0].header['CRVAL1']
        hdu.header['CRPIX3'] = F1[0].header['CRPIX1']
        write_to_fits(hdu, outname)
        hdu = fits.PrimaryHDU(np.array(zcol, dtype='float32'))
        write_to_fits(hdu, outname2)
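
# The triple k/i/j loops above perform Gaussian-kernel gridding of fiber
# values onto a regular spatial grid. A vectorized equivalent for a single
# wavelength slice is sketched below (assumed helper, not from the source):
import numpy as np


def grid_slice(fiber_x, fiber_y, fiber_vals, xgrid, ygrid, sigma=1.0,
               wcut=1e-3):
    # Distance from every grid point to every fiber centre
    d = np.sqrt((fiber_x[None, None, :] - xgrid[:, :, None]) ** 2 +
                (fiber_y[None, None, :] - ygrid[:, :, None]) ** 2)
    w = np.exp(-0.5 * (d / sigma) ** 2)
    w[w <= wcut] = 0.0                      # drop negligible weights
    return (w * fiber_vals[None, None, :]).sum(axis=2) / w.sum(axis=2)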
def main(args=None):
    if args is None:
        args = parse_args()
    # Load the star grid and star names (the names are strings, so they are
    # read with a separate call)
    stargrid = np.loadtxt(op.join(args.seddir, 'stargrid-150501.dat'),
                          usecols=[1, 2, 3, 4, 8, 9, 10, 11, 12], skiprows=1)
    starnames = np.loadtxt(op.join(args.seddir, 'stargrid-150501.dat'),
                           usecols=[0], skiprows=1, dtype=str)
    # Get MILES wavelength array
    p = fits.open(op.join(args.seddir, 'miles_spec',
                          'S' + starnames[0] + '.fits'))
    waveo = (p[0].header['CRVAL1'] +
             np.linspace(0, len(p[0].data) - 1, len(p[0].data)) *
             p[0].header['CDELT1'])
    # Load MILES spectra
    spectra = load_spectra(waveo, stargrid[:, 0], starnames, args.seddir)
    # Define the output wavelength grid
    wave = np.arange(args.wave_init, args.wave_final + args.bin_size,
                     args.bin_size, dtype=float)
    # Load the data from file
    data = np.loadtxt(args.filename)
    shot, ID = np.loadtxt(args.filename, usecols=[0, 1], dtype=int,
                          unpack=True)
    # Load the priors on Mg and Z
    P = load_prior(args.seddir)
    # In case the "plots" and "output" directories do not exist
    mkpath('plots')
    mkpath('output')
    # Columns from data and stargrid for u, g, r, i (z is a +1 in the loop)
    cols = [4, 5, 6, 7]
    # Extinction and wavelength vector for ugriz
    ext_vector = np.array([4.892, 3.771, 2.723, 2.090, 1.500])
    wv_vector = np.array([3556, 4702, 6175, 7489, 8946])
    # Guessing at an error vector from modeling and photometry
    # (no errors provided)
    mod_err = .02**2 + .02**2
    e1 = np.sqrt(.05**2 + .02**2 + mod_err)  # u-g errors
    e2 = np.sqrt(.02**2 + .02**2 + mod_err)  # g-r, r-i, i-z errors
    err_vector = np.array([e1, e2, e2, e2])
    # Using the input extinction to correct the magnitudes
    ebv = args.ebv
    extinction = Cardelli(wave)
    stargrid[:, 4:9] = stargrid[:, 4:9] + ext_vector * ebv
    # Remove an odd point from the grid
    sel = ((stargrid[:, 2] > 3.76) * (stargrid[:, 2] < 3.79) *
           (stargrid[:, 3] > 2.5) * (stargrid[:, 3] < 3.00))
    stargrid = stargrid[~sel, :]
    spectra = spectra[~sel, :]
    # Calculate the color-distance chi2 and use it for the likelihood
    d = np.zeros((len(data), len(stargrid), 4))
    for i, col in enumerate(cols):
        d[:, :, i] = 1 / err_vector[i] * (
            (data[:, col] - data[:, col + 1])[:, np.newaxis] -
            (stargrid[:, col] - stargrid[:, col + 1]))
    dd = d**2 + np.log(2 * np.pi * err_vector**2)
    lnlike = -0.5 * dd.sum(axis=2)
    chi2 = 1. / (len(err_vector) + 1) * (d**2).sum(axis=2)
    # Calculate the prior and add it to the likelihood for the probability
    lnprior = P(stargrid[:, 0], stargrid[:, 1])
    lnprob = lnlike + lnprior
    # Loop through all sources and build a best-fit spectrum with errors
    for lnp, sh, Id, m, chi in zip(lnprob, shot, ID, data[:, 4:6], chi2):
        bv = np.argsort(lnp)[-3]
        vmax = lnp[bv]
        errbounds = 2.5
        ind = np.where((vmax - lnp) < errbounds)[0]
        normspec = []
        for i in ind:
            fac = 10**(-0.4 * (m[1] - (stargrid[i, 5] -
                                       ext_vector[1] * ebv)))
            normspec.append(fac * biweight_bin(wave, waveo, spectra[i]) *
                            10**(-.4 * ebv * extinction))
        if len(normspec) > 2:
            avgspec = biweight_location(normspec, axis=(0,))
            stdspec = np.sqrt(biweight_midvariance(normspec, axis=(0,))**2 +
                              err_vector[1]**2 * avgspec**2)
        else:
            avgspec = np.mean(normspec, axis=(0,))
            stdspec = 0.2 * avgspec
        if args.outfolder is None:
            args.outfolder = 'output'
        F = np.array([wave, avgspec, stdspec], dtype='float32').swapaxes(0, 1)
        n, d = F.shape
        F1 = np.zeros((n + 1, d))
        F1[1:, :] = F
        F1[0, :] = [chi[bv], 0, 0]
        np.savetxt(op.join(args.outfolder, '%06d_%i.txt' % (sh, Id)), F1)
        if args.make_plot:
            make_plot(vmax, errbounds, stargrid, lnp, ind, normspec, wave,
                      avgspec, stdspec, wv_vector, sh, Id, m, chi[bv])
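
# The lnlike construction above is a per-color Gaussian log-likelihood,
# ln L = -0.5 * sum_c [ ((obs_c - mod_c) / sigma_c)**2 + ln(2*pi*sigma_c**2) ].
# A compact sketch for a single source (assumed names, not from the source):
import numpy as np


def color_lnlike(obs_colors, model_colors, color_errors):
    # obs_colors: (ncolors,); model_colors: (nmodels, ncolors)
    resid = (obs_colors[None, :] - model_colors) / color_errors[None, :]
    return -0.5 * np.sum(resid ** 2 +
                         np.log(2 * np.pi * color_errors ** 2)[None, :],
                         axis=1)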
def main():
    args = parse_args()
    # If it is lab data, the sub folder is different than if it is HET data
    if args.lab:
        subfolder = "camra"
        print("Looking at LAB data.")
    else:
        subfolder = "virus"
        print("Looking at HET data.")
    if args.specid is not None:
        for key in IFUSLOT_DICT:
            if IFUSLOT_DICT[key][0] == args.specid:
                ifuslot = key
                print(ifuslot)
        flt_names = glob.glob(op.join(args.flt, 'exp*', subfolder,
                                      '*' + ifuslot + SPEC[0] + '*.fits'))
        zro_names = glob.glob(op.join(args.zro, 'exp*', subfolder,
                                      '*' + ifuslot + SPEC[0] + '*.fits'))
    else:
        flt_names = glob.glob(op.join(args.flt, 'exp*', subfolder,
                                      '*' + SPEC[0] + '*.fits'))
        zro_names = glob.glob(op.join(args.zro, 'exp*', subfolder,
                                      '*' + SPEC[0] + '*.fits'))
    # Sort the flt names; glob.glob does not guarantee a sorted order
    flt_names = sorted(flt_names)
    npairs = len(flt_names) // 2
    nbiases = len(zro_names)
    if npairs < 1:
        print("Must have at least two flt images for gain/readnoise "
              "measurement.")
        print("None found in: %s" % (op.join(args.flt, 'exp*', subfolder,
                                             '*' + SPEC[0] + '*.fits')))
        return None
    print("Examining %d pairs of flt files for gain and readnoise." % npairs)
    print("Looking at flt files in %s" % (args.flt))
    print("Looking at zro files in %s" % (args.zro))
    flow = args.flow
    fhigh = args.fhigh
    fnum = args.fnum
    lthresh = 1500
    hthresh = 32000
    gain = np.zeros((4, npairs))
    read = np.zeros((4, npairs))
    gainunit = {}
    readunit = {}
    readnoiseavg = {}
    gainhead = {}
    rdnoisehead = {}
    spcount = 0
    for sp in SPEC:
        # Use the first zero frame to define the overscan/trim sections
        beginning, ending = zro_names[0].split(SPEC[0])
        filenameb1 = beginning + sp + ending
        p = fits.open(filenameb1)
        blank, xlow, xhigh, ylow, yhigh, blank = re.split(
            '[: \[ \] ,]', p[0].header['BIASSEC'])
        xlow = int(xlow)
        xhigh = int(xhigh)
        ylow = int(ylow)
        yhigh = int(yhigh)
        blank, txlow, txhigh, tylow, tyhigh, blank = re.split(
            '[: \[ \] ,]', p[0].header['TRIMSEC'])
        txlow = int(txlow) - 1
        txhigh = int(txhigh)
        tylow = int(tylow) - 1
        tyhigh = int(tyhigh)
        bias1 = np.array(p[0].data.copy(), dtype=float)
        overscan = biweight_location(bias1[ylow:yhigh, xlow:xhigh])
        bias1 -= overscan
        bias1 = bias1[tylow:tyhigh, txlow:txhigh]
        if args.bin_bias:
            bias1 = (bias1[0::2, 0::2] + bias1[0::2, 1::2] +
                     bias1[1::2, 0::2] + bias1[1::2, 1::2])
        # Use the first flat frame to define its overscan/trim sections
        beginning, ending = flt_names[0].split(SPEC[0])
        filenameb1 = beginning + sp + ending
        p = fits.open(filenameb1)
        blank, fxlow, fxhigh, fylow, fyhigh, blank = re.split(
            '[: \[ \] ,]', p[0].header['BIASSEC'])
        fxlow = int(fxlow) - 1
        fxhigh = int(fxhigh)
        fylow = int(fylow) - 1
        fyhigh = int(fyhigh)
        blank, ftxlow, ftxhigh, ftylow, ftyhigh, blank = re.split(
            '[: \[ \] ,]', p[0].header['TRIMSEC'])
        ftxlow = int(ftxlow) - 1
        ftxhigh = int(ftxhigh)
        ftylow = int(ftylow) - 1
        ftyhigh = int(ftyhigh)
        mf1 = np.zeros((npairs,))
        mf2 = np.zeros((npairs,))
        if args.fiber_trace:
            # Calculate the fiber trace from the first flat frame
            beginning, ending = flt_names[0].split(SPEC[0])
            filenamef1 = beginning + sp + ending
            print("Calculating trace from: %s" % (filenamef1))
            p = fits.open(filenamef1)
            gainhead[sp] = p[0].header['GAIN']
            rdnoisehead[sp] = p[0].header['RDNOISE']
            flat1 = np.array(p[0].data.copy(), dtype=float)
            overscan = biweight_location(flat1[fylow:fyhigh, fxlow:fxhigh])
            flat1 -= overscan
            flat1 = flat1[ftylow:ftyhigh, ftxlow:ftxhigh]
            T = Trace(flat1, debug=args.debug)
            allfibers = T.calculate_trace(interactive=False)
            y = np.array([F.trace for F in allfibers])
            a, b = flat1.shape
            yarr, xarr = np.indices(flat1.shape)
            mask = np.zeros((a, b), dtype=bool)
            if args.debug:
                t1 = time.time()
            for i in xrange(a):
                for j in xrange(b):
                    mask[i, j] = np.any(np.abs(y[:, j] - yarr[i, j]) <
                                        args.dist_from_trace)
            if args.debug:
                t2 = time.time()
                print("Time taken creating mask: %0.2f s" % (t2 - t1))
        else:
            # Without a trace, use a central box for the statistics
            mask = np.zeros(bias1.shape, dtype=bool)
            a, b = bias1.shape
            mask[(a // 2 - 50):(a // 2 + 50),
                 (b // 2 - 50):(b // 2 + 50)] = True
        bigbias = np.zeros(bias1.shape + (nbiases,))
        avgflat = np.zeros(bias1.shape + (npairs,))
        avgdiff = np.zeros(bias1.shape + (npairs,))
        # Loop over all zero frames for this amplifier
        for i in xrange(nbiases):
            beginning, ending = zro_names[i].split(SPEC[0])
            filenameb = beginning + sp + ending
            p = fits.open(filenameb)
            bias = np.array(p[0].data.copy(), dtype=float)
            overscan = biweight_location(bias[ylow:yhigh, xlow:xhigh])
            bias -= overscan
            bias = bias[tylow:tyhigh, txlow:txhigh]
            if args.bin_bias:
                bias = (bias[0::2, 0::2] + bias[0::2, 1::2] +
                        bias[1::2, 0::2] + bias[1::2, 1::2])
            bigbias[:, :, i] = bias
        rdnoiseimage = biweight_midvariance(bigbias, axis=(2,))
        # biasimage = biweight_location(bigbias, axis=(2,))
        # p[0].data = np.array(rdnoiseimage)
        # p.writeto('../masterbias/rdnoise_image_%s.fits' % sp, clobber=True)
        # p[0].data = np.array(biasimage)
        # p.writeto('../masterbias/bias_image_%s.fits' % sp, clobber=True)
        # avgbiasimage = biweight_location(bigbias, axis=(2,))
        readnoiseavg[sp] = biweight_location(rdnoiseimage)
        for i in xrange(npairs):
            # First frame of each consecutive flat pair
            beginning, ending = flt_names[2 * i].split(SPEC[0])
            filenamef1 = beginning + sp + ending
            p = fits.open(filenamef1)
            # exptime1 = p[0].header['EXPTIME']
            # readtime1 = p[0].header['READTIME']
            flat1 = np.array(p[0].data.copy(), dtype=float)
            overscan = biweight_location(flat1[fylow:fyhigh, fxlow:fxhigh])
            flat1 -= overscan
            flat1 = flat1[ftylow:ftyhigh, ftxlow:ftxhigh]
            # Second frame of the pair
            beginning, ending = flt_names[2 * i + 1].split(SPEC[0])
            filenamef2 = beginning + sp + ending
            p = fits.open(filenamef2)
            # exptime2 = p[0].header['EXPTIME']
            # readtime2 = p[0].header['READTIME']
            flat2 = np.array(p[0].data.copy(), dtype=float)
            overscan = biweight_location(flat2[fylow:fyhigh, fxlow:fxhigh])
            flat2 -= overscan
            flat2 = flat2[ftylow:ftyhigh, ftxlow:ftxhigh]
            x, y = np.where((flat1 > flow) * (flat1 < fhigh) * (mask))
            if len(x) > 10:
                mf1[i] = biweight_location(flat1[x, y])
                mf2[i] = biweight_location(flat2[x, y])
                # mb = biweight_location(avgbiasimage[x, y])
                # df = flat1[x, y] - flat2[x, y] * mf1[i] / mf2[i]
                # sdv = biweight_midvariance(df)
                # mn = (mf1[i] + mf2[i] - 2 * mb) / 2.
                # vr = (sdv**2 - 2. * readnoiseavg[sp]**2) / 2.
                # gain[spcount, i] = mn / vr
                # read[spcount, i] = gain[spcount, i] * readnoiseavg[sp]
                avgflat[:, :, i] = (flat1 + flat2) / 2.
                avgdiff[:, :, i] = (flat1 - flat2 * mf1[i] / mf2[i])
                # print("%s | Gain: %01.3f | RDNOISE: %01.3f | F1: %5d | "
                #       "F2: %5d | Var: %05.1f | E1: %3.2f | E2: %3.2f | "
                #       "R1: %3.2f | R2: %3.2f "
                #       % (sp, gain[spcount, i], read[spcount, i], mf1[i],
                #          mf2[i], vr, exptime1, exptime2, readtime1,
                #          readtime2))
        bins = np.logspace(np.log10(flow), np.log10(fhigh), fnum)
        gn = []
        for i in xrange(len(bins) - 1):
            loc = np.where((avgflat.ravel() > bins[i]) *
                           (avgflat.ravel() < bins[i + 1]))[0]
            std = biweight_midvariance(avgdiff.ravel()[loc])
            vr = (std**2 - 2. * readnoiseavg[sp]**2) / 2.
            mn = biweight_location(avgflat.ravel()[loc])
            print("%s | Gain: %01.3f | RDNOISE: %01.3f | <ADU>: %0.1f | "
                  "Pixels: %i" % (sp, mn / vr, mn / vr * readnoiseavg[sp],
                                  mn, len(loc)))
            gn.append(mn / vr)
        gainunit[sp] = biweight_location(gn)
        readnoiseavg[sp] *= gainunit[sp]
        spcount = spcount + 1
    if args.fiber_trace:
        print('SPECID_AMP, GAIN, RDNOISE, GAIN_HEADER, RDNOISE_HEADER:')
        for sp in SPEC:
            print("%s_%s: %0.3f, %0.3f, %0.3f, %0.3f"
                  % (args.specid, sp, gainunit[sp], readnoiseavg[sp],
                     gainhead[sp], rdnoisehead[sp]))
    else:
        print('GAIN, RDNOISE:')
        for sp in SPEC:
            print("%s : %0.3f, %0.3f" % (sp, gainunit[sp], readnoiseavg[sp]))
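
# The gain and read noise printed above follow the standard photon-transfer
# relation for a flat pair: with mean bias-subtracted signal m (ADU), the
# variance of the scaled difference image var_diff, and read noise rn (ADU),
#     gain [e-/ADU] = m / ((var_diff - 2 * rn**2) / 2)
#     read noise [e-] = gain * rn
# A minimal sketch of that relation (assumed helper, not from the source):
def photon_transfer_gain(mean_signal_adu, var_diff_adu, rdnoise_adu):
    shot_variance = (var_diff_adu - 2.0 * rdnoise_adu ** 2) / 2.0
    return mean_signal_adu / shot_variance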
def meanfits(frames, amp):
    bigarray = np.array([f.fits[amp][f.trimsec[amp][2]:f.trimsec[amp][3],
                                     f.trimsec[amp][0]:f.trimsec[amp][1]]
                         for f in frames])
    return biweight_location(bigarray, axis=(0,))
def custom(args):
    lowfib = int(112 / 4. - 1.)
    midfib = int(112 / 2. - 1.)
    highfib = int(3. * 112. / 4. - 1.)
    trace_list = {"LL": [], "LU": [], "RU": [], "RL": []}
    amps = {"LL": "LL", "LU": "LL", "RU": "RU", "RL": "RU"}
    for spec in args.specid:
        spec_ind_twi = np.where(args.twi_df['Specid'] == spec)[0]
        spec_ind_sci = np.where(args.sci_df['Specid'] == spec)[0]
        for ind in spec_ind_twi:
            amp = args.twi_df['Amp'][ind]
            AMP = amps[amp]
            twi = Amplifier(args.twi_df['Files'][ind],
                            args.twi_df['Output'][ind],
                            calpath=args.twi_df['Output'][ind], debug=True,
                            dark_mult=0.0, darkpath=args.darkdir,
                            biaspath=args.biasdir,
                            virusconfig=args.configdir,
                            specname=args.specname[AMP],
                            use_pixelflat=(args.pixelflats < 1),
                            init_lims=args.wvl_dict[AMP],
                            check_fibermodel=True, check_wave=True,
                            fsize=args.fsize,
                            fibmodel_nbins=args.fibmodel_bins,
                            sigma=args.fibmodel_sig, power=args.fibmodel_pow,
                            use_trace_ref=args.use_trace_ref)
            twi.load_fibers()
            if len(twi.fibers) == 0:
                twi.get_trace()
            else:
                if not hasattr(twi.fibers[0], 'trace'):
                    twi.get_trace()
            blue = int(twi.D / 4.)
            green = int(twi.D / 2.)
            red = int(3. * twi.D / 4.)
            trace_list[amp].append(np.array(
                [twi.fibers[lowfib].trace[blue],
                 twi.fibers[lowfib].trace[green],
                 twi.fibers[lowfib].trace[red],
                 twi.fibers[midfib].trace[blue],
                 twi.fibers[midfib].trace[green],
                 twi.fibers[midfib].trace[red],
                 twi.fibers[highfib].trace[blue],
                 twi.fibers[highfib].trace[green],
                 twi.fibers[highfib].trace[red]]))
        for ind in spec_ind_sci:
            amp = args.sci_df['Amp'][ind]
            AMP = amps[amp]
            print(args.sci_df['Files'][ind])
            sci = Amplifier(args.sci_df['Files'][ind],
                            args.sci_df['Output'][ind],
                            calpath=args.twi_dir, skypath=args.sky_dir,
                            debug=False, refit=True,
                            dark_mult=args.dark_mult[AMP],
                            darkpath=args.darkdir, biaspath=args.biasdir,
                            virusconfig=args.configdir,
                            specname=args.specname[AMP],
                            use_pixelflat=(args.pixelflats < 1),
                            use_trace_ref=args.use_trace_ref,
                            calculate_shift=False)
            sci.load_fibers()
            if len(sci.fibers) == 0:
                sci.get_trace()
            else:
                if not hasattr(sci.fibers[0], 'trace'):
                    sci.get_trace()
            blue = int(sci.D / 4.)
            green = int(sci.D / 2.)
            red = int(3. * sci.D / 4.)
            trace_list[amp].append(np.array(
                [sci.fibers[lowfib].trace[blue],
                 sci.fibers[lowfib].trace[green],
                 sci.fibers[lowfib].trace[red],
                 sci.fibers[midfib].trace[blue],
                 sci.fibers[midfib].trace[green],
                 sci.fibers[midfib].trace[red],
                 sci.fibers[highfib].trace[blue],
                 sci.fibers[highfib].trace[green],
                 sci.fibers[highfib].trace[red]]))
    import matplotlib.pyplot as plt
    plt.figure(figsize=(12, 12))
    ax1 = plt.axes([0.1, 0.1, 0.35, 0.35])
    ax2 = plt.axes([0.1, 0.55, 0.35, 0.35])
    ax3 = plt.axes([0.55, 0.1, 0.35, 0.35])
    ax4 = plt.axes([0.55, 0.55, 0.35, 0.35])
    amps = ["LL", "LU", "RU", "RL"]
    ax = [ax1, ax2, ax3, ax4]
    for i, amp in enumerate(amps):
        TR = np.array(trace_list[amp])
        avg = biweight_location(TR, axis=(0,))
        print(TR - avg)
        ax[i].plot(TR - avg)
    fn = op.join(args.output, args.scidir_date[0], args.instr,
                 'trace_%s.png' % args.specid[0])
    plt.savefig(fn, dpi=150)
def meanfits(frames, amp):
    bigarray = np.array([f.fits[amp] for f in frames])
    return biweight_location(bigarray, axis=(0,))