def configmap(infilelist, confitemlist, debug=False):
    """General-purpose mapper of observing configurations

    Parameters
    ----------
    infilelist: list
        List of filenames
    confitemlist: list
        List of header keywords which define a configuration

    Returns
    -------
    obs_i: numpy 1d array
        observation number (unique object, config) for each file
    config_i: numpy 1d array
        config number (unique config) for each file
    obstab: astropy Table
        object name and config number for each observation
    configtab: astropy Table
        config items for each config
    """
    # create the observation log
    obsdict = obslog(infilelist)
    images = len(infilelist)

    # make table of unique configurations
    confdatlisti = []
    for i in range(images):
        confdatlist = []
        for item in confitemlist:
            confdatlist.append(obsdict[item][i])
        confdatlisti.append(confdatlist)

    dtypelist = map(type, confdatlist)
    configtab = Table(np.array(confdatlisti), names=confitemlist, dtype=dtypelist)
    config_i = np.array([np.where(configtab[i] == unique(configtab))
                         for i in range(images)]).flatten()
    configtab = unique(configtab)

    # make table of unique observations
    obsdatlisti = []
    for i in range(images):
        object = obsdict['OBJECT'][i].replace(' ', '')
        obsdatlisti.append([object, config_i[i]])

    obstab = Table(np.array(obsdatlisti), names=['object', 'config'], dtype=[str, int])
    obs_i = np.array([np.where(obstab[i] == unique(obstab))
                      for i in range(images)]).flatten()
    obstab = unique(obstab)

    return obs_i, config_i, obstab, configtab
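# Usage sketch (hypothetical filenames; assumes astropy.table's Table/unique and
# the pysalt obslog used above are importable).  Groups a night's files by the
# header keywords that define an RSS configuration:
#
#   files = sorted(glob.glob('mxgbpP2014012300*.fits'))
#   obs_i, config_i, obstab, configtab = configmap(
#       files, confitemlist=['GRATING', 'GR-ANGLE', 'CAMANG'])
#   # config_i[i] is the row of configtab describing files[i];
#   # obs_i[i] is the row of obstab (object + config) it belongs to.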
def configmap(infilelist):
    obs_dict = obslog(infilelist)
    infiles = len(infilelist)
    grating_i = [obs_dict['GRATING'][i].strip() for i in range(infiles)]
    grang_i = np.array(map(float, obs_dict['GR-ANGLE']))
    artic_i = np.array(map(float, obs_dict['CAMANG']))
    configdat_i = [tuple((grating_i[i], grang_i[i], artic_i[i])) for i in range(infiles)]
    confdatlist = list(set(configdat_i))        # list tuples of the unique configurations _c
    confno_i = np.array([confdatlist.index(configdat_i[i]) for i in range(infiles)], dtype=int)
    return confno_i, confdatlist
def mosred(infile_list, slitmask, propcode=None, dy=0, inter=True, guesstype='rss',
           guessfile='', rstep=100, automethod='Matchlines', preprocess=False):

    #set up the files
    infiles = ','.join(['%s' % x for x in infile_list])
    obsdate = os.path.basename(infile_list[0])[7:15]

    #set up some files that will be needed
    logfile = 'spec'+obsdate+'.log'
    dbfile = 'spec%s.db' % obsdate

    #create the observation log
    obs_dict = obslog(infile_list)

    #check the value of dy

    #apply the mask to the data sets
    for i in range(len(infile_list)):
        specslit(image=infile_list[i], outimage='', outpref='s', exttype='rsmt',
                 slitfile=slitmask, outputslitfile='', regprefix='ds_', sections=3,
                 width=25.0, sigma=2.2, thres=6.0, order=1, padding=5, yoffset=dy,
                 inter=False, clobber=True, logfile=logfile, verbose=True)

    for i in range(len(infile_list)):
        if obs_dict['OBJECT'][i].upper().strip() == 'ARC' and (obs_dict['PROPID'][i].upper().strip() == propcode or propcode is None):
            lamp = obs_dict['LAMPID'][i].strip().replace(' ', '')
            arcimage = 's'+os.path.basename(infile_list[i])
            if lamp == 'NONE':
                lamp = 'CuAr'
            lampfile = iraf.osfn("pysalt$data/linelists/%s.salt" % lamp)

            specselfid(arcimage, '', 'a', arcimage, 'middlerow', 3, clobber=True,
                       logfile=logfile, verbose=True)

            specidentify('a'+arcimage, lampfile, dbfile, guesstype=guesstype,
                         guessfile=guessfile, automethod=automethod, function='legendre',
                         order=3, rstep=rstep, rstart='middlerow', mdiff=20, thresh=3,
                         niter=5, smooth=3, inter=True, clobber=True, preprocess=True,
                         logfile=logfile, verbose=True)

            #specrectify(arcimage, outimages='', outpref='x', solfile=dbfile, caltype='line',
            #    function='legendre', order=3, inttype='interp', w1=None, w2=None, dw=None, nw=None,
            #    blank=0.0, clobber=True, logfile=logfile, verbose=True)

    objimages = ''
    spec_list = []
    for i in range(len(infile_list)):
        if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS') and \
           (obs_dict['PROPID'][i].upper().strip() == propcode or propcode is None) and \
           obs_dict['OBSMODE'][i].count('SPECTROSCOPY'):
            img = infile_list[i]
            ##rectify it
            specselfid('s'+img, '', 'a', arcimage, 'middlerow', 3, clobber=True,
                       logfile=logfile, verbose=True)
            specrectify('as'+img, outimages='', outpref='x', solfile=dbfile, caltype='line',
                        function='legendre', order=3, inttype='interp', w1=None, w2=None,
                        dw=None, nw=None, blank=0.0, clobber=True, logfile=logfile,
                        verbose=True)
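# Usage sketch (hypothetical paths): reduce a MOS night against its slitmask
# definition, letting any PROPID match by leaving propcode=None:
#
#   files = sorted(glob.glob('mbxgpP*.fits'))
#   mosred(files, slitmask='P001423N01.xml', propcode=None, dy=0,
#          guesstype='rss', rstep=100, automethod='Matchlines')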
def specpolfilter(filterlist, infilelist):

    obss = len(infilelist)
    obsdict = obslog(infilelist)

    for b in range(obss):
        hdul = pyfits.open(infilelist[b])
        dwav = float(hdul['SCI'].header['CDELT1'])
        wav0 = float(hdul['SCI'].header['CRVAL1'])
        wavs = int(hdul['SCI'].header['NAXIS1'])
        ctypelist = (hdul['SCI'].header['CTYPE3']).split(',')
        stokes_sw = hdul['SCI'].data[:, 0, :]
        var_sw = hdul['VAR'].data[:, 0, :]
        pstokess = len(ctypelist) - 1
        ok_w = (hdul['BPM'].data[:, 0, :] == 0).all(axis=0)
        wav_w = wav0 + dwav * np.arange(wavs)

        print "\n" + infilelist[b]
        print ("Filter " + pstokess * "%5s Err ") % tuple(ctypelist[1:])

        for filter in filterlist:
            if filter in ("U", "B", "V"):
                filterfile = iraf.osfn("pysalt$data/scam/filters/Johnson_" + filter + ".txt")
            elif filter in ("R", "I"):
                filterfile = iraf.osfn("pysalt$data/scam/filters/Cousins_" + filter + ".txt")
#           else:
#               (filter file in cwd)
            wav_l, feff_l = np.loadtxt(filterfile, dtype=float, unpack=True)
            feff_l[feff_l < .0001] = 0.
            feff_w = interp1d(wav_l, feff_l, kind='linear', bounds_error=False)(wav_w)
            okf_w = (ok_w & (feff_w > .0003))
            feff_w[~okf_w] = 0.
            if feff_w[okf_w].sum() == 0:
                continue
            stokesfil_s = (feff_w[okf_w] * stokes_sw[:, okf_w]).sum(axis=1) / feff_w[okf_w].sum()
            varfil_s = (feff_w[okf_w]**2 * var_sw[:, okf_w]).sum(axis=1) / (feff_w[okf_w].sum()**2)
            nstokesfil_s = 100. * stokesfil_s / stokesfil_s[0]
            nerrfil_s = 100. * np.sqrt(varfil_s[:pstokess + 1]) / stokesfil_s[0]
            print ("%4s " + pstokess * "%9.4f %7.4f ") % \
                (tuple(filter) + tuple(np.vstack((nstokesfil_s[1:], nerrfil_s[1:])).T.ravel()))
    return ()
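# Usage sketch (hypothetical stokes product): integrate a final *_stokes.fits
# spectrum over Johnson/Cousins bands and print percent polarization per filter:
#
#   specpolfilter(["V", "R", "I"], ["NGC2024_c0_stokes.fits"])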
def specred(infile_list, target, propcode, calfile=None, inter=True, automethod='Matchlines'):

    #set up the files
    infiles = ','.join(['%s' % x for x in infile_list])
    obsdate = os.path.basename(infile_list[0])[7:15]

    #set up some files that will be needed
    logfile = 'spec'+obsdate+'.log'
    dbfile = 'spec%s.db' % obsdate

    #create the observation log
    obs_dict = obslog(infile_list)

    for i in range(len(infile_list)):
        if obs_dict['OBJECT'][i].upper().strip() == 'ARC' and obs_dict['PROPID'][i].upper().strip() == propcode:
            lamp = obs_dict['LAMPID'][i].strip().replace(' ', '')
            arcimage = os.path.basename(infile_list[i])
            if lamp == 'NONE':
                lamp = 'CuAr'
            lampfile = iraf.osfn("pysalt$data/linelists/%s.salt" % lamp)

            specidentify(arcimage, lampfile, dbfile, guesstype='rss', guessfile='',
                         automethod=automethod, function='legendre', order=3, rstep=100,
                         rstart='middlerow', mdiff=20, thresh=3, niter=5, smooth=3,
                         inter=False, clobber=True, logfile=logfile, verbose=True)

            specrectify(arcimage, outimages='', outpref='x', solfile=dbfile, caltype='line',
                        function='legendre', order=3, inttype='interp', w1=None, w2=None,
                        dw=None, nw=None, blank=0.0, clobber=True, logfile=logfile,
                        verbose=True)

    objimages = ''
    spec_list = []
    for i in range(len(infile_list)):
        if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS') and obs_dict['PROPID'][i].upper().strip() == propcode:
            img = infile_list[i]
            ##rectify it
            specrectify(img, outimages='', outpref='x', solfile=dbfile, caltype='line',
                        function='legendre', order=3, inttype='interp', w1=None, w2=None,
                        dw=None, nw=None, blank=0.0, clobber=True, logfile=logfile,
                        verbose=True)

            #extract the spectra
            spec_list.append(extract_spectra('x'+img, yc=1030, calfile=calfile,
                                             findobject=True, smooth=False,
                                             maskzeros=True, clobber=True))

    #combine the results
    w, f, e = speccombine(spec_list, obsdate)
    outfile = "%s_%s.spec" % (target, obsdate)
    write_spectra(outfile, w, f, e)
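# Usage sketch (hypothetical inputs): wavelength-calibrate, rectify, extract
# and combine all exposures of one target for the night:
#
#   files = sorted(glob.glob('mbxgpP20140123*.fits'))
#   specred(files, target='EtaCar', propcode='2014-1-RSA_OTH-001',
#           calfile='EtaCar.sens')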
def specred(infile_list, propcode=None, inter=True, automethod='Matchlines'):

    #set up the files
    infiles = ','.join(['%s' % x for x in infile_list])
    obsdate = os.path.basename(infile_list[0])[7:15]

    #set up some files that will be needed
    logfile = 'spec'+obsdate+'.log'
    dbfile = 'spec%s.db' % obsdate

    #create the observation log
    obs_dict = obslog(infile_list)

    for i in range(len(infile_list)):
        if obs_dict['OBJECT'][i].upper().strip() == 'ARC' and obs_dict['PROPID'][i].upper().strip() == propcode:
            lamp = obs_dict['LAMPID'][i].strip().replace(' ', '')
            arcimage = os.path.basename(infile_list[i])
            if lamp == 'NONE':
                lamp = 'CuAr'
            lampfile = iraf.osfn("pysalt$data/linelists/%s.salt" % lamp)
            #lampfile='/Users/crawford/research/kepler/Xe.salt'

            specidentify(arcimage, lampfile, dbfile, guesstype='rss', guessfile='',
                         automethod=automethod, function='legendre', order=3, rstep=100,
                         rstart='middlerow', mdiff=20, thresh=3, niter=5, smooth=3,
                         inter=inter, clobber=True, logfile=logfile, verbose=True)

            #specrectify(arcimage, outimages='', outpref='x', solfile=dbfile, caltype='line',
            #    function='legendre', order=3, inttype='interp', w1=None, w2=None, dw=None, nw=None,
            #    blank=0.0, clobber=True, logfile=logfile, verbose=True)

    objimages = ''
    spec_list = []
    for i in range(len(infile_list)):
        if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS') and obs_dict['PROPID'][i].upper().strip() == propcode:
            img = infile_list[i]
            ##rectify it
            specrectify(img, outimages='', outpref='x', solfile=dbfile, caltype='line',
                        function='legendre', order=3, inttype='interp', w1=None, w2=None,
                        dw=None, nw=None, blank=0.0, nearest=True, clobber=True,
                        logfile=logfile, verbose=True)
def specpolrawstokes(infile_list, logfile='salt.log'):

    #set up some files that will be needed
    obsdate = os.path.basename(infile_list[0]).split('.')[0][-12:-4]
    logfile = 'specpol' + obsdate + '.log'
    patternfile = open(datadir + 'wppaterns.txt', 'r')

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict = obslog(infile_list)
        images = len(infile_list)
        hsta_i = np.array([int(float(s) / 11.25) for s in obs_dict['HWP-ANG']])
        qsta_i = np.array([int(float(s) / 11.25) for s in obs_dict['QWP-STA']])
        img_i = np.array([int(os.path.basename(s).split('.')[0][-4:]) for s in infile_list])
        wpstate_i = [['unknown', 'out', 'qbl', 'hw', 'hqw'][int(s[1])]
                     for s in obs_dict['WP-STATE']]
        # wppat_i = obs_dict['WPPATERN']
        wppat_i = ['UNKNOWN' for i in range(images)]    # until WPPATERN is put in obslog
        object_i = obs_dict['OBJECT']
        config_i = np.zeros(images, dtype='int')
        obs_i = -np.ones(images, dtype='int')

        # make table of observations
        configs = 0
        obss = 0
        for i in range(images):
            if wpstate_i[i] == 'unknown':
                log.message('Warning: Image %s WP-STATE UNKNOWN, assume it is 3 (HW)' % img_i[i],
                            with_header=False)
                wpstate_i[i] = 'hw'
            elif wpstate_i[i] == 'out':
                log.message('Image %i not in a WP pattern, will skip' % img_i[i],
                            with_header=False)
                continue
            if object_i[i].count('NONE'):
                object_i[i] = obs_dict['LAMPID'][i]
            object_i[i] = object_i[i].replace(' ', '')
            cbin, rbin = np.array(obs_dict["CCDSUM"][i].split(" ")).astype(int)
            grating = obs_dict['GRATING'][i].strip()
            grang = float(obs_dict['GR-ANGLE'][i])
            artic = float(obs_dict['CAMANG'][i])
            confdat_d = [rbin, cbin, grating, grang, artic, wppat_i[i]]
            obsdat_d = [object_i[i], rbin, cbin, grating, grang, artic, wppat_i[i]]
            if configs == 0:
                confdat_cd = [confdat_d]
                obsdat_od = [obsdat_d]

            configs = len(confdat_cd)
            config = 0
            while config < configs:
                if confdat_d == confdat_cd[config]:
                    break
                config += 1
            if config == configs:
                confdat_cd.append(confdat_d)
            config_i[i] = config

            obss = len(obsdat_od)
            obs = 0
            while obs < obss:
                if obsdat_d == obsdat_od[obs]:
                    break
                obs += 1
            if obs == obss:
                obsdat_od.append(obsdat_d)
            obs_i[i] = obs

        patternlist = patternfile.readlines()

        log.message('Raw Stokes File OBS CCDSUM GRATING GR-ANGLE CAMANG WPPATERN',
                    with_header=False)

        # Compute E-O raw stokes
        for obs in range(obss):
            idx_j = np.where(obs_i == obs)
            i0 = idx_j[0][0]
            name_n = []
            if wppat_i[i0].count('UNKNOWN'):
                if (hsta_i[idx_j] % 2).max() == 0:
                    wppat = "Linear"
                else:
                    wppat = "Linear-Hi"
                for i in idx_j[0]:
                    wppat_i[i] = wppat
            if not (((wpstate_i[i0] == 'hw') & (wppat_i[i0] in ('Linear', 'Linear-Hi'))
                     | (wpstate_i[i0] == 'hqw')
                     & (wppat_i[i0] in ('Circular', 'Circular-Hi', 'All-Stokes')))):
                print "Observation", obs, ": wpstate ", wpstate_i[i0], \
                    " and wppattern ", wppat_i[i0], "not consistent"
                continue
            for p in patternlist:
                if (p.split()[0] == wppat_i[i0]) & (p.split()[2] == 'hwp'):
                    wpat_p = np.array(p.split()[3:]).astype(int)
                if (p.split()[0] == wppat_i[i0]) & (p.split()[2] == 'qwp'):
                    # np.vstack takes a single sequence of arrays
                    wpat_dp = np.vstack((wpat_p, np.array(p.split()[3:]).astype(int)))
            stokes = 0
            j = -1

            while j < (len(idx_j[0]) - 2):
                j += 1
                i = idx_j[0][j]
                if wpstate_i[i] == 'hw':
                    if np.where(wpat_p[0::2] == hsta_i[i])[0].size > 0:
                        idxp = np.where(wpat_p == hsta_i[i])[0][0]
                        if hsta_i[i + 1] != wpat_p[idxp + 1]:
                            continue
                    else:
                        continue
                if wpstate_i[i] == 'hqw':
                    if np.where(wpat_dp[0::2] == (hsta_i[i], qsta_i[i]))[0].size > 0:
                        idxp = np.where(wpat_dp == (hsta_i[i], qsta_i[i]))[0][0]
                        if (hsta_i[i + 1], qsta_i[i + 1]) != wpat_dp[None, idxp + 1]:
                            continue
                    else:
                        continue

                if stokes == 0:
                    wavs = pyfits.getheader(infile_list[i], 'SCI', 1)['NAXIS1']
                    sci_fow = np.zeros((2, 2, wavs))
                    var_fow = np.zeros_like(sci_fow)
                    bpm_fow = np.zeros_like(sci_fow)
                for f in (0, 1):
                    hdulist = pyfits.open(infile_list[i + f])
                    sci_fow[f] = hdulist['sci'].data.reshape((2, -1))
                    var_fow[f] = hdulist['var'].data.reshape((2, -1))
                    bpm_fow[f] = hdulist['bpm'].data.reshape((2, -1))

                # compute intensity, E-O stokes spectrum, VAR, BPM.
                # fits out: unnormalized (int,stokes),(length 1) spatial,wavelength
                # wavelength marked bad if it is bad in either filter or order
                bpm_w = (bpm_fow.sum(axis=0).sum(axis=0) > 0).astype(int)
                wok = (bpm_w == 0)

                stokes_sw = np.zeros((2, wavs), dtype='float32')
                var_sw = np.zeros_like(stokes_sw)
                stokes_sw[0, wok] = 0.5 * sci_fow[:, :, wok].reshape((2, 2, -1)).sum(axis=0).sum(axis=0)
                var_sw[0, wok] = 0.25 * var_fow[:, :, wok].reshape((2, 2, -1)).sum(axis=0).sum(axis=0)
                stokes_sw[1, wok] = 0.5 * ((sci_fow[0, 1, wok] - sci_fow[1, 1, wok]) / (sci_fow[0, 1, wok] + sci_fow[1, 1, wok])
                                           - (sci_fow[0, 0, wok] - sci_fow[1, 0, wok]) / (sci_fow[0, 0, wok] + sci_fow[1, 0, wok]))
                var_sw[1, wok] = 0.5 * ((var_fow[0, 1, wok] + var_fow[1, 1, wok]) / (sci_fow[0, 1, wok] + sci_fow[1, 1, wok])**2
                                        + (var_fow[0, 0, wok] + var_fow[1, 0, wok]) / (sci_fow[0, 0, wok] + sci_fow[1, 0, wok])**2)
                stokes_sw[1] *= stokes_sw[0]
                var_sw[1] *= stokes_sw[0]**2
                bpm_sw = np.array([bpm_w, bpm_w], dtype='uint8').reshape((2, wavs))

                name = object_i[i] + '_c' + str(config_i[i]) + '_h' + str(hsta_i[i]) + str(hsta_i[i + 1])
                if wpstate_i[i] == 'hqw':
                    name += 'q' + ['m', 'p'][qsta_i[i] == 4] + ['m', 'p'][qsta_i[i + 1] == 4]
                count = " ".join(name_n).count(name)
                name += ('_%02i' % (count + 1))

                log.message('%20s %1i %1i %1i %8s %8.2f %8.2f %12s' %
                            (name, obs, rbin, cbin, grating, grang, artic, wppat_i[i]),
                            with_header=False)
                hduout = pyfits.PrimaryHDU(header=hdulist[0].header)
                hduout = pyfits.HDUList(hduout)
                hduout[0].header.update('WPPATERN', wppat_i[i])
                header = hdulist['SCI'].header.copy()
                header.update('VAREXT', 2)
                header.update('BPMEXT', 3)
                header.update('CTYPE3', 'I,S')
                hduout.append(pyfits.ImageHDU(data=stokes_sw.reshape((2, 1, wavs)),
                                              header=header, name='SCI'))
                header.update('SCIEXT', 1, 'Extension for Science Frame', before='VAREXT')
                hduout.append(pyfits.ImageHDU(data=var_sw.reshape((2, 1, wavs)),
                                              header=header, name='VAR'))
                hduout.append(pyfits.ImageHDU(data=bpm_sw.reshape((2, 1, wavs)),
                                              header=header, name='BPM'))
                hduout.writeto(name + '.fits', clobber=True, output_verify='warn')
                name_n.append(name)
                i += 1
                stokes += 1
    return
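# Runnable sketch of the E-O pair arithmetic above, on fake data.  Axes of
# sci_fow are (waveplate position in pair, O/E beam, wavelength); the raw
# stokes spectrum is half the difference of the two beams' normalized
# position differences, scaled back by the summed intensity:
#
#   import numpy as np
#   sci_fow = np.random.uniform(900., 1100., size=(2, 2, 5))
#   i_w = 0.5 * sci_fow.sum(axis=(0, 1))                          # unnormalized intensity
#   norm_ow = (sci_fow[0] - sci_fow[1]) / (sci_fow[0] + sci_fow[1])
#   s_w = 0.5 * (norm_ow[1] - norm_ow[0]) * i_w                   # unnormalized raw stokes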
def science_red(rawdir, prodir, imreduce=True, specreduce=True, bpmfile=None,
                calfile=None, lampfile='Ar', automethod='Matchlines',
                skysection=[800, 1000], cleanup=True):
    print rawdir
    print prodir

    #get the name of the files
    infile_list = glob.glob(rawdir+'P*.fits')
    infiles = ','.join(['%s' % x for x in infile_list])

    #get the current date for the files
    obsdate = os.path.basename(infile_list[0])[1:9]
    print obsdate

    #set up some files that will be needed
    logfile = 'spec'+obsdate+'.log'
    flatimage = 'FLAT%s.fits' % (obsdate)
    dbfile = 'spec%s.db' % obsdate

    #create the observation log
    obs_dict = obslog(infile_list)

    if imreduce:
        #prepare the data
        saltprepare(infiles, '', 'p', createvar=False, badpixelimage='', clobber=True,
                    logfile=logfile, verbose=True)

        #bias subtract the data
        saltbias('pP*fits', '', 'b', subover=True, trim=True, subbias=False, masterbias='',
                 median=False, function='polynomial', order=5, rej_lo=3.0, rej_hi=5.0,
                 niter=10, plotover=False, turbo=False, clobber=True, logfile=logfile,
                 verbose=True)

        add_variance('bpP*fits', bpmfile)

        #gain correct the data
        saltgain('bpP*fits', '', 'g', usedb=False, mult=True, clobber=True,
                 logfile=logfile, verbose=True)

        #cross talk correct the data
        saltxtalk('gbpP*fits', '', 'x', xtalkfile="", usedb=False, clobber=True,
                  logfile=logfile, verbose=True)

        #flat field correct the data
        flat_imgs = ''
        for i in range(len(infile_list)):
            if obs_dict['CCDTYPE'][i].count('FLAT'):
                if flat_imgs:
                    flat_imgs += ','
                flat_imgs += 'xgbp'+os.path.basename(infile_list[i])

        if 0:   # len(flat_imgs)!=0:
            saltcombine(flat_imgs, flatimage, method='median', reject=None, mask=False,
                        weight=False, blank=0, scale=None, statsec='[200:300, 600:800]',
                        lthresh=3, hthresh=3, clobber=True, logfile=logfile, verbose=True)
            saltillum(flatimage, flatimage, '', mbox=11, clobber=True, logfile=logfile,
                      verbose=True)
            saltflat('xgbpP*fits', '', 'f', flatimage, minflat=0.8, allext=False,
                     clobber=True, logfile=logfile, verbose=True)
        else:
            flats = None
            imfiles = glob.glob('xgbpP*fits')
            for f in imfiles:
                shutil.copy(f, 'f'+f)

        #cosmic ray clean the data
        #only clean the object data
        for i in range(len(infile_list)):
            if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS'):
                img = 'fxgbp'+os.path.basename(infile_list[i])
                saltcrclean(img, img, '', crtype='edge', thresh=5, mbox=11, bthresh=5.0,
                            flux_ratio=0.2, bbox=25, gain=1.0, rdnoise=5.0, fthresh=5.0,
                            bfactor=2, gbox=3, maxiter=5, multithread=True, clobber=True,
                            logfile=logfile, verbose=True)

        #mosaic the data
        geomfile = iraf.osfn("pysalt$data/rss/RSSgeom.dat")
        saltmosaic('fxgbpP*fits', '', 'm', geomfile, interp='linear', cleanup=True,
                   geotran=True, clobber=True, logfile=logfile, verbose=True)

        #clean up the images
        if cleanup:
            for f in glob.glob('p*fits'):
                os.remove(f)
            for f in glob.glob('bp*fits'):
                os.remove(f)
            for f in glob.glob('gbp*fits'):
                os.remove(f)
            for f in glob.glob('xgbp*fits'):
                os.remove(f)
            for f in glob.glob('fxgbp*fits'):
                os.remove(f)

    #set up the name of the images
    if specreduce:
        for i in range(len(infile_list)):
            if obs_dict['OBJECT'][i].upper().strip() == 'ARC':
                lamp = obs_dict['LAMPID'][i].strip().replace(' ', '')
                arcimage = 'mfxgbp'+os.path.basename(infile_list[i])
                #lampfile=iraf.osfn("pysalt$data/linelists/%s.salt" % lamp)

                specidentify(arcimage, lampfile, dbfile, guesstype='rss', guessfile='',
                             automethod=automethod, function='legendre', order=3,
                             rstep=100, rstart='middlerow', mdiff=50, thresh=2, niter=5,
                             smooth=3, inter=True, clobber=True, logfile=logfile,
                             verbose=True)

                specrectify(arcimage, outimages='', outpref='x', solfile=dbfile,
                            caltype='line', function='legendre', order=3,
                            inttype='interp', w1=None, w2=None, dw=None, nw=None,
                            blank=0.0, clobber=True, logfile=logfile, verbose=True)

    objimages = ''
    for i in range(len(infile_list)):
        if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS'):
            if objimages:
                objimages += ','
            objimages += 'mfxgbp'+os.path.basename(infile_list[i])

    if specreduce:
        #rectify the object images
        specrectify(objimages, outimages='', outpref='x', solfile=dbfile, caltype='line',
                    function='legendre', order=3, inttype='interp', w1=None, w2=None,
                    dw=None, nw=None, blank=0.0, clobber=True, logfile=logfile,
                    verbose=True)

    return
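# Usage sketch (hypothetical directories): run the full imaging+spectral
# reduction for one night of raw RSS frames:
#
#   science_red('./raw/', './product/', imreduce=True, specreduce=True,
#               bpmfile='bpm.fits', lampfile='Ar', skysection=[800, 1000])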
def specpolwavmap(infilelist, linelistlib="", inter=True, automethod='Matchlines', logfile='salt.log'): obsdate = os.path.basename(infilelist[0])[7:15] with logging(logfile, debug) as log: # create the observation log obs_dict = obslog(infilelist) log.message('Pysalt Version: ' + pysalt.verno, with_header=False) if len(linelistlib) == 0: linelistlib = datadir + "linelistlib.txt" with open(linelistlib) as fd: linelistdict = dict(line.strip().split(None, 1) for line in fd) # eliminate inapplicable images for i in range(len(infilelist)): if int(obs_dict['BS-STATE'][i][1]) != 2: del infilelist[i] obs_dict = obslog(infilelist) # Map out which arc goes with which image. Use arc in closest wavcal block of the config. # wavcal block: neither spectrograph config nor track changes, and no gap in data files infiles = len(infilelist) newtrk = 5. # new track when rotator changes by more (deg) trkrho_i = np.array(map(float, obs_dict['TRKRHO'])) trkno_i = np.zeros((infiles), dtype=int) trkno_i[1:] = ((np.abs(trkrho_i[1:] - trkrho_i[:-1])) > newtrk).cumsum() confno_i, confdatlist = configmap(infilelist) configs = len(confdatlist) imageno_i = np.array([int(os.path.basename(infilelist[i]).split('.')[0][-4:]) \ for i in range(infiles)]) filegrp_i = np.zeros((infiles), dtype=int) filegrp_i[1:] = ((imageno_i[1:] - imageno_i[:-1]) > 1).cumsum() isarc_i = np.array([(obs_dict['OBJECT'][i].upper().strip() == 'ARC') for i in range(infiles)]) wavblk_i = np.zeros((infiles), dtype=int) wavblk_i[1:] = ((filegrp_i[1:] != filegrp_i[:-1]) \ | (trkno_i[1:] != trkno_i[:-1]) \ | (confno_i[1:] != confno_i[:-1])).cumsum() wavblks = wavblk_i.max() + 1 arcs_c = (isarc_i[:, None] & (confno_i[:, None] == range(configs))).sum(axis=0) np.savetxt("wavblktbl.txt",np.vstack((trkrho_i,imageno_i,filegrp_i,trkno_i, \ confno_i,wavblk_i,isarc_i)).T,fmt="%7.2f "+6*"%3i ",header=" rho img grp trk conf wblk arc") for c in range(configs): # worst: no arc for config, remove images if arcs_c[c] == 0: lostimages = imageno_i[confno_i == c] log.message('No Arc for this configuration: ' \ +("Grating %s Grang %6.2f Artic %6.2f" % confdatlist[c]) \ +("\n Images: "+lostimages.shape[0]*"%i " % tuple(lostimages)), with_header=False) wavblk_i[confno_i == c] = -1 if arcs_c.sum() == 0: log.message("Cannot calibrate any images", with_header=False) exit() iarc_i = -np.zeros((infiles), dtype=int) for w in range(wavblks): blkimages = imageno_i[wavblk_i == w] if blkimages.shape[0] == 0: continue iarc_I = np.where((wavblk_i == w) & (isarc_i))[0] if iarc_I.shape[0] > 0: iarc = iarc_I[0] # best: arc is in wavblk, take first else: conf = confno_i[wavblk_i == w][ 0] # fallback: take closest arc of this config iarc_I = np.where((confno_i == conf) & (isarc_i))[0] blkimagepos = blkimages.mean() iarc = iarc_I[np.argmin(imageno_i[iarc_I] - blkimagepos)] iarc_i[wavblk_i == w] = iarc log.message(("For images: "+blkimages.shape[0]*"%i " % tuple(blkimages)) \ +("\n Use Arc %5i" % imageno_i[iarc]), with_header=False) iarc_a = np.unique(iarc_i[iarc_i != -1]) arcs = iarc_a.shape[0] lam_m = np.loadtxt(datadir + "wollaston.txt", dtype=float, usecols=(0, )) rpix_om = np.loadtxt(datadir + "wollaston.txt", dtype=float, unpack=True, usecols=(1, 2)) for a in range(arcs): iarc = iarc_a[a] conf = confno_i[iarc] # use arc to make first-guess wavecal from model, locate beamsplitter split point cbin, rbin = np.array( obs_dict["CCDSUM"][iarc].split(" ")).astype(int) grating, grang, artic = confdatlist[confno_i[iarc]] hduarc = pyfits.open(infilelist[iarc]) arc_rc = hduarc['SCI'].data rows, 
cols = arc_rc.shape lam_c = rssmodelwave(grating, grang, artic, cbin, cols) arc_r = arc_rc.sum(axis=1) expectrow_o = ((2052 + interp1d(lam_m,rpix_om,kind='cubic') \ (lam_c[cols/2-cols/16:cols/2+cols/16])).mean(axis=1)/rbin).astype(int) foundrow_o = np.zeros((2), dtype=int) for o in (0, 1): foundrow_o[o] = expectrow_o[o]-100/rbin \ + np.argmax(arc_r[expectrow_o[o]-100/rbin:expectrow_o[o]+100/rbin]) arcsignal = arc_r[foundrow_o[o]] topedge = foundrow_o[o] + np.argmax( arc_r[foundrow_o[o]:] < 0.75 * arcsignal) botedge = foundrow_o[o] - np.argmax( arc_r[foundrow_o[o]::-1] < 0.75 * arcsignal) foundrow_o[o] = (botedge + topedge) / 2. splitrow = foundrow_o.mean() offset = int(splitrow - rows / 2) # split arc into o/e images padbins = (np.indices((rows, cols))[0] < offset) | (np.indices( (rows, cols))[0] > rows + offset) arc_rc = np.roll(arc_rc, -offset, axis=0) arc_rc[padbins] = 0. arc_orc = arc_rc.reshape((2, rows / 2, cols)) # for O,E arc straighten spectrum, identify for each, form (unstraightened) wavelength map lamp = obs_dict['LAMPID'][iarc].strip().replace(' ', '') if lamp == 'NONE': lamp = 'CuAr' hduarc[0].header.update('MASKTYP', 'LONGSLIT') hduarc[0].header.update('MASKID', 'PL0100N001') del hduarc['VAR'] del hduarc['BPM'] # lampfile=iraf.osfn("pysalt$data/linelists/%s.wav" % lamp) zsalt version: not good lampfile = iraf.osfn("pysalt$data/linelists/" + linelistdict[lamp]) rpix_oc = interp1d(lam_m, rpix_om, kind='cubic')(lam_c) log.message('\nARC: image '+str(imageno_i[iarc])+' GRATING '+grating\ +' GRANG '+("%8.3f" % grang)+' ARTIC '+("%8.3f" % artic)+' LAMP '+lamp, with_header=False) log.message(' Split Row: ' + ("%4i " % splitrow), with_header=False) wavmap_orc = np.zeros_like(arc_orc) for o in (0, 1): foundrow_o[o] += -offset - o * rows / 2 arc_Rc = np.zeros((rows / 2, cols), dtype='float32') for c in range(cols): shift(arc_orc[o,:,c], \ -(rpix_oc[o,c]-rpix_oc[o,cols/2])/rbin,arc_Rc[:,c]) hduarc['SCI'].data = arc_Rc arcimage = "arc_" + str( imageno_i[iarc]) + "_" + str(o) + ".fits" dbfilename = "arcdb_" + str( imageno_i[iarc]) + "_" + str(o) + ".txt" order = 3 if os.path.isfile(dbfilename): # guessfile = "guess_"+str(o)+".txt" # os.rename(dbfilename,guessfile) guesstype = 'file' else: hduarc.writeto(arcimage, clobber=True) guesstype = 'rss' guessfile = '' Rstart = foundrow_o[o] specidentify(arcimage, lampfile, dbfilename, guesstype=guesstype, guessfile=guessfile, automethod=automethod, function='legendre', order=order, rstep=20, rstart=Rstart, mdiff=20, thresh=3, niter=5, smooth=3, inter=inter, clobber=True, logfile=logfile, verbose=True) wavlegR_y = np.loadtxt(dbfilename, dtype=float, usecols=(0, ), ndmin=1) wavlegcof_ly = np.loadtxt(dbfilename, unpack=True, dtype=float, usecols=range(1, order + 2), ndmin=2) if wavlegR_y.min() < 0: wavlegcof_ly = np.delete(wavlegcof_ly, np.where(wavlegR_y < 0)[0], axis=1) wavlegR_y = np.delete(wavlegR_y, np.where(wavlegR_y < 0)[0], axis=0) wavmap_yc = np.polynomial.legendre.legval( np.arange(cols), wavlegcof_ly) mediancof_l = np.median(wavlegcof_ly, axis=1) rms_l = np.sqrt( np.median((wavlegcof_ly - mediancof_l[:, None])**2, axis=1)) sigma_ly = (wavlegcof_ly - mediancof_l[:, None]) / rms_l[:, None] usey = (sigma_ly[0] < 3) & (sigma_ly[1] < 3) wavmap_Rc = np.zeros((rows / 2, cols), dtype='float32') R_y = wavlegR_y[usey] if usey.shape[0] < 5: # for future: if few lines in db, use model shifted to agree, for now just use first wavmap_orc[o] = wavmap_yc[0] else: if usey.shape[0] > 9: aa = np.vstack( (R_y**3, R_y**2, R_y, np.ones_like(R_y))).T else: aa = 
np.vstack((R_y**2, R_y, np.ones_like(R_y))).T for c in range(cols): polycofs = la.lstsq(aa, wavmap_yc[usey, c])[0] wavmap_orc[o,:,c] \ = np.polyval(polycofs,range(rows/2)+(rpix_oc[o,c]-rpix_oc[o,cols/2])/rbin) # os.remove(arcimage) # for images using this arc,save split data along third fits axis, # add wavmap extension, save as 'w' file hduwav = pyfits.ImageHDU(data=wavmap_orc, header=hduarc['SCI'].header, name='WAV') for i in np.where(iarc_i == iarc_a[a])[0]: hdu = pyfits.open(infilelist[i]) image_rc = np.roll(hdu['SCI'].data, -offset, axis=0) image_rc[padbins] = 0. hdu['SCI'].data = image_rc.reshape((2, rows / 2, cols)) var_rc = np.roll(hdu['VAR'].data, -offset, axis=0) var_rc[padbins] = 0. hdu['VAR'].data = var_rc.reshape((2, rows / 2, cols)) bpm_rc = np.roll(hdu['BPM'].data, -offset, axis=0) bpm_rc[padbins] = 1 hdu['BPM'].data = bpm_rc.reshape((2, rows / 2, cols)) hdu.append(hduwav) for f in ('SCI', 'VAR', 'BPM', 'WAV'): hdu[f].header.update('CTYPE3', 'O,E') hdu.writeto('w' + infilelist[i], clobber='True') log.message('Output file ' + 'w' + infilelist[i], with_header=False) return
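# Runnable sketch of the Legendre evaluation idiom used above (made-up
# coefficients; one column of legendre coefficients per identified arc row):
#
#   import numpy as np
#   cols = 6
#   wavlegcof_ly = np.array([[5000., 5010.],    # l=0 coefficient for 2 rows
#                            [900.,  905.]])    # l=1 coefficient for 2 rows
#   wavmap_yc = np.polynomial.legendre.legval(np.arange(cols), wavlegcof_ly)
#   # wavmap_yc has shape (2, 6): wavelength vs column for each arc row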
def specred(rawdir, prodir, imreduce=True, specreduce=True, calfile=None, lamp='Ar',
            automethod='Matchlines', skysection=[800, 1000], cleanup=True):
    print rawdir
    print prodir

    #get the name of the files
    infile_list = glob.glob(rawdir+'*.fits')
    infiles = ','.join(['%s' % x for x in infile_list])

    #get the current date for the files
    obsdate = os.path.basename(infile_list[0])[1:9]
    print obsdate

    #set up some files that will be needed
    logfile = 'spec'+obsdate+'.log'
    flatimage = 'FLAT%s.fits' % (obsdate)
    dbfile = 'spec%s.db' % obsdate

    #create the observation log
    obs_dict = obslog(infile_list)

    if imreduce:
        #prepare the data
        saltprepare(infiles, '', 'p', createvar=False, badpixelimage='', clobber=True,
                    logfile=logfile, verbose=True)

        #bias subtract the data
        saltbias('pP*fits', '', 'b', subover=True, trim=True, subbias=False, masterbias='',
                 median=False, function='polynomial', order=5, rej_lo=3.0, rej_hi=5.0,
                 niter=10, plotover=False, turbo=False, clobber=True, logfile=logfile,
                 verbose=True)

        #gain correct the data
        saltgain('bpP*fits', '', 'g', usedb=False, mult=True, clobber=True,
                 logfile=logfile, verbose=True)

        #cross talk correct the data
        saltxtalk('gbpP*fits', '', 'x', xtalkfile="", usedb=False, clobber=True,
                  logfile=logfile, verbose=True)

        #cosmic ray clean the data
        #only clean the object data
        for i in range(len(infile_list)):
            if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS'):
                img = 'xgbp'+os.path.basename(infile_list[i])
                saltcrclean(img, img, '', crtype='edge', thresh=5, mbox=11, bthresh=5.0,
                            flux_ratio=0.2, bbox=25, gain=1.0, rdnoise=5.0, fthresh=5.0,
                            bfactor=2, gbox=3, maxiter=5, multithread=True, clobber=True,
                            logfile=logfile, verbose=True)

        #flat field correct the data
        flat_imgs = ''
        for i in range(len(infile_list)):
            if obs_dict['CCDTYPE'][i].count('FLAT'):
                if flat_imgs:
                    flat_imgs += ','
                flat_imgs += 'xgbp'+os.path.basename(infile_list[i])

        if len(flat_imgs) != 0:
            saltcombine(flat_imgs, flatimage, method='median', reject=None, mask=False,
                        weight=True, blank=0, scale='average',
                        statsec='[200:300, 600:800]', lthresh=3, hthresh=3, clobber=True,
                        logfile=logfile, verbose=True)
            saltillum(flatimage, flatimage, '', mbox=11, clobber=True, logfile=logfile,
                      verbose=True)
            saltflat('xgbpP*fits', '', 'f', flatimage, minflat=500, clobber=True,
                     logfile=logfile, verbose=True)
        else:
            flats = None
            imfiles = glob.glob('xgbpP*fits')   # match the crclean output prefix above
            for f in imfiles:
                shutil.copy(f, 'f'+f)

        #mosaic the data
        geomfile = iraf.osfn("pysalt$data/rss/RSSgeom.dat")
        saltmosaic('fxgbpP*fits', '', 'm', geomfile, interp='linear', cleanup=True,
                   geotran=True, clobber=True, logfile=logfile, verbose=True)

        #clean up the images
        if cleanup:
            for f in glob.glob('p*fits'):
                os.remove(f)
            for f in glob.glob('bp*fits'):
                os.remove(f)
            for f in glob.glob('gbp*fits'):
                os.remove(f)
            for f in glob.glob('xgbp*fits'):
                os.remove(f)
            for f in glob.glob('fxgbp*fits'):
                os.remove(f)

    #set up the name of the images
    if specreduce:
        for i in range(len(infile_list)):
            if obs_dict['OBJECT'][i].upper().strip() == 'ARC':
                lamp = obs_dict['LAMPID'][i].strip().replace(' ', '')
                arcimage = 'mfxgbp'+os.path.basename(infile_list[i])
                lampfile = iraf.osfn("pysalt$data/linelists/%s.txt" % lamp)

                specidentify(arcimage, lampfile, dbfile, guesstype='rss', guessfile='',
                             automethod=automethod, function='legendre', order=5,
                             rstep=100, rstart='middlerow', mdiff=10, thresh=3, niter=5,
                             inter=True, clobber=True, logfile=logfile, verbose=True)

                specrectify(arcimage, outimages='', outpref='x', solfile=dbfile,
                            caltype='line', function='legendre', order=3,
                            inttype='interp', w1=None, w2=None, dw=None, nw=None,
                            blank=0.0, clobber=True, logfile=logfile, verbose=True)

    objimages = ''
    for i in range(len(infile_list)):
        if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS'):
            if objimages:
                objimages += ','
            objimages += 'mfxgbp'+os.path.basename(infile_list[i])

    if specreduce:
        #rectify the object images
        specrectify(objimages, outimages='', outpref='x', solfile=dbfile, caltype='line',
                    function='legendre', order=3, inttype='interp', w1=None, w2=None,
                    dw=None, nw=None, blank=0.0, clobber=True, logfile=logfile,
                    verbose=True)

        #create the spectra text files for all of our objects
        spec_list = []
        for img in objimages.split(','):
            spec_list.extend(createspectra('x'+img, obsdate, smooth=False,
                                           skysection=skysection, clobber=True))
        print spec_list

        #determine the spectrophotometric standard
        extfile = iraf.osfn('pysalt$data/site/suth_extinct.dat')

        for spec, am, et, pc in spec_list:
            if pc == 'CAL_SPST':
                stdstar = spec.split('.')[0]
                print stdstar, am, et
                stdfile = iraf.osfn('pysalt$data/standards/spectroscopic/m%s.dat'
                                    % stdstar.lower().replace('-', '_'))
                print stdfile
                ofile = spec.replace('txt', 'sens')
                calfile = ofile     # assumes only one observations of a SP standard
                specsens(spec, ofile, stdfile, extfile, airmass=am, exptime=et,
                         stdzp=3.68e-20, function='polynomial', order=3, thresh=3,
                         niter=5, clobber=True, logfile='salt.log', verbose=True)

        for spec, am, et, pc in spec_list:
            if pc != 'CAL_SPST':
                ofile = spec.replace('txt', 'spec')
                speccal(spec, ofile, calfile, extfile, airmass=am, exptime=et,
                        clobber=True, logfile='salt.log', verbose=True)

                #clean up the spectra for bad pixels
                cleanspectra(ofile)
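# Usage sketch (hypothetical directories): image-reduce and then flux-calibrate
# a longslit night, cleaning intermediate products as it goes:
#
#   specred('./raw/', './product/', imreduce=True, specreduce=True,
#           lamp='Ar', skysection=[800, 1000], cleanup=True)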
def saltclean(images, outpath, obslogfile=None, gaindb=None, xtalkfile=None,
              geomfile=None, subover=True, trim=True, masbias=None, subbias=False,
              median=False, function='polynomial', order=5, rej_lo=3, rej_hi=3,
              niter=5, interp='linear', clobber=False, logfile='salt.log', verbose=True):
    """SALTCLEAN will provide basic CCD reductions for a set of data.  It will
       sort the data, and first process the biases, flats, and then the science
       frames.  It will record basic quality control information about each of
       the steps.
    """
    plotover = False

    #start logging
    with logging(logfile, debug) as log:

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outpath = saltio.abspath(outpath)

        #does the gain database file exist
        if gaindb:
            dblist = saltio.readgaindb(gaindb)
        else:
            dblist = []

        # does crosstalk coefficient data exist
        if xtalkfile:
            xtalkfile = xtalkfile.strip()
            xdict = saltio.readxtalkcoeff(xtalkfile)
        else:
            xdict = None

        #does the mosaic file exist--raise error if no
        saltio.fileexists(geomfile)

        # Delete the obslog file if it already exists
        if os.path.isfile(obslogfile) and clobber:
            saltio.delete(obslogfile)

        #read in the observation log or create it
        if os.path.isfile(obslogfile):
            msg = 'The observing log already exists.  Please either delete it or run saltclean with clobber=yes'
            raise SaltError(msg)
        else:
            headerDict = obslog(infiles, log)
            obsstruct = createobslogfits(headerDict)
            saltio.writefits(obsstruct, obslogfile)

        #create the list of bias frames and process them
        filename = obsstruct.data.field('FILENAME')
        detmode = obsstruct.data.field('DETMODE')
        ccdtype = obsstruct.data.field('CCDTYPE')

        #set the bias list of objects
        biaslist = filename[ccdtype == 'ZERO']
        masterbias_dict = {}
        for img in infiles:
            if os.path.basename(img) in biaslist:
                #open the image
                struct = pyfits.open(img)
                bimg = outpath+'bxgp'+os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing Zero frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct, createvar=False, badpixelstruct=None, mult=True,
                               dblist=dblist, xdict=xdict, subover=subover, trim=trim,
                               subbias=False, bstruct=None, median=median,
                               function=function, order=order, rej_lo=rej_lo,
                               rej_hi=rej_hi, niter=niter, plotover=plotover, log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False,
                                      exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, bimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master bias list
                masterbias_dict = compareimages(struct, bimg, masterbias_dict,
                                                keylist=biasheader_list)

        #create the master bias frame
        for i in masterbias_dict.keys():
            bkeys = masterbias_dict[i][0]
            blist = masterbias_dict[i][1:]
            mbiasname = outpath+createmasterbiasname(blist, bkeys)
            bfiles = ','.join(blist)
            saltcombine(bfiles, mbiasname, method='median', reject='sigclip', mask=False,
                        weight=False, blank=0, scale=None, statsec=None, lthresh=3,
                        hthresh=3, clobber=False, logfile=logfile, verbose=verbose)

        #create the list of flatfields and process them
        flatlist = filename[ccdtype == 'FLAT']
        masterflat_dict = {}
        for img in infiles:
            if os.path.basename(img) in flatlist:
                #open the image
                struct = pyfits.open(img)
                fimg = outpath+'bxgp'+os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing Flat frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct, createvar=False, badpixelstruct=None, mult=True,
                               dblist=dblist, xdict=xdict, subover=subover, trim=trim,
                               subbias=False, bstruct=None, median=median,
                               function=function, order=order, rej_lo=rej_lo,
                               rej_hi=rej_hi, niter=niter, plotover=plotover, log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False,
                                      exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, fimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master flat list
                masterflat_dict = compareimages(struct, fimg, masterflat_dict,
                                                keylist=flatheader_list)

        #create the master flat frame
        for i in masterflat_dict.keys():
            fkeys = masterflat_dict[i][0]
            flist = masterflat_dict[i][1:]
            mflatname = outpath+createmasterflatname(flist, fkeys)
            ffiles = ','.join(flist)
            saltcombine(ffiles, mflatname, method='median', reject='sigclip', mask=False,
                        weight=False, blank=0, scale=None, statsec=None, lthresh=3,
                        hthresh=3, clobber=False, logfile=logfile, verbose=verbose)

        #process the science data
        for img in infiles:
            nimg = os.path.basename(img)
            #science frames are those that are neither flats nor biases
            if nimg not in flatlist and nimg not in biaslist:
                #open the image
                struct = pyfits.open(img)
                simg = outpath+'bxgp'+os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing science frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct, createvar=False, badpixelstruct=None, mult=True,
                               dblist=dblist, xdict=xdict, subover=subover, trim=trim,
                               subbias=False, bstruct=None, median=median,
                               function=function, order=order, rej_lo=rej_lo,
                               rej_hi=rej_hi, niter=niter, plotover=plotover, log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False,
                                      exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, simg, clobber=clobber)
                saltio.closefits(struct)

                #mosaic the files--currently not in the proper format--will update when it is
                if not saltkey.fastmode(saltkey.get('DETMODE', struct[0])):
                    mimg = outpath+'mbxgp'+os.path.basename(img)
                    saltmosaic(images=simg, outimages=mimg, outpref='', geomfile=geomfile,
                               interp=interp, cleanup=True, clobber=clobber,
                               logfile=logfile, verbose=verbose)

                    #remove the intermediate steps
                    saltio.delete(simg)
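# Usage sketch (hypothetical file names for the calibration inputs): run the
# quick-look clean on a night of raw frames, writing an observing log and
# mosaicked products to outpath:
#
#   files = ','.join(glob.glob('raw/P*.fits'))
#   saltclean(files, outpath='product/', obslogfile='obslog.fits',
#             gaindb='RSSamps.dat', xtalkfile='RSSxtalk.dat',
#             geomfile='RSSgeom.dat', clobber=True)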
def specpolrawstokes(infile_list, logfile='salt.log'):
    """Produces an unnormalized stokes measurement in intensity from
       a pair of WP filter positions

    Parameters
    ----------
    infile_list: list
        List of filenames that include an extracted spectrum

    logfile: str
        Name of file for logging

    Notes
    -----
    The input file is a FITS file containing a 1D extracted spectrum with an e and o
    level; it includes the intensity, variance, and bad pixels as extracted from the
    2D spectrum.

    For each pair of stokes measurements, it produces an output FITS file now with
    two columns that are the intensity and difference for the pair measured as a
    function of wavelength, and also includes the variance and bad pixel maps.  The
    output file is named as the target
    name_configuration number_wave plate positions_number of repeats.fits
    """
    # set up some files that will be needed
    obsdate = os.path.basename(infile_list[0]).split('.')[0][-12:-4]
    patternfile = open(datadir + 'wppaterns.txt', 'r')

    with logging(logfile, debug) as log:
        # create the observation log
        obs_dict = obslog(infile_list)
        images = len(infile_list)
        hsta_i = np.array([int(float(s) / 11.25) for s in obs_dict['HWP-ANG']])
        qsta_i = np.array([int(float(s) / 11.25) for s in obs_dict['QWP-STA']])
        img_i = np.array([int(os.path.basename(s).split('.')[0][-4:]) for s in infile_list])
        wpstate_i = [['unknown', 'out', 'qbl', 'hw', 'hqw'][int(s[1])]
                     for s in obs_dict['WP-STATE']]
        # wppat_i = obs_dict['WPPATERN']
        # until WPPATERN is put in obslog
        wppat_i = ['UNKNOWN' for i in range(images)]
        object_i = obs_dict['OBJECT']
        config_i = np.zeros(images, dtype='int')
        obs_i = -np.ones(images, dtype='int')

        # make table of observations
        # TODO: This should be moved to its own module
        configs = 0
        obss = 0
        for i in range(images):
            if wpstate_i[i] == 'unknown':
                log.message('Warning: Image %s WP-STATE UNKNOWN, assume it is 3 (HW)' % img_i[i],
                            with_header=False)
                wpstate_i[i] = 'hw'
            elif wpstate_i[i] == 'out':
                log.message('Image %i not in a WP pattern, will skip' % img_i[i],
                            with_header=False)
                continue
            if object_i[i].count('NONE'):
                object_i[i] = obs_dict['LAMPID'][i]
            object_i[i] = object_i[i].replace(' ', '')
            cbin, rbin = np.array(obs_dict["CCDSUM"][i].split(" ")).astype(int)
            grating = obs_dict['GRATING'][i].strip()
            grang = float(obs_dict['GR-ANGLE'][i])
            artic = float(obs_dict['CAMANG'][i])
            confdat_d = [rbin, cbin, grating, grang, artic, wppat_i[i]]
            obsdat_d = [object_i[i], rbin, cbin, grating, grang, artic, wppat_i[i]]
            if configs == 0:
                confdat_cd = [confdat_d]
                obsdat_od = [obsdat_d]

            configs = len(confdat_cd)
            config = 0
            while config < configs:
                if confdat_d == confdat_cd[config]:
                    break
                config += 1
            if config == configs:
                confdat_cd.append(confdat_d)
            config_i[i] = config

            obss = len(obsdat_od)
            obs = 0
            while obs < obss:
                if obsdat_d == obsdat_od[obs]:
                    break
                obs += 1
            if obs == obss:
                obsdat_od.append(obsdat_d)
            obs_i[i] = obs

        patternlist = patternfile.readlines()

        log.message('Raw Stokes File OBS CCDSUM GRATING GR-ANGLE CAMANG WPPATERN',
                    with_header=False)

        # Compute E-O raw stokes
        for obs in range(obss):
            idx_j = np.where(obs_i == obs)
            i0 = idx_j[0][0]
            name_n = []
            if wppat_i[i0].count('UNKNOWN'):
                if (hsta_i[idx_j] % 2).max() == 0:
                    wppat = "Linear"
                else:
                    wppat = "Linear-Hi"
                for i in idx_j[0]:
                    wppat_i[i] = wppat
            if not (((wpstate_i[i0] == 'hw') & (wppat_i[i0] in ('Linear', 'Linear-Hi'))
                     | (wpstate_i[i0] == 'hqw')
                     & (wppat_i[i0] in ('Circular', 'Circular-Hi', 'All-Stokes')))):
                print "Observation", obs, ": wpstate ", wpstate_i[i0], \
                    " and wppattern ", wppat_i[i0], "not consistent"
                continue
            for p in patternlist:
                if (p.split()[0] == wppat_i[i0]) & (p.split()[2] == 'hwp'):
                    wpat_p = np.array(p.split()[3:]).astype(int)
                if (p.split()[0] == wppat_i[i0]) & (p.split()[2] == 'qwp'):
                    # np.vstack takes a single sequence of arrays
                    wpat_dp = np.vstack((wpat_p, np.array(p.split()[3:]).astype(int)))
            stokes = 0
            j = -1

            while j < (len(idx_j[0]) - 2):
                j += 1
                i = idx_j[0][j]
                if wpstate_i[i] == 'hw':
                    if np.where(wpat_p[0::2] == hsta_i[i])[0].size > 0:
                        idxp = np.where(wpat_p == hsta_i[i])[0][0]
                        if hsta_i[i + 1] != wpat_p[idxp + 1]:
                            continue
                    else:
                        continue
                if wpstate_i[i] == 'hqw':
                    if np.where(wpat_dp[0::2] == (hsta_i[i], qsta_i[i]))[0].size > 0:
                        idxp = np.where(wpat_dp == (hsta_i[i], qsta_i[i]))[0][0]
                        if (hsta_i[i + 1], qsta_i[i + 1]) != wpat_dp[None, idxp + 1]:
                            continue
                    else:
                        continue

                first_pair_file = infile_list[i]
                second_pair_file = infile_list[i + 1]

                name = object_i[i] + '_c' + str(config_i[i]) + '_h' + str(hsta_i[i]) + str(hsta_i[i + 1])
                if wpstate_i[i] == 'hqw':
                    name += 'q' + ['m', 'p'][qsta_i[i] == 4] + ['m', 'p'][qsta_i[i + 1] == 4]

                count = " ".join(name_n).count(name)
                name += ('_%02i' % (count + 1))

                create_raw_stokes_file(first_pair_file, second_pair_file,
                                       output_file=name + '.fits', wppat=wppat_i[i])
                log.message('%20s %1i %1i %1i %8s %8.2f %8.2f %12s' %
                            (name, obs, rbin, cbin, grating, grang, artic, wppat_i[i]),
                            with_header=False)
                name_n.append(name)
                i += 1
                stokes += 1
    return
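# Usage sketch (hypothetical prefix): pair up a night's wavelength-calibrated
# e/o spectra into raw stokes files, named object_cN_hXY_NN.fits as described
# in the docstring above:
#
#   specpolrawstokes(sorted(glob.glob('ewmxgbp*.fits')), logfile='salt.log')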
def flexure_rss(fitslist, option=""): global sex_js, rd, B, pixarcsec, gridsize, niter global rho_f, rc0_dgg, usespots_gg, fwhminterp_g # for cube analysis if option == "filesave": prefix = raw_input("file prefix: ") pixel = 15. # pixel size in microns pix_scale = 0.125 sexparams = ["X_IMAGE","Y_IMAGE","FLUX_ISO","FLUX_MAX","FLAGS","CLASS_STAR", \ "X2WIN_IMAGE","Y2WIN_IMAGE","XYWIN_IMAGE","ERRX2WIN_IMAGE"] np.savetxt("qred_thrufoc.param", sexparams, fmt="%s") fmaxcol, flagcol, xvarcol, yvarcol, xerrcol = ( 3, 4, 6, 7, 9) # column nos (from 0) of data in sextractor imagestooclosefactor = 3.0 # too close if factor*sep < sqrt(var) gaptooclose = 1.25 # arcsec edgetooclose = 1.25 # arcsec rattolerance = 0.25 toofaint = 250. # FMAX counts galaxydelta = 0.4 # arcsec MOSimagelimit = 1. # arcsec deblend = .005 # default flexposns = len(fitslist) obsdict = obslog(fitslist) image_f = [fitslist[fpos].split(".")[0][-12:] for fpos in range(flexposns)] dateobs = int(image_f[0][:8]) if dateobs > 20110928: rho_f = np.array(obsdict["TRKRHO"]).astype(float) else: rho_f = np.array(obsdict["TELRHO"]).astype(float) catpos = np.argmin(np.abs(rho_f)) cbin, rbin = np.array(obsdict["CCDSUM"][catpos].split(" ")).astype(int) maskid = obsdict["MASKID"][catpos].strip() filter = obsdict["FILTER"][catpos].strip() grating = obsdict["GRATING"][catpos].strip() rows, cols = pyfits.getdata(fitslist[catpos]).shape isspec = (obsdict["GR-STATE"][catpos][1] == "4") print str(datetime.now()), "\n" print "Mask: ", maskid print "Filter: ", filter print "Grating: ", grating # make catalog of stars using image closest to rho=0 (capital S) sex_js = sextract(fitslist[catpos], deblend=deblend) fluxisomedian = np.median(np.sort( sex_js[2])[-10:]) # median of 10 brightest ok_s = sex_js[2] > fluxisomedian / 100. # get rid of bogus stars sexcols = sex_js.shape[0] Stars = ok_s.sum() sexdata_jfS = np.zeros((sexcols, flexposns, Stars)) sexdata_jfS[:, catpos] = sex_js[:, ok_s] xcenter = 0.5 * (sexdata_jfS[0, catpos].min() + sexdata_jfS[0, catpos].max()) ycenter = 0.5 * (sexdata_jfS[1, catpos].min() + sexdata_jfS[1, catpos].max()) print "\n fits rho stars rshift cshift rslope cslope rmserr " print " deg arcsec arcsec arcmin arcmin bins" print ("%12s %5.1f %5i "+5*"%7.2f ") % \ (image_f[catpos], rho_f[catpos], Stars, 0., 0., 0., 0., 0.) 
if option == "filesave": np.savetxt(prefix+"Stars.txt",sexdata_jfS[:,catpos].T, \ fmt=2*"%9.2f "+"%9.0f "+"%9.1f "+"%4i "+"%6.2f "+3*"%7.2f "+"%11.3e") # find stars in flexure series, in order of increasing abs(rho), and store sextractor output row_fd = np.zeros((flexposns, 2)) col_fd = np.zeros((flexposns, 2)) for dirn in (1, -1): refpos = catpos posdirlist = np.argsort(dirn * rho_f) poslist = posdirlist[dirn * rho_f[posdirlist] > rho_f[refpos]] for fpos in poslist: col_S, row_S = sexdata_jfS[0:2, refpos, :] sex_js = sextract(fitslist[fpos], "sexwt.fits", deblend=deblend) bintol = 16 / cbin # 2 arcsec tolerance for finding star binsqerr_sS = (sex_js[1, :, None] - row_S[None, :])**2 + ( sex_js[0, :, None] - col_S[None, :])**2 S_s = np.argmin(binsqerr_sS, axis=1) # First compute image shift by averaging small errors rowerr_s = sex_js[1] - row_S[S_s] colerr_s = sex_js[0] - col_S[S_s] hist_r, bin_r = np.histogram(rowerr_s, bins=32, range=(-2 * bintol, 2 * bintol)) drow = rowerr_s[(rowerr_s > bin_r[np.argmax(hist_r)]-bintol) & \ (rowerr_s < bin_r[np.argmax(hist_r)]+bintol)].mean() hist_c, bin_c = np.histogram(colerr_s, bins=32, range=(-2 * bintol, 2 * bintol)) dcol = colerr_s[(colerr_s > bin_r[np.argmax(hist_r)]-bintol) & \ (colerr_s < bin_r[np.argmax(hist_r)]+bintol)].mean() # Now refind the closest ID binsqerr_sS = (sex_js[1,:,None] - row_S[None,:] -drow)**2 + \ (sex_js[0,:,None] - col_S[None,:] -dcol)**2 binsqerr_s = binsqerr_sS.min(axis=1) isfound_s = binsqerr_s < bintol**2 S_s = np.argmin(binsqerr_sS, axis=1) isfound_s &= (binsqerr_s == binsqerr_sS[:, S_s].min(axis=0)) isfound_S = np.array([S in S_s[isfound_s] for S in range(Stars)]) sexdata_jfS[:, fpos, S_s[isfound_s]] = sex_js[:, isfound_s] drow_S = sexdata_jfS[1, fpos] - sexdata_jfS[1, catpos] dcol_S = sexdata_jfS[0, fpos] - sexdata_jfS[0, catpos] row_fd[fpos],rowchi,d,d,d = np.polyfit(sexdata_jfS[0,catpos,isfound_S]-xcenter, \ drow_S[isfound_S],deg=1,full=True) col_fd[fpos],colchi,d,d,d = np.polyfit(sexdata_jfS[1,catpos,isfound_S]-ycenter, \ dcol_S[isfound_S],deg=1,full=True) rms = np.sqrt((rowchi + colchi) / (2 * isfound_S.sum())) print ("%12s %5.0f %5i "+5*"%7.2f ") % (image_f[fpos], rho_f[fpos], isfound_S.sum(), \ row_fd[fpos,1]*rbin*pix_scale, col_fd[fpos,1]*cbin*pix_scale, \ 60.*np.degrees(row_fd[fpos,0]),-60.*np.degrees(col_fd[fpos,0]), rms) if option == "filesave": np.savetxt(prefix+"flex_"+str(fpos)+".txt",np.vstack((isfound_S,drow_S,dcol_S)).T, \ fmt = "%2i %8.3f %8.3f") np.savetxt(prefix + "sextr_" + str(fpos) + ".txt", sexdata_jfS[:, fpos].T) # make plots fig, plot_s = plt.subplots(2, 1, sharex=True) plt.xlabel('Rho (deg)') plt.xlim(-120, 120) plt.xticks(range(-120, 120, 30)) fig.set_size_inches((8.5, 11)) fig.subplots_adjust(left=0.175) plot_s[0].set_title(str(dateobs) + " Imaging Flexure") plot_s[0].set_ylabel('Mean Position (arcsec)') plot_s[0].set_ylim(-0.5, 2.) plot_s[1].set_ylabel('Rotation (arcmin ccw)') plot_s[1].set_ylim(-6., 6.) plot_s[0].plot(rho_f, row_fd[:, 1] * rbin * pix_scale, marker='D', label='row') plot_s[0].plot(rho_f, col_fd[:, 1] * rbin * pix_scale, marker='D', label='col') plot_s[1].plot(rho_f, 60. * np.degrees(row_fd[:, 0]), marker='D', label='row') plot_s[1].plot(rho_f, -60. 
* np.degrees(col_fd[:, 0]), marker='D', label='col') plot_s[0].legend(fontsize='medium', loc='upper center') plotfile = str(dateobs) + '_imflex.pdf' plt.savefig(plotfile, orientation='portrait') if os.name == 'posix': if os.popen('ps -C evince -f').read().count(plotfile) == 0: os.system('evince ' + plotfile + ' &') os.remove("out.txt") os.remove("qred_thrufoc.param") os.remove("sexwt.fits") return
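# Usage sketch (hypothetical image list): measure imaging flexure through a
# rho rotation series and save per-position star tables:
#
#   flexure_rss(sorted(glob.glob('mbxgpP*.fits')), option='filesave')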
def imred(rawdir, prodir, cleanup=True): print rawdir print prodir #get the name of the files infile_list = glob.glob(rawdir + '*.fits') infiles = ','.join(['%s' % x for x in infile_list]) #get the current date for the files obsdate = os.path.basename(infile_list[0])[1:9] print obsdate #set up some files that will be needed logfile = 'imred' + obsdate + '.log' flatimage = 'FLAT%s.fits' % (obsdate) dbfile = 'spec%s.db' % obsdate #create the observation log obs_dict = obslog(infile_list) #prepare the data saltprepare(infiles, '', 'p', createvar=False, badpixelimage='', clobber=True, logfile=logfile, verbose=True) #bias subtract the data saltbias('pP*fits', '', 'b', subover=True, trim=True, subbias=False, masterbias='', median=False, function='polynomial', order=5, rej_lo=3.0, rej_hi=5.0, niter=10, plotover=False, turbo=False, clobber=True, logfile=logfile, verbose=True) #gain correct the data saltgain('bpP*fits', '', 'g', usedb=False, mult=True, clobber=True, logfile=logfile, verbose=True) #cross talk correct the data saltxtalk('gbpP*fits', '', 'x', xtalkfile="", usedb=False, clobber=True, logfile=logfile, verbose=True) #cosmic ray clean the data #only clean the object data for i in range(len(infile_list)): if obs_dict['CCDTYPE'][i].count( 'OBJECT') and obs_dict['INSTRUME'][i].count('RSS'): img = 'xgbp' + os.path.basename(infile_list[i]) saltcrclean(img, img, '', crtype='edge', thresh=5, mbox=11, bthresh=5.0, flux_ratio=0.2, bbox=25, gain=1.0, rdnoise=5.0, fthresh=5.0, bfactor=2, gbox=3, maxiter=5, multithread=True, clobber=True, logfile=logfile, verbose=True) #flat field correct the data flat_imgs = '' for i in range(len(infile_list)): if obs_dict['CCDTYPE'][i].count('FLAT'): if flat_imgs: flat_imgs += ',' flat_imgs += 'xgbp' + os.path.basename(infile_list[i]) if len(flat_imgs) != 0: saltcombine(flat_imgs,flatimage, method='median', reject=None, mask=False, \ weight=True, blank=0, scale='average', statsec='[200:300, 600:800]', lthresh=3, \ hthresh=3, clobber=True, logfile=logfile, verbose=True) #saltillum(flatimage, flatimage, '', mbox=11, clobber=True, logfile=logfile, verbose=True) saltflat('xgbpP*fits', '', 'f', flatimage, minflat=500, clobber=True, logfile=logfile, verbose=True) else: flats = None imfiles = glob.glob('xgbpP*fits') for f in imfiles: shutil.copy(f, 'f' + f) #mosaic the data geomfile = iraf.osfn("pysalt$data/rss/RSSgeom.dat") saltmosaic('fxgbpP*fits', '', 'm', geomfile, interp='linear', cleanup=True, geotran=True, clobber=True, logfile=logfile, verbose=True) #clean up the images if cleanup: for f in glob.glob('p*fits'): os.remove(f) for f in glob.glob('bp*fits'): os.remove(f) for f in glob.glob('gbp*fits'): os.remove(f) for f in glob.glob('xgbp*fits'): os.remove(f) for f in glob.glob('fxgbp*fits'): os.remove(f)
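# --------------------------------------------------------------------------
# Added illustration: each reduction step above prepends one letter to the
# file name (saltprepare 'p', saltbias 'b', saltgain 'g', saltxtalk 'x',
# saltflat 'f', saltmosaic 'm'), so a product's history can be read off its
# name.  Toy sketch with a hypothetical raw frame:
def _prefix_chain_demo():
    name = 'P201401010001.fits'              # hypothetical raw frame
    for prefix in ('p', 'b', 'g', 'x', 'f', 'm'):
        name = prefix + name
    print(name)                              # 'mfxgbpP201401010001.fits'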
def specpolfinalstokes(infilelist,logfile='salt.log',debug=False, \ HW_Cal_override=False,Linear_PolZeropoint_override=False,PAZeropoint_override=False): """Combine the raw stokes and apply the polarimetric calibrations Parameters ---------- infilelist: list List of filenames that include an extracted spectrum logfile: str Name of file for logging """ """ _l: line in calibration file _i: index in file list _j: rawstokes = waveplate position pair index (enumeration within config, including repeats) _J: cycle number idx (0,1,..) for each rawstokes _k: combstokes = waveplate position pair index (enumeration within config, repeats combined) _K: pair = waveplate position pair index (enumeration within obs) _p: pair = waveplate position pair # (eg 0,1,2,3 = 0 4 1 5 2 6 3 7 for LINEAR-HI, sorted in h0 order) _s: normalized linear stokes for zeropoint correction (0,1) = (q,u) _S: unnormalized raw stokes within waveplate position pair: (eg 0,1 = I,Q) _F: unnormalized final stokes (eg 0,1,2 = I,Q,U) """ calhistorylist = ["PolCal Model: 20170429",] patternlist = open(datadir+'wppaterns.txt','r').readlines() patternpairs = dict(); patternstokes = dict(); patterndict = dict() for p in patternlist: if p.split()[0] == '#': continue patterndict[p.split()[0]]=np.array(p.split()[3:]).astype(int).reshape((-1,2)) patternpairs[p.split()[0]]=(len(p.split())-3)/2 patternstokes[p.split()[0]]=int(p.split()[1]) if len(glob.glob('specpol*.log')): logfile=glob.glob('specpol*.log')[0] with logging(logfile, debug) as log: log.message('specpolfinalstokes version: 20171226', with_header=False) # organize data using names. # allrawlist = infileidx,object,config,wvplt,cycle for each infile. obsdict=obslog(infilelist) files = len(infilelist) allrawlist = [] for i in range(files): object,config,wvplt,cycle = os.path.basename(infilelist[i]).rsplit('.',1)[0].rsplit('_',3) if (config[0]!='c')|(wvplt[0]!='h')|(not cycle.isdigit()): log.message('File '+infilelist[i]+' is not a raw stokes file.' 
, with_header=False) continue allrawlist.append([i,object,config,wvplt,cycle]) configlist = sorted(list(set(ele[2] for ele in allrawlist))) # unique configs # input correct HWCal and TelZeropoint calibration files dateobs = obsdict['DATE-OBS'][0].replace('-','') HWCalibrationfile = datedfile(datadir+"RSSpol_HW_Calibration_yyyymmdd_vnn.txt",dateobs) hwav_l,heff_l,hpa_l = np.loadtxt(HWCalibrationfile,dtype=float,unpack=True,usecols=(0,1,2),ndmin=2) TelZeropointfile = datedfile(datadir+"RSSpol_Linear_TelZeropoint_yyyymmdd_vnn.txt",dateobs) twav_l,tq0_l,tu0_l,err_l = np.loadtxt(TelZeropointfile,dtype=float,unpack=True,ndmin=2) # input PAZeropoint file and get correct entry dpadatever,dpa = datedline(datadir+"RSSpol_Linear_PAZeropoint.txt",dateobs).split() dpa = float(dpa) # prepare calibration keyword documentation pacaltype = "Equatorial" if HW_Cal_override: Linear_PolZeropoint_override=True PAZeropoint_override=True pacaltype = "Instrumental" calhistorylist.append("HWCal: Uncalibrated") elif Linear_PolZeropoint_override: PAZeropoint_override=True calhistorylist.extend(["HWCal: "+os.path.basename(HWCalibrationfile),"PolZeropoint: Null"]) elif PAZeropoint_override: calhistorylist.extend(["HWCal: "+os.path.basename(HWCalibrationfile), \ "PolZeropoint: "+os.path.basename(TelZeropointfile), "PAZeropoint: Null"]) else: calhistorylist.extend(["HWCal: "+os.path.basename(HWCalibrationfile), \ "PolZeropoint: "+os.path.basename(TelZeropointfile), \ "PAZeropoint: RSSpol_Linear_PAZeropoint.txt "+str(dpadatever)+" "+str(dpa)]) log.message(' PA type: '+pacaltype, with_header=False) if len(calhistorylist): log.message(' '+'\n '.join(calhistorylist), with_header=False) chifence_d = 2.2*np.array([6.43,4.08,3.31,2.91,2.65,2.49,2.35,2.25]) # *q3 for upper outer fence outlier for each dof # do one config at a time. # rawlist = infileidx,object,config,wvplt,cycle for each infile *in this config*. # rawlist is sorted with cycle varying fastest # rawstokes = len(rawlist). j is idx in rawlist. for conf in configlist: log.message("\nConfiguration: %s" % conf, with_header=False) rawlist = [entry for entry in allrawlist if entry[2]==conf] for col in (4,3,1,2): rawlist = sorted(rawlist,key=operator.itemgetter(col)) rawstokes = len(rawlist) # rawlist is sorted with cycle varying fastest wav0 = pyfits.getheader(infilelist[rawlist[0][0]],'SCI')['CRVAL1'] dwav = pyfits.getheader(infilelist[rawlist[0][0]],'SCI')['CDELT1'] wavs = pyfits.getheader(infilelist[rawlist[0][0]],'SCI')['NAXIS1'] wav_w = wav0 + dwav*np.arange(wavs) # interpolate HW, telZeropoint calibration wavelength dependence for this config okcal_w = np.ones(wavs).astype(bool) if not HW_Cal_override: heff_w = interp1d(hwav_l,heff_l,kind='cubic',bounds_error=False)(wav_w) hpar_w = -interp1d(hwav_l,hpa_l,kind='cubic',bounds_error=False)(wav_w) okcal_w &= ~np.isnan(heff_w) hpar_w[~okcal_w] = 0. if not Linear_PolZeropoint_override: tel0_sw = interp1d(twav_l,np.array([tq0_l,tu0_l]),kind='cubic',bounds_error=False)(wav_w) okcal_w &= ~np.isnan(tel0_sw[0]) tel0_sw /= 100. 
# table is in % # get spectrograph calibration file, spectrograph coordinates grating = pyfits.getheader(infilelist[rawlist[0][0]])['GRATING'] grang = pyfits.getheader(infilelist[rawlist[0][0]])['GR-ANGLE'] artic = pyfits.getheader(infilelist[rawlist[0][0]])['AR-ANGLE'] SpecZeropointfile = datedfile(datadir+ "RSSpol_Linear_SpecZeropoint_"+grating+"_yyyymmdd_vnn.txt",dateobs) if len(SpecZeropointfile): calhistorylist.append(SpecZeropointfile) # get all rawstokes data # comblist = last rawlistidx,object,config,wvplt,cycles,wppat # one entry for each set of cycles that needs to be combined (i.e, one for each wvplt) stokes_jSw = np.zeros((rawstokes,2,wavs)) var_jSw = np.zeros_like(stokes_jSw) covar_jSw = np.zeros_like(stokes_jSw) bpm_jSw = np.zeros_like(stokes_jSw).astype(int) comblist = [] for j in range(rawstokes): i,object,config,wvplt,cycle = rawlist[j] if j==0: cycles = 1 lampid = pyfits.getheader(infilelist[i],0)['LAMPID'].strip().upper() telpa = float(pyfits.getheader(infilelist[i],0)['TELPA']) if lampid != "NONE": pacaltype ="Instrumental" if pacaltype == "Equatorial": eqpar_w = hpar_w + dpa + (telpa % 180) # if object,config,wvplt changes, start a new comblist entry else: if rawlist[j-1][1:4] != rawlist[j][1:4]: cycles = 1 else: cycles += 1 wppat = pyfits.getheader(infilelist[i])['WPPATERN'].upper() stokes_jSw[j] = pyfits.open(infilelist[i])['SCI'].data.reshape((2,-1)) var_jSw[j] = pyfits.open(infilelist[i])['VAR'].data.reshape((2,-1)) covar_jSw[j] = pyfits.open(infilelist[i])['COV'].data.reshape((2,-1)) bpm_jSw[j] = pyfits.open(infilelist[i])['BPM'].data.reshape((2,-1)) # apply telescope zeropoint calibration, q rotated to raw coordinates if not Linear_PolZeropoint_override: trkrho = pyfits.getheader(infilelist[i])['TRKRHO'] dpatelraw_w = -(22.5*float(wvplt[1]) + hpar_w + trkrho + dpa) rawtel0_sw = \ specpolrotate(tel0_sw,0,0,dpatelraw_w,normalized=True)[0] rawtel0_sw[:,okcal_w] *= heff_w[okcal_w] stokes_jSw[j,1,okcal_w] -= stokes_jSw[j,0,okcal_w]*rawtel0_sw[0,okcal_w] if cycles==1: comblist.append((j,object,config,wvplt,1,wppat)) else: comblist[-1] = (j,object,config,wvplt,cycles,wppat) # combine multiple cycles as necessary. Absolute stokes is on a per cycle basis. 
# polarimetric combination on normalized stokes basis # to avoid coupling mean syserr into polarimetric spectral features combstokess = len(comblist) stokes_kSw = np.zeros((combstokess,2,wavs)) var_kSw = np.zeros_like(stokes_kSw) covar_kSw = np.zeros_like(stokes_kSw) cycles_kw = np.zeros((combstokess,wavs)).astype(int) chi2cycle_kw = np.zeros((combstokess,wavs)) badcyclechi_kw = np.zeros((combstokess,wavs),dtype=bool) havecyclechi_k = np.zeros(combstokess,dtype=bool) # obslist = first comblist idx,object,config,wppat,pairs # k = idx in comblist obslist = [] jlistk = [] # list of rawstokes idx for each comblist entry Jlistk = [] # list of cycle number for each comblist entry obsobject = '' obsconfig = '' chi2cycle_j = np.zeros(rawstokes) syserrcycle_j = np.zeros(rawstokes) iscull_jw = np.zeros((rawstokes,wavs),dtype=bool) stokes_kSw = np.zeros((combstokess,2,wavs)) var_kSw = np.zeros_like(stokes_kSw) nstokes_kw = np.zeros((combstokess,wavs)) nvar_kw = np.zeros_like(nstokes_kw) ncovar_kw = np.zeros_like(nstokes_kw) chi2cyclenet_k = np.zeros(combstokess) syserrcyclenet_k = np.zeros(combstokess) for k in range(combstokess): j,object,config,wvplt,cycles,wppat = comblist[k] jlistk.append(range(j-cycles+1,j+1)) Jlistk.append([int(rawlist[jj][4])-1 for jj in range(j-cycles+1,j+1)]) # J = cycle-1, counting from 0 nstokes_Jw = np.zeros((cycles,wavs)) nvar_Jw = np.zeros((cycles,wavs)) ncovar_Jw = np.zeros((cycles,wavs)) bpm_Jw = np.zeros((cycles,wavs)) ok_Jw = np.zeros((cycles,wavs),dtype=bool) for J,j in enumerate(jlistk[k]): bpm_Jw[J] = bpm_jSw[j,0] ok_Jw[J] = (bpm_Jw[J] ==0) nstokes_Jw[J][ok_Jw[J]] = stokes_jSw[j,1][ok_Jw[J]]/stokes_jSw[j,0][ok_Jw[J]] nvar_Jw[J][ok_Jw[J]] = var_jSw[j,1][ok_Jw[J]]/(stokes_jSw[j,0][ok_Jw[J]])**2 ncovar_Jw[J][ok_Jw[J]] = covar_jSw[j,1][ok_Jw[J]]/(stokes_jSw[j,0][ok_Jw[J]])**2 # Culling: for multiple cycles, compare each cycle with every other cycle (dof=1). 
# bad wavelengths flagged for P < .02% (1/2000): chisq > 13.8 (chi2.isf(q=.0002,df=1)) # for cycles>2, vote to cull specific pair/wavelength, otherwise cull wavelength cycles_kw[k] = (1-bpm_Jw).sum(axis=0).astype(int) okchi_w = (cycles_kw[k] > 1) chi2lim = 13.8 havecyclechi_k[k] = okchi_w.any() if cycles > 1: ok_Jw[J] = okchi_w & (bpm_Jw[J] ==0) chi2cycle_JJw = np.zeros((cycles,cycles,wavs)) badcyclechi_JJw = np.zeros((cycles,cycles,wavs)) ok_JJw = ok_Jw[:,None,:] & ok_Jw[None,:,:] nstokes_JJw = nstokes_Jw[:,None] - nstokes_Jw[None,:] nvar_JJw = nvar_Jw[:,None] + nvar_Jw[None,:] chi2cycle_JJw[ok_JJw] = nstokes_JJw[ok_JJw]**2/nvar_JJw[ok_JJw] triuidx = np.triu_indices(cycles,1) # _i enumeration of cycle differences chi2cycle_iw = chi2cycle_JJw[triuidx] badcyclechi_w = (chi2cycle_iw > chi2lim).any(axis=(0)) badcyclechiall_w = (badcyclechi_w & (ok_JJw[triuidx].reshape((-1,wavs)).sum(axis=0)<3)) badcyclechicull_w = (badcyclechi_w & np.logical_not(badcyclechiall_w)) wavcull_W = np.where(badcyclechicull_w)[0] # cycles>2, cull by voting if wavcull_W.shape[0]: for W,w in enumerate(wavcull_W): J_I = np.array(triuidx).T[np.argsort(chi2cycle_iw[:,w])].flatten() _,idx = np.unique(J_I,return_index=True) Jcull = J_I[np.sort(idx)][-1] jcull = jlistk[k][Jcull] iscull_jw[jcull,w] = True # for reporting bpm_jSw[jcull,:,w] = 1 else: for j in jlistk[k]: iscull_jw[j] = badcyclechiall_w # for reporting bpm_jSw[j][:,badcyclechiall_w] = 1 for J,j in enumerate(jlistk[k]): bpm_Jw[J] = bpm_jSw[j,0] if debug: obsname = object+"_"+config ok_Jw = okchi_w[None,:] & (bpm_Jw ==0) np.savetxt(obsname+"_nstokes_Jw_"+str(k)+".txt",np.vstack((wav_w,ok_Jw.astype(int), \ nstokes_Jw,nvar_Jw)).T, fmt="%8.2f "+cycles*"%3i "+cycles*"%10.6f "+cycles*"%10.12f ") np.savetxt(obsname+"_chi2cycle_iw_"+str(k)+".txt",np.vstack((wav_w,okchi_w.astype(int), \ chi2cycle_iw.reshape((-1,wavs)),badcyclechi_w,ok_JJw[triuidx].reshape((-1,wavs)).sum(axis=0))).T, \ fmt="%8.2f %3i "+chi2cycle_iw.shape[0]*"%10.7f "+" %2i %2i") np.savetxt(obsname+"_Jcull_kw_"+str(k)+".txt",np.vstack((wav_w,okchi_w.astype(int), \ iscull_jw[jlistk[k]].astype(int).reshape((-1,wavs)))).T, fmt="%8.2f %3i "+cycles*" %3i") if ((object != obsobject) | (config != obsconfig)): obslist.append([k,object,config,wppat,1]) obsobject = object; obsconfig = config else: obslist[-1][4] +=1 # Now combine cycles, using normalized stokes to minimize systematic errors # first normalize cycle members J at wavelengths where all cycles have data: cycles_kw[k] = (1-bpm_Jw).sum(axis=0).astype(int) ok_w = (cycles_kw[k] > 0) okall_w = (cycles_kw[k] == cycles) normint_J = np.array(stokes_jSw[jlistk[k],0][:,okall_w].sum(axis=1)) normint_J /= np.mean(normint_J) stokes_JSw = stokes_jSw[jlistk[k]]/normint_J[:,None,None] var_JSw = var_jSw[jlistk[k]]/normint_J[:,None,None]**2 covar_JSw = covar_jSw[jlistk[k]]/normint_J[:,None,None]**2 for J in range(cycles): okJ_w = ok_w & (bpm_Jw[J] ==0) # average the intensity stokes_kSw[k,0,okJ_w] += stokes_JSw[J,0,okJ_w]/cycles_kw[k][okJ_w] var_kSw[k,0,okJ_w] += var_JSw[J,0,okJ_w]/cycles_kw[k][okJ_w]**2 covar_kSw[k,0,okJ_w] += covar_JSw[J,0,okJ_w]/cycles_kw[k][okJ_w]**2 # now the normalized stokes nstokes_kw[k][okJ_w] += (stokes_JSw[J,1][okJ_w]/stokes_JSw[J,0][okJ_w])/cycles_kw[k][okJ_w] nvar_kw[k][okJ_w] += (var_JSw[J,1][okJ_w]/stokes_JSw[J,0][okJ_w]**2)/cycles_kw[k][okJ_w]**2 ncovar_kw[k][okJ_w] += (covar_JSw[J,1][okJ_w]/stokes_JSw[J,0][okJ_w]**2)/cycles_kw[k][okJ_w]**2 stokes_kSw[k,1] = nstokes_kw[k]*stokes_kSw[k,0] var_kSw[k,1] = nvar_kw[k]*stokes_kSw[k,0]**2 
covar_kSw[k,1] = ncovar_kw[k]*stokes_kSw[k,0]**2 if debug: obsname = object+"_"+config np.savetxt(obsname+"_stokes_kSw_"+str(k)+".txt",np.vstack((wav_w,ok_w.astype(int), \ stokes_kSw[k])).T, fmt="%8.2f %3i "+2*"%12.3f ") # compute mean chisq for each pair having multiple cycles if cycles > 1: nstokeserr_Jw = np.zeros((cycles,wavs)) nerr_Jw = np.zeros((cycles,wavs)) for J in range(cycles): okJ_w = ok_w & (bpm_Jw[J] ==0) nstokes_Jw[J][okJ_w] = stokes_JSw[J,1][okJ_w]/stokes_JSw[J,0][okJ_w] nvar_Jw[J][okJ_w] = var_JSw[J,1][okJ_w]/(stokes_JSw[J,0][okJ_w])**2 nstokeserr_Jw[J] = (nstokes_Jw[J] - nstokes_kw[k]) nvar_w = nvar_Jw[J] - nvar_kw[k] okall_w &= (nvar_w > 0.) nerr_Jw[J,okall_w] = np.sqrt(nvar_w[okall_w]) nstokessyserr_J = np.average(nstokeserr_Jw[:,okall_w],weights=1./nerr_Jw[:,okall_w],axis=1) nstokeserr_Jw -= nstokessyserr_J[:,None] for J,j in enumerate(jlistk[k]): loc,scale = norm.fit(nstokeserr_Jw[J,okall_w]/nerr_Jw[J,okall_w]) chi2cycle_j[j] = scale**2 syserrcycle_j[j] = nstokessyserr_J[J] chi2cyclenet_k[k] = chi2cycle_j[jlistk[k]].mean() syserrcyclenet_k[k] = np.sqrt((syserrcycle_j[jlistk[k]]**2).sum())/len(jlistk[k]) if debug: obsname = object+"_"+config chisqanalysis(obsname,nstokeserr_Jw,nerr_Jw,okall_w) # for each obs combine raw stokes, apply efficiency and PA calibration as appropriate for pattern, and save obss = len(obslist) for obs in range(obss): k0,object,config,wppat,pairs = obslist[obs] patpairs = patternpairs[wppat] klist = range(k0,k0+pairs) # entries in comblist for this obs obsname = object+"_"+config wplist = [comblist[k][3][1:] for k in klist] patwplist = sorted((patpairs*"%1s%1s " % tuple(patterndict[wppat].flatten())).split()) plist = [patwplist.index(wplist[P]) for P in range(pairs)] k_p = np.zeros(patpairs,dtype=int) k_p[plist] = klist # idx in klist for each pair idx cycles_p = np.zeros_like(k_p) cycles_p[plist] = np.array([comblist[k][4] for k in klist]) # number of cycles in comb cycles_pw = np.zeros((patpairs,wavs),dtype=int) cycles_pw[plist] = cycles_kw[klist] # of ok cycles for each wavelength havecyclechi_p = np.zeros(patpairs,dtype=bool) havecyclechi_p[plist] = havecyclechi_k[klist] havelinhichi_p = np.zeros(patpairs,dtype=bool) # name result to document hw cycles included kplist = list(k_p) if cycles_p.max()==cycles_p.min(): kplist = [klist[0],] for p in range(len(kplist)): obsname += "_" j0 = comblist[k_p[p]][0] - cycles_p[p] + 1 for j in range(j0,j0+cycles_p[p]): obsname+=rawlist[j][4][-1] log.message("\n Observation: %s Date: %s" % (obsname,dateobs), with_header=False) finstokes = patternstokes[wppat] if pairs != patpairs: if (pairs<2): log.message((' Only %1i pair, skipping observation' % pairs), with_header=False) continue elif ((max(plist) < 2) | (min(plist) > 1)): log.message(' Pattern not usable, skipping observation', with_header=False) continue stokes_Fw = np.zeros((finstokes,wavs)) var_Fw = np.zeros_like(stokes_Fw) covar_Fw = np.zeros_like(stokes_Fw) # normalize pairs in obs at wavelengths _W where all pair/cycles have data: okall_w = okcal_w & (cycles_pw[plist] == cycles_p[plist,None]).all(axis=0) normint_K = stokes_kSw[klist,0][:,okall_w].sum(axis=1) normint_K /= np.mean(normint_K) stokes_kSw[klist] /= normint_K[:,None,None] var_kSw[klist] /= normint_K[:,None,None]**2 covar_kSw[klist] /= normint_K[:,None,None]**2 # first, the intensity stokes_Fw[0] = stokes_kSw[klist,0].sum(axis=0)/pairs var_Fw[0] = var_kSw[klist,0].sum(axis=0)/pairs**2 covar_Fw[0] = covar_kSw[klist,0].sum(axis=0)/pairs**2 # now, the polarization stokes if 
wppat.count('LINEAR'): var_Fw = np.vstack((var_Fw,np.zeros(wavs))) # add QU covariance if (wppat=='LINEAR'): # wavelengths with both pairs having good, calibratable data in at least one cycle ok_w = okcal_w & (cycles_pw[plist] > 0).all(axis=0) bpm_Fw = np.repeat((np.logical_not(ok_w))[None,:],finstokes,axis=0) stokes_Fw[1:,ok_w] = stokes_kSw[klist,1][:,ok_w]*(stokes_Fw[0,ok_w]/stokes_kSw[klist,0][:,ok_w]) var_Fw[1:3,ok_w] = var_kSw[klist,1][:,ok_w]*(stokes_Fw[0,ok_w]/stokes_kSw[klist,0][:,ok_w])**2 covar_Fw[1:,ok_w] = covar_kSw[klist,1][:,ok_w]*(stokes_Fw[0,ok_w]/stokes_kSw[klist,0][:,ok_w])**2 if debug: np.savetxt(obsname+"_stokes.txt",np.vstack((wav_w,ok_w.astype(int),stokes_Fw)).T, \ fmt="%8.2f "+"%2i "+3*" %10.6f") np.savetxt(obsname+"_var.txt",np.vstack((wav_w,ok_w.astype(int),var_Fw)).T, \ fmt="%8.2f "+"%2i "+4*"%14.9f ") np.savetxt(obsname+"_covar.txt",np.vstack((wav_w,ok_w.astype(int),covar_Fw)).T, \ fmt="%8.2f "+"%2i "+3*"%14.9f ") elif wppat=='LINEAR-HI': # for Linear-Hi, must go to normalized stokes in order for the pair combination to cancel systematic errors # each pair p at each wavelength w is linear combination of pairs, including primary p and secondary sec_p # linhi chisq is from comparison of primary and secondary # evaluate wavelengths with at least both pairs 0,2 or 1,3 having good, calibratable data in at least one cycle: ok_pw = okcal_w[None,:] & (cycles_pw > 0) ok_w = (ok_pw[0] & ok_pw[2]) | (ok_pw[1] & ok_pw[3]) bpm_Fw = np.repeat((np.logical_not(ok_w))[None,:],finstokes,axis=0) stokespri_pw = np.zeros((patpairs,wavs)) varpri_pw = np.zeros_like(stokespri_pw) covarpri_pw = np.zeros_like(stokespri_pw) stokespri_pw[plist] = nstokes_kw[klist] varpri_pw[plist] = nvar_kw[klist] covarpri_pw[plist] = ncovar_kw[klist] haveraw_pw = (cycles_pw > 0) pricof_ppw = np.identity(patpairs)[:,:,None]*haveraw_pw[None,:,:] qq = 1./np.sqrt(2.) 
seccofb_pp = np.array([[ 0,1, 0,-1],[1, 0,1, 0],[ 0,1, 0,1],[-1, 0,1, 0]])*qq # both secs avail seccof1_pp = np.array([[qq,1,-qq, 0],[1,qq,0, qq],[-qq,1,qq,0],[-1, qq,0,qq]])*qq # only 1st sec seccof2_pp = np.array([[qq,0, qq,-1],[0,qq,1,-qq],[ qq,0,qq,1],[ 0,-qq,1,qq]])*qq # only 2nd sec seclist_p = np.array([[1,3],[0,2],[1,3],[0,2]]) havesecb_pw = haveraw_pw[seclist_p].all(axis=1) onlysec1_pw = (np.logical_not(havesecb_pw) & haveraw_pw[seclist_p][:,0] & havesecb_pw[seclist_p][:,1]) onlysec2_pw = (np.logical_not(havesecb_pw) & haveraw_pw[seclist_p][:,1] & havesecb_pw[seclist_p][:,0]) seccof_ppw = seccofb_pp[:,:,None]*havesecb_pw[:,None,:] + \ seccof1_pp[:,:,None]*onlysec1_pw[:,None,:] + \ seccof2_pp[:,:,None]*onlysec2_pw[:,None,:] stokessec_pw = (seccof_ppw*stokespri_pw[:,None,:]).sum(axis=0) varsec_pw = (seccof_ppw**2*varpri_pw[:,None,:]).sum(axis=0) covarsec_pw = (seccof_ppw**2*covarpri_pw[:,None,:]).sum(axis=0) havesec_pw = (havesecb_pw | onlysec1_pw | onlysec2_pw) prisec_pw = (haveraw_pw & havesec_pw) onlypri_pw = (haveraw_pw & np.logical_not(havesec_pw)) onlysec_pw = (np.logical_not(haveraw_pw) & havesec_pw) cof_ppw = onlypri_pw[:,None,:]*pricof_ppw + onlysec_pw[:,None,:]*seccof_ppw + \ 0.5*prisec_pw[:,None,:]*(pricof_ppw+seccof_ppw) # now do the combination stokes_pw = (cof_ppw*stokespri_pw[None,:,:]).sum(axis=1) var_pw = (cof_ppw**2*varpri_pw[None,:,:]).sum(axis=1) covar_pw = (cof_ppw**2*covarpri_pw[None,:,:]).sum(axis=1) covarprisec_pw = 0.5*varpri_pw*np.logical_or(onlysec1_pw,onlysec2_pw) covarqu_w = (cof_ppw[0]*cof_ppw[2]*varpri_pw).sum(axis=0) # cull wavelengths based on chisq between primary and secondary chi2linhi_pw = np.zeros((patpairs,wavs)) badlinhichi_w = np.zeros(wavs) havelinhichi_p = prisec_pw.any(axis=1) linhichis = havelinhichi_p.sum() chi2linhi_pw[prisec_pw] = ((stokespri_pw[prisec_pw] - stokessec_pw[prisec_pw])**2 / \ (varpri_pw[prisec_pw] + varsec_pw[prisec_pw] - 2.*covarprisec_pw[prisec_pw])) q3_p = np.percentile(chi2linhi_pw[:,okall_w].reshape((4,-1)),75,axis=1) badlinhichi_w[ok_w] = ((chi2linhi_pw[:,ok_w] > (chifence_d[2]*q3_p)[:,None])).any(axis=0) ok_w &= np.logical_not(badlinhichi_w) okall_w &= np.logical_not(badlinhichi_w) chi2linhi_p = np.zeros(patpairs) chi2linhi_p[havelinhichi_p] = (chi2linhi_pw[havelinhichi_p][:,ok_w]).sum(axis=1)/ \ (prisec_pw[havelinhichi_p][:,ok_w]).sum(axis=1) syserrlinhi_pw = np.zeros((patpairs,wavs)) varlinhi_pw = np.zeros((patpairs,wavs)) syserrlinhi_p = np.zeros(patpairs) syserrlinhi_pw[prisec_pw] = (stokespri_pw[prisec_pw] - stokessec_pw[prisec_pw]) varlinhi_pw[prisec_pw] = varpri_pw[prisec_pw] + varsec_pw[prisec_pw] - 2.*covarprisec_pw[prisec_pw] syserrlinhi_p[havelinhichi_p] = np.average(syserrlinhi_pw[havelinhichi_p][:,okall_w], \ weights=1./np.sqrt(varlinhi_pw[havelinhichi_p][:,okall_w]),axis=1) if debug: np.savetxt(obsname+"_have_pw.txt",np.vstack((wav_w,ok_pw.astype(int),haveraw_pw,havesecb_pw, \ onlysec1_pw,onlysec2_pw,havesec_pw,prisec_pw,onlypri_pw,onlysec_pw)).T, \ fmt="%8.2f "+9*"%2i %2i %2i %2i ") np.savetxt(obsname+"_seccof_ppw.txt",np.vstack((wav_w,ok_pw.astype(int),seccof_ppw.reshape((16,-1)))).T, \ fmt="%8.2f "+4*"%2i "+16*" %6.3f") np.savetxt(obsname+"_cof_ppw.txt",np.vstack((wav_w,ok_pw.astype(int),cof_ppw.reshape((16,-1)))).T, \ fmt="%8.2f "+4*"%2i "+16*" %6.3f") np.savetxt(obsname+"_stokes.txt",np.vstack((wav_w,ok_pw.astype(int),stokespri_pw,stokes_pw)).T, \ fmt="%8.2f "+4*"%2i "+8*" %10.6f") np.savetxt(obsname+"_var.txt",np.vstack((wav_w,ok_pw.astype(int),varpri_pw,var_pw)).T, \ fmt="%8.2f "+4*"%2i 
"+8*"%14.9f ") np.savetxt(obsname+"_covar.txt",np.vstack((wav_w,ok_pw.astype(int),covarpri_pw,covar_pw)).T, \ fmt="%8.2f "+4*"%2i "+8*"%14.9f ") np.savetxt(obsname+"_chi2linhi_pw.txt",np.vstack((wav_w,stokes_Fw[0],ok_pw.astype(int), \ chi2linhi_pw)).T, fmt="%8.2f %10.0f "+4*"%2i "+4*"%10.4f ") stokes_Fw[1:] = stokes_pw[[0,2]]*stokes_Fw[0] var_Fw[1:3] = var_pw[[0,2]]*stokes_Fw[0]**2 var_Fw[3] = covarqu_w*stokes_Fw[0]**2 covar_Fw[1:] = covar_pw[[0,2]]*stokes_Fw[0]**2 bpm_Fw = ((bpm_Fw==1) | np.logical_not(ok_w)).astype(int) # document chisq results, combine flagoffs, compute mean chisq for observation, combine with final bpm if (havecyclechi_p.any() | havelinhichi_p.any()): chi2cyclenet = 0. syserrcyclenet = 0. chi2linhinet = 0. syserrlinhinet = 0. if havecyclechi_p.any(): log.message(("\n"+14*" "+"{:^"+str(5*patpairs)+"}{:^"+str(8*patpairs)+"}{:^"+str(6*patpairs)+"}")\ .format("culled","sys %err","mean chisq"), with_header=False) log.message((9*" "+"HW "+patpairs*" %4s"+patpairs*" %7s"+patpairs*" %5s") \ % tuple(3*patwplist),with_header=False) jlist = sum([jlistk[k] for k in klist],[]) Jlist = list(set(sum([Jlistk[k] for k in klist],[]))) Jmax = max(Jlist) ok_pJ = np.zeros((patpairs,Jmax+1),dtype=bool) for p in plist: ok_pJ[p][Jlistk[k_p[p]]] = True syserrcycle_pJ = np.zeros((patpairs,Jmax+1)) syserrcycle_pJ[ok_pJ] = syserrcycle_j[jlist] syserrcyclenet_p = np.zeros(patpairs) syserrcyclenet_p[plist] = syserrcyclenet_k[klist] syserrcyclenet = np.sqrt((syserrcyclenet_p**2).sum()/patpairs) chi2cycle_pJ = np.zeros((patpairs,Jmax+1)) chi2cycle_pJ[ok_pJ] = chi2cycle_j[jlist] chi2cyclenet_p = np.zeros(patpairs) chi2cyclenet_p[plist] = chi2cyclenet_k[klist] chi2cyclenet = chi2cyclenet_p.sum()/patpairs culls_pJ = np.zeros((patpairs,Jmax+1),dtype=int) culls_pJ[ok_pJ] = iscull_jw[jlist].sum(axis=1) if cycles_p.max() > 2: for J in set(Jlist): log.message(((" cycle %2i: "+patpairs*"%4i "+patpairs*"%7.3f "+patpairs*"%5.2f ") % \ ((J+1,)+tuple(culls_pJ[:,J])+tuple(100.*syserrcycle_pJ[:,J])+tuple(chi2cycle_pJ[:,J]))), \ with_header=False) netculls_p = [iscull_jw[jlistk[k_p[p]]].all(axis=0).sum() for p in range(patpairs)] log.message((" net : "+patpairs*"%4i "+patpairs*"%7.3f "+patpairs*"%5.2f ") % \ (tuple(netculls_p)+tuple(100*syserrcyclenet_p)+tuple(chi2cyclenet_p)), with_header=False) if (havelinhichi_p.any()): log.message(("\n"+14*" "+"{:^"+str(5*patpairs)+"}{:^"+str(8*patpairs)+"}{:^"+str(6*patpairs)+"}")\ .format("culled","sys %err","mean chisq"), with_header=False) log.message((9*" "+"HW "+(4*patpairs/2)*" "+" all"+(4*patpairs/2)*" "+patpairs*" %7s"+patpairs*" %5s") \ % tuple(2*patwplist),with_header=False) chicount = int(badlinhichi_w.sum()) chi2linhinet = chi2linhi_p.sum()/(havelinhichi_p.sum()) syserrlinhinet = np.sqrt((syserrlinhi_p**2).sum()/(havelinhichi_p.sum())) log.message((" Linhi: "+(2*patpairs)*" "+"%3i "+(2*patpairs)*" "+patpairs*"%7.3f "+patpairs*"%5.2f ") % \ ((chicount,)+tuple(100.*syserrlinhi_p)+tuple(chi2linhi_p)), with_header=False) chi2qudof = (chi2cyclenet+chi2linhinet)/(int(chi2cyclenet>0)+int(chi2linhinet>0)) syserr = np.sqrt((syserrcyclenet**2+syserrlinhinet**2)/ \ (int(syserrcyclenet>0)+int(syserrlinhinet>0))) log.message(("\n Estimated sys %%error: %5.3f%% Mean Chisq: %6.2f") % \ (100.*syserr,chi2qudof), with_header=False) if not HW_Cal_override: # apply hw efficiency, equatorial PA rotation calibration stokes_Fw[1:,ok_w] /= heff_w[ok_w] var_Fw[1:,ok_w] /= heff_w[ok_w]**2 covar_Fw[1:,ok_w] /= heff_w[ok_w]**2 stokes_Fw,var_Fw,covar_Fw = 
specpolrotate(stokes_Fw,var_Fw,covar_Fw,eqpar_w) # save final stokes fits file for this observation. Strain out nans. infile = infilelist[rawlist[comblist[k][0]][0]] hduout = pyfits.open(infile) hduout['SCI'].data = np.nan_to_num(stokes_Fw.reshape((3,1,-1))) hduout['SCI'].header['CTYPE3'] = 'I,Q,U' hduout['VAR'].data = np.nan_to_num(var_Fw.reshape((4,1,-1))) hduout['VAR'].header['CTYPE3'] = 'I,Q,U,QU' hduout['COV'].data = np.nan_to_num(covar_Fw.reshape((3,1,-1))) hduout['COV'].header['CTYPE3'] = 'I,Q,U,QU' hduout['BPM'].data = bpm_Fw.astype('uint8').reshape((3,1,-1)) hduout['BPM'].header['CTYPE3'] = 'I,Q,U' hduout[0].header['WPPATERN'] = wppat hduout[0].header['PATYPE'] = pacaltype if len(calhistorylist): for line in calhistorylist: hduout[0].header.add_history(line) if (havecyclechi_p.any() | havelinhichi_p.any()): hduout[0].header['SYSERR'] = (100.*syserr,'estimated % systematic error') outfile = obsname+'_stokes.fits' hduout.writeto(outfile,overwrite=True,output_verify='warn') log.message('\n '+outfile+' Stokes I,Q,U', with_header=False) # apply flux calibration, if available fluxcal_w = specpolflux(outfile,logfile=logfile) if fluxcal_w.shape[0]>0: stokes_Fw *= fluxcal_w var_Fw *= fluxcal_w**2 covar_Fw *= fluxcal_w**2 # calculate, print means (stokes averaged in unnorm space) avstokes_f, avvar_f, avwav = spv.avstokes(stokes_Fw,var_Fw[:-1],covar_Fw,wav_w) avstokes_F = np.insert(avstokes_f,0,1.) avvar_F = np.insert(avvar_f,0,1.) spv.printstokes(avstokes_F,avvar_F,avwav,tcenter=np.pi/2.,textfile='tmp.log') log.message(open('tmp.log').read(), with_header=False) os.remove('tmp.log') # elif wppat.count('CIRCULAR'): TBS # elif wppat=='ALL-STOKES': TBS # end of obs loop # end of config loop return
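# --------------------------------------------------------------------------
# Added checks (not part of the pipeline).  First, the cycle-culling cut
# above (chisq > 13.8 at dof=1, P < .02%) is the chi^2 inverse survival
# function; a quick verification, assuming scipy is available:
def _chi2lim_check():
    from scipy.stats import chi2
    print(chi2.isf(0.0002, 1))               # ~13.83, the chi2lim used above
# Second, cycles are combined on normalized stokes q = Q/I so that
# throughput differences between cycles do not bias the polarization; a toy
# example with invented intensities:
def _norm_combine_demo():
    import numpy as np
    I_J = np.array([1.0e5, 0.8e5])           # two cycles, different throughput
    Q_J = np.array([1.0e3, 0.8e3])           # both carry the same 1% signal
    q = (Q_J / I_J).mean()                   # combine in normalized space
    print(q, q * I_J.mean())                 # 0.01, and unnormalized Q = q*I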
def hrsclean(images, outpath, obslogfile=None, subover=True, trim=True, masbias=None,
             subbias=True, median=False, function='polynomial', order=5, rej_lo=3, rej_hi=3,
             niter=5, interp='linear', clobber=False, logfile='salt.log',verbose=True):
    """Convert MEF HRS data into a single image.  If variance frames and BPMs
       exist, convert them to the same format as well.  Returns an MEF image
       that has been combined into a single frame.
    """
    with logging(logfile,debug) as log:

        # Check the input images
        infiles = saltio.argunpack ('Input',images)

        # create list of output files
        outpath=saltio.abspath(outpath)

        if saltio.checkfornone(obslogfile) is None:
            raise SaltError('Obslog file is required')

        # Delete the obslog file if it already exists
        if (os.path.isfile(obslogfile) and clobber) or not os.path.isfile(obslogfile):
            if os.path.isfile(obslogfile): saltio.delete(obslogfile)

            #read in the observation log or create it
            headerDict=obslog(infiles, log)
            obsstruct=createobslogfits(headerDict)
            saltio.writefits(obsstruct, obslogfile)
        else:
            obsstruct=saltio.openfits(obslogfile)

        #create the list of bias frames and process them
        filename=obsstruct.data.field('FILENAME')
        detmode=obsstruct.data.field('DETMODE')
        ccdtype=obsstruct.data.field('OBJECT')
        biaslist=filename[ccdtype=='Bias']
        masterbias_dict={}
        if log: log.message('Processing Bias Frames')
        for img in infiles:
            if os.path.basename(img) in biaslist:
                #open the image
                struct=pyfits.open(img)
                bimg=outpath+'bgph'+os.path.basename(img)

                #print the message
                if log:
                    message='Processing Zero frame %s' % img
                    log.message(message, with_stdout=verbose, with_header=False)

                #process the image
                struct=clean(struct, createvar=False, badpixelstruct=None, mult=True,
                             subover=subover, trim=trim, subbias=False, imstack=False,
                             bstruct=None, median=median, function=function, order=order,
                             rej_lo=rej_lo, rej_hi=rej_hi, niter=niter, log=log, verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist=history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0],'HPREPARE', 'Images have been prepared', hist)
                saltkey.new('HGAIN',time.asctime(time.localtime()),'Images have been gain corrected',struct[0])
                #saltkey.new('HXTALK',time.asctime(time.localtime()),'Images have been xtalk corrected',struct[0])
                saltkey.new('HBIAS',time.asctime(time.localtime()),'Images have been de-biased',struct[0])

                # write FITS file
                saltio.writefits(struct,bimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master bias list
                masterbias_dict=compareimages(struct, bimg, masterbias_dict, keylist=hrsbiasheader_list)

        #create the master bias frame
        for i in list(masterbias_dict.keys()):
            bkeys=masterbias_dict[i][0]
            blist=masterbias_dict[i][1:]
            mbiasname=outpath+createmasterbiasname(blist, bkeys, x1=5, x2=13)
            bfiles=','.join(blist)
            saltcombine(bfiles, mbiasname, method='median', reject='sigclip', mask=False, \
                        weight=False, blank=0, scale=None, statsec=None, lthresh=3, \
                        hthresh=3, clobber=False, logfile=logfile,verbose=verbose)

        #apply full reductions to the science data
        for img in infiles:
            nimg=os.path.basename(img)
            if not nimg in biaslist:
                #open the image
                struct=pyfits.open(img)
                simg=outpath+'mbgph'+os.path.basename(img)

                #print the message
                if log:
                    message='Processing science frame %s' % img
                    log.message(message, with_stdout=verbose)

                #get master bias frame
                masterbias=get_masterbias(struct, masterbias_dict, keylist=hrsbiasheader_list)
                if masterbias:
                    subbias=True
                    bstruct=saltio.openfits(masterbias)
                else:
                    subbias=False
                    bstruct=None

                #process the image
                struct=clean(struct,
createvar=False, badpixelstruct=None, mult=True, subover=subover, trim=trim, subbias=subbias, imstack=True, bstruct=bstruct, median=median, function=function, order=order, rej_lo=rej_lo, rej_hi=rej_hi, niter=niter, log=log, verbose=verbose) #write the file out # housekeeping keywords fname, hist=history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref']) saltkey.housekeeping(struct[0],'HPREPARE', 'Images have been prepared', hist) saltkey.new('HGAIN',time.asctime(time.localtime()),'Images have been gain corrected',struct[0]) #saltkey.new('HXTALK',time.asctime(time.localtime()),'Images have been xtalk corrected',struct[0]) saltkey.new('HBIAS',time.asctime(time.localtime()),'Images have been de-biased',struct[0]) # write FITS file saltio.writefits(struct,simg, clobber=clobber) saltio.closefits(struct) return
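# --------------------------------------------------------------------------
# Added illustration: hrsclean builds one master bias per group of bias
# frames that share the header keywords in hrsbiasheader_list (the grouping
# is done by the pipeline helpers compareimages/get_masterbias).  The
# grouping idea, sketched with plain dictionaries and invented key tuples:
def _bias_group_demo():
    frames = [('b1.fits', ('HRDET', '1x1')),   # (file, header-key tuple), made up
              ('b2.fits', ('HRDET', '1x1')),
              ('b3.fits', ('HRDET', '2x2'))]
    groups = {}
    for name, key in frames:
        groups.setdefault(key, []).append(name)
    for key, names in groups.items():
        print((key, names))                    # one master bias per key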
def specpolmap(infile_list, propcode=None, inter=True, automethod='Matchlines',logfile='salt.log'):
    obsdate=os.path.basename(infile_list[0])[7:15]

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict=obslog(infile_list)

        # eliminate inapplicable images (iterate in reverse so del does not skip entries)
        for i in reversed(range(len(infile_list))):
            if obs_dict['PROPID'][i].upper().strip()!=propcode or int(obs_dict['BS-STATE'][i][1])!=2:
                del infile_list[i]
        obs_dict=obslog(infile_list)

        # use arc to make first-guess wavecal from model, locate beamsplitter split point
        for i in range(len(infile_list)):
            if obs_dict['OBJECT'][i].upper().strip()=='ARC':
                arcidx = i
                rbin,cbin = np.array(obs_dict["CCDSUM"][arcidx].split(" ")).astype(int)
                grating = obs_dict['GRATING'][arcidx].strip()
                grang = float(obs_dict['GR-ANGLE'][arcidx])
                artic = float(obs_dict['CAMANG'][arcidx])
                hduarc = pyfits.open(infile_list[arcidx])
                arc_rc = hduarc['SCI'].data
                rows,cols = arc_rc.shape
                lam_c = rssmodelwave(grating,grang,artic,cbin,cols)
                arc_r = arc_rc.sum(axis=1)
                lam_m = np.loadtxt(datadir+"wollaston.txt",dtype=float,usecols=(0,))
                rpix_om = np.loadtxt(datadir+"wollaston.txt",dtype=float,unpack=True,usecols=(1,2))
                expectrow_o = ((2052 + interp1d(lam_m,rpix_om,kind='cubic') \
                    (lam_c[cols/2-cols/16:cols/2+cols/16])).mean(axis=1)/rbin).astype(int)
                foundrow_o = np.zeros((2),dtype=int)
                for o in (0,1):
                    foundrow_o[o] = expectrow_o[o]-100/rbin \
                        + np.argmax(arc_r[expectrow_o[o]-100/rbin:expectrow_o[o]+100/rbin])
                    arcsignal = arc_r[foundrow_o[o]]
                    botedge = foundrow_o[o] + np.argmax(arc_r[foundrow_o[o]:] < arcsignal/2.)
                    topedge = foundrow_o[o] - np.argmax(arc_r[foundrow_o[o]::-1] < arcsignal/2.)
                    foundrow_o[o] = (botedge+topedge)/2.
                splitrow = foundrow_o.mean()
                offset = int(splitrow - rows/2)
                log.message('Split Row: '+("%4i " % splitrow), with_header=False)

                # split arc into o/e images
                padbins = (np.indices((rows,cols))[0]<offset) | \
                    (np.indices((rows,cols))[0]>rows+offset)
                arc_rc = np.roll(arc_rc,-offset,axis=0)
                arc_rc[padbins] = 0.
arc_orc = arc_rc.reshape((2,rows/2,cols)) # for O,E arc straighten spectrum, identify for each, form (unstraightened) wavelength map lamp=obs_dict['LAMPID'][arcidx].strip().replace(' ', '') if lamp == 'NONE': lamp='CuAr' hduarc[0].header.update('MASKTYP','LONGSLIT') hduarc[0].header.update('MASKID','PL0100N001') del hduarc['VAR'] del hduarc['BPM'] lampfile=iraf.osfn("pysalt$data/linelists/%s.wav" % lamp) rpix_oc = interp1d(lam_m, rpix_om,kind ='cubic')(lam_c) log.message('\nARC: image '+infile_list[i].split('.')[0][-4:]+' GRATING '+grating\ +' GRANG '+("%8.3f" % grang)+' ARTIC '+("%8.3f" % artic)+' LAMP '+lamp, with_header=False) wavmap_orc = np.zeros_like(arc_orc) for o in (0,1): foundrow_o[o] += -offset + o*rows/2 arc_Rc = np.zeros((rows/2,cols),dtype='float32') for c in range(cols): shift(arc_orc[o,:,c], \ -(rpix_oc[o,c]-rpix_oc[o,cols/2])/rbin,arc_Rc[:,c]) hduarc['SCI'].data = arc_Rc arcimage = "arc_"+str(o)+".fits" hduarc.writeto(arcimage,clobber=True) Rstart = foundrow_o[o] order = 3 dbfilename = "arcdb_"+str(o)+".txt" if os.path.isfile(dbfilename): # guessfile = "guess_"+str(o)+".txt" # os.rename(dbfilename,guessfile) guesstype = 'file' else: guessfile = '' guesstype = 'rss' specidentify(arcimage, lampfile, dbfilename, guesstype=guesstype, guessfile=guessfile, automethod=automethod, function='legendre', order=order, rstep=10, rstart=Rstart, mdiff=20, thresh=3, niter=5, smooth=3, inter=inter, clobber=True, logfile=logfile, verbose=True) wavlegR_y = np.loadtxt(dbfilename,dtype=float,usecols=(0,)) wavlegcof_ly = np.loadtxt(dbfilename,unpack=True,dtype=float,usecols=range(1,order+2)) wavlegcof_ly = np.delete(wavlegcof_ly,np.where(wavlegR_y<0)[0],axis=1) wavlegR_y = np.delete(wavlegR_y,np.where(wavlegR_y<0)[0],axis=0) wavmap_yc = np.polynomial.legendre.legval(np.arange(cols),wavlegcof_ly) mediancof_l = np.median(wavlegcof_ly,axis=1) rms_l = np.sqrt(np.median((wavlegcof_ly - mediancof_l[:,None])**2,axis=1)) sigma_ly = (wavlegcof_ly - mediancof_l[:,None])/rms_l[:,None] usey = (sigma_ly[0]<3) & (sigma_ly[1]<3) wavmap_Rc = np.zeros((rows/2,cols),dtype='float32') R_y = wavlegR_y[usey] a = np.vstack((R_y**3,R_y**2,R_y,np.ones_like(R_y))).T for c in range(cols): polycofs = la.lstsq(a,wavmap_yc[usey,c])[0] wavmap_orc[o,:,c] \ = np.polyval(polycofs,range(rows/2)+(rpix_oc[o,c]-rpix_oc[o,cols/2])/rbin) # save split data along third fits axis, add wavmap extension, save as 'w' file hduwav = pyfits.ImageHDU(data=wavmap_orc, header=hduarc['SCI'].header, name='WAV') for i in range(len(infile_list)): hdu = pyfits.open(infile_list[i]) image_rc = np.roll(hdu['SCI'].data,-offset,axis=0) image_rc[padbins] = 0. hdu['SCI'].data = image_rc.reshape((2,rows/2,cols)) var_rc = np.roll(hdu['VAR'].data,-offset,axis=0) var_rc[padbins] = 0. hdu['VAR'].data = var_rc.reshape((2,rows/2,cols)) bpm_rc = np.roll(hdu['BPM'].data,-offset,axis=0) bpm_rc[padbins] = 1 hdu['BPM'].data = bpm_rc.reshape((2,rows/2,cols)) hdu.append(hduwav) for f in ('SCI','VAR','BPM','WAV'): hdu[f].header.update('CTYPE3','O,E') hdu.writeto('w'+infile_list[i],clobber='True') log.message('Output file '+'w'+infile_list[i] , with_header=False)
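# --------------------------------------------------------------------------
# Added illustration: specpolmap evaluates all per-row Legendre wavelength
# solutions in one call.  np.polynomial.legendre.legval(x, c) with a
# coefficient matrix c of shape (order+1, nrows) returns one evaluated
# polynomial per column of c, shape (nrows, len(x)).  Check with invented
# linear solutions (P0 = 1, P1 = x, so wav = c0 + c1*x):
def _legval_demo():
    import numpy as np
    cof_ly = np.array([[4000., 4100.],       # c0 for two rows
                       [   2.,    2.]])      # c1 for two rows
    wavmap_yc = np.polynomial.legendre.legval(np.arange(5), cof_ly)
    print(wavmap_yc.shape)                   # (2, 5)
    print(wavmap_yc[0])                      # [4000. 4002. 4004. 4006. 4008.]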
if not os.path.isdir('sci'): os.mkdir('sci')
os.chdir('sci')

#basic image reductions
infile_list = glob.glob('../raw/P*fits')
if args.basic_red:
    imred(infile_list, './', bpmfile, cleanup=True)

#proceed with the spectroscopic reductions
logfile = 'spec{}.log'.format(ddir)
dbfile = 'spec{}.db'.format(ddir)
infile_list = glob.glob('m*fits')
infiles=','.join(['%s' % x for x in infile_list])
obsdate=os.path.basename(infile_list[0])[7:15]
obs_dict=obslog(infile_list)

for i in range(len(infile_list)):
    if obs_dict['CCDTYPE'][i].upper().strip()=='ARC':
        arc_image = infile_list[i]
        auto_arc_lens(arc_image, dbfile=dbfile, ndstep=20, logfile=logfile)

obj_dict = {}
for i in range(len(infile_list)):
    if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS'):
        img = infile_list[i]
        specrectify(img, outimages='', outpref='x', solfile=dbfile, caltype='line',
                    function='legendre', order=3, inttype='interp', w1=None, w2=None,
                    dw=None, nw=None, blank=0.0, nearest=True, clobber=True,
                    logfile=logfile, verbose=True)
        obj = obs_dict['OBJECT'][i].strip()
def specpolrawstokes(infile_list, logfile='salt.log'):
    #set up some files that will be needed
    obsdate=os.path.basename(infile_list[0]).split('.')[0][-12:-4]
    logfile='specpol'+obsdate+'.log'
    patternfile=open(datadir+'wppaterns.txt','r')

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict=obslog(infile_list)
        images = len(infile_list)
        hsta_i = np.array([int(float(s)/11.25) for s in obs_dict['HWP-ANG']])
        qsta_i = np.array([int(float(s)/11.25) for s in obs_dict['QWP-STA']])
        img_i = np.array([int(os.path.basename(s).split('.')[0][-4:]) for s in infile_list])
        wpstate_i = [['unknown','out','qbl','hw','hqw'][int(s[1])] for s in obs_dict['WP-STATE']]
        # wppat_i = obs_dict['WPPATERN']
        wppat_i = ['UNKNOWN' for i in range(images)]    # until WPPATERN is put in obslog
        object_i = obs_dict['OBJECT']
        config_i = np.zeros(images,dtype='int')
        obs_i = -np.ones(images,dtype='int')

        # make table of observations
        configs = 0; obss = 0
        for i in range(images):
            if (wpstate_i[i] == 'unknown'):
                log.message('Warning: Image %s WP-STATE UNKNOWN, assume it is 3 (HW)' % img_i[i], \
                    with_header=False)
                wpstate_i[i] = 'hw'
            elif (wpstate_i[i] == 'out'):
                log.message('Image %i not in a WP pattern, will skip' % img_i[i], with_header=False)
                continue
            if object_i[i].count('NONE'): object_i[i] = obs_dict['LAMPID'][i]
            object_i[i] = object_i[i].replace(' ','')
            cbin,rbin = np.array(obs_dict["CCDSUM"][i].split(" ")).astype(int)
            grating = obs_dict['GRATING'][i].strip()
            grang = float(obs_dict['GR-ANGLE'][i])
            artic = float(obs_dict['CAMANG'][i])
            confdat_d = [rbin,cbin,grating,grang,artic,wppat_i[i]]
            obsdat_d = [object_i[i],rbin,cbin,grating,grang,artic,wppat_i[i]]
            if configs==0:
                confdat_cd = [confdat_d]
                obsdat_od = [obsdat_d]

            configs = len(confdat_cd); config = 0
            while config<configs:
                if confdat_d == confdat_cd[config]: break
                config += 1
            if config == configs: confdat_cd.append(confdat_d)
            config_i[i] = config

            obss = len(obsdat_od); obs = 0
            while obs<obss:
                if obsdat_d == obsdat_od[obs]: break
                obs += 1
            if obs == obss: obsdat_od.append(obsdat_d)
            obs_i[i] = obs

        patternlist = patternfile.readlines()

        log.message('Raw Stokes File OBS CCDSUM GRATING GR-ANGLE CAMANG WPPATERN', with_header=False)

        # Compute E-O raw stokes
        for obs in range(obss):
            idx_j = np.where(obs_i == obs)
            i0 = idx_j[0][0]
            name_n = []
            if wppat_i[i0].count('UNKNOWN'):
                if (hsta_i[idx_j] % 2).max() == 0:
                    wppat = "Linear"
                else:
                    wppat = "Linear-Hi"
                for i in idx_j[0]:
                    wppat_i[i] = wppat
            if not(((wpstate_i[i0]=='hw') & (wppat_i[i0] in ('Linear','Linear-Hi'))
                    | (wpstate_i[i0]=='hqw') & (wppat_i[i0] in ('Circular','Circular-Hi','All-Stokes')))):
                print "Observation",obs,": wpstate ",wpstate_i[i0], \
                    " and wppattern ",wppat_i[i0], "not consistent"
                continue
            for p in patternlist:
                if (p.split()[0]==wppat_i[i0])&(p.split()[2]=='hwp'):
                    wpat_p = np.array(p.split()[3:]).astype(int)
                if (p.split()[0]==wppat_i[i0])&(p.split()[2]=='qwp'):
                    wpat_dp = np.vstack((wpat_p,np.array(p.split()[3:]).astype(int)))
            stokes=0; j=-1

            while j < (len(idx_j[0])-2):
                j += 1
                i = idx_j[0][j]
                if (wpstate_i[i]=='hw'):
                    if (np.where(wpat_p[0::2]==hsta_i[i])[0].size > 0):
                        idxp = np.where(wpat_p==hsta_i[i])[0][0]
                        if hsta_i[i+1] != wpat_p[idxp+1]: continue
                    else: continue
                if (wpstate_i[i]=='hqw'):
                    if (np.where(wpat_dp[0::2]==(hsta_i[i],qsta_i[i]))[0].size > 0):
                        idxp = np.where(wpat_dp==(hsta_i[i],qsta_i[i]))[0][0]
                        if (hsta_i[i+1],qsta_i[i+1]) != wpat_dp[None,idxp+1]: continue
                    else: continue

                if stokes==0:
                    wavs = pyfits.getheader(infile_list[i],'SCI',1)['NAXIS1']
                    sci_fow = np.zeros((2,2,wavs)); var_fow = np.zeros_like(sci_fow); \
bpm_fow = np.zeros_like(sci_fow) for f in (0,1): hdulist = pyfits.open(infile_list[i+f]) sci_fow[f] = hdulist['sci'].data.reshape((2,-1)) var_fow[f] = hdulist['var'].data.reshape((2,-1)) bpm_fow[f] = hdulist['bpm'].data.reshape((2,-1)) # compute intensity, E-O stokes spectrum, VAR, BPM. # fits out: unnormalized (int,stokes),(length 1) spatial,wavelength # wavelength marked bad if it is bad in either filter or order bpm_w = (bpm_fow.sum(axis=0).sum(axis=0) > 0).astype(int) wok = (bpm_w==0) stokes_sw = np.zeros((2,wavs),dtype='float32'); var_sw = np.zeros_like(stokes_sw) stokes_sw[0,wok] = 0.5*sci_fow[:,:,wok].reshape((2,2,-1)).sum(axis=0).sum(axis=0) var_sw[0,wok] = 0.25*var_fow[:,:,wok].reshape((2,2,-1)).sum(axis=0).sum(axis=0) stokes_sw[1,wok] = 0.5*((sci_fow[0,1,wok]-sci_fow[1,1,wok])/(sci_fow[0,1,wok]+sci_fow[1,1,wok]) \ - (sci_fow[0,0,wok]-sci_fow[1,0,wok])/(sci_fow[0,0,wok]+sci_fow[1,0,wok])) var_sw[1,wok] = 0.5*((var_fow[0,1,wok]+var_fow[1,1,wok])/(sci_fow[0,1,wok]+sci_fow[1,1,wok])**2 \ + (var_fow[0,0,wok]+var_fow[1,0,wok])/(sci_fow[0,0,wok]+sci_fow[1,0,wok])**2) stokes_sw[1] *= stokes_sw[0] var_sw[1] *= stokes_sw[0]**2 bpm_sw = np.array([bpm_w,bpm_w],dtype='uint8').reshape((2,wavs)) name = object_i[i] + '_c' + str(config_i[i]) + '_h' + str(hsta_i[i]) + str(hsta_i[i+1]) if (wpstate_i[i]=='hqw'): name += 'q'+['m','p'][qsta_i[i]==4]+['m','p'][qsta_i[i+1]==4] count = " ".join(name_n).count(name) name += ('_%02i' % (count+1)) log.message('%20s %1i %1i %1i %8s %8.2f %8.2f %12s' % \ (name,obs,rbin,cbin,grating,grang,artic,wppat_i[i]), with_header=False) hduout = pyfits.PrimaryHDU(header=hdulist[0].header) hduout = pyfits.HDUList(hduout) hduout[0].header.update('WPPATERN',wppat_i[i]) header=hdulist['SCI'].header.copy() header.update('VAREXT',2) header.update('BPMEXT',3) header.update('CTYPE3','I,S') hduout.append(pyfits.ImageHDU(data=stokes_sw.reshape((2,1,wavs)), header=header, name='SCI')) header.update('SCIEXT',1,'Extension for Science Frame',before='VAREXT') hduout.append(pyfits.ImageHDU(data=var_sw.reshape((2,1,wavs)), header=header, name='VAR')) hduout.append(pyfits.ImageHDU(data=bpm_sw.reshape((2,1,wavs)), header=header, name='BPM')) hduout.writeto(name+'.fits',clobber=True,output_verify='warn') name_n.append(name) i += 1 stokes += 1 return
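# --------------------------------------------------------------------------
# Added illustration: the raw E-O stokes above is half the difference,
# between the two beams, of the normalized difference between the two
# waveplate positions of the pair, which cancels the common throughput.
# Toy numbers carrying a 1% polarization signal:
def _raw_stokes_demo():
    import numpy as np
    # sci_fo[f, o]: f = waveplate position within the pair, o = (O, E) beam
    sci_fo = np.array([[ 98., 100.],    # first halfwave position
                       [100.,  98.]])   # second halfwave position
    d_o = (sci_fo[0] - sci_fo[1]) / (sci_fo[0] + sci_fo[1])
    print(0.5 * (d_o[1] - d_o[0]))      # ~0.0101, the normalized raw stokes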
def list_configurations(infilelist, log):
    """Produce a list of files of similar configurations

    Parameters
    ----------
    infilelist: list
        list of input files

    log: ~logging
        Logging object.

    Returns
    -------
    config_dict: dict
        dictionary keyed by configuration tuple
        (GRATING, GR-ANGLE, CAMANG, BVISITID); each value is a dictionary
        with the arc, flat, and object files for that configuration
    """
    # set up the observing dictionary
    arclamplist = ['Ar','CuAr','HgAr','Ne','NeAr','ThAr','Xe']
    obs_dict=obslog(infilelist)

    # hack to remove potentially bad data
    for i in reversed(range(len(infilelist))):
        if int(obs_dict['BS-STATE'][i][1])!=2: del infilelist[i]
    obs_dict=obslog(infilelist)

    # inserted to take care of older observations
    old_data=False
    for date in obs_dict['DATE-OBS']:
        if int(date[0:4]) < 2015: old_data=True

    if old_data:
        log.message("Configuration map for old data", with_header=False)
        iarc_a, iarc_i, confno_i, confdatlist = list_configurations_old(infilelist, log)
        arcs = len(iarc_a)
        config_dict = {}
        for i in set(confno_i):
            image_dict={}
            image_dict['arc']=[infilelist[iarc_a[i]]]
            ilist = [infilelist[x] for x in np.where(iarc_i==iarc_a[i])[0]]
            ilist.remove(image_dict['arc'][0])
            image_dict['object'] = ilist
            config_dict[confdatlist[i]] = image_dict
        return config_dict

    # delete bad columns
    obs_dict = obslog(infilelist)
    for k in obs_dict.keys():
        if len(obs_dict[k])==0: del obs_dict[k]
    obs_tab = Table(obs_dict)

    # create the configurations list
    config_dict={}
    confdatlist = configmapset(obs_tab, config_list=('GRATING', 'GR-ANGLE', 'CAMANG', 'BVISITID'))

    infilelist = np.array(infilelist)
    for grating, grtilt, camang, blockvisit in confdatlist:
        image_dict = {}
        #things with the same configuration
        mask = ((obs_tab['GRATING']==grating) *
                (obs_tab['GR-ANGLE']==grtilt) *
                (obs_tab['CAMANG']==camang) *
                (obs_tab['BVISITID']==blockvisit))
        objtype = obs_tab['CCDTYPE']    # kn changed from OBJECT: CCDTYPE lists ARC more consistently
        lamp = obs_tab['LAMPID']
        isarc = ((objtype == 'ARC') | np.in1d(lamp,arclamplist))    # kn added check for arc lamp when CCDTYPE incorrect
        image_dict['arc'] = infilelist[mask * isarc]

        # if no arc for this config look for a similar one with different BVISITID
        if len(image_dict['arc']) == 0:
            othermask = ((obs_tab['GRATING']==grating) * \
                ((obs_tab['GR-ANGLE'] - grtilt) < .03) * ((obs_tab['GR-ANGLE'] - grtilt) > -.03) * \
                ((obs_tab['CAMANG'] - camang) < .05) * ((obs_tab['CAMANG'] - camang) > -.05) * \
                (obs_tab['BVISITID'] != blockvisit))
            image_dict['arc'] = infilelist[othermask * (objtype == 'ARC')]
            if len(image_dict['arc']) > 0:
                log.message("Warning: using arc from different BLOCKID", with_header=False)

        image_dict['flat'] = infilelist[mask * (objtype == 'FLAT')]
        image_dict['object'] = infilelist[mask * ~isarc * (objtype != 'FLAT')]
        if len(image_dict['object']) == 0: continue
        config_dict[(grating, grtilt, camang, blockvisit)] = image_dict

    return config_dict
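# --------------------------------------------------------------------------
# Added illustration: list_configurations keys each configuration by a tuple
# of header values and selects rows with boolean masks on the observation
# table.  configmapset is a pipeline helper; below, plain set/zip stands in
# for it on a toy astropy Table (all values invented):
def _config_mask_demo():
    from astropy.table import Table
    obs_tab = Table({'GRATING': ['PG0900', 'PG0900', 'PG1800'],
                     'GR-ANGLE': [15.0, 15.0, 30.0],
                     'CAMANG': [30.0, 30.0, 60.0]})
    confs = set(zip(obs_tab['GRATING'], obs_tab['GR-ANGLE'], obs_tab['CAMANG']))
    for grating, grang, camang in confs:
        mask = ((obs_tab['GRATING'] == grating) *
                (obs_tab['GR-ANGLE'] == grang) *
                (obs_tab['CAMANG'] == camang))
        print((grating, grang, camang, mask.sum()))   # files per configuration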
def imred(infile_list, prodir, bpmfile=None, gaindb = None, cleanup=True): #get the name of the files infiles=','.join(['%s' % x for x in infile_list]) #get the current date for the files obsdate=os.path.basename(infile_list[0])[1:9] print obsdate #set up some files that will be needed logfile='im'+obsdate+'.log' flatimage='FLAT%s.fits' % (obsdate) dbfile='spec%s.db' % obsdate #create the observation log obs_dict=obslog(infile_list) with logging(logfile, debug) as log: log.message('Pysalt Version: '+pysalt.verno, with_header=False) #prepare the data saltprepare(infiles, '', 'p', createvar=False, badpixelimage='', clobber=True, logfile=logfile, verbose=True) for img in infile_list: hdu = pyfits.open('p'+os.path.basename(img), 'update') # for backwards compatibility if not hdu[1].header.has_key('XTALK'): hdu[1].header.update('XTALK',1474) hdu[2].header.update('XTALK',1474) hdu[3].header.update('XTALK',1166) hdu[4].header.update('XTALK',1111) hdu[5].header.update('XTALK',1377) hdu[6].header.update('XTALK',1377) hdu.close() #bias subtract the data saltbias('pP*fits', '', 'b', subover=True, trim=True, subbias=False, masterbias='', median=False, function='polynomial', order=5, rej_lo=3.0, rej_hi=5.0, niter=10, plotover=False, turbo=False, clobber=True, logfile=logfile, verbose=True) add_variance('bpP*fits', bpmfile) #gain correct the data usedb = False if gaindb: usedb = True saltgain('bpP*fits', '', 'g', gaindb=gaindb, usedb=usedb, mult=True, clobber=True, logfile=logfile, verbose=True) #cross talk correct the data saltxtalk('gbpP*fits', '', 'x', xtalkfile = "", usedb=False, clobber=True, logfile=logfile, verbose=True) #cosmic ray clean the data #only clean the object data for i in range(len(infile_list)): if (obs_dict['CCDTYPE'][i].count('OBJECT') \ and obs_dict['LAMPID'][i].count('NONE') \ and obs_dict['INSTRUME'][i].count('RSS')): img='xgbp'+os.path.basename(infile_list[i]) saltcrclean(img, img, '', crtype='edge', thresh=5, mbox=11, bthresh=5.0, flux_ratio=0.2, bbox=25, gain=1.0, rdnoise=5.0, fthresh=5.0, bfactor=2, gbox=3, maxiter=5, multithread=True, clobber=True, logfile=logfile, verbose=True) #mosaic the data #khn: attempt to use most recent previous geometry to obsdate. 
#NOTE: mosaicing does not do this correctly #geomdb = open(datadir+"RSSgeom.dat",'r') #for geomline in geomdb: # if geomline[0]=='#': continue # if (int(obsdate) > int(geomline.split(' ')[0].replace('-',''))): break #geomfile = "RSSgeom_obsdate.dat" #open(geomfile,'w').write(geomline) geomfile=iraf.osfn("pysalt$data/rss/RSSgeom.dat") try: saltmosaic('xgbpP*fits', '', 'm', geomfile, interp='linear', cleanup=True, geotran=True, clobber=True, logfile=logfile, verbose=True) except: saltmosaic('xgbpP*fits', '', 'm', geomfile, interp='linear', cleanup=True, geotran=True, clobber=True, logfile=logfile, verbose=True) #khn: fix mosaiced VAR and BPM extensions #khn: fix mosaiced bpm missing some of gap for img in infile_list: filename = 'mxgbp'+os.path.basename(img) hdu = pyfits.open(filename, 'update') hdu[2].header.update('EXTNAME','VAR') hdu[3].header.update('EXTNAME','BPM') bpm_rc = (hdu[3].data>0).astype('uint8') zeroscicol = hdu['SCI'].data.sum(axis=0) == 0 bpmgapcol = bpm_rc.mean(axis=0) == 1 addbpmcol = zeroscicol & ~bpmgapcol addbpmcol[np.argmax(addbpmcol)-4:np.argmax(addbpmcol)] = True # allow for chip tilt bpm_rc[:,addbpmcol] = 1 hdu[3].data = bpm_rc hdu.writeto(filename,clobber=True) #clean up the images if cleanup: for f in glob.glob('p*fits'): os.remove(f) for f in glob.glob('bp*fits'): os.remove(f) for f in glob.glob('gbp*fits'): os.remove(f) for f in glob.glob('xgbp*fits'): os.remove(f)
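# --------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): the gap-column repair in
# imred() above flags columns whose SCI data are identically zero but which
# the mosaiced BPM failed to mark. A standalone toy version of that test:

def _bpm_gap_repair_sketch():
    import numpy as np
    sci = np.ones((4, 6))
    sci[:, 2:4] = 0.                       # toy image with a 2-column gap
    bpm = np.zeros((4, 6), dtype='uint8')
    bpm[:, 3] = 1                          # gap only partly flagged
    zeroscicol = sci.sum(axis=0) == 0      # columns with no signal
    bpmgapcol = bpm.mean(axis=0) == 1      # columns already all-bad
    addbpmcol = zeroscicol & ~bpmgapcol    # columns still to flag
    bpm[:, addbpmcol] = 1
    return bpm
# --------------------------------------------------------------------------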
def specpolextract(infilelist, logfile='salt.log'):

    #set up the files
    obsdate = os.path.basename(infilelist[0])[8:16]

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict = obslog(infilelist)

        # get rid of arcs
        for i in range(len(infilelist))[::-1]:
            if (obs_dict['OBJECT'][i].upper().strip() == 'ARC'):
                del infilelist[i]
        infiles = len(infilelist)

        # contiguous images of the same object and config are grouped together
        obs_dict = obslog(infilelist)
        confno_i, confdatlist = configmap(infilelist)
        configs = len(confdatlist)
        objectlist = list(set(obs_dict['OBJECT']))
        objno_i = np.array(
            [objectlist.index(obs_dict['OBJECT'][i]) for i in range(infiles)],
            dtype=int)
        grp_i = np.zeros((infiles), dtype=int)
        grp_i[1:] = ((confno_i[1:] != confno_i[:-1]) |
                     (objno_i[1:] != objno_i[:-1])).cumsum()

        for g in np.unique(grp_i):
            ilist = np.where(grp_i == g)[0]
            outfiles = len(ilist)
            outfilelist = [infilelist[i] for i in ilist]
            imagenolist = [
                int(os.path.basename(infilelist[i]).split('.')[0][-4:])
                for i in ilist
            ]
            log.message('\nExtract: '+objectlist[objno_i[ilist[0]]]+' Grating %s Grang %6.2f Artic %6.2f' % \
                        confdatlist[confno_i[ilist[0]]], with_header=False)
            log.message(' Images: ' + outfiles * '%i ' % tuple(imagenolist),
                        with_header=False)
            hdu0 = pyfits.open(outfilelist[0])
            rows, cols = hdu0['SCI'].data.shape[1:3]
            cbin, rbin = np.array(obs_dict["CCDSUM"][0].split(" ")).astype(int)

            # special version for lamp data
            lampid = obs_dict["LAMPID"][0].strip().upper()
            if lampid != "NONE":
                specpollampextract(outfilelist, logfile=logfile)
                continue

            # sum spectra to find target, background artifacts, and estimate sky flat and psf functions
            count = 0
            for i in range(outfiles):
                badbin_orc = pyfits.open(outfilelist[i])['BPM'].data > 0
                if count == 0:
                    count_orc = (~badbin_orc).astype(int)
                    image_orc = pyfits.open(
                        outfilelist[i])['SCI'].data * count_orc
                    var_orc = pyfits.open(
                        outfilelist[i])['VAR'].data * count_orc
                else:
                    count_orc += (~badbin_orc).astype(int)
                    image_orc += pyfits.open(
                        outfilelist[i])['SCI'].data * (~badbin_orc).astype(int)
                    var_orc += pyfits.open(
                        outfilelist[i])['VAR'].data * (~badbin_orc).astype(int)
                count += 1
            if count == 0:
                print 'No valid images'
                continue
            image_orc[count_orc > 0] /= count_orc[count_orc > 0]
            badbinall_orc = (count_orc == 0) | (image_orc == 0)  # bin is bad in all images
            badbinone_orc = (count_orc < count) | (image_orc == 0)  # bin is bad in at least one image
            var_orc[count_orc > 0] /= (count_orc[count_orc > 0])**2

            wav_orc = pyfits.open(outfilelist[0])['WAV'].data
            slitid = obs_dict["MASKID"][0]
            if slitid[0] == "P":
                slitwidth = float(slitid[2:5]) / 10.
else: slitwidth = float(slitid) hdusum = pyfits.PrimaryHDU(header=hdu0[0].header) hdusum = pyfits.HDUList(hdusum) header = hdu0['SCI'].header.copy() hdusum.append( pyfits.ImageHDU(data=image_orc, header=header, name='SCI')) hdusum.append( pyfits.ImageHDU(data=var_orc, header=header, name='VAR')) hdusum.append( pyfits.ImageHDU(data=badbinall_orc.astype('uint8'), header=header, name='BPM')) hdusum.append( pyfits.ImageHDU(data=wav_orc, header=header, name='WAV')) # hdusum.writeto("groupsum_"+str(g)+".fits",clobber=True) psf_orc,skyflat_orc,badbinnew_orc,isbkgcont_orc,maprow_od,drow_oc = \ specpolsignalmap(hdusum,logfile=logfile) maprow_ocd = maprow_od[:, None, :] + np.zeros((2, cols, 4)) maprow_ocd[:, :, [1, 2 ]] -= drow_oc[:, :, None] # edge is straight, target curved isedge_orc = (np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]) | \ (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3]) istarget_orc = (np.arange(rows)[:,None] > maprow_ocd[:,None,:,1]) & \ (np.arange(rows)[:,None] < maprow_ocd[:,None,:,2]) isskycont_orc = (((np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]+rows/16) | \ (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3]-rows/16)) & ~isedge_orc) isbkgcont_orc &= (~badbinall_orc & ~isedge_orc & ~istarget_orc) badbinall_orc |= badbinnew_orc badbinone_orc |= badbinnew_orc # pyfits.PrimaryHDU(var_orc.astype('float32')).writeto('var_orc1.fits',clobber=True) # pyfits.PrimaryHDU(badbinnew_orc.astype('uint8')).writeto('badbinnew_orc.fits',clobber=True) # pyfits.PrimaryHDU(badbinall_orc.astype('uint8')).writeto('badbinall_orc.fits',clobber=True) # pyfits.PrimaryHDU(badbinone_orc.astype('uint8')).writeto('badbinone_orc.fits',clobber=True) # scrunch and normalize psf from summed images (using badbinone) for optimized extraction psfnormmin = 0.70 # wavelengths with less than this flux in good bins are marked bad wbin = wav_orc[0, rows / 2, cols / 2] - wav_orc[0, rows / 2, cols / 2 - 1] wbin = float(int(wbin / 0.75)) wmin, wmax = wav_orc.min(axis=2).max(), wav_orc.max(axis=2).min() wedgemin = wbin * int(wmin / wbin + 0.5) + wbin / 2. wedgemax = wbin * int(wmax / wbin - 0.5) + wbin / 2. 
wedge_w = np.arange(wedgemin, wedgemax + wbin, wbin) wavs = wedge_w.shape[0] - 1 binedge_orw = np.zeros((2, rows, wavs + 1)) psf_orw = np.zeros((2, rows, wavs)) specrow_or = maprow_od[:, 1:3].mean(axis=1)[:, None] + np.arange( -rows / 4, rows / 4) # pyfits.PrimaryHDU(var_orc.astype('float32')).writeto('var_orc2.fits',clobber=True) for o in (0, 1): for r in specrow_or[o]: binedge_orw[o, r] = interp1d(wav_orc[o, r], np.arange(cols))(wedge_w) psf_orw[o, r] = scrunch1d(psf_orc[o, r], binedge_orw[o, r]) psf_orw /= psf_orw.sum(axis=1)[:, None, :] # np.savetxt("psfnorm_ow.txt",(psf_orw*okbin_orw).sum(axis=1).T,fmt="%10.4f") # pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto('psf_orw.fits',clobber=True) # pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_orw.fits',clobber=True) # set up optional image-dependent column shift for slitless data colshiftfilename = "colshift.txt" docolshift = os.path.isfile(colshiftfilename) if docolshift: img_I, dcol_I = np.loadtxt(colshiftfilename, dtype=float, unpack=True, usecols=(0, 1)) shifts = img_I.shape[0] log.message('Column shift: \n Images ' + shifts * '%5i ' % tuple(img_I), with_header=False) log.message(' Bins ' + shifts * '%5.2f ' % tuple(dcol_I), with_header=False) # background-subtract and extract spectra psfbadfrac_iow = np.zeros((outfiles, 2, wavs)) for i in range(outfiles): hdulist = pyfits.open(outfilelist[i]) sci_orc = hdulist['sci'].data var_orc = hdulist['var'].data badbin_orc = (hdulist['bpm'].data > 0) | badbinnew_orc tnum = os.path.basename(outfilelist[i]).split('.')[0][-3:] # make background continuum image, smoothed over resolution element rblk, cblk = int(1.5 * 8. / rbin), int(slitwidth * 8. / cbin) target_orc = np.zeros_like(sci_orc) for o in (0, 1): bkgcont_rc = blksmooth2d(sci_orc[o], isbkgcont_orc[o], rblk, cblk, 0.25, mode="mean") # remove sky continuum: ends of bkg continuum * skyflat skycont_c = (bkgcont_rc.T[isskycont_orc[o].T]/skyflat_orc[o].T[isskycont_orc[o].T]) \ .reshape((cols,-1)).mean(axis=1) skycont_rc = skycont_c * skyflat_orc[o] # remove sky lines: image - bkg cont run through 2d sky averaging obj_data = ((sci_orc[o] - bkgcont_rc) / skyflat_orc)[o] obj_data[(badbin_orc | isedge_orc | istarget_orc)[o]] = np.nan # pyfits.PrimaryHDU(obj_data.astype('float32')).writeto('obj_data.fits',clobber=True) skylines_rc = make_2d_skyspectrum(obj_data, wav_orc[o], np.array([ [0, rows], ])) * skyflat_orc[o] target_orc[o] = sci_orc[o] - skycont_rc - skylines_rc # pyfits.PrimaryHDU(skylines_rc.astype('float32')).writeto('skylines_rc_'+tnum+'_'+str(o)+'.fits',clobber=True) # pyfits.PrimaryHDU(skycont_rc.astype('float32')).writeto('skycont_rc_'+tnum+'_'+str(o)+'.fits',clobber=True) target_orc *= (~badbin_orc).astype(int) # pyfits.PrimaryHDU(target_orc.astype('float32')).writeto('target_'+tnum+'_orc.fits',clobber=True) # extract spectrum optimally (Horne, PASP 1986) target_orw = np.zeros((2, rows, wavs)) var_orw = np.zeros_like(target_orw) badbin_orw = np.ones((2, rows, wavs), dtype='bool') wt_orw = np.zeros_like(target_orw) dcol = 0. 
if docolshift: if int(tnum) in img_I: dcol = dcol_I[np.where( img_I == int(tnum))] # table has observed shift for o in (0, 1): for r in specrow_or[o]: target_orw[o, r] = scrunch1d(target_orc[o, r], binedge_orw[o, r] + dcol) var_orw[o, r] = scrunch1d(var_orc[o, r], binedge_orw[o, r] + dcol) badbin_orw[o, r] = scrunch1d( badbin_orc[o, r].astype(float), binedge_orw[o, r] + dcol) > 0.001 badbin_orw |= (var_orw == 0) badbin_orw |= ((psf_orw * (~badbin_orw)).sum(axis=1)[:, None, :] < psfnormmin) # pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_'+tnum+'_orw.fits',clobber=True) # pyfits.PrimaryHDU(badbin_orw.astype('uint8')).writeto('badbin_'+tnum+'_orw.fits',clobber=True) # use master psf shifted in row to allow for guide errors pwidth = 2 * int(1. / psf_orw.max()) ok_w = ((psf_orw * badbin_orw).sum(axis=1) < 0.03 / float(pwidth / 2)).all(axis=0) crosscor_s = np.zeros(pwidth) for s in range(pwidth): crosscor_s[s] = (psf_orw[:, s:s - pwidth] * target_orw[:, pwidth / 2:-pwidth / 2] * ok_w).sum() smax = np.argmax(crosscor_s) s_S = np.arange(smax - pwidth / 4, smax - pwidth / 4 + pwidth / 2 + 1) polycof = la.lstsq( np.vstack((s_S**2, s_S, np.ones_like(s_S))).T, crosscor_s[s_S])[0] pshift = -(-0.5 * polycof[1] / polycof[0] - pwidth / 2) s = int(pshift + pwidth) - pwidth sfrac = pshift - s psfsh_orw = np.zeros_like(psf_orw) outrow = np.arange( max(0, s + 1), rows - (1 + int(abs(pshift))) + max(0, s + 1)) psfsh_orw[:, outrow] = ( 1. - sfrac ) * psf_orw[:, outrow - s] + sfrac * psf_orw[:, outrow - s - 1] # pyfits.PrimaryHDU(psfsh_orw.astype('float32')).writeto('psfsh_'+tnum+'_orw.fits',clobber=True) wt_orw[~badbin_orw] = psfsh_orw[~badbin_orw] / var_orw[ ~badbin_orw] var_ow = (psfsh_orw * wt_orw * (~badbin_orw)).sum(axis=1) badbin_ow = (var_ow == 0) var_ow[~badbin_ow] = 1. / var_ow[~badbin_ow] # pyfits.PrimaryHDU(var_ow.astype('float32')).writeto('var_'+tnum+'_ow.fits',clobber=True) # pyfits.PrimaryHDU(target_orw.astype('float32')).writeto('target_'+tnum+'_orw.fits',clobber=True) # pyfits.PrimaryHDU(wt_orw.astype('float32')).writeto('wt_'+tnum+'_orw.fits',clobber=True) sci_ow = (target_orw * wt_orw).sum(axis=1) * var_ow badlim = 0.20 psfbadfrac_iow[i] = (psfsh_orw * badbin_orw.astype(int)).sum( axis=1) / psfsh_orw.sum(axis=1) badbin_ow |= (psfbadfrac_iow[i] > badlim) # cdebug = 39 # np.savetxt("xtrct"+str(cdebug)+"_"+tnum+".txt",np.vstack((psf_orw[:,:,cdebug],var_orw[:,:,cdebug], \ # wt_orw[:,:,cdebug],target_orw[:,:,cdebug])).reshape((4,2,-1)).transpose(1,0,2).reshape((8,-1)).T,fmt="%12.5e") # write O,E spectrum, prefix "s". VAR, BPM for each spectrum. y dim is virtual (length 1) # for consistency with other modes hduout = pyfits.PrimaryHDU(header=hdulist[0].header) hduout = pyfits.HDUList(hduout) header = hdulist['SCI'].header.copy() header.update('VAREXT', 2) header.update('BPMEXT', 3) header.update('CRVAL1', wedge_w[0] + wbin / 2.) 
header.update('CRVAL2', 0) header.update('CDELT1', wbin) header.update('CTYPE1', 'Angstroms') hduout.append( pyfits.ImageHDU(data=sci_ow.reshape((2, 1, wavs)), header=header, name='SCI')) header.update('SCIEXT', 1, 'Extension for Science Frame', before='VAREXT') hduout.append( pyfits.ImageHDU(data=var_ow.reshape((2, 1, wavs)), header=header, name='VAR')) hduout.append( pyfits.ImageHDU(data=badbin_ow.astype("uint8").reshape( (2, 1, wavs)), header=header, name='BPM')) hduout.writeto('e' + outfilelist[i], clobber=True, output_verify='warn') log.message('Output file ' + 'e' + outfilelist[i], with_header=False) # np.savetxt("psfbadfrac_iow.txt",psfbadfrac_iow.reshape((-1,wavs)).T,fmt="%8.5f") return
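# --------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): the optimal extraction used
# above follows Horne (PASP 1986). In minimal single-column form, with profile
# P normalized to sum 1, data D, variance V, and good-pixel mask M, as the
# code above computes it:
#   weights w = M*P/V,  flux F = sum(w*D)/sum(w*P),  Var(F) = 1/sum(M*P**2/V)

def _horne_extract_1d_sketch(data_r, var_r, psf_r, good_r):
    import numpy as np
    wt_r = np.zeros_like(data_r)
    wt_r[good_r] = psf_r[good_r] / var_r[good_r]    # w = P/V on good rows only
    norm = (psf_r * wt_r).sum()                     # sum(M*P**2/V)
    return (data_r * wt_r).sum() / norm, 1. / norm  # flux, flux variance
# --------------------------------------------------------------------------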
def specpolwavmap(infilelist, linelistlib="", automethod='Matchlines',logfile='salt.log',debug=False): obsdate=os.path.basename(infilelist[0])[7:15] with logging(logfile, debug) as log: # create the observation log obs_dict=obslog(infilelist) log.message('Pysalt Version: '+pysalt.verno, with_header=False) # eliminate inapplicable images for i in reversed(range(len(infilelist))): if int(obs_dict['BS-STATE'][i][1])!=2: del infilelist[i] obs_dict=obslog(infilelist) # Map out which arc goes with which image. Use arc in closest wavcal block of the config. # wavcal block: neither spectrograph config nor track changes, and no gap in data files infiles = len(infilelist) newtrk = 5. # new track when rotator changes by more (deg) trkrho_i = np.array(map(float,obs_dict['TRKRHO'])) trkno_i = np.zeros((infiles),dtype=int) trkno_i[1:] = ((np.abs(trkrho_i[1:]-trkrho_i[:-1]))>newtrk).cumsum() confno_i,confdatlist = configmap(infilelist) configs = len(confdatlist) imageno_i = np.array([int(os.path.basename(infilelist[i]).split('.')[0][-4:]) \ for i in range(infiles)]) filegrp_i = np.zeros((infiles),dtype=int) filegrp_i[1:] = ((imageno_i[1:]-imageno_i[:-1])>1).cumsum() isarc_i = np.array([(obs_dict['OBJECT'][i].upper().strip()=='ARC') for i in range(infiles)]) wavblk_i = np.zeros((infiles),dtype=int) wavblk_i[1:] = ((filegrp_i[1:] != filegrp_i[:-1]) \ | (trkno_i[1:] != trkno_i[:-1]) \ | (confno_i[1:] != confno_i[:-1])).cumsum() wavblks = wavblk_i.max() +1 arcs_c = (isarc_i[:,None] & (confno_i[:,None]==range(configs))).sum(axis=0) np.savetxt("wavblktbl.txt",np.vstack((trkrho_i,imageno_i,filegrp_i,trkno_i, \ confno_i,wavblk_i,isarc_i)).T,fmt="%7.2f "+6*"%3i ",header=" rho img grp trk conf wblk arc") for c in range(configs): # worst: no arc for config, remove images if arcs_c[c] == 0: lostimages = imageno_i[confno_i==c] log.message('No Arc for this configuration: ' \ +("Grating %s Grang %6.2f Artic %6.2f" % confdatlist[c]) \ +("\n Images: "+lostimages.shape[0]*"%i " % tuple(lostimages)), with_header=False) wavblk_i[confno_i==c] = -1 if arcs_c.sum() ==0: log.message("Cannot calibrate any images", with_header=False) exit() iarc_i = -np.zeros((infiles),dtype=int) for w in range(wavblks): blkimages = imageno_i[wavblk_i==w] if blkimages.shape[0]==0: continue iarc_I = np.where((wavblk_i==w) & (isarc_i))[0] if iarc_I.shape[0] >0: iarc = iarc_I[0] # best: arc is in wavblk, take first else: conf = confno_i[wavblk_i==w][0] # fallback: take closest arc of this config iarc_I = np.where((confno_i==conf) & (isarc_i))[0] blkimagepos = blkimages.mean() iarc = iarc_I[np.argmin(imageno_i[iarc_I] - blkimagepos)] iarc_i[wavblk_i==w] = iarc log.message(("\nFor images: "+blkimages.shape[0]*"%i " % tuple(blkimages)) \ +("\n Use Arc %5i" % imageno_i[iarc]), with_header=False) iarc_a = np.unique(iarc_i[iarc_i != -1]) arcs = iarc_a.shape[0] lam_m = np.loadtxt(datadir+"wollaston.txt",dtype=float,usecols=(0,)) rpix_om = np.loadtxt(datadir+"wollaston.txt",dtype=float,unpack=True,usecols=(1,2)) for a in range(arcs): iarc = iarc_a[a] conf = confno_i[iarc] grating,grang,artic = confdatlist[confno_i[iarc]] if len(linelistlib) ==0: linelistlib=datadir+"linelistlib.txt" if grating=="PG0300": linelistlib=datadir+"linelistlib_300.txt" with open(linelistlib) as fd: linelistdict = dict(line.strip().split(None, 1) for line in fd) # use arc to make first-guess wavecal from model cbin,rbin = np.array(obs_dict["CCDSUM"][iarc].split(" ")).astype(int) hduarc = pyfits.open(infilelist[iarc]) arc_rc = hduarc['SCI'].data rows,cols = arc_rc.shape lam_c = 
rssmodelwave(grating,grang,artic,cbin,cols) arc_r = arc_rc.sum(axis=1) # if debug: np.savetxt("arc_r_"+str(imageno_i[iarc]+".txt"),arc_r,fmt="%8.2f") # locate beamsplitter split point axisrow_o = ((2052 + interp1d(lam_m,rpix_om,kind='cubic',bounds_error=False) \ (lam_c[cols/2]))/rbin).astype(int) top = axisrow_o[1] + np.argmax(arc_r[axisrow_o[1]:] < 0.5*arc_r[axisrow_o[1]]) bot = axisrow_o[0] - np.argmax(arc_r[axisrow_o[0]::-1] < 0.5*arc_r[axisrow_o[0]]) splitrow = 0.5*(bot + top) offset = int(splitrow - rows/2) # how far split is from center of detector # split arc into o/e images padbins = (np.indices((rows,cols))[0]<offset) | (np.indices((rows,cols))[0]>rows+offset) arc_rc = np.roll(arc_rc,-offset,axis=0) arc_rc[padbins] = 0. arc_orc = arc_rc.reshape((2,rows/2,cols)) # for O,E arc straighten spectrum, find fov, identify for each, form (unstraightened) wavelength map lamp=obs_dict['LAMPID'][iarc].strip().replace(' ', '') if lamp == 'NONE': lamp='CuAr' hduarc[0].header.update('MASKTYP','LONGSLIT') del hduarc['VAR'] del hduarc['BPM'] lampfile=iraf.osfn("pysalt$data/linelists/"+linelistdict[lamp]) rpix_oc = interp1d(lam_m, rpix_om,kind ='cubic',bounds_error=False,fill_value=0.)(lam_c) drow_oc = (rpix_oc-rpix_oc[:,cols/2][:,None])/rbin log.message('\nARC: image '+str(imageno_i[iarc])+' GRATING '+grating\ +' GRANG '+("%8.3f" % grang)+' ARTIC '+("%8.3f" % artic)+' LAMP '+lamp, with_header=False) log.message(' Split Row: '+("%4i " % splitrow), with_header=False) wavmap_orc = np.zeros((2,rows/2,cols)) edgerow_od = np.zeros((2,2)) cofrows_o = np.zeros(2) legy_od = np.zeros((2,2)) guessfile=None for o in (0,1): axisrow_o[o] += -offset - o*rows/2 arc_yc = np.zeros((rows/2,cols),dtype='float32') for c in range(cols): shift(arc_orc[o,:,c],-drow_oc[o,c],arc_yc[:,c]) maxoverlaprows = 34/rbin # beam overlap for 4' longslit in NIR arc_yc[(0,rows/2-1)] = 0. arc_y = arc_yc.sum(axis=1) edgerow_od[o,0] = axisrow_o[o] - np.argmax(arc_y[axisrow_o[o]::-1] < 0.5*arc_y[axisrow_o[o]]) edgerow_od[o,1] = axisrow_o[o] + np.argmax(arc_y[axisrow_o[o]:] < 0.5*arc_y[axisrow_o[o]]) axisrow_o[o] = edgerow_od[o].mean() if np.abs(edgerow_od[o] - np.array([0,rows/2-1])).min() < maxoverlaprows: edgerow_od[o] += maxoverlaprows*np.array([+1,-1]) hduarc['SCI'].data = arc_yc order = 3 arcimage = "arc_"+str(imageno_i[iarc])+"_"+str(o)+".fits" dbfilename = "arcdb_"+str(imageno_i[iarc])+"_"+str(o)+".txt" if (not os.path.exists(dbfilename)): if guessfile is not None: guesstype = 'file' else: guessfile=dbfilename guesstype = 'rss' hduarc.writeto(arcimage,clobber=True) ystart = axisrow_o[o] specidentify(arcimage, lampfile, dbfilename, guesstype=guesstype, guessfile=guessfile, automethod=automethod, function='legendre', order=order, rstep=20, rstart=ystart, mdiff=20, thresh=3, niter=5, smooth=3, inter=True, clobber=True, logfile=logfile, verbose=True) if (not debug): os.remove(arcimage) # process dbfile legendre coefs within FOV into wavmap (_Y = line in dbfile) legy_Y = np.loadtxt(dbfilename,dtype=float,usecols=(0,),ndmin=1) dblegcof_lY = np.loadtxt(dbfilename,unpack=True,dtype=float,usecols=range(1,order+2),ndmin=2) # first convert to centered legendre coefficients to remove crosscoupling xcenter = cols/2. 
legcof_lY = np.zeros_like(dblegcof_lY) legcof_lY[2] = dblegcof_lY[2] + 5.*dblegcof_lY[3]*xcenter legcof_lY[3] = dblegcof_lY[3] legcof_lY[0] = 0.5*legcof_lY[2] + (dblegcof_lY[0]-dblegcof_lY[2]) + \ (dblegcof_lY[1]-1.5*dblegcof_lY[3])*xcenter + 1.5*dblegcof_lY[2]*xcenter**2 + \ 2.5*dblegcof_lY[3]*xcenter**3 legcof_lY[1] = 1.5*legcof_lY[3] + (dblegcof_lY[1]-1.5*dblegcof_lY[3]) + \ 3.*dblegcof_lY[2]*xcenter + 7.5*dblegcof_lY[3]*xcenter**2 # remove rows outside slit and outlier fits argYbad = np.where((legy_Y<edgerow_od[o,0]) | (legy_Y>edgerow_od[o,1]))[0] legy_Y = np.delete(legy_Y, argYbad,axis=0) legcof_lY = np.delete(legcof_lY, argYbad,axis=1) mediancof_l = np.median(legcof_lY,axis=1) rms_l = np.sqrt(np.median((legcof_lY - mediancof_l[:,None])**2,axis=1)) sigma_lY = np.abs((legcof_lY - mediancof_l[:,None]))/rms_l[:,None] argYbad = np.where((sigma_lY>4).any(axis=0))[0] legy_Y = np.delete(legy_Y, argYbad,axis=0) legcof_lY = np.delete(legcof_lY, argYbad,axis=1) cofrows_o[o] = legy_Y.shape[0] legy_od[o] = legy_Y.min(),legy_Y.max() if cofrows_o[o] < 5: # for future: if few lines in db, use model shifted to agree, for now just use mean legcof_l = legcof_lY.mean(axis=1) wavmap_yc = np.polynomial.legendre.legval(np.arange(cols),legcof_l) else: # smooth wavmap along rows by fitting L_0 to quadratic, others to linear fn of row ycenter = rows/4. Y_y = np.arange(-ycenter,ycenter) aa = np.vstack(((legy_Y-ycenter)**2,(legy_Y-ycenter),np.ones(cofrows_o[o]))).T polycofs = la.lstsq(aa,legcof_lY[0])[0] legcof_ly = np.zeros((order+1,rows/2)) legcof_ly[0] = np.polyval(polycofs,Y_y) for l in range(1,order+1): polycofs = la.lstsq(aa[:,1:],legcof_lY[l])[0] legcof_ly[l] = np.polyval(polycofs,Y_y) wavmap_yc = np.zeros((rows/2,cols)) for y in range(rows/2): wavmap_yc[y] = np.polynomial.legendre.legval(np.arange(-cols/2,cols/2),legcof_ly[:,y]) # put curvature back in, zero out areas beyond slit and wavelength range (will be flagged in bpm) if debug: np.savetxt("drow_wmap_oc.txt",drow_oc.T,fmt="%8.3f %8.3f") for c in range(cols): shift(wavmap_yc[:,c],drow_oc[o,c],wavmap_orc[o,:,c],order=1) isoffslit_rc = ((np.arange(rows/2)[:,None] < (edgerow_od[o,0]+(rpix_oc[o]-rpix_oc[o,cols/2])/rbin)[None,:]) \ | (np.arange(rows/2)[:,None] > (edgerow_od[o,1]+(rpix_oc[o]-rpix_oc[o,cols/2])/rbin)[None,:])) notwav_rc = (rpix_oc[o]==0.)[None,:] wavmap_orc[o,(isoffslit_rc | notwav_rc)] = 0. log.message('\n Wavl coeff rows: O %4i E %4i' % tuple(cofrows_o), with_header=False) log.message(' Bottom, top row: O %4i %4i E %4i %4i' \ % tuple(legy_od.flatten()), with_header=False) log.message('\n Slit axis row: O %4i E %4i' % tuple(axisrow_o), with_header=False) log.message(' Bottom, top row: O %4i %4i E %4i %4i \n' \ % tuple(edgerow_od.flatten()), with_header=False) # for images using this arc,save split data along third fits axis, # add wavmap extension, save as 'w' file hduwav = pyfits.ImageHDU(data=wavmap_orc.astype('float32'), header=hduarc['SCI'].header, name='WAV') for i in np.where(iarc_i==iarc_a[a])[0]: hdu = pyfits.open(infilelist[i]) image_rc = np.roll(hdu['SCI'].data,-offset,axis=0) image_rc[padbins] = 0. hdu['SCI'].data = image_rc.reshape((2,rows/2,cols)) var_rc = np.roll(hdu['VAR'].data,-offset,axis=0) var_rc[padbins] = 0. hdu['VAR'].data = var_rc.reshape((2,rows/2,cols)) bpm_rc = np.roll(hdu['BPM'].data,-offset,axis=0) bpm_rc[padbins] = 1 bpm_orc = bpm_rc.reshape((2,rows/2,cols)) bpm_orc[wavmap_orc==0.] 
= 1
                hdu['BPM'].data = bpm_orc
                hdu.append(hduwav)
                for f in ('SCI','VAR','BPM','WAV'):
                    hdu[f].header.update('CTYPE3','O,E')
                hdu.writeto('w'+infilelist[i], clobber=True)
                log.message('Output file '+'w'+infilelist[i], with_header=False)

    return
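# --------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): reading a 'w' file written
# above. SCI/VAR/BPM/WAV are each (2, rows/2, cols) with axis 0 = (O, E);
# the filename below is made up for illustration.
#
#   hdu = pyfits.open('wmxgbpP201401010001.fits')
#   wav_oyc = hdu['WAV'].data                  # 0. where off-slit or unmapped
#   good_oyc = (hdu['BPM'].data == 0) & (wav_oyc > 0.)
# --------------------------------------------------------------------------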
def list_configurations_old(infilelist, log):
    """For data observed prior to 2015
    """
    obs_dict = obslog(infilelist)

    # Map out which arc goes with which image.  Use arc in closest wavcal block of the config.
    # wavcal block: neither spectrograph config nor track changes, and no gap in data files
    infiles = len(infilelist)
    newtrk = 5.  # new track when rotator changes by more (deg)
    trkrho_i = np.array(map(float, obs_dict['TRKRHO']))
    trkno_i = np.zeros((infiles), dtype=int)
    trkno_i[1:] = ((np.abs(trkrho_i[1:] - trkrho_i[:-1])) > newtrk).cumsum()

    infiles = len(infilelist)
    grating_i = [obs_dict['GRATING'][i].strip() for i in range(infiles)]
    grang_i = np.array(map(float, obs_dict['GR-ANGLE']))
    artic_i = np.array(map(float, obs_dict['CAMANG']))
    configdat_i = [
        tuple((grating_i[i], grang_i[i], artic_i[i])) for i in range(infiles)
    ]
    confdatlist = list(
        set(configdat_i))  # list tuples of the unique configurations _c
    confno_i = np.array(
        [confdatlist.index(configdat_i[i]) for i in range(infiles)], dtype=int)
    configs = len(confdatlist)

    imageno_i = np.array([image_number(infilelist[i]) for i in range(infiles)])
    filegrp_i = np.zeros((infiles), dtype=int)
    filegrp_i[1:] = ((imageno_i[1:] - imageno_i[:-1]) > 1).cumsum()
    isarc_i = np.array([(obs_dict['OBJECT'][i].upper().strip() == 'ARC')
                        for i in range(infiles)])

    wavblk_i = np.zeros((infiles), dtype=int)
    wavblk_i[1:] = ((filegrp_i[1:] != filegrp_i[:-1]) \
                    | (trkno_i[1:] != trkno_i[:-1]) \
                    | (confno_i[1:] != confno_i[:-1])).cumsum()
    wavblks = wavblk_i.max() + 1

    arcs_c = (isarc_i[:, None] &
              (confno_i[:, None] == range(configs))).sum(axis=0)
    np.savetxt("wavblktbl.txt",np.vstack((trkrho_i,imageno_i,filegrp_i,trkno_i, \
                confno_i,wavblk_i,isarc_i)).T,fmt="%7.2f "+6*"%3i ",header=" rho img grp trk conf wblk arc")

    for c in range(configs):  # worst: no arc for config, remove images
        if arcs_c[c] == 0:
            lostimages = imageno_i[confno_i == c]
            log.message('No Arc for this configuration: ' \
                +("Grating %s Grang %6.2f Artic %6.2f" % confdatlist[c]) \
                +("\n Images: "+lostimages.shape[0]*"%i " % tuple(lostimages)), with_header=False)
            wavblk_i[confno_i == c] = -1
    if arcs_c.sum() == 0:
        log.message("Cannot calibrate any images", with_header=False)
        exit()

    iarc_i = -np.ones((infiles), dtype=int)  # -1 marks images with no usable arc
    for w in range(wavblks):
        blkimages = imageno_i[wavblk_i == w]
        if blkimages.shape[0] == 0:
            continue
        iarc_I = np.where((wavblk_i == w) & (isarc_i))[0]
        if iarc_I.shape[0] > 0:
            iarc = iarc_I[0]  # best: arc is in wavblk, take first
        else:
            conf = confno_i[wavblk_i == w][0]  # fallback: take closest arc of this config
            iarc_I = np.where((confno_i == conf) & (isarc_i))[0]
            blkimagepos = blkimages.mean()
            iarc = iarc_I[np.argmin(np.abs(imageno_i[iarc_I] - blkimagepos))]
        iarc_i[wavblk_i == w] = iarc
        log.message(("\nFor images: "+blkimages.shape[0]*"%i " % tuple(blkimages)) \
            +("\n Use Arc %5i" % imageno_i[iarc]), with_header=False)
    iarc_a = np.unique(iarc_i[iarc_i != -1])
    return iarc_a, iarc_i, confno_i, confdatlist
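# --------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): the wavcal-block bookkeeping
# above groups consecutive frames by a cumulative sum over "something changed"
# flags. A toy example of the pattern:

def _change_point_grouping_sketch():
    import numpy as np
    conf = np.array([0, 0, 1, 1, 1, 0])    # toy config ids per frame
    trk = np.array([0, 0, 0, 1, 1, 1])     # toy track ids per frame
    blk = np.zeros(conf.size, dtype=int)
    blk[1:] = ((conf[1:] != conf[:-1]) | (trk[1:] != trk[:-1])).cumsum()
    return blk                             # -> [0 0 1 2 2 3]
# --------------------------------------------------------------------------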
def specpollampextract(infilelist, logfile='salt.log'): obsdate=os.path.basename(infilelist[0])[8:16] with logging(logfile, debug) as log: log.message('Extraction of Lamp Images' , with_header=False) obsdict=obslog(infilelist) hdu0 = pyfits.open(infilelist[0]) rows,cols = hdu0['SCI'].data.shape[1:3] cbin,rbin = np.array(obsdict["CCDSUM"][0].split(" ")).astype(int) slitid = obsdict["MASKID"][0] lampid = obsdict["LAMPID"][0].strip().upper() lam_c = hdu0['WAV'].data[0,rows/2] files = len(infilelist) outfilelist = infilelist # sum spectra to find target count = 0 for i in range(files): badbin_orc = pyfits.open(outfilelist[i])['BPM'].data.astype(bool) if count == 0: count_orc = (~badbin_orc).astype(int) image_orc = pyfits.open(outfilelist[i])['SCI'].data*count_orc var_orc = pyfits.open(outfilelist[i])['VAR'].data else: count_orc += (~badbin_orc).astype(int) image_orc += pyfits.open(outfilelist[i])['SCI'].data*(~badbin_orc) var_orc += pyfits.open(outfilelist[i])['VAR'].data count += 1 if count ==0: print 'No valid images' exit() image_orc[count_orc>0] /= count_orc[count_orc>0] badbin_orc = (count_orc==0) | (image_orc==0) okbinpol_orc = (count_orc == count) & (image_orc != 0) # conservative bpm for pol extraction var_orc[count_orc>0] /= count_orc[count_orc>0]**2 wav_orc = pyfits.open(outfilelist[0])['WAV'].data # pyfits.PrimaryHDU(image_orc.astype('float32')).writeto('lampsum_orc.fits',clobber=True) lam_m = np.loadtxt(datadir+"wollaston.txt",dtype=float,usecols=(0,)) rpix_om = np.loadtxt(datadir+"wollaston.txt",dtype=float,unpack=True,usecols=(1,2)) # trace spectrum, compute spatial profile profile_orc = np.zeros_like(image_orc) drow_oc = np.zeros((2,cols)) expectrow_oc = np.zeros((2,cols),dtype='float32') maxrow_oc = np.zeros((2,cols),dtype=int) maxval_oc = np.zeros((2,cols),dtype='float32') cross_orC = np.zeros((2,rows,2)) col_cr,row_cr = np.indices(image_orc[0].T.shape) # sample cross-dispersion at center and on right (_C) to get offset and tilt Collist = [cols/2,0.8*cols] for C in (0,1): cross_orC[:,:,C] = np.sum(image_orc[:,:,Collist[C]-cols/16:Collist[C]+cols/16],axis=2) drow_oC = np.zeros((2,2)) trow_o = np.zeros((2),dtype='int') okprof_oc = np.zeros((2,cols),dtype='bool') okprof_orc = np.zeros((2,rows,cols),dtype='bool') norm_orc = np.zeros((2,rows,cols)) sig_c = np.zeros((cols)) sigmin = 20.; drowmax = 8. 
# find spectrum offset and tilt roughly from max of two cross-dispersion samples for o in (0,1): expectrow_oc[o] = (1-o)*rows + interp1d(lam_m,rpix_om[o],kind='cubic')(lam_c)/rbin for C in (0,1): crossmaxval = np.max(cross_orC[o, \ expectrow_oc[o,Collist[C]]-100/rbin:expectrow_oc[o,Collist[C]]+100/rbin,C]) drow_oC[o,C] = np.where(cross_orC[o,:,C]==crossmaxval)[0][0] - expectrow_oc[o,Collist[C]] drow_o = drow_oC[:,0] rowtilt = (drow_oC[:,1]-drow_oC[:,0]).mean()/(Collist[1]-Collist[0]) expectrow_oc += drow_o[:,None] + rowtilt*np.arange(-cols/2,cols/2) # get trace by finding max in narrow curved aperture and smoothing it for o in (0,1): row_c = expectrow_oc[o].astype(int) aperture_cr = ((row_cr-row_c[:,None])>=-20/rbin) & ((row_cr-row_c[:,None])<=20/rbin) maxrow_oc[o] = np.argmax(image_orc[o].T[aperture_cr].reshape((cols,-1)),axis=1) + row_c - 20/rbin maxval_oc[o] = image_orc[o,maxrow_oc[o]].diagonal() trow_o[o] = maxrow_oc[o,cols/2] # mark as bad where signal drops too low or position is off median_c = np.median(image_orc[o].T[aperture_cr].reshape((cols,-1)),axis=1) var_c = np.mean(var_orc[o].T[aperture_cr].reshape((cols,-1)),axis=1) sig_c[var_c>0] = (maxval_oc[o] - median_c)[var_c>0]/np.sqrt(var_c[var_c>0]) drow1_c = maxrow_oc[o] -expectrow_oc[o] okprof_oc[o] = (sig_c > sigmin) & (abs(drow1_c - np.median(drow1_c)) < drowmax) # divide out spectrum (allowing for spectral curvature and tilt) to make spatial profile drow2_c = np.polyval(np.polyfit(np.where(okprof_oc[o])[0],drow1_c[okprof_oc[o]],3),(range(cols))) okprof_orc[o] = (np.abs(drow2_c - drow1_c) < 3) & okprof_oc[o][None,:] drow_oc[o] = -(expectrow_oc[o] - expectrow_oc[o,cols/2] + drow2_c -drow2_c[cols/2]) for r in range(rows): norm_orc[o,r] = interp1d(wav_orc[o,trow_o[o],okprof_oc[o]],maxval_oc[o,okprof_oc[o]], \ bounds_error = False, fill_value=0.)(wav_orc[o,r]) log.message('Image tilt: %8.1f arcmin' % (60.*np.degrees(rowtilt*rbin/cbin)), with_header=False) log.message('Target offset: O %4i E %4i' % tuple(drow_o), with_header=False) log.message('Target center row: O %4i E %4i' % tuple(trow_o), with_header=False) okprof_orc &= (norm_orc != 0.) 
profile_orc[okprof_orc] = image_orc[okprof_orc]/norm_orc[okprof_orc] var_orc[okprof_orc] = var_orc[okprof_orc]/norm_orc[okprof_orc]**2 # pyfits.PrimaryHDU(norm_rc.astype('float32')).writeto('norm_rc.fits',clobber=True) # pyfits.PrimaryHDU(okprof_oc.astype('uint8')).writeto('okprof_oc.fits',clobber=True) okprof_c = okprof_oc.all(axis=0) # Sample the normalized row profile at 5 places (_C) Cols = 5 dcols = 64/cbin Collist = [np.argmax(okprof_c)+dcols, 0, cols/2, 0, cols-np.argmax(okprof_c[::-1])-dcols] for C in (1,3): Collist[C] = 0.5*(Collist[C-1] + Collist[C+1]) Collist = map(int,Collist) profile_Cor = np.zeros((Cols,2,rows)) # Using profile at center, find, mask off fov edge, including possible beam overlap edgerow_do = np.zeros((2,2),dtype=int) badrow_or = np.zeros((2,rows),dtype=bool) axisrow_o = np.zeros(2) maxoverlaprows = 34/rbin profile_Cor[Cols/2] = np.median(profile_orc[:,:,cols/2-dcols:cols/2+dcols],axis=2) for d,o in np.ndindex(2,2): # _d = (0,1) = (bottom,top) row_y = np.where((d==1) ^ (np.arange(rows) < trow_o[o]))[0][::2*d-1] edgeval = np.median(profile_Cor[Cols/2,o,row_y],axis=-1) hist,bin = np.histogram(profile_Cor[Cols/2,o,row_y],bins=32,range=(0,edgeval)) histarg = 32 - np.argmax(hist[::-1]<3) # edge: <3 in hist in decreasing dirn edgeval = bin[histarg] edgerow_do[d,o] = trow_o[o] + (2*d-1)*(np.argmax(profile_Cor[Cols/2,o,row_y] <= edgeval)) axisrow_o[o] += edgerow_do[d,o] edgerow_do[d,o] = np.clip(edgerow_do[d,o],maxoverlaprows,rows-maxoverlaprows) badrow_or[o] |= ((d==1) ^ (np.arange(rows) < (edgerow_do[d,o]+d))) axisrow_o /= 2. # Row profile sample, now background subtracted profile_orc[okprof_orc] = ((image_orc-np.median(image_orc,axis=1)[:,None,:])[okprof_orc]) \ /(norm_orc-np.median(image_orc,axis=1)[:,None,:])[okprof_orc] # pyfits.PrimaryHDU(profile_orc.astype('float32')).writeto('profile_orc.fits',clobber=True) for C in range(Cols): okcol_c = (profile_orc.sum(axis=0).sum(axis=0)>0) & \ (np.abs(np.arange(cols)-Collist[C])<dcols) Collist[C] = np.where(okcol_c)[0].mean() profile_Cor[C] = np.median(profile_orc[:,:,okcol_c],axis=2) # print 5*"%7.1f " % tuple(Collist) # pyfits.PrimaryHDU(okprof_orc.astype('uint8')).writeto('okprof_orc.fits',clobber=True) np.savetxt("profile_oCr.txt",profile_Cor.transpose((1,0,2)).reshape((2*Cols,-1)).T,fmt="%10.6f") # find edge of target slit, and neighboring slits, if multiple slits # background picked small enough to miss neighbors in all samples, but matched E and O isneighbor_d = np.zeros((2),dtype='bool') edgeoff_doC = np.zeros((2,2,Cols)) for o in (0,1): plim = 0.35 # slit finder bkgsafe = 0.90 # avoiding next slit for C in range(Cols): leftrow_s = np.flatnonzero((profile_Cor[C,o,:-1] < plim) & (profile_Cor[C,o,1:] > plim)) rightrow_s = np.flatnonzero((profile_Cor[C,o,leftrow_s[0]:-1] > plim) \ & (profile_Cor[C,o,leftrow_s[0]+1:] < plim)) + leftrow_s[0] slits = rightrow_s.shape[0] # eliminate spikes: slitrow_s = 0.5*(rightrow_s + leftrow_s[:slits])[(rightrow_s-leftrow_s[:slits]) > 2] slits = slitrow_s.shape[0] targetslit = np.where(abs(maxrow_oc[o,Collist[C]] - slitrow_s) < 6)[0][0] if targetslit > 0: edgeoff_doC[0,o,C] = maxrow_oc[o,Collist[C]] - slitrow_s[targetslit-1:targetslit+1].mean() isneighbor_d[0] |= True if targetslit < slits-1: edgeoff_doC[1,o,C] = slitrow_s[targetslit:targetslit+2].mean() - maxrow_oc[o,Collist[C]] isneighbor_d[1] |= True for d in (0,1): if isneighbor_d[d]: edgerow_do[d] = trow_o + bkgsafe*(2*d-1)*edgeoff_doC[d].min() edgerow_doc = (edgerow_do[:,:,None] - drow_oc[None,:,:]).astype(int) bkgrows_do = 
((trow_o - edgerow_do)/2.).astype(int) bkgrow_doc = edgerow_doc + bkgrows_do[:,:,None]/2 isbkg_dorc = (((np.arange(rows)[:,None] - edgerow_doc[:,:,None,:]) * \ (np.arange(rows)[:,None] - edgerow_doc[:,:,None,:] - bkgrows_do[:,:,None,None])) < 0) istarg_orc = ((np.arange(rows)[:,None] - edgerow_doc[:,:,None,:]).prod(axis=0) < 0) istarg_orc &= ~isbkg_dorc.any(axis=0) okbinpol_orc &= okprof_oc[:,None,:] # pyfits.PrimaryHDU(image_orc*(isbkg_dorc.sum(axis=0)).astype('float32')).writeto('lampbkg_orc.fits',clobber=True) # pyfits.PrimaryHDU(istarg_orc.astype('uint8')).writeto('istarg_orc.fits',clobber=True) log.message('Bottom, top row: O %4i %4i E %4i %4i \n' \ % tuple(edgerow_do.T.flatten()), with_header=False) # background-subtract and extract spectra # set up scrunch table and badpixels in wavelength space wbin = wav_orc[0,rows/2,cols/2]-wav_orc[0,rows/2,cols/2-1] wbin = float(int(wbin/0.75)) wmin,wmax = wav_orc.min(axis=2).max(),wav_orc.max(axis=2).min() wedgemin = wbin*int(wmin/wbin+0.5) + wbin/2. wedgemax = wbin*int(wmax/wbin-0.5) + wbin/2. wedge_w = np.arange(wedgemin,wedgemax+wbin,wbin) wavs = wedge_w.shape[0] - 1 badbin_orc = ~okbinpol_orc binedge_orw = np.zeros((2,rows,wavs+1)) badbin_orw = np.ones((2,rows,wavs),dtype=bool); nottarg_orw = np.ones_like(badbin_orw) for o in (0,1): for r in range(edgerow_doc[0,o].min(),edgerow_doc[1,o].max()): binedge_orw[o,r] = interp1d(wav_orc[o,r],np.arange(cols))(wedge_w) badbin_orw[o,r] = (scrunch1d(badbin_orc[o,r].astype(int),binedge_orw[o,r]) > 0.) nottarg_orw[o,r] = (scrunch1d((~istarg_orc[o,r]).astype(int),binedge_orw[o,r]) > 0.) okbin_orw = ~badbin_orw istarg_orw = ~nottarg_orw # wavelengths with bad pixels in targ area are flagged as bad badcol_ow = (istarg_orw & ~okbin_orw).any(axis=1) for o in (0,1): okbin_orw[o] &= ~badcol_ow[o] for i in range(files): imageno = int(os.path.basename(outfilelist[i]).split('.')[0][-4:]) hdulist = pyfits.open(outfilelist[i]) sci_orc = hdulist['sci'].data var_orc = hdulist['var'].data # make background continuum image, linearly interpolated in row bkg_doc = np.zeros((2,2,cols)) for d,o in np.ndindex(2,2): bkg_doc[d,o] = np.median(sci_orc[o].T[isbkg_dorc[d,o].T].reshape((cols,-1)),axis=1) bkgslp_oc = (bkg_doc[1] - bkg_doc[0])/(bkgrow_doc[1] - bkgrow_doc[0]) bkgbase_oc = (bkg_doc[1] + bkg_doc[0])/2. - bkgslp_oc*(bkgrow_doc[1] + bkgrow_doc[0])/2. bkg_orc = bkgbase_oc[:,None,:] + bkgslp_oc[:,None,:]*np.arange(rows)[:,None] target_orc = sci_orc-bkg_orc # np.savetxt('bkg.txt',np.vstack((bkg_doc.reshape((4,-1)),bkgslp_oc,bkgbase_oc)).T,fmt="%11.4f") # pyfits.PrimaryHDU(bkg_orc.astype('float32')).writeto('bkg_orc_'+str(imageno)+'.fits',clobber=True) # pyfits.PrimaryHDU(target_orc.astype('float32')).writeto('target_orc_'+str(imageno)+'.fits',clobber=True) # extract spectrum target_orw = np.zeros((2,rows,wavs)); var_orw = np.zeros_like(target_orw) for o in (0,1): for r in range(edgerow_doc[0,o].min(),edgerow_doc[1,o].max()): target_orw[o,r] = scrunch1d(target_orc[o,r],binedge_orw[o,r]) var_orw[o,r] = scrunch1d(var_orc[o,r],binedge_orw[o,r]) # columns with negative extracted intensity are marked as bad sci_ow = (target_orw*okbin_orw).sum(axis=1) # pyfits.PrimaryHDU((target_orw*okbin_orw).astype('float32')).writeto('sci_orw.fits',clobber=True) var_ow = (var_orw*okbin_orw).sum(axis=1) okbin_ow = (okbin_orw.any(axis=1) & (sci_ow > 0.)) bpm_ow = (~okbin_ow).astype('uint8') # write O,E spectrum, prefix "s". VAR, BPM for each spectrum. 
            # y dim is virtual (length 1), for consistency with other modes
            hduout = pyfits.PrimaryHDU(header=hdulist[0].header)
            hduout = pyfits.HDUList(hduout)
            hduout[0].header.update('OBJECT',lampid)
            header = hdulist['SCI'].header.copy()
            header.update('VAREXT',2)
            header.update('BPMEXT',3)
            header.update('CRVAL1',wedge_w[0]+wbin/2.)
            header.update('CRVAL2',0)
            header.update('CDELT1',wbin)
            header.update('CTYPE1','Angstroms')
            hduout.append(pyfits.ImageHDU(data=sci_ow.reshape((2,1,wavs)).astype('float32'),
                                          header=header, name='SCI'))
            header.update('SCIEXT',1,'Extension for Science Frame',before='VAREXT')
            hduout.append(pyfits.ImageHDU(data=var_ow.reshape((2,1,wavs)).astype('float32'),
                                          header=header, name='VAR'))
            hduout.append(pyfits.ImageHDU(data=bpm_ow.reshape((2,1,wavs)),
                                          header=header, name='BPM'))
            hduout.writeto('e'+outfilelist[i], clobber=True, output_verify='warn')
            log.message('Output file '+'e'+outfilelist[i], with_header=False)

    return
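# --------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): scrunch1d, used throughout
# for wavelength rebinning, resamples a 1-D array onto fractional bin edges
# while conserving flux. A minimal implementation consistent with that usage
# (the real routine may differ in detail):

def _scrunch1d_sketch(a, binedge):
    import numpy as np
    cum = np.concatenate(([0.], np.cumsum(a)))       # running integral of a
    x = np.clip(binedge, 0., a.size)                 # edges in input pixels
    cumatedge = np.interp(x, np.arange(a.size + 1.), cum)
    return np.diff(cumatedge)                        # flux in each output bin
# --------------------------------------------------------------------------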
def specred(infile_list, propcode=None, inter=True, guessfile=None,
            automethod='Matchlines', preprocess=False):

    #set up the files
    infiles = ','.join(['%s' % x for x in infile_list])
    obsdate = os.path.basename(infile_list[0])[7:15]

    #set up some files that will be needed
    logfile = 'spec'+obsdate+'.log'
    dbfile = 'spec%s.db' % obsdate
    sdbfile = 'spec_straight_%s.db' % obsdate
    straight_function = 'legendre'
    straight_order = 2
    dcoef = [0.50, 1.0, 0.0]

    #create the observation log
    obs_dict = obslog(infile_list)

    for i in range(len(infile_list)):
        print infile_list[i], obs_dict['OBJECT'][i].upper().strip(), obs_dict['PROPID'][i].upper().strip()
        if obs_dict['OBJECT'][i].upper().strip() == 'ARC' and \
           (propcode is None or obs_dict['PROPID'][i].upper().strip() == propcode):
            lamp = obs_dict['LAMPID'][i].strip().replace(' ', '')
            arcimage = os.path.basename(infile_list[i])
            if lamp == 'NONE':
                lamp = 'CuAr'
            lampfile = iraf.osfn("pysalt$data/linelists/%s.salt" % lamp)
            #lampfile='/Users/crawford/research/kepler/Xe.salt'

            #straighten the arc
            #specarcstraighten(arcimage, sdbfile, function=straight_function, order=straight_order, rstep=20,
            #                  rstart='middlerow', nrows=1, dcoef=dcoef, ndstep=10,
            #                  startext=0, clobber=False, logfile='salt.log', verbose=True)

            #rectify it
            #specrectify(arcimage, outimages='', outpref='s', solfile=sdbfile, caltype='line',
            #            function=straight_function, order=straight_order, inttype='interp', w1=None, w2=None, dw=None, nw=None,
            #            nearest=True, blank=0.0, clobber=True, logfile=logfile, verbose=True)

            #identify the lines
            if guessfile is None:
                guesstype = 'rss'
                guessfile = None
            else:
                guesstype = 'file'
            specidentify(arcimage, lampfile, dbfile, guesstype=guesstype,
                         guessfile=guessfile, automethod=automethod, function='legendre', order=3,
                         rstep=100, rstart='middlerow', mdiff=20, thresh=5, niter=5, smooth=3,
                         inter=inter, clobber=True, preprocess=preprocess, logfile=logfile,
                         verbose=True)

            #apply the final rectification
            specrectify(arcimage, outimages='', outpref='x', solfile=dbfile, caltype='line',
                        function='legendre', order=3, inttype='interp', w1=None, w2=None, dw=None, nw=None,
                        blank=0.0, clobber=True, logfile=logfile, verbose=True)

    objimages = ''
    spec_list = []
    for i in range(len(infile_list)):
        if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS') and \
           (propcode is None or obs_dict['PROPID'][i].upper().strip() == propcode):
            img = infile_list[i]
            # rectify it
            #specrectify(img, outimages='', outpref='s', solfile=sdbfile, caltype='line',
            #            function='legendre', order=2, inttype='interp', w1=None, w2=None, dw=None, nw=None,
            #            blank=0.0, clobber=True, logfile=logfile, verbose=True)
            specrectify(img, outimages='', outpref='x', solfile=dbfile, caltype='line',
                        function='legendre', order=3, inttype='interp', w1=None, w2=None, dw=None, nw=None,
                        blank=0.0, nearest=True, clobber=True, logfile=logfile, verbose=True)
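# --------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): a typical interactive call
# of specred() on mosaiced images (glob pattern and propcode are made up).
#
#   infile_list = sorted(glob.glob('mxgbpP*.fits'))
#   specred(infile_list, propcode='2015-1-SCI-001', inter=True)
# --------------------------------------------------------------------------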
def specpolwavmap(infilelist, linelistlib="", inter=True, automethod='Matchlines',logfile='salt.log'): obsdate=os.path.basename(infilelist[0])[7:15] with logging(logfile, debug) as log: # create the observation log obs_dict=obslog(infilelist) log.message('Pysalt Version: '+pysalt.verno, with_header=False) if len(linelistlib) ==0: linelistlib=datadir+"linelistlib.txt" with open(linelistlib) as fd: linelistdict = dict(line.strip().split(None, 1) for line in fd) # eliminate inapplicable images for i in range(len(infilelist)): if int(obs_dict['BS-STATE'][i][1])!=2: del infilelist[i] obs_dict=obslog(infilelist) # Map out which arc goes with which image. Use arc in closest wavcal block of the config. # wavcal block: neither spectrograph config nor track changes, and no gap in data files infiles = len(infilelist) newtrk = 5. # new track when rotator changes by more (deg) trkrho_i = np.array(map(float,obs_dict['TRKRHO'])) trkno_i = np.zeros((infiles),dtype=int) trkno_i[1:] = ((np.abs(trkrho_i[1:]-trkrho_i[:-1]))>newtrk).cumsum() confno_i,confdatlist = configmap(infilelist) configs = len(confdatlist) imageno_i = np.array([int(os.path.basename(infilelist[i]).split('.')[0][-4:]) \ for i in range(infiles)]) filegrp_i = np.zeros((infiles),dtype=int) filegrp_i[1:] = ((imageno_i[1:]-imageno_i[:-1])>1).cumsum() isarc_i = np.array([(obs_dict['OBJECT'][i].upper().strip()=='ARC') for i in range(infiles)]) wavblk_i = np.zeros((infiles),dtype=int) wavblk_i[1:] = ((filegrp_i[1:] != filegrp_i[:-1]) \ | (trkno_i[1:] != trkno_i[:-1]) \ | (confno_i[1:] != confno_i[:-1])).cumsum() wavblks = wavblk_i.max() +1 arcs_c = (isarc_i[:,None] & (confno_i[:,None]==range(configs))).sum(axis=0) np.savetxt("wavblktbl.txt",np.vstack((trkrho_i,imageno_i,filegrp_i,trkno_i, \ confno_i,wavblk_i,isarc_i)).T,fmt="%7.2f "+6*"%3i ",header=" rho img grp trk conf wblk arc") for c in range(configs): # worst: no arc for config, remove images if arcs_c[c] == 0: lostimages = imageno_i[confno_i==c] log.message('No Arc for this configuration: ' \ +("Grating %s Grang %6.2f Artic %6.2f" % confdatlist[c]) \ +("\n Images: "+lostimages.shape[0]*"%i " % tuple(lostimages)), with_header=False) wavblk_i[confno_i==c] = -1 if arcs_c.sum() ==0: log.message("Cannot calibrate any images", with_header=False) exit() iarc_i = -np.zeros((infiles),dtype=int) for w in range(wavblks): blkimages = imageno_i[wavblk_i==w] if blkimages.shape[0]==0: continue iarc_I = np.where((wavblk_i==w) & (isarc_i))[0] if iarc_I.shape[0] >0: iarc = iarc_I[0] # best: arc is in wavblk, take first else: conf = confno_i[wavblk_i==w][0] # fallback: take closest arc of this config iarc_I = np.where((confno_i==conf) & (isarc_i))[0] blkimagepos = blkimages.mean() iarc = iarc_I[np.argmin(imageno_i[iarc_I] - blkimagepos)] iarc_i[wavblk_i==w] = iarc log.message(("For images: "+blkimages.shape[0]*"%i " % tuple(blkimages)) \ +("\n Use Arc %5i" % imageno_i[iarc]), with_header=False) iarc_a = np.unique(iarc_i[iarc_i != -1]) arcs = iarc_a.shape[0] lam_m = np.loadtxt(datadir+"wollaston.txt",dtype=float,usecols=(0,)) rpix_om = np.loadtxt(datadir+"wollaston.txt",dtype=float,unpack=True,usecols=(1,2)) for a in range(arcs): iarc = iarc_a[a] conf = confno_i[iarc] # use arc to make first-guess wavecal from model, locate beamsplitter split point cbin,rbin = np.array(obs_dict["CCDSUM"][iarc].split(" ")).astype(int) grating,grang,artic = confdatlist[confno_i[iarc]] hduarc = pyfits.open(infilelist[iarc]) arc_rc = hduarc['SCI'].data rows,cols = arc_rc.shape lam_c = rssmodelwave(grating,grang,artic,cbin,cols) 
arc_r = arc_rc.sum(axis=1) expectrow_o = ((2052 + interp1d(lam_m,rpix_om,kind='cubic') \ (lam_c[cols/2-cols/16:cols/2+cols/16])).mean(axis=1)/rbin).astype(int) foundrow_o = np.zeros((2),dtype=int) for o in (0,1): foundrow_o[o] = expectrow_o[o]-100/rbin \ + np.argmax(arc_r[expectrow_o[o]-100/rbin:expectrow_o[o]+100/rbin]) arcsignal = arc_r[foundrow_o[o]] topedge = foundrow_o[o] + np.argmax(arc_r[foundrow_o[o]:] < 0.75*arcsignal) botedge = foundrow_o[o] - np.argmax(arc_r[foundrow_o[o]::-1] < 0.75*arcsignal) foundrow_o[o] = (botedge+topedge)/2. splitrow = foundrow_o.mean() offset = int(splitrow - rows/2) # split arc into o/e images padbins = (np.indices((rows,cols))[0]<offset) | (np.indices((rows,cols))[0]>rows+offset) arc_rc = np.roll(arc_rc,-offset,axis=0) arc_rc[padbins] = 0. arc_orc = arc_rc.reshape((2,rows/2,cols)) # for O,E arc straighten spectrum, identify for each, form (unstraightened) wavelength map lamp=obs_dict['LAMPID'][iarc].strip().replace(' ', '') if lamp == 'NONE': lamp='CuAr' hduarc[0].header.update('MASKTYP','LONGSLIT') hduarc[0].header.update('MASKID','PL0100N001') del hduarc['VAR'] del hduarc['BPM'] # lampfile=iraf.osfn("pysalt$data/linelists/%s.wav" % lamp) zsalt version: not good lampfile=iraf.osfn("pysalt$data/linelists/"+linelistdict[lamp]) rpix_oc = interp1d(lam_m, rpix_om,kind ='cubic')(lam_c) log.message('\nARC: image '+str(imageno_i[iarc])+' GRATING '+grating\ +' GRANG '+("%8.3f" % grang)+' ARTIC '+("%8.3f" % artic)+' LAMP '+lamp, with_header=False) log.message(' Split Row: '+("%4i " % splitrow), with_header=False) wavmap_orc = np.zeros_like(arc_orc) for o in (0,1): foundrow_o[o] += -offset - o*rows/2 arc_Rc = np.zeros((rows/2,cols),dtype='float32') for c in range(cols): shift(arc_orc[o,:,c], \ -(rpix_oc[o,c]-rpix_oc[o,cols/2])/rbin,arc_Rc[:,c]) hduarc['SCI'].data = arc_Rc arcimage = "arc_"+str(imageno_i[iarc])+"_"+str(o)+".fits" dbfilename = "arcdb_"+str(imageno_i[iarc])+"_"+str(o)+".txt" order = 3 if os.path.isfile(dbfilename): # guessfile = "guess_"+str(o)+".txt" # os.rename(dbfilename,guessfile) guesstype = 'file' else: hduarc.writeto(arcimage,clobber=True) guesstype = 'rss' guessfile = '' Rstart = foundrow_o[o] specidentify(arcimage, lampfile, dbfilename, guesstype=guesstype, guessfile=guessfile, automethod=automethod, function='legendre', order=order, rstep=20, rstart=Rstart, mdiff=20, thresh=3, niter=5, smooth=3, inter=inter, clobber=True, logfile=logfile, verbose=True) wavlegR_y = np.loadtxt(dbfilename,dtype=float,usecols=(0,),ndmin=1) wavlegcof_ly = np.loadtxt(dbfilename,unpack=True,dtype=float,usecols=range(1,order+2),ndmin=2) if wavlegR_y.min()<0: wavlegcof_ly = np.delete(wavlegcof_ly,np.where(wavlegR_y<0)[0],axis=1) wavlegR_y = np.delete(wavlegR_y,np.where(wavlegR_y<0)[0],axis=0) wavmap_yc = np.polynomial.legendre.legval(np.arange(cols),wavlegcof_ly) mediancof_l = np.median(wavlegcof_ly,axis=1) rms_l = np.sqrt(np.median((wavlegcof_ly - mediancof_l[:,None])**2,axis=1)) sigma_ly = (wavlegcof_ly - mediancof_l[:,None])/rms_l[:,None] usey = (sigma_ly[0]<3) & (sigma_ly[1]<3) wavmap_Rc = np.zeros((rows/2,cols),dtype='float32') R_y = wavlegR_y[usey] if usey.shape[0] < 5: # for future: if few lines in db, use model shifted to agree, for now just use first wavmap_orc[o] = wavmap_yc[0] else: if usey.shape[0] > 9: aa = np.vstack((R_y**3,R_y**2,R_y,np.ones_like(R_y))).T else: aa = np.vstack((R_y**2,R_y,np.ones_like(R_y))).T for c in range(cols): polycofs = la.lstsq(aa,wavmap_yc[usey,c])[0] wavmap_orc[o,:,c] \ = 
np.polyval(polycofs,range(rows/2)+(rpix_oc[o,c]-rpix_oc[o,cols/2])/rbin)
#           os.remove(arcimage)

            # for images using this arc, save split data along third fits axis,
            # add wavmap extension, save as 'w' file
            hduwav = pyfits.ImageHDU(data=wavmap_orc, header=hduarc['SCI'].header, name='WAV')
            for i in np.where(iarc_i==iarc_a[a])[0]:
                hdu = pyfits.open(infilelist[i])
                image_rc = np.roll(hdu['SCI'].data,-offset,axis=0)
                image_rc[padbins] = 0.
                hdu['SCI'].data = image_rc.reshape((2,rows/2,cols))
                var_rc = np.roll(hdu['VAR'].data,-offset,axis=0)
                var_rc[padbins] = 0.
                hdu['VAR'].data = var_rc.reshape((2,rows/2,cols))
                bpm_rc = np.roll(hdu['BPM'].data,-offset,axis=0)
                bpm_rc[padbins] = 1
                hdu['BPM'].data = bpm_rc.reshape((2,rows/2,cols))
                hdu.append(hduwav)
                for f in ('SCI','VAR','BPM','WAV'):
                    hdu[f].header.update('CTYPE3','O,E')
                hdu.writeto('w'+infilelist[i], clobber=True)
                log.message('Output file '+'w'+infilelist[i], with_header=False)

    return
def imred(rawdir, prodir, cleanup=True): print rawdir print prodir #get the name of the files infile_list=glob.glob(rawdir+'*.fits') infiles=','.join(['%s' % x for x in infile_list]) #get the current date for the files obsdate=os.path.basename(infile_list[0])[1:9] print obsdate #set up some files that will be needed logfile='imred'+obsdate+'.log' flatimage='FLAT%s.fits' % (obsdate) dbfile='spec%s.db' % obsdate #create the observation log obs_dict=obslog(infile_list) #prepare the data saltprepare(infiles, '', 'p', createvar=False, badpixelimage='', clobber=True, logfile=logfile, verbose=True) #bias subtract the data saltbias('pP*fits', '', 'b', subover=True, trim=True, subbias=False, masterbias='', median=False, function='polynomial', order=5, rej_lo=3.0, rej_hi=5.0, niter=10, plotover=False, turbo=False, clobber=True, logfile=logfile, verbose=True) #gain correct the data saltgain('bpP*fits', '', 'g', usedb=False, mult=True, clobber=True, logfile=logfile, verbose=True) #cross talk correct the data saltxtalk('gbpP*fits', '', 'x', xtalkfile = "", usedb=False, clobber=True, logfile=logfile, verbose=True) #cosmic ray clean the data #only clean the object data for i in range(len(infile_list)): if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS'): img='xgbp'+os.path.basename(infile_list[i]) saltcrclean(img, img, '', crtype='edge', thresh=5, mbox=11, bthresh=5.0, flux_ratio=0.2, bbox=25, gain=1.0, rdnoise=5.0, fthresh=5.0, bfactor=2, gbox=3, maxiter=5, multithread=True, clobber=True, logfile=logfile, verbose=True) #flat field correct the data flat_imgs='' for i in range(len(infile_list)): if obs_dict['CCDTYPE'][i].count('FLAT'): if flat_imgs: flat_imgs += ',' flat_imgs += 'xgbp'+os.path.basename(infile_list[i]) if len(flat_imgs)!=0: saltcombine(flat_imgs,flatimage, method='median', reject=None, mask=False, \ weight=True, blank=0, scale='average', statsec='[200:300, 600:800]', lthresh=3, \ hthresh=3, clobber=True, logfile=logfile, verbose=True) #saltillum(flatimage, flatimage, '', mbox=11, clobber=True, logfile=logfile, verbose=True) saltflat('xgbpP*fits', '', 'f', flatimage, minflat=500, clobber=True, logfile=logfile, verbose=True) else: flats=None imfiles=glob.glob('xgbpP*fits') for f in imfiles: shutil.copy(f, 'f'+f) #mosaic the data geomfile=iraf.osfn("pysalt$data/rss/RSSgeom.dat") saltmosaic('fxgbpP*fits', '', 'm', geomfile, interp='linear', cleanup=True, geotran=True, clobber=True, logfile=logfile, verbose=True) #clean up the images if cleanup: for f in glob.glob('p*fits'): os.remove(f) for f in glob.glob('bp*fits'): os.remove(f) for f in glob.glob('gbp*fits'): os.remove(f) for f in glob.glob('xgbp*fits'): os.remove(f) for f in glob.glob('fxgbp*fits'): os.remove(f)
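# --------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): the reduction order these
# routines appear to assume, from raw frames to extracted spectra (prefixes
# accumulate step by step: p, b, g, x, m, w, e):
#
#   imred(rawdir, prodir)                       # basic CCD reductions + mosaic
#   specpolwavmap(glob.glob('mxgbpP*.fits'))    # split O/E, add WAV extension
#   specpolextract(glob.glob('wmxgbpP*.fits'))  # background-subtract, extract
# --------------------------------------------------------------------------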
def specpolextract(infilelist, logfile='salt.log', debug=False):
    """Produce 1-D extracted spectra for the O and E beams

    This also cleans the 2-D spectra of a number of artifacts, removes the
    background, accounts for small spatial shifts in the observation, and
    resamples the data into a wavelength grid

    Parameters
    ----------
    infilelist: list
        List of filenames of wavelength-mapped images to extract

    logfile: str
        Name of file for logging

    """
    with logging(logfile, debug) as log:
        config_dict = list_configurations(infilelist, log)
        config_count = 0

        for config in config_dict:
            outfilelist = config_dict[config]['object']
            outfiles = len(outfilelist)
            obs_dict = obslog(outfilelist)
            hdu0 = pyfits.open(outfilelist[0])
            rows, cols = hdu0['SCI'].data.shape[1:3]
            cbin, rbin = np.array(obs_dict["CCDSUM"][0].split(" ")).astype(int)
            object_name = hdu0[0].header['OBJECT']
            log.message(
                '\nExtract: {3} Grating {0} Grang {1:6.2f} Artic {2:6.2f}'.
                format(config[0], config[1], config[2], object_name))
            log.message(
                ' Images: ' +
                ' '.join([str(image_number(img)) for img in outfilelist]),
                with_header=False)

            # special version for lamp data
            # this is now removed and will not be part of this code
            #object = obs_dict["OBJECT"][0].strip().upper()
            #lampid = obs_dict["LAMPID"][0].strip().upper()
            #if ((object != "ARC") & (lampid != "NONE")) :
            #    specpollampextract(outfilelist, logfile=logfile)
            #    continue

            # sum spectra to find target, background artifacts, and estimate sky flat and psf functions
            count = 0
            for i in range(outfiles):
                badbin_orc = pyfits.open(outfilelist[i])['BPM'].data > 0
                if count == 0:
                    count_orc = (~badbin_orc).astype(int)
                    image_orc = pyfits.open(
                        outfilelist[i])['SCI'].data * count_orc
                    var_orc = pyfits.open(
                        outfilelist[i])['VAR'].data * count_orc
                else:
                    count_orc += (~badbin_orc).astype(int)
                    image_orc += pyfits.open(
                        outfilelist[i])['SCI'].data * (~badbin_orc).astype(int)
                    var_orc += pyfits.open(
                        outfilelist[i])['VAR'].data * (~badbin_orc).astype(int)
                count += 1
            if count == 0:
                print 'No valid images'
                continue

            image_orc[count_orc > 0] /= count_orc[count_orc > 0]
            badbinall_orc = (count_orc == 0) | (image_orc == 0)  # bin is bad in all images
            badbinone_orc = (count_orc < count) | (image_orc == 0)  # bin is bad in at least one image
            var_orc[count_orc > 0] /= (count_orc[count_orc > 0])**2

            wav_orc = pyfits.open(outfilelist[0])['WAV'].data
            slitid = obs_dict["MASKID"][0]
            okwav_oc = ~((wav_orc == 0).all(axis=1))

            obsname = object_name + "_c" + str(config_count) + "_" + str(
                outfiles)
            hdusum = pyfits.PrimaryHDU(header=hdu0[0].header)
            hdusum = pyfits.HDUList(hdusum)
            hdusum[0].header['OBJECT'] = obsname
            header = hdu0['SCI'].header.copy()
            hdusum.append(
                pyfits.ImageHDU(data=image_orc, header=header, name='SCI'))
            hdusum.append(
                pyfits.ImageHDU(data=var_orc, header=header, name='VAR'))
            hdusum.append(
                pyfits.ImageHDU(data=badbinall_orc.astype('uint8'),
                                header=header,
                                name='BPM'))
            hdusum.append(
                pyfits.ImageHDU(data=wav_orc, header=header, name='WAV'))
            if debug:
                hdusum.writeto(obsname + ".fits", clobber=True)

            # run specpolsignalmap on image
            psf_orc,skyflat_orc,badbinnew_orc,isbkgcont_orc,maprow_od,drow_oc = \
                specpolsignalmap(hdusum,logfile=logfile,debug=debug)

            maprow_ocd = maprow_od[:, None, :] + np.zeros((2, cols, 4))
            maprow_ocd[okwav_oc] += drow_oc[okwav_oc, None]

            isedge_orc = (np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]) | \
                (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3])
            istarget_orc = okwav_oc[:,None,:] & (np.arange(rows)[:,None] > maprow_ocd[:,None,:,1]) & \
                (np.arange(rows)[:,None] < maprow_ocd[:,None,:,2])
            isbkgcont_orc &= \
(~badbinall_orc & ~isedge_orc & ~istarget_orc) badbinall_orc |= badbinnew_orc badbinone_orc |= badbinnew_orc hdusum['BPM'].data = badbinnew_orc.astype('uint8') psf_orc *= istarget_orc.astype(int) if debug: # hdusum.writeto(obsname+".fits",clobber=True) pyfits.PrimaryHDU(psf_orc.astype('float32')).writeto( obsname + '_psf_orc.fits', clobber=True) # pyfits.PrimaryHDU(badbinnew_orc.astype('uint8')).writeto('badbinnew_orc.fits',clobber=True) # pyfits.PrimaryHDU(badbinall_orc.astype('uint8')).writeto('badbinall_orc.fits',clobber=True) # pyfits.PrimaryHDU(badbinone_orc.astype('uint8')).writeto('badbinone_orc.fits',clobber=True) # set up wavelength binning wbin = wav_orc[0, rows / 2, cols / 2] - wav_orc[0, rows / 2, cols / 2 - 1] wbin = 2.**(np.rint(np.log2(wbin)) ) # bin to nearest power of 2 angstroms wmin = (wav_orc.max(axis=1)[okwav_oc].reshape( (2, -1))).min(axis=1).max() wmax = wav_orc.max() for o in (0, 1): colmax = np.where((wav_orc[o] > 0.).any(axis=0))[0][-1] row_r = np.where(wav_orc[o, :, colmax] > 0.)[0] wmax = min(wmax, wav_orc[o, row_r, colmax].min()) wedgemin = wbin * int(wmin / wbin + 0.5) + wbin / 2. wedgemax = wbin * int(wmax / wbin - 0.5) + wbin / 2. wedge_w = np.arange(wedgemin, wedgemax + wbin, wbin) wavs = wedge_w.shape[0] - 1 binedge_orw = np.zeros((2, rows, wavs + 1)) specrow_or = (maprow_od[:, 1:3].mean(axis=1)[:, None] + np.arange(-rows / 4, rows / 4)).astype(int) # scrunch and normalize psf from summed images (using badbinone) for optimized extraction # psf is normalized so its integral over row is 1. psfnormmin = 0.70 # wavelengths with less than this flux in good bins are marked bad psf_orw = np.zeros((2, rows, wavs)) for o in (0, 1): for r in specrow_or[o]: binedge_orw[o,r] = \ interp1d(wav_orc[o,r,okwav_oc[o]],np.arange(cols)[okwav_oc[o]], \ kind='linear',bounds_error=False)(wedge_w) psf_orw[o, r] = scrunch1d(psf_orc[o, r], binedge_orw[o, r]) if debug: pyfits.PrimaryHDU(binedge_orw.astype('float32')).writeto( obsname + '_binedge_orw.fits', clobber=True) pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto( obsname + '_psf_orw.fits', clobber=True) psfnorm_orw = np.repeat(psf_orw.sum(axis=1), rows, axis=1).reshape(2, rows, -1) psf_orw[psfnorm_orw > 0.] /= psfnorm_orw[psfnorm_orw > 0.] pmax = np.minimum( 1., np.median(psf_orw[psfnorm_orw > 0.].reshape( (2, rows, -1)).max(axis=1))) log.message('Stellar profile width: %8.2f arcsec' % ((1. / pmax) * rbin / 8.), with_header=False) pwidth = int(1. 
/ pmax) if debug: pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto( obsname + '_psfnormed_orw.fits', clobber=True) # set up optional image-dependent column shift for slitless data colshiftfilename = "colshift.txt" docolshift = os.path.isfile(colshiftfilename) if docolshift: img_I, dcol_I = np.loadtxt(colshiftfilename, dtype=float, unpack=True, usecols=(0, 1)) shifts = img_I.shape[0] log.message('Column shift: \n Images ' + shifts * '%5i ' % tuple(img_I), with_header=False) log.message(' Bins ' + shifts * '%5.2f ' % tuple(dcol_I), with_header=False) log.message('\nArcsec offset Output File', with_header=False) # background-subtract and extract spectra for i in range(outfiles): hdulist = pyfits.open(outfilelist[i]) tnum = image_number(outfilelist[i]) badbin_orc = (hdulist['BPM'].data > 0) badbinbkg_orc = (badbin_orc | badbinnew_orc | isedge_orc | istarget_orc) if debug: pyfits.PrimaryHDU(isedge_orc.astype('uint8')).writeto( 'isedge_orc_' + tnum + '.fits', clobber=True) pyfits.PrimaryHDU(istarget_orc.astype('uint8')).writeto( 'istarget_orc_' + tnum + '.fits', clobber=True) pyfits.PrimaryHDU(badbinbkg_orc.astype('uint8')).writeto( 'badbinbkg_orc_' + tnum + '.fits', clobber=True) target_orc = bkgsub(hdulist, badbinbkg_orc, isbkgcont_orc, skyflat_orc, maprow_ocd, tnum, debug=debug) target_orc *= (~badbin_orc).astype(int) if debug: pyfits.PrimaryHDU(target_orc.astype('float32')).writeto( 'target_' + tnum + '_orc.fits', clobber=True) var_orc = hdulist['var'].data badbin_orc = (hdulist['bpm'].data > 0) | badbinnew_orc # extract spectrum optimally (Horne, PASP 1986) target_orw = np.zeros((2, rows, wavs)) var_orw = np.zeros_like(target_orw) badbin_orw = np.ones((2, rows, wavs), dtype='bool') wt_orw = np.zeros_like(target_orw) dcol = 0. if docolshift: if int(tnum) in img_I: dcol = dcol_I[np.where( img_I == int(tnum))] # table has observed shift for o in (0, 1): for r in specrow_or[o]: target_orw[o, r] = scrunch1d(target_orc[o, r], binedge_orw[o, r] + dcol) var_orw[o, r] = scrunch1d(var_orc[o, r], binedge_orw[o, r] + dcol) badbin_orw[o, r] = scrunch1d( badbin_orc[o, r].astype(float), binedge_orw[o, r] + dcol) > 0.001 badbin_orw |= (var_orw == 0) badbin_orw |= ((psf_orw * (~badbin_orw)).sum(axis=1)[:, None, :] < psfnormmin) if debug: # pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_'+tnum+'_orw.fits',clobber=True) pyfits.PrimaryHDU(badbin_orw.astype('uint8')).writeto( 'badbin_' + tnum + '_orw.fits', clobber=True) # use master psf shifted in row to allow for guide errors ok_w = ((psf_orw * badbin_orw).sum(axis=1) < 0.03 / float(pwidth / 2)).all(axis=0) crosscor_s = np.zeros(pwidth) for s in range(pwidth): crosscor_s[s] = (psf_orw[:, s:s - pwidth] * target_orw[:, pwidth / 2:-pwidth / 2] * ok_w).sum() smax = np.argmax(crosscor_s) s_S = np.arange(smax - pwidth / 4, smax - pwidth / 4 + pwidth / 2 + 1) polycof = la.lstsq( np.vstack((s_S**2, s_S, np.ones_like(s_S))).T, crosscor_s[s_S])[0] pshift = -(-0.5 * polycof[1] / polycof[0] - pwidth / 2) s = int(pshift + pwidth) - pwidth sfrac = pshift - s psfsh_orw = np.zeros_like(psf_orw) outrow = np.arange( max(0, s + 1), rows - (1 + int(abs(pshift))) + max(0, s + 1)) psfsh_orw[:, outrow] = ( 1. - sfrac ) * psf_orw[:, outrow - s] + sfrac * psf_orw[:, outrow - s - 1] # pyfits.PrimaryHDU(psfsh_orw.astype('float32')).writeto('psfsh_'+tnum+'_orw.fits',clobber=True) wt_orw[~badbin_orw] = psfsh_orw[~badbin_orw] / var_orw[ ~badbin_orw] var_ow = (psfsh_orw * wt_orw * (~badbin_orw)).sum(axis=1) badbin_ow = (var_ow == 0) var_ow[~badbin_ow] = 1. 
/ var_ow[~badbin_ow] # pyfits.PrimaryHDU(var_ow.astype('float32')).writeto('var_'+tnum+'_ow.fits',clobber=True) # pyfits.PrimaryHDU(target_orw.astype('float32')).writeto('target_'+tnum+'_orw.fits',clobber=True) # pyfits.PrimaryHDU(wt_orw.astype('float32')).writeto('wt_'+tnum+'_orw.fits',clobber=True) sci_ow = (target_orw * wt_orw).sum(axis=1) * var_ow badlim = 0.20 psfbadfrac_ow = (psfsh_orw * badbin_orw.astype(int)).sum( axis=1) / psfsh_orw.sum(axis=1) badbin_ow |= (psfbadfrac_ow > badlim) cdebug = 83 if debug: np.savetxt("xtrct"+str(cdebug)+"_"+tnum+".txt",np.vstack((psf_orw[:,:,cdebug],var_orw[:,:,cdebug], \ wt_orw[:,:,cdebug],target_orw[:,:,cdebug])).reshape((4,2,-1)).transpose(1,0,2).reshape((8,-1)).T,fmt="%12.5e") # write O,E spectrum, prefix "s". VAR, BPM for each spectrum. y dim is virtual (length 1) # for consistency with other modes hduout = pyfits.PrimaryHDU(header=hdulist[0].header) hduout = pyfits.HDUList(hduout) header = hdulist['SCI'].header.copy() header.update('VAREXT', 2) header.update('BPMEXT', 3) header.update('CRVAL1', wedge_w[0] + wbin / 2.) header.update('CRVAL2', 0) header.update('CDELT1', wbin) header.update('CTYPE1', 'Angstroms') hduout.append( pyfits.ImageHDU(data=sci_ow.reshape((2, 1, wavs)), header=header, name='SCI')) header.update('SCIEXT', 1, 'Extension for Science Frame', before='VAREXT') hduout.append( pyfits.ImageHDU(data=var_ow.reshape((2, 1, wavs)), header=header, name='VAR')) hduout.append( pyfits.ImageHDU(data=badbin_ow.astype("uint8").reshape( (2, 1, wavs)), header=header, name='BPM')) hduout.writeto('e' + outfilelist[i], clobber=True, output_verify='warn') log.message(' %8.2f e%s' % (pshift * rbin / 8., outfilelist[i]), with_header=False) #increate the config count config_count += 1 return
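# The wavelength grid above rounds the measured central dispersion to the
# nearest power of 2 Angstroms, so bin edges are reproducible across
# configurations.  A standalone numeric sketch of just that rounding step:
def _wbin_sketch():
    import numpy as np
    for dw in (0.9, 1.4, 1.6, 3.1):
        print dw, '->', 2.**np.rint(np.log2(dw))  # 1.0, 1.0, 2.0, 4.0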
def list_configurations_old(infilelist, log):
    """For data observed prior to 2015"""
    obs_dict = obslog(infilelist)

    # Map out which arc goes with which image.  Use arc in closest wavcal block of the config.
    # wavcal block: neither spectrograph config nor track changes, and no gap in data files
    infiles = len(infilelist)
    newtrk = 5.  # new track when rotator changes by more (deg)
    trkrho_i = np.array(map(float, obs_dict['TRKRHO']))
    trkno_i = np.zeros((infiles), dtype=int)
    trkno_i[1:] = ((np.abs(trkrho_i[1:] - trkrho_i[:-1])) > newtrk).cumsum()

    grating_i = [obs_dict['GRATING'][i].strip() for i in range(infiles)]
    grang_i = np.array(map(float, obs_dict['GR-ANGLE']))
    artic_i = np.array(map(float, obs_dict['CAMANG']))
    configdat_i = [tuple((grating_i[i], grang_i[i], artic_i[i])) for i in range(infiles)]
    confdatlist = list(set(configdat_i))  # list tuples of the unique configurations _c
    confno_i = np.array([confdatlist.index(configdat_i[i]) for i in range(infiles)], dtype=int)
    configs = len(confdatlist)

    imageno_i = np.array([image_number(infilelist[i]) for i in range(infiles)])
    filegrp_i = np.zeros((infiles), dtype=int)
    filegrp_i[1:] = ((imageno_i[1:] - imageno_i[:-1]) > 1).cumsum()
    isarc_i = np.array([(obs_dict['OBJECT'][i].upper().strip() == 'ARC') for i in range(infiles)])

    wavblk_i = np.zeros((infiles), dtype=int)
    wavblk_i[1:] = ((filegrp_i[1:] != filegrp_i[:-1]) \
                    | (trkno_i[1:] != trkno_i[:-1]) \
                    | (confno_i[1:] != confno_i[:-1])).cumsum()
    wavblks = wavblk_i.max() + 1

    arcs_c = (isarc_i[:, None] & (confno_i[:, None] == range(configs))).sum(axis=0)
    np.savetxt("wavblktbl.txt", np.vstack((trkrho_i, imageno_i, filegrp_i, trkno_i, \
               confno_i, wavblk_i, isarc_i)).T, fmt="%7.2f "+6*"%3i ",
               header=" rho img grp trk conf wblk arc")

    for c in range(configs):  # worst case: no arc for config, remove images
        if arcs_c[c] == 0:
            lostimages = imageno_i[confno_i == c]
            log.message('No Arc for this configuration: ' \
                        + ("Grating %s Grang %6.2f Artic %6.2f" % confdatlist[c]) \
                        + ("\n Images: " + lostimages.shape[0]*"%i " % tuple(lostimages)),
                        with_header=False)
            wavblk_i[confno_i == c] = -1
    if arcs_c.sum() == 0:
        log.message("Cannot calibrate any images", with_header=False)
        exit()

    iarc_i = -np.ones((infiles), dtype=int)
    for w in range(wavblks):
        blkimages = imageno_i[wavblk_i == w]
        if blkimages.shape[0] == 0:
            continue
        iarc_I = np.where((wavblk_i == w) & (isarc_i))[0]
        if iarc_I.shape[0] > 0:
            iarc = iarc_I[0]  # best: arc is in wavblk, take first
        else:
            conf = confno_i[wavblk_i == w][0]  # fallback: take closest arc of this config
            iarc_I = np.where((confno_i == conf) & (isarc_i))[0]
            blkimagepos = blkimages.mean()
            iarc = iarc_I[np.argmin(np.abs(imageno_i[iarc_I] - blkimagepos))]
        iarc_i[wavblk_i == w] = iarc
        log.message(("\nFor images: " + blkimages.shape[0]*"%i " % tuple(blkimages)) \
                    + ("\n  Use Arc %5i" % imageno_i[iarc]), with_header=False)

    iarc_a = np.unique(iarc_i[iarc_i != -1])
    return iarc_a, iarc_i, confno_i, confdatlist
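# The wavcal-block bookkeeping above uses a compact numpy idiom: flag the
# positions where any grouping key changes, then cumsum the flags to get a
# block label per element.  A standalone sketch with toy arrays:
def _blocklabel_sketch():
    import numpy as np
    conf_i = np.array([0, 0, 1, 1, 1, 0])
    trk_i = np.array([0, 0, 0, 1, 1, 1])
    blk_i = np.zeros(conf_i.shape[0], dtype=int)
    blk_i[1:] = ((conf_i[1:] != conf_i[:-1]) | (trk_i[1:] != trk_i[:-1])).cumsum()
    print blk_i  # [0 0 1 2 2 3]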
def flexure_rss(fitslist, option=""):
    global sex_js, rd, B, pixarcsec, gridsize, niter
    global rho_f, rc0_dgg, usespots_gg, fwhminterp_g  # for cube analysis

    if option == "filesave":
        prefix = raw_input("file prefix: ")
    pixel = 15.  # pixel size in microns
    pix_scale = 0.125
    sexparams = ["X_IMAGE", "Y_IMAGE", "FLUX_ISO", "FLUX_MAX", "FLAGS", "CLASS_STAR", \
                 "X2WIN_IMAGE", "Y2WIN_IMAGE", "XYWIN_IMAGE", "ERRX2WIN_IMAGE"]
    np.savetxt("qred_thrufoc.param", sexparams, fmt="%s")
    fmaxcol, flagcol, xvarcol, yvarcol, xerrcol = (3, 4, 6, 7, 9)  # column nos (from 0) of data in sextractor
    imagestooclosefactor = 3.0  # too close if factor*sep < sqrt(var)
    gaptooclose = 1.25   # arcsec
    edgetooclose = 1.25  # arcsec
    rattolerance = 0.25
    toofaint = 250.      # FMAX counts
    galaxydelta = 0.4    # arcsec
    MOSimagelimit = 1.   # arcsec
    deblend = .005       # default

    flexposns = len(fitslist)
    obsdict = obslog(fitslist)
    image_f = [fitslist[fpos].split(".")[0][-12:] for fpos in range(flexposns)]
    dateobs = int(image_f[0][:8])
    if dateobs > 20110928:
        rho_f = np.array(obsdict["TRKRHO"]).astype(float)
    else:
        rho_f = np.array(obsdict["TELRHO"]).astype(float)
    catpos = np.argmin(np.abs(rho_f))
    cbin, rbin = np.array(obsdict["CCDSUM"][catpos].split(" ")).astype(int)
    maskid = obsdict["MASKID"][catpos].strip()
    filter = obsdict["FILTER"][catpos].strip()
    grating = obsdict["GRATING"][catpos].strip()
    rows, cols = pyfits.getdata(fitslist[catpos]).shape
    isspec = (obsdict["GR-STATE"][catpos][1] == "4")

    print str(datetime.now()), "\n"
    print "Mask:    ", maskid
    print "Filter:  ", filter
    print "Grating: ", grating

    # make catalog of stars using image closest to rho=0 (capital S)
    sex_js = sextract(fitslist[catpos], deblend=deblend)
    fluxisomedian = np.median(np.sort(sex_js[2])[-10:])  # median of 10 brightest
    ok_s = sex_js[2] > fluxisomedian/100.  # get rid of bogus stars
    sexcols = sex_js.shape[0]
    Stars = ok_s.sum()
    sexdata_jfS = np.zeros((sexcols, flexposns, Stars))
    sexdata_jfS[:, catpos] = sex_js[:, ok_s]
    xcenter = 0.5*(sexdata_jfS[0, catpos].min() + sexdata_jfS[0, catpos].max())
    ycenter = 0.5*(sexdata_jfS[1, catpos].min() + sexdata_jfS[1, catpos].max())

    print "\n fits      rho  stars  rshift  cshift  rslope  cslope  rmserr "
    print "           deg         arcsec  arcsec  arcmin  arcmin   bins"
    print ("%12s %5.1f %5i " + 5*"%7.2f ") % \
        (image_f[catpos], rho_f[catpos], Stars, 0., 0., 0., 0., 0.)
    if option == "filesave":
        np.savetxt(prefix+"Stars.txt", sexdata_jfS[:, catpos].T, \
                   fmt=2*"%9.2f "+"%9.0f "+"%9.1f "+"%4i "+"%6.2f "+3*"%7.2f "+"%11.3e")

    # find stars in flexure series, in order of increasing abs(rho), and store sextractor output
    row_fd = np.zeros((flexposns, 2))
    col_fd = np.zeros((flexposns, 2))
    for dirn in (1, -1):
        refpos = catpos
        posdirlist = np.argsort(dirn*rho_f)
        poslist = posdirlist[dirn*rho_f[posdirlist] > rho_f[refpos]]
        for fpos in poslist:
            col_S, row_S = sexdata_jfS[0:2, refpos, :]
            sex_js = sextract(fitslist[fpos], "sexwt.fits", deblend=deblend)
            bintol = 16/cbin  # 2 arcsec tolerance for finding star
            binsqerr_sS = (sex_js[1, :, None] - row_S[None, :])**2 + (sex_js[0, :, None] - col_S[None, :])**2
            S_s = np.argmin(binsqerr_sS, axis=1)
            # First compute image shift by averaging small errors
            rowerr_s = sex_js[1] - row_S[S_s]
            colerr_s = sex_js[0] - col_S[S_s]
            hist_r, bin_r = np.histogram(rowerr_s, bins=32, range=(-2*bintol, 2*bintol))
            drow = rowerr_s[(rowerr_s > bin_r[np.argmax(hist_r)]-bintol) & \
                            (rowerr_s < bin_r[np.argmax(hist_r)]+bintol)].mean()
            hist_c, bin_c = np.histogram(colerr_s, bins=32, range=(-2*bintol, 2*bintol))
            dcol = colerr_s[(colerr_s > bin_c[np.argmax(hist_c)]-bintol) & \
                            (colerr_s < bin_c[np.argmax(hist_c)]+bintol)].mean()
            # Now refind the closest ID
            binsqerr_sS = (sex_js[1, :, None] - row_S[None, :] - drow)**2 + \
                          (sex_js[0, :, None] - col_S[None, :] - dcol)**2
            binsqerr_s = binsqerr_sS.min(axis=1)
            isfound_s = binsqerr_s < bintol**2
            S_s = np.argmin(binsqerr_sS, axis=1)
            isfound_s &= (binsqerr_s == binsqerr_sS[:, S_s].min(axis=0))
            isfound_S = np.array([S in S_s[isfound_s] for S in range(Stars)])
            sexdata_jfS[:, fpos, S_s[isfound_s]] = sex_js[:, isfound_s]
            drow_S = sexdata_jfS[1, fpos] - sexdata_jfS[1, catpos]
            dcol_S = sexdata_jfS[0, fpos] - sexdata_jfS[0, catpos]
            row_fd[fpos], rowchi, d, d, d = np.polyfit(sexdata_jfS[0, catpos, isfound_S]-xcenter, \
                                                       drow_S[isfound_S], deg=1, full=True)
            col_fd[fpos], colchi, d, d, d = np.polyfit(sexdata_jfS[1, catpos, isfound_S]-ycenter, \
                                                       dcol_S[isfound_S], deg=1, full=True)
            rms = np.sqrt((rowchi + colchi)/(2*isfound_S.sum()))

            print ("%12s %5.0f %5i " + 5*"%7.2f ") % (image_f[fpos], rho_f[fpos], isfound_S.sum(), \
                row_fd[fpos, 1]*rbin*pix_scale, col_fd[fpos, 1]*cbin*pix_scale, \
                60.*np.degrees(row_fd[fpos, 0]), -60.*np.degrees(col_fd[fpos, 0]), rms)
            if option == "filesave":
                np.savetxt(prefix+"flex_"+str(fpos)+".txt", np.vstack((isfound_S, drow_S, dcol_S)).T, \
                           fmt="%2i %8.3f %8.3f")
                np.savetxt(prefix+"sextr_"+str(fpos)+".txt", sexdata_jfS[:, fpos].T)

    # make plots
    fig, plot_s = plt.subplots(2, 1, sharex=True)
    plt.xlabel('Rho (deg)')
    plt.xlim(-120, 120)
    plt.xticks(range(-120, 120, 30))
    fig.set_size_inches((8.5, 11))
    fig.subplots_adjust(left=0.175)
    plot_s[0].set_title(str(dateobs)+" Imaging Flexure")
    plot_s[0].set_ylabel('Mean Position (arcsec)')
    plot_s[0].set_ylim(-0.5, 2.)
    plot_s[1].set_ylabel('Rotation (arcmin ccw)')
    plot_s[1].set_ylim(-6., 6.)
    plot_s[0].plot(rho_f, row_fd[:, 1]*rbin*pix_scale, marker='D', label='row')
    plot_s[0].plot(rho_f, col_fd[:, 1]*cbin*pix_scale, marker='D', label='col')
    plot_s[1].plot(rho_f, 60.*np.degrees(row_fd[:, 0]), marker='D', label='row')
    plot_s[1].plot(rho_f, -60.*np.degrees(col_fd[:, 0]), marker='D', label='col')
    plot_s[0].legend(fontsize='medium', loc='upper center')
    plotfile = str(dateobs)+'_imflex.pdf'
    plt.savefig(plotfile, orientation='portrait')

    if os.name == 'posix':
        if os.popen('ps -C evince -f').read().count(plotfile) == 0:
            os.system('evince '+plotfile+' &')

    os.remove("out.txt")
    os.remove("qred_thrufoc.param")
    os.remove("sexwt.fits")
    return
def specpolextract(infilelist, logfile='salt.log', debug=False):
    """Produce 1-D extracted spectra for the O and E beams

    This also cleans the 2-D spectra of a number of artifacts, removes
    the background, accounts for small spatial shifts in the observation,
    and resamples the data into a wavelength grid.

    Parameters
    ----------
    infilelist: list
        List of filenames that include an extracted spectrum

    logfile: str
        Name of file for logging
    """
    #set up the files
    obsdate = os.path.basename(infilelist[0])[8:16]

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict = obslog(infilelist)
        # get rid of arcs
        for i in range(len(infilelist))[::-1]:
            if (obs_dict['OBJECT'][i].upper().strip() == 'ARC'):
                del infilelist[i]
        infiles = len(infilelist)

        # contiguous images of the same object and config are grouped together as an observation
        obs_dict = obslog(infilelist)
        confno_i, confdatlist = configmap(infilelist)
        configs = len(confdatlist)
        objectlist = list(set(obs_dict['OBJECT']))
        objno_i = np.array([objectlist.index(obs_dict['OBJECT'][i]) for i in range(infiles)], dtype=int)
        obs_i = np.zeros((infiles), dtype=int)
        obs_i[1:] = ((objno_i[1:] != objno_i[:-1]) | (confno_i[1:] != confno_i[:-1])).cumsum()
        dum, iarg_b = np.unique(obs_i, return_index=True)  # gives i for beginning of each obs
        obss = iarg_b.shape[0]
        obscount_b = np.zeros((obss), dtype=int)
        oclist_b = np.array([[objno_i[iarg_b[b]], confno_i[iarg_b[b]]] for b in range(obss)])
        if obss > 1:
            for b in range(1, obss):
                obscount_b[b] = (oclist_b[b] == oclist_b[0:b]).all(axis=1).sum()

        for b in range(obss):
            ilist = np.where(obs_i == b)[0]
            outfiles = len(ilist)
            outfilelist = [infilelist[i] for i in ilist]
            obs_dict = obslog(outfilelist)
            imagenolist = [int(os.path.basename(infilelist[i]).split('.')[0][-4:]) for i in ilist]
            log.message('\nExtract: '+objectlist[objno_i[ilist[0]]]+'  Grating %s Grang %6.2f  Artic %6.2f' % \
                        confdatlist[confno_i[ilist[0]]], with_header=False)
            log.message(' Images: '+outfiles*'%i ' % tuple(imagenolist), with_header=False)
            hdu0 = pyfits.open(outfilelist[0])
            rows, cols = hdu0['SCI'].data.shape[1:3]
            cbin, rbin = np.array(obs_dict["CCDSUM"][0].split(" ")).astype(int)

            # special version for lamp data
            object = obs_dict["OBJECT"][0].strip().upper()
            lampid = obs_dict["LAMPID"][0].strip().upper()
            if ((object != "ARC") & (lampid != "NONE")):
                specpollampextract(outfilelist, logfile=logfile)
                continue

            # sum spectra to find target, background artifacts, and estimate sky flat and psf functions
            count = 0
            for i in range(outfiles):
                badbin_orc = pyfits.open(outfilelist[i])['BPM'].data > 0
                if count == 0:
                    count_orc = (~badbin_orc).astype(int)
                    image_orc = pyfits.open(outfilelist[i])['SCI'].data*count_orc
                    var_orc = pyfits.open(outfilelist[i])['VAR'].data*count_orc
                else:
                    count_orc += (~badbin_orc).astype(int)
                    image_orc += pyfits.open(outfilelist[i])['SCI'].data*(~badbin_orc).astype(int)
                    var_orc += pyfits.open(outfilelist[i])['VAR'].data*(~badbin_orc).astype(int)
                count += 1
            if count == 0:
                print 'No valid images'
                continue

            image_orc[count_orc > 0] /= count_orc[count_orc > 0]
            badbinall_orc = (count_orc == 0) | (image_orc == 0)     # bin is bad in all images
            badbinone_orc = (count_orc < count) | (image_orc == 0)  # bin is bad in at least one image
            var_orc[count_orc > 0] /= (count_orc[count_orc > 0])**2
            wav_orc = pyfits.open(outfilelist[0])['WAV'].data
            slitid = obs_dict["MASKID"][0]
            okwav_oc = ~((wav_orc == 0).all(axis=1))
            if slitid[0] == "P":
                slitwidth = float(slitid[2:5])/10.
            else:
                slitwidth = float(slitid)

            obsname = objectlist[oclist_b[b][0]]+"_c"+str(oclist_b[b][1])+"_"+str(obscount_b[b])
            hdusum = pyfits.PrimaryHDU(header=hdu0[0].header)
            hdusum = pyfits.HDUList(hdusum)
            hdusum[0].header.update('OBJECT', obsname)
            header = hdu0['SCI'].header.copy()
            hdusum.append(pyfits.ImageHDU(data=image_orc, header=header, name='SCI'))
            hdusum.append(pyfits.ImageHDU(data=var_orc, header=header, name='VAR'))
            hdusum.append(pyfits.ImageHDU(data=badbinall_orc.astype('uint8'), header=header, name='BPM'))
            hdusum.append(pyfits.ImageHDU(data=wav_orc, header=header, name='WAV'))
            if debug:
                hdusum.writeto(obsname+".fits", clobber=True)

            psf_orc, skyflat_orc, badbinnew_orc, isbkgcont_orc, maprow_od, drow_oc = \
                specpolsignalmap(hdusum, logfile=logfile, debug=debug)

            maprow_ocd = maprow_od[:, None, :] + np.zeros((2, cols, 4))
            maprow_ocd[okwav_oc] += drow_oc[okwav_oc, None]

            isedge_orc = (np.arange(rows)[:, None] < maprow_ocd[:, None, :, 0]) | \
                         (np.arange(rows)[:, None] > maprow_ocd[:, None, :, 3])
            istarget_orc = okwav_oc[:, None, :] & (np.arange(rows)[:, None] > maprow_ocd[:, None, :, 1]) & \
                           (np.arange(rows)[:, None] < maprow_ocd[:, None, :, 2])
            isbkgcont_orc &= (~badbinall_orc & ~isedge_orc & ~istarget_orc)
            badbinall_orc |= badbinnew_orc
            badbinone_orc |= badbinnew_orc
            hdusum['BPM'].data = badbinnew_orc.astype('uint8')

            if debug:
                #hdusum.writeto(obsname+".fits",clobber=True)
                pyfits.PrimaryHDU(psf_orc.astype('float32')).writeto(obsname+'_psf_orc.fits', clobber=True)
                #pyfits.PrimaryHDU(badbinnew_orc.astype('uint8')).writeto('badbinnew_orc.fits',clobber=True)
                #pyfits.PrimaryHDU(badbinall_orc.astype('uint8')).writeto('badbinall_orc.fits',clobber=True)
                #pyfits.PrimaryHDU(badbinone_orc.astype('uint8')).writeto('badbinone_orc.fits',clobber=True)

            # set up wavelength binning
            wbin = wav_orc[0, rows/2, cols/2] - wav_orc[0, rows/2, cols/2-1]
            wbin = 2.**(np.rint(np.log2(wbin)))  # bin to nearest power of 2 angstroms
            wmin = (wav_orc.max(axis=1)[okwav_oc].reshape((2, -1))).min(axis=1).max()
            wmax = wav_orc.max()
            for o in (0, 1):
                colmax = np.where((wav_orc[o] > 0.).any(axis=0))[0][-1]
                row_r = np.where(wav_orc[o, :, colmax] > 0.)[0]
                wmax = min(wmax, wav_orc[o, row_r, colmax].min())
            wedgemin = wbin*int(wmin/wbin + 0.5) + wbin/2.
            wedgemax = wbin*int(wmax/wbin - 0.5) + wbin/2.
            wedge_w = np.arange(wedgemin, wedgemax + wbin, wbin)
            wavs = wedge_w.shape[0] - 1
            binedge_orw = np.zeros((2, rows, wavs+1))
            specrow_or = (maprow_od[:, 1:3].mean(axis=1)[:, None] + np.arange(-rows/4, rows/4)).astype(int)

            # scrunch and normalize psf from summed images (using badbinone) for optimized extraction
            # psf is normalized so its integral over row is 1.
            psfnormmin = 0.70  # wavelengths with less than this flux in good bins are marked bad
            psf_orw = np.zeros((2, rows, wavs))
            for o in (0, 1):
                for r in specrow_or[o]:
                    binedge_orw[o, r] = \
                        interp1d(wav_orc[o, r, okwav_oc[o]], np.arange(cols)[okwav_oc[o]], \
                                 kind='linear', bounds_error=False)(wedge_w)
                    psf_orw[o, r] = scrunch1d(psf_orc[o, r], binedge_orw[o, r])

            if debug:
                pyfits.PrimaryHDU(binedge_orw.astype('float32')).writeto(obsname+'_binedge_orw.fits', clobber=True)
                pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto(obsname+'_psf_orw.fits', clobber=True)

            psf_orw /= psf_orw.sum(axis=1)[:, None, :]

            # set up optional image-dependent column shift for slitless data
            colshiftfilename = "colshift.txt"
            docolshift = os.path.isfile(colshiftfilename)
            if docolshift:
                img_I, dcol_I = np.loadtxt(colshiftfilename, dtype=float, unpack=True, usecols=(0, 1))
                shifts = img_I.shape[0]
                log.message('Column shift: \n Images '+shifts*'%5i ' % tuple(img_I), with_header=False)
                log.message('        Bins '+shifts*'%5.2f ' % tuple(dcol_I), with_header=False)

            # background-subtract and extract spectra
            for i in range(outfiles):
                hdulist = pyfits.open(outfilelist[i])
                tnum = os.path.basename(outfilelist[i]).split('.')[0][-3:]
                badbin_orc = (hdulist['BPM'].data > 0)
                badbinbkg_orc = (badbin_orc | badbinnew_orc | isedge_orc | istarget_orc)
                if debug:
                    pyfits.PrimaryHDU(isedge_orc.astype('uint8')).writeto('isedge_orc_'+tnum+'.fits', clobber=True)
                    pyfits.PrimaryHDU(istarget_orc.astype('uint8')).writeto('istarget_orc_'+tnum+'.fits', clobber=True)
                    pyfits.PrimaryHDU(badbinbkg_orc.astype('uint8')).writeto('badbinbkg_orc_'+tnum+'.fits', clobber=True)
                target_orc = bkgsub(hdulist, badbinbkg_orc, isbkgcont_orc, skyflat_orc, maprow_ocd, tnum, debug=debug)
                target_orc *= (~badbin_orc).astype(int)
                if debug:
                    pyfits.PrimaryHDU(target_orc.astype('float32')).writeto('target_'+tnum+'_orc.fits', clobber=True)
                var_orc = hdulist['var'].data
                badbin_orc = (hdulist['bpm'].data > 0) | badbinnew_orc

                # extract spectrum optimally (Horne, PASP 1986)
                target_orw = np.zeros((2, rows, wavs))
                var_orw = np.zeros_like(target_orw)
                badbin_orw = np.ones((2, rows, wavs), dtype='bool')
                wt_orw = np.zeros_like(target_orw)
                dcol = 0.
                if docolshift:
                    if int(tnum) in img_I:
                        dcol = dcol_I[np.where(img_I == int(tnum))]  # table has observed shift
                for o in (0, 1):
                    for r in specrow_or[o]:
                        target_orw[o, r] = scrunch1d(target_orc[o, r], binedge_orw[o, r]+dcol)
                        var_orw[o, r] = scrunch1d(var_orc[o, r], binedge_orw[o, r]+dcol)
                        badbin_orw[o, r] = scrunch1d(badbin_orc[o, r].astype(float), binedge_orw[o, r]+dcol) > 0.001
                badbin_orw |= (var_orw == 0)
                badbin_orw |= ((psf_orw*(~badbin_orw)).sum(axis=1)[:, None, :] < psfnormmin)
                #pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_'+tnum+'_orw.fits',clobber=True)
                #pyfits.PrimaryHDU(badbin_orw.astype('uint8')).writeto('badbin_'+tnum+'_orw.fits',clobber=True)

                # use master psf shifted in row to allow for guide errors
                pwidth = 2*int(1./psf_orw.max())
                ok_w = ((psf_orw*badbin_orw).sum(axis=1) < 0.03/float(pwidth/2)).all(axis=0)
                crosscor_s = np.zeros(pwidth)
                for s in range(pwidth):
                    crosscor_s[s] = (psf_orw[:, s:s-pwidth]*target_orw[:, pwidth/2:-pwidth/2]*ok_w).sum()
                smax = np.argmax(crosscor_s)
                s_S = np.arange(smax-pwidth/4, smax-pwidth/4+pwidth/2+1)
                polycof = la.lstsq(np.vstack((s_S**2, s_S, np.ones_like(s_S))).T, crosscor_s[s_S])[0]
                pshift = -(-0.5*polycof[1]/polycof[0] - pwidth/2)
                s = int(pshift+pwidth)-pwidth
                sfrac = pshift-s
                psfsh_orw = np.zeros_like(psf_orw)
                outrow = np.arange(max(0, s+1), rows-(1+int(abs(pshift)))+max(0, s+1))
                psfsh_orw[:, outrow] = (1.-sfrac)*psf_orw[:, outrow-s] + sfrac*psf_orw[:, outrow-s-1]
                #pyfits.PrimaryHDU(psfsh_orw.astype('float32')).writeto('psfsh_'+tnum+'_orw.fits',clobber=True)

                wt_orw[~badbin_orw] = psfsh_orw[~badbin_orw]/var_orw[~badbin_orw]
                var_ow = (psfsh_orw*wt_orw*(~badbin_orw)).sum(axis=1)
                badbin_ow = (var_ow == 0)
                var_ow[~badbin_ow] = 1./var_ow[~badbin_ow]
                #pyfits.PrimaryHDU(var_ow.astype('float32')).writeto('var_'+tnum+'_ow.fits',clobber=True)
                #pyfits.PrimaryHDU(target_orw.astype('float32')).writeto('target_'+tnum+'_orw.fits',clobber=True)
                #pyfits.PrimaryHDU(wt_orw.astype('float32')).writeto('wt_'+tnum+'_orw.fits',clobber=True)

                sci_ow = (target_orw*wt_orw).sum(axis=1)*var_ow
                badlim = 0.20
                psfbadfrac_ow = (psfsh_orw*badbin_orw.astype(int)).sum(axis=1)/psfsh_orw.sum(axis=1)
                badbin_ow |= (psfbadfrac_ow > badlim)

                cdebug = 83
                if debug:
                    np.savetxt("xtrct"+str(cdebug)+"_"+tnum+".txt", np.vstack((psf_orw[:,:,cdebug], var_orw[:,:,cdebug], \
                        wt_orw[:,:,cdebug], target_orw[:,:,cdebug])).reshape((4,2,-1)).transpose(1,0,2).reshape((8,-1)).T, fmt="%12.5e")

                # write O,E spectrum, prefix "e".  VAR, BPM for each spectrum.  y dim is virtual (length 1)
                # for consistency with other modes
                hduout = pyfits.PrimaryHDU(header=hdulist[0].header)
                hduout = pyfits.HDUList(hduout)
                header = hdulist['SCI'].header.copy()
                header.update('VAREXT', 2)
                header.update('BPMEXT', 3)
                header.update('CRVAL1', wedge_w[0]+wbin/2.)
                header.update('CRVAL2', 0)
                header.update('CDELT1', wbin)
                header.update('CTYPE1', 'Angstroms')
                hduout.append(pyfits.ImageHDU(data=sci_ow.reshape((2, 1, wavs)), header=header, name='SCI'))
                header.update('SCIEXT', 1, 'Extension for Science Frame', before='VAREXT')
                hduout.append(pyfits.ImageHDU(data=var_ow.reshape((2, 1, wavs)), header=header, name='VAR'))
                hduout.append(pyfits.ImageHDU(data=badbin_ow.astype("uint8").reshape((2, 1, wavs)), header=header, name='BPM'))
                hduout.writeto('e'+outfilelist[i], clobber=True, output_verify='warn')
                log.message('Output file '+'e'+outfilelist[i], with_header=False)
    return
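# Both versions of the extractor implement Horne (PASP 1986) optimal
# extraction: wt = P/V, flux = sum(P*D/V)/sum(P**2/V), var = 1/sum(P**2/V),
# evaluated per wavelength bin over the spatial rows.  A 1-D sketch with
# illustrative names and no bad-pixel handling:
def _horne_sketch(data_r, var_r, psf_r):
    # psf_r is the normalized spatial profile, data_r the sky-subtracted
    # counts, var_r the per-pixel variance along the slit.
    wt_r = psf_r/var_r
    norm = (psf_r*wt_r).sum()
    return (data_r*wt_r).sum()/norm, 1./norm  # (optimal flux, its variance)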
def saltadvance(images, outpath, obslogfile=None, gaindb=None, xtalkfile=None,
                geomfile=None, subover=True, trim=True, masbias=None, subbias=False,
                median=False, function='polynomial', order=5, rej_lo=3, rej_hi=3,
                niter=5, interp='linear', sdbhost='', sdbname='', sdbuser='', password='',
                clobber=False, cleanup=True, logfile='salt.log', verbose=True):
    """SALTADVANCE provides advanced data reductions for a set of data.

    It will sort the data, and first process the biases, flats, and then
    the science frames.  It will record basic quality control information
    about each of the steps.
    """
    plotover = False
    debug = False  # logging debug flag (not exposed as a task parameter)

    #start logging
    with logging(logfile, debug) as log:

        # Check the input images
        infiles = saltio.argunpack('Input', images)
        infiles.sort()

        # create list of output files
        outpath = saltio.abspath(outpath)

        #log into the database
        sdb = saltmysql.connectdb(sdbhost, sdbname, sdbuser, password)

        #does the gain database file exist
        if gaindb:
            dblist = saltio.readgaindb(gaindb)
        else:
            dblist = []

        # does crosstalk coefficient data exist
        if xtalkfile:
            xtalkfile = xtalkfile.strip()
            xdict = saltio.readxtalkcoeff(xtalkfile)
        else:
            xdict = None

        #does the mosaic file exist--raise error if not
        saltio.fileexists(geomfile)

        # Delete the obslog file if it already exists
        if os.path.isfile(obslogfile) and clobber:
            saltio.delete(obslogfile)

        #read in the observation log or create it
        if os.path.isfile(obslogfile):
            msg = 'The observing log already exists.  Please either delete it or run saltclean with clobber=yes'
            raise SaltError(msg)
        else:
            headerDict = obslog(infiles, log)
            obsstruct = createobslogfits(headerDict)
            saltio.writefits(obsstruct, obslogfile)

        #create the list of bias frames and process them
        filename = obsstruct.data.field('FILENAME')
        detmode = obsstruct.data.field('DETMODE')
        obsmode = obsstruct.data.field('OBSMODE')
        ccdtype = obsstruct.data.field('CCDTYPE')
        propcode = obsstruct.data.field('PROPID')
        masktype = obsstruct.data.field('MASKTYP')

        #set the bias list of objects
        biaslist = filename[(ccdtype == 'ZERO')*(propcode == 'CAL_BIAS')]
        masterbias_dict = {}
        for img in infiles:
            if os.path.basename(img) in biaslist:
                #open the image
                struct = fits.open(img)
                bimg = outpath+'bxgp'+os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing Zero frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct, createvar=True, badpixelstruct=None, mult=True,
                               dblist=dblist, xdict=xdict, subover=subover, trim=trim,
                               subbias=False, bstruct=None, median=median, function=function,
                               order=order, rej_lo=rej_lo, rej_hi=rej_hi, niter=niter,
                               plotover=plotover, log=log, verbose=verbose)

                #update the database
                updatedq(os.path.basename(img), struct, sdb)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()), 'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()), 'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()), 'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, bimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master bias list
                masterbias_dict = compareimages(struct, bimg, masterbias_dict, keylist=biasheader_list)

        #create the master bias frame
        for i in masterbias_dict.keys():
            bkeys = masterbias_dict[i][0]
            blist = masterbias_dict[i][1:]
            mbiasname = outpath+createmasterbiasname(blist, bkeys)
            bfiles = ','.join(blist)
            saltcombine(bfiles, mbiasname, method='median', reject='sigclip', mask=False,
                        weight=False, blank=0, scale=None, statsec=None, lthresh=3, \
                        hthresh=3, clobber=False, logfile=logfile, verbose=verbose)

        #create the list of flatfields and process them
        flatlist = filename[ccdtype == 'FLAT']
        masterflat_dict = {}
        for img in infiles:
            if os.path.basename(img) in flatlist:
                #open the image
                struct = fits.open(img)
                fimg = outpath+'bxgp'+os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing Flat frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct, createvar=True, badpixelstruct=None, mult=True,
                               dblist=dblist, xdict=xdict, subover=subover, trim=trim,
                               subbias=False, bstruct=None, median=median, function=function,
                               order=order, rej_lo=rej_lo, rej_hi=rej_hi, niter=niter,
                               plotover=plotover, log=log, verbose=verbose)

                #update the database
                updatedq(os.path.basename(img), struct, sdb)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()), 'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()), 'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()), 'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, fimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master flat list
                masterflat_dict = compareimages(struct, fimg, masterflat_dict, keylist=flatheader_list)

        #create the master flat frame
        for i in masterflat_dict.keys():
            fkeys = masterflat_dict[i][0]
            flist = masterflat_dict[i][1:]
            mflatname = outpath+createmasterflatname(flist, fkeys)
            ffiles = ','.join(flist)
            saltcombine(ffiles, mflatname, method='median', reject='sigclip', mask=False,
                        weight=False, blank=0, scale=None, statsec=None, lthresh=3, \
                        hthresh=3, clobber=False, logfile=logfile, verbose=verbose)

        #process the arc data
        arclist = filename[(ccdtype == 'ARC') * (obsmode == 'SPECTROSCOPY') * (masktype == 'LONGSLIT')]
        for i, img in enumerate(infiles):
            nimg = os.path.basename(img)
            if nimg in arclist:
                #open the image
                struct = fits.open(img)
                simg = outpath+'bxgp'+os.path.basename(img)
                obsdate = os.path.basename(img)[1:9]

                #print the message
                if log:
                    message = 'Processing ARC frame %s' % img
                    log.message(message, with_stdout=verbose)

                struct = clean(struct, createvar=False, badpixelstruct=None, mult=True,
                               dblist=dblist, xdict=xdict, subover=subover, trim=trim,
                               subbias=False, bstruct=None, median=median, function=function,
                               order=order, rej_lo=rej_lo, rej_hi=rej_hi, niter=niter,
                               plotover=plotover, log=log, verbose=verbose)

                # write FITS file
                saltio.writefits(struct, simg, clobber=clobber)
                saltio.closefits(struct)

                #mosaic the images
                mimg = outpath+'mbxgp'+os.path.basename(img)
                saltmosaic(images=simg, outimages=mimg, outpref='', geomfile=geomfile,
                           interp=interp, cleanup=True, clobber=clobber, logfile=logfile,
                           verbose=verbose)

                #remove the intermediate steps
                saltio.delete(simg)

                #measure the arcdata
                arcimage = outpath+'mbxgp'+nimg
                dbfile = outpath+obsdate+'_specid.db'
                lamp = obsstruct.data.field('LAMPID')[i]
                lamp = lamp.replace(' ', '')
                lampfile = iraf.osfn("pysalt$data/linelists/%s.salt" % lamp)
                print arcimage, lampfile, os.getcwd()
                specidentify(arcimage, lampfile, dbfile, guesstype='rss',
                             guessfile='', automethod='Matchlines', function='legendre', order=3,
                             rstep=100, rstart='middlerow', mdiff=20, thresh=3, startext=0,
                             niter=5, smooth=3, inter=False, clobber=True, logfile=logfile,
                             verbose=verbose)
                try:
                    ximg = outpath+'xmbxgp'+os.path.basename(arcimage)
                    specrectify(images=arcimage, outimages=ximg, outpref='', solfile=dbfile,
                                caltype='line', function='legendre', order=3, inttype='interp',
                                w1=None, w2=None, dw=None, nw=None, blank=0.0, conserve=True,
                                nearest=True, clobber=True, logfile=logfile, verbose=verbose)
                except:
                    pass

        #process the science data
        for i, img in enumerate(infiles):
            nimg = os.path.basename(img)
            if not (nimg in flatlist or nimg in biaslist or nimg in arclist):
                #open the image
                struct = fits.open(img)
                if struct[0].header['PROPID'].count('CAL_GAIN'):
                    continue
                simg = outpath+'bxgp'+os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing science frame %s' % img
                    log.message(message, with_stdout=verbose)

                #Check to see if it is RSS 2x2 and add bias subtraction
                instrume = saltkey.get('INSTRUME', struct[0]).strip()
                gainset = saltkey.get('GAINSET', struct[0])
                rospeed = saltkey.get('ROSPEED', struct[0])
                target = saltkey.get('OBJECT', struct[0]).strip()
                exptime = saltkey.get('EXPTIME', struct[0])
                obsmode = saltkey.get('OBSMODE', struct[0]).strip()
                detmode = saltkey.get('DETMODE', struct[0]).strip()
                masktype = saltkey.get('MASKTYP', struct[0]).strip()
                xbin, ybin = saltkey.ccdbin(struct[0], img)
                obsdate = os.path.basename(img)[1:9]

                bstruct = None
                crtype = None
                thresh = 5
                mbox = 11
                bthresh = 5.0
                flux_ratio = 0.2
                bbox = 25
                gain = 1.0
                rdnoise = 5.0
                fthresh = 5.0
                bfactor = 2
                gbox = 3
                maxiter = 5
                subbias = False
                if instrume == 'RSS' and gainset == 'FAINT' and rospeed == 'SLOW':
                    bfile = 'P%sBiasNM%ix%iFASL.fits' % (obsdate, xbin, ybin)
                    if os.path.exists(bfile):
                        bstruct = fits.open(bfile)
                        subbias = True
                if detmode == 'Normal' and target != 'ARC' and xbin < 5 and ybin < 5:
                    crtype = 'edge'
                    thresh = 5
                    mbox = 11
                    bthresh = 5.0
                    flux_ratio = 0.2
                    bbox = 25
                    gain = 1.0
                    rdnoise = 5.0
                    fthresh = 5.0
                    bfactor = 2
                    gbox = 3
                    maxiter = 3

                #process the image
                struct = clean(struct, createvar=True, badpixelstruct=None, mult=True,
                               dblist=dblist, xdict=xdict, subover=subover, trim=trim,
                               subbias=subbias, bstruct=bstruct, median=median, function=function,
                               order=order, rej_lo=rej_lo, rej_hi=rej_hi, niter=niter,
                               plotover=plotover, crtype=crtype, thresh=thresh, mbox=mbox, bbox=bbox, \
                               bthresh=bthresh, flux_ratio=flux_ratio, gain=gain, rdnoise=rdnoise,
                               bfactor=bfactor, fthresh=fthresh, gbox=gbox, maxiter=maxiter,
                               log=log, verbose=verbose)

                #update the database
                updatedq(os.path.basename(img), struct, sdb)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False, exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()), 'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()), 'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()), 'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, simg, clobber=clobber)
                saltio.closefits(struct)

                #mosaic the files--currently not in the proper format--will update when it is
                if not saltkey.fastmode(saltkey.get('DETMODE', struct[0])):
                    mimg = outpath+'mbxgp'+os.path.basename(img)
                    saltmosaic(images=simg, outimages=mimg, outpref='', geomfile=geomfile,
                               interp=interp, fill=True, cleanup=True, clobber=clobber,
                               logfile=logfile, verbose=verbose)

                    #remove the intermediate steps
                    saltio.delete(simg)

                    #if the file is spectroscopic mode, apply the wavelength correction
                    if obsmode == 'SPECTROSCOPY' and masktype.strip() == 'LONGSLIT':
                        dbfile = outpath+obsdate+'_specid.db'
                        try:
                            ximg = outpath+'xmbxgp'+os.path.basename(img)
                            specrectify(images=mimg, outimages=ximg, outpref='', solfile=dbfile,
                                        caltype='line', function='legendre', order=3, inttype='interp',
                                        w1=None, w2=None, dw=None, nw=None, blank=0.0, conserve=True,
                                        nearest=True, clobber=True, logfile=logfile, verbose=verbose)
                        except Exception, e:
                            log.message('%s' % e)

        #clean up the results
        if cleanup:
            #clean up the bias frames
            for i in masterbias_dict.keys():
                blist = masterbias_dict[i][1:]
                for b in blist:
                    saltio.delete(b)

            #clean up the flat frames
            for i in masterflat_dict.keys():
                flist = masterflat_dict[i][1:]
                for f in flist:
                    saltio.delete(f)
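# The master-bias and master-flat stages above bucket frames whose headers
# agree on a keyword list (via compareimages) before handing each bucket to
# saltcombine.  A hedged sketch of that grouping pattern with an illustrative
# key list; this is not the pysalt implementation itself:
def _groupframes_sketch(filenames, keylist=('CCDSUM', 'GAINSET', 'ROSPEED')):
    import pyfits
    groups = {}
    for fname in filenames:
        hdr = pyfits.getheader(fname)
        key = tuple(str(hdr.get(k, '')).strip() for k in keylist)
        groups.setdefault(key, []).append(fname)
    return groups  # each bucket then yields one median-combined master frame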
def flexure_rssspec(imagefits, fitslist, option=""):
    print str(datetime.now())
    if option == "filesave":
        prefix = raw_input("\nFile prefix: ")
    pixel = 15.  # pixel size in microns
    pix_scale = 0.125
    sexparams = ["X_IMAGE", "Y_IMAGE", "FLUX_ISO", "FLUX_MAX", "FLAGS", "CLASS_STAR", \
                 "X2WIN_IMAGE", "Y2WIN_IMAGE", "XYWIN_IMAGE", "ERRX2WIN_IMAGE"]
    np.savetxt("qred_thrufoc.param", sexparams, fmt="%s")
    fmaxcol, flagcol, xvarcol, yvarcol, xerrcol = (3, 4, 6, 7, 9)  # column nos (from 0) of data in sextractor
    imagestooclosefactor = 3.0  # too close if factor*sep < sqrt(var)
    gaptooclose = 1.25   # arcsec
    edgetooclose = 1.25  # arcsec
    rattolerance = 0.25
    toofaint = 250.      # FMAX counts
    galaxydelta = 0.4    # arcsec
    MOSimagelimit = 1.   # arcsec
    deblend = .005       # default

    imagehdr = pyfits.getheader(imagefits)
    if imagehdr["GR-STATE"][1] == "4":
        print "First fits file " + imagefits + " is not an image of the mask"
        exit()

    flexposns = len(fitslist)
    obsdict = obslog(fitslist)
    image_f = [fitslist[fpos].split(".")[0][-12:] for fpos in range(flexposns)]
    dateobs = obsdict["DATE-OBS"][0].replace("-", "")
    if int(dateobs) > 20110928:
        rho_f = np.array(obsdict["TRKRHO"]).astype(float)
    else:
        rho_f = np.array(obsdict["TELRHO"]).astype(float)
    catpos = np.argmin(np.abs(rho_f))
    cbin, rbin = np.array(obsdict["CCDSUM"][catpos].split(" ")).astype(int)
    maskid = obsdict["MASKID"][catpos].strip()
    filter = obsdict["FILTER"][catpos].strip()
    grating = obsdict["GRATING"][catpos].strip()
    rows, cols = pyfits.getdata(fitslist[catpos]).shape
    isspec = (obsdict["GR-STATE"][catpos][1] == "4")
    if not isspec:
        print "Use flexure_rssimage for image flexure analysis"
        exit()
    grang = float(obsdict["GRTILT"][catpos])
    artic = float(obsdict["CAMANG"][catpos])
    lamp = obsdict["LAMPID"][catpos].strip()

    print "\nMask:           ", maskid
    print "Filter:         ", filter
    print "Grating:        ", grating
    print "Artic (deg):    ", artic
    print "Gr Angle (deg): ", grang
    print "Lamp:           ", lamp

    # map the mask spots _m using the imaging fits file
    sex_js = sextract(imagefits, deblend=deblend)
    flux_s = sex_js[2]
    fluxmedian = np.median(np.sort(flux_s)[-10:])
    okm_s = (flux_s > fluxmedian/10)  # cull bogus spots
    maskholes = okm_s.sum()
    r_m = sex_js[1, okm_s]
    c_m = sex_js[0, okm_s]

    # find mask rows _R, tabulate
    histr_b, binr_b = np.histogram(r_m, bins=rows/10, range=(0, rows))
    bin0_R = np.where((histr_b[1:] > 0) & (histr_b[:-1] == 0))[0]
    bin1_R = np.where((histr_b[1:] == 0) & (histr_b[:-1] > 0))[0]
    maskRows = bin0_R.shape[0]
    bin_m = np.digitize(r_m, binr_b) - 1
    R_m = np.array([np.where((bin_m[m] >= bin0_R) & (bin_m[m] <= bin1_R))[0][0] \
                    for m in range(maskholes)])

    # find mask cols _C, tabulate
    histc_b, binc_b = np.histogram(c_m, bins=cols/10, range=(0, cols))
    bin0_C = np.where((histc_b[1:] > 0) & (histc_b[:-1] == 0))[0]
    bin1_C = np.where((histc_b[1:] == 0) & (histc_b[:-1] > 0))[0]
    maskCols = bin0_C.shape[0]
    bin_m = np.digitize(c_m, binc_b) - 1
    C_m = np.array([np.where((bin_m[m] >= bin0_C) & (bin_m[m] <= bin1_C))[0][0] \
                    for m in range(maskholes)])

    # identify mask center = optical axis
    if maskid == 'P000000N99':    # symmetric mask
        Raxis = maskRows/2
        Caxis = maskCols/2
    elif maskid == 'P000000N03':  # mask with centered cross
        Raxis = np.where((np.argmax(histr_b) >= bin0_R) & (np.argmax(histr_b) <= bin1_R))[0][0]
        Caxis = np.where((np.argmax(histc_b) >= bin0_C) & (np.argmax(histc_b) <= bin1_C))[0][0]
    else:
        print "Not a valid flexure mask"
        exit()
    maxis = np.where((R_m == Raxis) & (C_m == Caxis))[0][0]
    raxis = r_m[maxis]
    caxis = c_m[maxis]

    print "\nMask_Holes  Rows  Cols    r axis    c axis"
    print "                            pixels    pixels"
    print "   %5i   %5i %5i  %8.1f  %8.1f" % (maskholes, maskRows, maskCols, raxis*rbin, caxis*cbin)
    # np.savetxt(dateobs+'_'+"mask.txt",np.vstack((r_m,c_m,sex_js[2,okm_s],R_m)).T,fmt="%10.2f")

    # get linelist, predict spots in spectral image
    wavcent = rsslam(grating, grang, artic, 0., dateobs)
    specfile = datedfile(datadir + "spectrograph/spec_yyyymmdd.txt", dateobs)
    FCampoly = np.loadtxt(specfile, usecols=(1,))[5:11]
    fcam = np.polyval(FCampoly, (wavcent/1000. - 4.))
    lampfile = iraf.osfn("pysalt$data/linelists/" + lamp + ".salt")
    wav_l, int_l = np.loadtxt(lampfile, unpack=True)
    maxdalpha = -np.degrees((cols/2)*cbin*pixel/(1000.*fcam))
    maxgamma = np.degrees((rows/2)*rbin*pixel/(1000.*fcam))
    maxwav = rsslam(grating, grang, artic, cols*cbin/2, dateobs, -maxdalpha, 0)
    minwav = rsslam(grating, grang, artic, -cols*cbin/2, dateobs, maxdalpha, maxgamma)
    ok_l = (wav_l >= minwav) & (wav_l <= maxwav)
    wav_l = wav_l[ok_l]
    int_l = int_l[ok_l]
    lines = wav_l.shape[0]

    col_ml = np.zeros((maskholes, lines))
    dcol_c = np.arange(-(cols*cbin/2), (cols*cbin/2))
    for m in range(maskholes):
        dalpha = -np.degrees((c_m[m]-caxis)*cbin*pixel/(1000.*fcam))
        gamma = np.degrees((r_m[m]-raxis)*rbin*pixel/(1000.*fcam))
        wav0, wav1 = rsslam(grating, grang, artic, dcol_c[[0, -1]], dateobs, dalpha, gamma=gamma)
        ok_l = ((wav_l > wav0) & (wav_l < wav1))
        colwav = interp1d(rsslam(grating, grang, artic, dcol_c, \
                          dateobs, dalpha=dalpha, gamma=gamma), dcol_c)
        col_ml[m, ok_l] = colwav(wav_l[ok_l]) + caxis*cbin
    # np.savetxt(dateobs+"_col_ml.txt",np.vstack((R_m,C_m,col_ml.T)),fmt="%8.1f")

    # identify mask hole and wavelength for spots in spec image closest to rho=0
    os.remove("sexwt.fits")
    sex_js = sextract(fitslist[catpos], "", deblend=deblend)
    r_s = sex_js[1]
    c_s = sex_js[0]
    flux_s = sex_js[2]
    spots = r_s.shape[0]
    fluxmedian = np.median(np.sort(sex_js[2])[-10:])
    ok_s = (flux_s > fluxmedian/30)  # cull bogus spots

    # find spectral bin rows RR in candidates R0, cull non-spectra
    histr_b, binr_b = np.histogram(r_s[ok_s], bins=rows/10, range=(0, rows))
    histr_b[[0, -1]] = 0
    bin0_R0 = np.where((histr_b[1:] > 0) & (histr_b[:-1] == 0))[0] + 1
    bin1_R0 = np.where((histr_b[1:] == 0) & (histr_b[:-1] > 0))[0]
    bin_s = np.digitize(r_s, binr_b) - 1
    maxcount_R0 = np.array([(histr_b[bin0_R0[R0]:bin1_R0[R0]+1]).max() \
                            for R0 in range(bin0_R0.shape[0])])
    ok_R0 = (maxcount_R0 > 3)
    specrows = ok_R0.sum()  # cull down to spectra RR
    bin0_RR = bin0_R0[ok_R0]
    bin1_RR = bin1_R0[ok_R0]
    ok_s &= ((bin_s >= bin0_RR[:, None]) & (bin_s <= bin1_RR[:, None])).any(axis=0)
    RR_s = -np.ones(spots)
    r_RR = np.zeros(specrows)
    for RR in range(specrows):
        isRR_s = ok_s & np.in1d(bin_s, np.arange(bin0_RR[RR], bin1_RR[RR]+1))
        RR_s[isRR_s] = RR
        r_RR[RR] = r_s[isRR_s].mean()
    count_RR = (RR_s[:, None] == range(specrows)).sum(axis=0)
    if maskid == 'P000000N99':
        RRaxis = np.argmin((raxis - r_RR)**2)
    elif maskid == 'P000000N03':
        RRaxis = np.argmax(count_RR)

    # cull weak lines
    ptile = 100.*min(1., 5.*maskCols/count_RR.max())  # want like 5 brightest lines
    for RR in range(specrows):
        isRR_s = ok_s & np.in1d(bin_s, np.arange(bin0_RR[RR], bin1_RR[RR]+1))
        fluxmin = np.percentile(sex_js[2, isRR_s], 100. - ptile)
        ok_s[isRR_s] &= (sex_js[2, isRR_s] > fluxmin)

    # identify with mask rows R (assuming no gaps)
    RR_m = R_m + RRaxis - Raxis

    # find approximate grating shift in dispersion direction by looking for most common id error
    histc_b = np.zeros(60)
    for RR in range(specrows):
        isRR_s = ((RR_s == RR) & ok_s)
        cerr_MS = (c_s[None, isRR_s] - col_ml[RR_m == RR].ravel()[:, None])
        histc_b += np.histogram(cerr_MS.ravel(), bins=60, range=(-150, 150))[0]
    cshift = 5*np.argmax(histc_b) - 150
    col_ml += cshift

    # identify wavelength and mask column with spots in each spectrum
    isfound_s = np.zeros((spots), dtype=bool)
    bintol = 16/cbin  # 2 arcsec tolerance for line ID
    R_s = -np.ones(spots, dtype=int)
    C_s = -np.ones(spots, dtype=int)
    l_s = -np.ones(spots, dtype=int)
    m_s = -np.ones(spots, dtype=int)
    cerr_s = np.zeros(spots)
    rmscol = 0.
    for RR in range(specrows):  # _S spot in spectrum, _P (mask column, line)
        isRR_m = (RR_m == RR)
        isRR_s = ((RR_s == RR) & ok_s)
        cerr_PS = (c_s[None, isRR_s] - col_ml[isRR_m].ravel()[:, None])
        Spots = isRR_s.sum()
        Possibles = col_ml[isRR_m].size
        Cols = Possibles/lines
        P_S = np.argmin(np.abs(cerr_PS), axis=0)
        cerr_S = cerr_PS[P_S, range(isRR_s.sum())]
        isfound_S = (np.abs(cerr_S) < bintol)
        M_P, l_P = np.unravel_index(np.arange(Possibles), (Cols, lines))
        m_P = np.where(isRR_m)[0][M_P]
        m_S = m_P[P_S]
        C_P = C_m[m_P]
        C_S = C_P[P_S]
        l_S = l_P[P_S]
        s_S = np.where(isRR_s)[0]
        R_s[isRR_s] = RR + Raxis - RRaxis
        cerr_s[s_S] = cerr_S
        C_s[s_S[isfound_S]] = C_S[isfound_S]
        l_s[s_S[isfound_S]] = l_S[isfound_S]
        m_s[s_S[isfound_S]] = m_S[isfound_S]
        isfound_s[s_S] |= isfound_S
        rmscol += (cerr_S[isfound_S]**2).sum()

    # cull wavelengths to _L with < 1/2 Mask Rows or Cols
    ok_s &= isfound_s
    ok_l = np.zeros((lines), dtype=bool)
    for line in range(lines):
        lRows = np.unique(R_s[l_s == line]).shape[0]
        lCols = np.unique(C_s[l_s == line]).shape[0]
        ok_l[line] = ((lRows >= maskRows/2) & (lCols >= maskCols/2))
    l_L = np.where(ok_l)[0]
    wav_L = wav_l[l_L]
    Lines = l_L.shape[0]
    ok_s &= np.in1d(l_s, l_L)

    # tabulate good catalog spots (final _S)
    s_S = np.where(ok_s)[0]
    r_S = r_s[s_S]
    c_S = c_s[s_S]
    cerr_S = cerr_s[s_S]
    R_S = R_s[s_S]
    C_S = C_s[s_S]
    l_S = l_s[s_S]
    Spots = ok_s.sum()
    rshift = r_S[R_S == Raxis].mean() - raxis
    cshift += (c_S - col_ml[m_s[s_S], l_S]).mean()
    rmscol = np.sqrt(rmscol/Spots)
    np.savetxt("cat_S.txt", np.vstack((s_S, r_S, c_S, R_S, C_S, l_S, cerr_S)).T, \
               fmt="%5i %8.2f %8.2f %5i %5i %5i %8.2f")

    print "\nSpec_Spots  Lines  rshift  cshift     rms"
    print "                    pixels  pixels  pixels"
    print "  %5i    %5i %8.1f %8.1f %8.1f" % (Spots, np.unique(l_S).shape[0], rshift, cshift, rmscol)
    print "\nLineno   Wavel  spots  Rows  Cols"
    for L in range(Lines):
        line = l_L[L]
        lRows = np.unique(R_S[l_S == line]).shape[0]
        lCols = np.unique(C_S[l_S == line]).shape[0]
        lspots = (l_S == line).sum()
        print " %5i %8.2f %5i %5i %5i" % (line, wav_l[line], lspots, lRows, lCols)

    sexcols = sex_js.shape[0]
    sexdata_jfS = np.zeros((sexcols, flexposns, Spots))
    sexdata_jfS[:, catpos] = sex_js[:, ok_s]
    xcenter_L = col_ml[maxis, l_L]
    ycenter = raxis + rshift
    if option == "filesave":
        np.savetxt(prefix+"Spots.txt", sexdata_jfS[:, catpos].T, \
                   fmt=2*"%9.2f "+"%9.0f "+"%9.1f "+"%4i "+"%6.2f "+3*"%7.2f "+"%11.3e")

    # find spots in flexure series, in order of increasing abs(rho), and store sextractor output
    row_fLd = np.zeros((flexposns, Lines, 2))
    col_fLd = np.zeros((flexposns, Lines, 2))

    print "\n fits      rho   line  spots  rshift  cshift  rslope  cslope  rmserr"
    print "           deg    Ang          arcsec  arcsec  arcmin  arcmin   bins"
    for dirn in (1, -1):
        refpos = catpos
        posdirlist = np.argsort(dirn*rho_f)
        poslist = posdirlist[dirn*rho_f[posdirlist] > rho_f[refpos]]
        for fpos in poslist:
            col_S, row_S = sexdata_jfS[0:2, refpos, :]
            sex_js = sextract(fitslist[fpos], "sexwt.fits", deblend=deblend)
            binsqerr_sS = (sex_js[1, :, None] - row_S[None, :])**2 + (sex_js[0, :, None] - col_S[None, :])**2
            S_s = np.argmin(binsqerr_sS, axis=1)
            # First compute image shift by averaging small errors
            rowerr_s = sex_js[1] - row_S[S_s]
            colerr_s = sex_js[0] - col_S[S_s]
            hist_r, bin_r = np.histogram(rowerr_s, bins=32, range=(-2*bintol, 2*bintol))
            drow = rowerr_s[(rowerr_s > bin_r[np.argmax(hist_r)]-bintol) & \
                            (rowerr_s < bin_r[np.argmax(hist_r)]+bintol)].mean()
            hist_c, bin_c = np.histogram(colerr_s, bins=32, range=(-2*bintol, 2*bintol))
            dcol = colerr_s[(colerr_s > bin_c[np.argmax(hist_c)]-bintol) & \
                            (colerr_s < bin_c[np.argmax(hist_c)]+bintol)].mean()
            # Now refind the closest ID
            binsqerr_sS = (sex_js[1, :, None] - row_S[None, :] - drow)**2 + \
                          (sex_js[0, :, None] - col_S[None, :] - dcol)**2
            binsqerr_s = binsqerr_sS.min(axis=1)
            isfound_s = binsqerr_s < bintol**2
            S_s = np.argmin(binsqerr_sS, axis=1)
            isfound_s &= (binsqerr_s == binsqerr_sS[:, S_s].min(axis=0))
            isfound_S = np.array([S in S_s[isfound_s] for S in range(Spots)])
            sexdata_jfS[:, fpos, S_s[isfound_s]] = sex_js[:, isfound_s]
            drow_S = sexdata_jfS[1, fpos] - sexdata_jfS[1, catpos]
            dcol_S = sexdata_jfS[0, fpos] - sexdata_jfS[0, catpos]
            # np.savetxt("motion_"+str(fpos)+".txt",np.vstack((isfound_S,l_S,drow_S,dcol_S)).T,fmt="%3i %3i %8.2f %8.2f")

            # Compute flexure image motion parameters for each line
            for L in range(Lines):
                ok_S = ((l_S == l_L[L]) & isfound_S)
                row_fLd[fpos, L], rowchi, d, d, d = \
                    np.polyfit(sexdata_jfS[0, catpos, ok_S]-xcenter_L[L], drow_S[ok_S], deg=1, full=True)
                col_fLd[fpos, L], colchi, d, d, d = \
                    np.polyfit(sexdata_jfS[1, catpos, ok_S]-ycenter, dcol_S[ok_S], deg=1, full=True)
                rms = np.sqrt((rowchi + colchi)/(2*ok_S.sum()))

                print ("%12s %5.0f %5i %5i " + 5*"%7.2f ") % (image_f[fpos], rho_f[fpos], wav_L[L], \
                    ok_S.sum(), row_fLd[fpos, L, 1]*rbin*pix_scale, col_fLd[fpos, L, 1]*cbin*pix_scale, \
                    60.*np.degrees(row_fLd[fpos, L, 0]), -60.*np.degrees(col_fLd[fpos, L, 0]), rms)
            if option == "filesave":
                np.savetxt(prefix+"flex_"+str(fpos)+".txt", np.vstack((isfound_S, drow_S, dcol_S)).T, \
                           fmt="%2i %8.3f %8.3f")
                np.savetxt(prefix+"sextr_"+str(fpos)+".txt", sexdata_jfS[:, fpos].T)
        print

    # make plots
    fig, plot_s = plt.subplots(2, 1, sharex=True)
    plt.xlabel('Rho (deg)')
    plt.xlim(-120, 120)
    plt.xticks(range(-120, 120, 30))
    fig.set_size_inches((8.5, 11))
    fig.subplots_adjust(left=0.175)
    plot_s[0].set_title(str(dateobs) + [" Imaging", " Spectral"][isspec] + " Flexure")
    plot_s[0].set_ylabel('Mean Position (arcsec)')
    plot_s[0].set_ylim(-0.5, 4.)
    plot_s[1].set_ylabel('Rotation (arcmin ccw)')
    plot_s[1].set_ylim(-10., 6.)
    lbl_L = [("%5.0f") % (wav_L[L]) for L in range(Lines)]
    color_L = 'bgrcmykw'
    for L in range(Lines):
        plot_s[0].plot(rho_f, row_fLd[:, L, 1]*rbin*pix_scale, \
                       color=color_L[L], marker='D', markersize=8, label='row '+lbl_L[L])
        plot_s[1].plot(rho_f, 60.*np.degrees(row_fLd[:, L, 0]), \
                       color=color_L[L], marker='D', markersize=8, label='row '+lbl_L[L])
    collbl = 'col'+lbl_L[0]
    for L in range(Lines):
        plot_s[0].plot(rho_f, col_fLd[:, L, 1]*cbin*pix_scale, \
                       color=color_L[L], marker='s', markersize=8, label=collbl)
        plot_s[1].plot(rho_f, -60.*np.degrees(col_fLd[:, L, 0]), \
                       color=color_L[L], marker='s', markersize=8, label=collbl)
        collbl = ''
    plot_s[0].legend(fontsize='medium', loc='upper center')
    plotfile = str(dateobs) + ['_imflex.pdf', '_grflex.pdf'][isspec]
    plt.savefig(plotfile, orientation='portrait')

    if os.name == 'posix':
        if os.popen('ps -C evince -f').read().count(plotfile) == 0:
            os.system('evince ' + plotfile + ' &')
    os.remove("out.txt")
    os.remove("qred_thrufoc.param")
    os.remove("sexwt.fits")
    return
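# Each flexure measurement above reduces to a degree-1 polyfit: the constant
# term is the mean image shift and the slope of offset versus distance from the
# optical axis is the field rotation, reported as 60.*np.degrees(slope) arcmin.
# A standalone sketch with synthetic spot offsets:
def _flexfit_sketch():
    import numpy as np
    x_s = np.linspace(-1500., 1500., 25)              # column offset from axis (unbinned pix)
    drow_s = 0.8 + np.tan(np.radians(3./60.))*x_s     # 0.8 pix shift + 3 arcmin rotation
    slope, shift = np.polyfit(x_s, drow_s, deg=1)
    print "shift %5.2f pix  rotation %5.2f arcmin" % (shift, 60.*np.degrees(slope))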
def specred(infile_list, target, propcode, calfile=None, inter=True, automethod='Matchlines'):
    #set up the files
    infiles = ','.join(['%s' % x for x in infile_list])
    obsdate = os.path.basename(infile_list[0])[7:15]

    #set up some files that will be needed
    logfile = 'spec' + obsdate + '.log'
    dbfile = 'spec%s.db' % obsdate

    #create the observation log
    obs_dict = obslog(infile_list)

    for i in range(len(infile_list)):
        if obs_dict['OBJECT'][i].upper().strip() == 'ARC' and \
           obs_dict['PROPID'][i].upper().strip() == propcode:
            lamp = obs_dict['LAMPID'][i].strip().replace(' ', '')
            arcimage = os.path.basename(infile_list[i])
            if lamp == 'NONE':
                lamp = 'CuAr'
            lampfile = iraf.osfn("pysalt$data/linelists/%s.salt" % lamp)

            specidentify(arcimage, lampfile, dbfile, guesstype='rss',
                         guessfile='', automethod=automethod, function='legendre', order=3,
                         rstep=100, rstart='middlerow', mdiff=20, thresh=3, niter=5, smooth=3,
                         inter=False, clobber=True, logfile=logfile, verbose=True)

            specrectify(arcimage, outimages='', outpref='x', solfile=dbfile, caltype='line',
                        function='legendre', order=3, inttype='interp', w1=None, w2=None,
                        dw=None, nw=None, blank=0.0, clobber=True, logfile=logfile, verbose=True)

    objimages = ''
    spec_list = []
    for i in range(len(infile_list)):
        if obs_dict['CCDTYPE'][i].count('OBJECT') and obs_dict['INSTRUME'][i].count('RSS') and \
           obs_dict['PROPID'][i].upper().strip() == propcode:
            img = infile_list[i]
            ##rectify it
            specrectify(img, outimages='', outpref='x', solfile=dbfile, caltype='line',
                        function='legendre', order=3, inttype='interp', w1=None, w2=None,
                        dw=None, nw=None, blank=0.0, clobber=True, logfile=logfile, verbose=True)
            #extract the spectra
            spec_list.append(extract_spectra('x' + img, yc=1030, calfile=calfile,
                                             findobject=True, smooth=False, maskzeros=True,
                                             clobber=True))

    #combine the results
    w, f, e = speccombine(spec_list, obsdate)
    outfile = "%s_%s.spec" % (target, obsdate)
    write_spectra(outfile, w, f, e)
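# speccombine itself is not shown in this listing; a common implementation
# consistent with the (w, f, e) interface above is an inverse-variance
# weighted mean on a shared wavelength grid.  A hedged sketch of that
# approach, not necessarily the method speccombine uses:
def _ivarcombine_sketch(flux_nw, err_nw):
    import numpy as np
    w_nw = 1./err_nw**2                                 # per-spectrum weights
    flux_w = (w_nw*flux_nw).sum(axis=0)/w_nw.sum(axis=0)
    err_w = 1./np.sqrt(w_nw.sum(axis=0))                # propagated error
    return flux_w, err_w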
def list_configurations(infilelist, log):
    """Produce a list of files of similar configurations

    Parameters
    ----------
    infilelist: list
        list of input files

    log: ~logging
        Logging object.

    Returns
    -------
    config_dict: dict
        Dictionary keyed by (grating, gr-angle, camang) configuration; each
        entry holds the 'arc', 'flat', and 'object' file lists for that
        configuration ('flat' is only populated for post-2015 data).
    """
    # set up the observing dictionary
    obs_dict = obslog(infilelist)

    # hack to remove potentially bad data
    for i in reversed(range(len(infilelist))):
        if int(obs_dict['BS-STATE'][i][1]) != 2:
            del infilelist[i]
    obs_dict = obslog(infilelist)

    # inserted to take care of older observations
    old_data = False
    for date in obs_dict['DATE-OBS']:
        if int(date[0:4]) < 2015:
            old_data = True

    if old_data:
        log.message("Configuration map for old data", with_header=False)
        iarc_a, iarc_i, confno_i, confdatlist = list_configurations_old(infilelist, log)
        arcs = len(iarc_a)
        config_dict = {}
        for i in set(confno_i):
            image_dict = {}
            image_dict['arc'] = [infilelist[iarc_a[i]]]
            ilist = [infilelist[x] for x in np.where(iarc_i == iarc_a[i])[0]]
            ilist.remove(image_dict['arc'][0])
            image_dict['object'] = ilist
            config_dict[confdatlist[i]] = image_dict
        return config_dict

    # delete bad columns
    obs_dict = obslog(infilelist)
    for k in obs_dict.keys():
        if len(obs_dict[k]) == 0:
            del obs_dict[k]
    obs_tab = Table(obs_dict)

    # create the configurations list
    config_dict = {}
    confdatlist = configmap(obs_tab, config_list=('GRATING', 'GR-ANGLE', 'CAMANG', 'BVISITID'))

    infilelist = np.array(infilelist)
    for grating, grtilt, camang, blockvisit in confdatlist:
        image_dict = {}
        #things with the same configuration
        mask = ((obs_tab['GRATING'] == grating) *
                (obs_tab['GR-ANGLE'] == grtilt) *
                (obs_tab['CAMANG'] == camang) *
                (obs_tab['BVISITID'] == blockvisit))
        objtype = obs_tab['CCDTYPE']  # kn changed from OBJECT: CCDTYPE lists ARC consistently
        image_dict['arc'] = infilelist[mask * (objtype == 'ARC')]

        # if no arc for this config look for a similar one with different BVISITID
        if len(image_dict['arc']) == 0:
            othermask = ((obs_tab['GRATING'] == grating) * \
                ((obs_tab['GR-ANGLE'] - grtilt) < .03) * ((obs_tab['GR-ANGLE'] - grtilt) > -.03) * \
                ((obs_tab['CAMANG'] - camang) < .05) * ((obs_tab['CAMANG'] - camang) > -.05) * \
                (obs_tab['BVISITID'] != blockvisit))
            image_dict['arc'] = infilelist[othermask * (objtype == 'ARC')]
            if len(image_dict['arc']) > 0:
                log.message("Warning: using arc from different BLOCKID", with_header=False)

        image_dict['flat'] = infilelist[mask * (objtype == 'FLAT')]
        image_dict['object'] = infilelist[mask * (objtype != 'ARC') * (objtype != 'FLAT')]
        config_dict[(grating, grtilt, camang)] = image_dict

    return config_dict
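# A hedged sketch of consuming the dictionary returned above (keys are
# (grating, gr-angle, camang) tuples; 'flat' is only populated for post-2015
# data, so only 'arc' and 'object' are shown here):
def _showconfigs_sketch(infilelist, log):
    import os
    config_dict = list_configurations(infilelist, log)
    for config, image_dict in config_dict.items():
        print config
        for key in ('arc', 'object'):
            print '  %-7s %s' % (key, ' '.join(os.path.basename(f) for f in image_dict[key]))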
def specpolfinalstokes(infilelist, logfile='salt.log', debug=False,
        HW_Cal_override=False, Linear_PolZeropoint_override=False, PAZeropoint_override=False):
    """Combine the raw stokes and apply the polarimetric calibrations

    Parameters
    ----------
    infilelist: list
        List of filenames that include an extracted spectrum

    logfile: str
        Name of file for logging
    """
    """
    _l: line in calibration file
    _i: index in file list
    _j: rawstokes = waveplate position pair index (enumeration within config, including repeats)
    _J: cycle number idx (0,1,..) for each rawstokes
    _k: combstokes = waveplate position pair index (enumeration within config, repeats combined)
    _K: pair = waveplate position pair index (enumeration within obs)
    _p: pair = waveplate position pair # (eg 0,1,2,3 = 0 4 1 5 2 6 3 7 for LINEAR-HI, sorted in h0 order)
    _s: normalized linear stokes for zeropoint correction (0,1) = (q,u)
    _S: unnormalized raw stokes within waveplate position pair: (eg 0,1 = I,Q)
    _F: unnormalized final stokes (eg 0,1,2 = I,Q,U)
    """
    calhistorylist = ["PolCal Model: 20170429",]

    patternlist = open(datadir+'wppaterns.txt','r').readlines()
    patternpairs = dict();  patternstokes = dict();  patterndict = dict()
    for p in patternlist:
        if p.split()[0] == '#': continue
        patterndict[p.split()[0]] = np.array(p.split()[3:]).astype(int).reshape((-1,2))
        patternpairs[p.split()[0]] = (len(p.split())-3)/2
        patternstokes[p.split()[0]] = int(p.split()[1])

    if len(glob.glob('specpol*.log')): logfile = glob.glob('specpol*.log')[0]

    with logging(logfile, debug) as log:
        log.message('specpolfinalstokes version: 20171226', with_header=False)

        # organize data using names.
        #   allrawlist = infileidx,object,config,wvplt,cycle for each infile.
        obsdict = obslog(infilelist)
        files = len(infilelist)
        allrawlist = []
        for i in range(files):
            object,config,wvplt,cycle = os.path.basename(infilelist[i]).rsplit('.',1)[0].rsplit('_',3)
            if (config[0]!='c')|(wvplt[0]!='h')|(not cycle.isdigit()):
                log.message('File '+infilelist[i]+' is not a raw stokes file.', with_header=False)
                continue
            allrawlist.append([i,object,config,wvplt,cycle])
        configlist = sorted(list(set(ele[2] for ele in allrawlist)))    # unique configs
        if debug:
            print "allrawlist: ", allrawlist
            print "configlist: ", configlist

        # input correct HWCal and TelZeropoint calibration files
        dateobs = obsdict['DATE-OBS'][0].replace('-','')
        HWCalibrationfile = datedfile(datadir+"RSSpol_HW_Calibration_yyyymmdd_vnn.txt", dateobs)
        hwav_l,heff_l,hpa_l = np.loadtxt(HWCalibrationfile,dtype=float,unpack=True,usecols=(0,1,2),ndmin=2)
        TelZeropointfile = datedfile(datadir+"RSSpol_Linear_TelZeropoint_yyyymmdd_vnn.txt", dateobs)
        twav_l,tq0_l,tu0_l,err_l = np.loadtxt(TelZeropointfile,dtype=float,unpack=True,ndmin=2)

        # input PAZeropoint file and get correct entry
        dpadatever,dpa = datedline(datadir+"RSSpol_Linear_PAZeropoint.txt", dateobs).split()
        dpa = float(dpa)

        # prepare calibration keyword documentation
        pacaltype = "Equatorial"
        if HW_Cal_override:
            Linear_PolZeropoint_override = True
            PAZeropoint_override = True
            pacaltype = "Instrumental"
            calhistorylist.append("HWCal: Uncalibrated")
        elif Linear_PolZeropoint_override:
            PAZeropoint_override = True
            calhistorylist.extend(["HWCal: "+os.path.basename(HWCalibrationfile), "PolZeropoint: Null"])
        elif PAZeropoint_override:
            calhistorylist.extend(["HWCal: "+os.path.basename(HWCalibrationfile), \
                "PolZeropoint: "+os.path.basename(TelZeropointfile), "PAZeropoint: Null"])
        else:
            calhistorylist.extend(["HWCal: "+os.path.basename(HWCalibrationfile), \
                "PolZeropoint: "+os.path.basename(TelZeropointfile), \
                "PAZeropoint: RSSpol_Linear_PAZeropoint.txt "+str(dpadatever)+" "+str(dpa)])

        log.message(' PA type: '+pacaltype, with_header=False)
        if len(calhistorylist): log.message(' '+'\n '.join(calhistorylist), with_header=False)

        chifence_d = 2.2*np.array([6.43,4.08,3.31,2.91,2.65,2.49,2.35,2.25])    # *q3 for upper outer fence outlier for each dof

        # do one config at a time.
        #   rawlist = infileidx,object,config,wvplt,cycle for each infile *in this config*.
        #   rawlist is sorted with cycle varying fastest
        #   rawstokes = len(rawlist).  j is idx in rawlist.
        for conf in configlist:
            log.message("\nConfiguration: %s" % conf, with_header=False)
            rawlist = [entry for entry in allrawlist if entry[2]==conf]
            for col in (4,3,1,2): rawlist = sorted(rawlist,key=operator.itemgetter(col))
            rawstokes = len(rawlist)    # rawlist is sorted with cycle varying fastest
            wav0 = pyfits.getheader(infilelist[rawlist[0][0]],'SCI')['CRVAL1']
            dwav = pyfits.getheader(infilelist[rawlist[0][0]],'SCI')['CDELT1']
            wavs = pyfits.getheader(infilelist[rawlist[0][0]],'SCI')['NAXIS1']
            wav_w = wav0 + dwav*np.arange(wavs)

            # interpolate HW, telZeropoint calibration wavelength dependence for this config
            okcal_w = np.ones(wavs).astype(bool)
            if not HW_Cal_override:
                heff_w = interp1d(hwav_l,heff_l,kind='cubic',bounds_error=False)(wav_w)
                hpar_w = -interp1d(hwav_l,hpa_l,kind='cubic',bounds_error=False)(wav_w)
                okcal_w &= ~np.isnan(heff_w)
                hpar_w[~okcal_w] = 0.
            if not Linear_PolZeropoint_override:
                tel0_sw = interp1d(twav_l,np.array([tq0_l,tu0_l]),kind='cubic',bounds_error=False)(wav_w)
                okcal_w &= ~np.isnan(tel0_sw[0])
                tel0_sw /= 100.     # table is in %

            # get spectrograph calibration file, spectrograph coordinates
            grating = pyfits.getheader(infilelist[rawlist[0][0]])['GRATING']
            grang = pyfits.getheader(infilelist[rawlist[0][0]])['GR-ANGLE']
            artic = pyfits.getheader(infilelist[rawlist[0][0]])['AR-ANGLE']
            SpecZeropointfile = datedfile(datadir+
                "RSSpol_Linear_SpecZeropoint_"+grating+"_yyyymmdd_vnn.txt", dateobs)
            if len(SpecZeropointfile): calhistorylist.append(SpecZeropointfile)

            # get all rawstokes data
            #   comblist = last rawlistidx,object,config,wvplt,cycles,wppat
            #   one entry for each set of cycles that needs to be combined (i.e, one for each wvplt)
            stokes_jSw = np.zeros((rawstokes,2,wavs))
            var_jSw = np.zeros_like(stokes_jSw)
            covar_jSw = np.zeros_like(stokes_jSw)
            bpm_jSw = np.zeros_like(stokes_jSw).astype(int)
            telpa_j = np.zeros(rawstokes)
            comblist = []
            for j in range(rawstokes):
                i,object,config,wvplt,cycle = rawlist[j]
                lampid = pyfits.getheader(infilelist[i],0)['LAMPID'].strip().upper()
                telpa_j[j] = float(pyfits.getheader(infilelist[i],0)['TELPA'])
                if lampid != "NONE": pacaltype = "Instrumental"
                if j==0:
                    cycles = 1
                # if object,config,wvplt changes, start a new comblist entry
                else:
                    if rawlist[j-1][1:4] != rawlist[j][1:4]: cycles = 1
                    else: cycles += 1
                wppat = pyfits.getheader(infilelist[i])['WPPATERN'].upper()
                stokes_jSw[j] = pyfits.open(infilelist[i])['SCI'].data.reshape((2,-1))
                var_jSw[j] = pyfits.open(infilelist[i])['VAR'].data.reshape((2,-1))
                covar_jSw[j] = pyfits.open(infilelist[i])['COV'].data.reshape((2,-1))
                bpm_jSw[j] = pyfits.open(infilelist[i])['BPM'].data.reshape((2,-1))

                # apply telescope zeropoint calibration, q rotated to raw coordinates
                if not Linear_PolZeropoint_override:
                    trkrho = pyfits.getheader(infilelist[i])['TRKRHO']
                    dpatelraw_w = -(22.5*float(wvplt[1]) + hpar_w + trkrho + dpa)
                    rawtel0_sw = \
                        specpolrotate(tel0_sw,0,0,dpatelraw_w,normalized=True)[0]
                    rawtel0_sw[:,okcal_w] *= heff_w[okcal_w]
                    stokes_jSw[j,1,okcal_w] -= stokes_jSw[j,0,okcal_w]*rawtel0_sw[0,okcal_w]
                if cycles==1:
                    comblist.append((j,object,config,wvplt,1,wppat,pacaltype))
                else:
                    comblist[-1] = (j,object,config,wvplt,cycles,wppat,pacaltype)

            # combine multiple cycles as necessary.  Absolute stokes is on a per cycle basis.
            # polarimetric combination on normalized stokes basis
            #   to avoid coupling mean syserr into polarimetric spectral features
            combstokess = len(comblist)
            stokes_kSw = np.zeros((combstokess,2,wavs))
            var_kSw = np.zeros_like(stokes_kSw)
            covar_kSw = np.zeros_like(stokes_kSw)
            cycles_kw = np.zeros((combstokess,wavs)).astype(int)
            chi2cycle_kw = np.zeros((combstokess,wavs))
            badcyclechi_kw = np.zeros((combstokess,wavs),dtype=bool)
            havecyclechi_k = np.zeros(combstokess,dtype=bool)

            # obslist = first comblist idx,object,config,wppat,pairs
            #   k = idx in comblist
            obslist = []
            jlistk = []     # list of rawstokes idx for each comblist entry
            Jlistk = []     # list of cycle number for each comblist entry
            obsobject = ''
            obsconfig = ''
            chi2cycle_j = np.zeros(rawstokes)
            syserrcycle_j = np.zeros(rawstokes)
            iscull_jw = np.zeros((rawstokes,wavs),dtype=bool)
            stokes_kSw = np.zeros((combstokess,2,wavs))
            var_kSw = np.zeros_like(stokes_kSw)
            nstokes_kw = np.zeros((combstokess,wavs))
            nvar_kw = np.zeros_like(nstokes_kw)
            ncovar_kw = np.zeros_like(nstokes_kw)
            chi2cyclenet_k = np.zeros(combstokess)
            syserrcyclenet_k = np.zeros(combstokess)

            for k in range(combstokess):
                j,object,config,wvplt,cycles,wppat,pacaltype = comblist[k]
                jlistk.append(range(j-cycles+1,j+1))
                Jlistk.append([int(rawlist[jj][4])-1 for jj in range(j-cycles+1,j+1)])  # J = cycle-1, counting from 0
                nstokes_Jw = np.zeros((cycles,wavs))
                nvar_Jw = np.zeros((cycles,wavs))
                ncovar_Jw = np.zeros((cycles,wavs))
                bpm_Jw = np.zeros((cycles,wavs))
                ok_Jw = np.zeros((cycles,wavs),dtype=bool)
                for J,j in enumerate(jlistk[k]):
                    bpm_Jw[J] = bpm_jSw[j,0]
                    ok_Jw[J] = (bpm_Jw[J] == 0)
                    nstokes_Jw[J][ok_Jw[J]] = stokes_jSw[j,1][ok_Jw[J]]/stokes_jSw[j,0][ok_Jw[J]]
                    nvar_Jw[J][ok_Jw[J]] = var_jSw[j,1][ok_Jw[J]]/(stokes_jSw[j,0][ok_Jw[J]])**2
                    ncovar_Jw[J][ok_Jw[J]] = covar_jSw[j,1][ok_Jw[J]]/(stokes_jSw[j,0][ok_Jw[J]])**2

                # Culling: for multiple cycles, compare each cycle with every other cycle (dof=1).
                # bad wavelengths flagged for P < .02% (1/2000): chisq > 13.8 (chi2.isf(q=.0002,df=1))
                # for cycles>2, vote to cull specific pair/wavelength, otherwise cull wavelength
                cycles_kw[k] = (1-bpm_Jw).sum(axis=0).astype(int)
                okchi_w = (cycles_kw[k] > 1)
                chi2lim = 13.8
                havecyclechi_k[k] = okchi_w.any()
                if cycles > 1:
                    ok_Jw[J] = okchi_w & (bpm_Jw[J] == 0)
                    chi2cycle_JJw = np.zeros((cycles,cycles,wavs))
                    badcyclechi_JJw = np.zeros((cycles,cycles,wavs))
                    ok_JJw = ok_Jw[:,None,:] & ok_Jw[None,:,:]
                    nstokes_JJw = nstokes_Jw[:,None] - nstokes_Jw[None,:]
                    nvar_JJw = nvar_Jw[:,None] + nvar_Jw[None,:]
                    chi2cycle_JJw[ok_JJw] = nstokes_JJw[ok_JJw]**2/nvar_JJw[ok_JJw]
                    triuidx = np.triu_indices(cycles,1)     # _i enumeration of cycle differences
                    chi2cycle_iw = chi2cycle_JJw[triuidx]
                    badcyclechi_w = (chi2cycle_iw > chi2lim).any(axis=(0))
                    badcyclechiall_w = (badcyclechi_w & (ok_JJw[triuidx].reshape((-1,wavs)).sum(axis=0)<3))
                    badcyclechicull_w = (badcyclechi_w & np.logical_not(badcyclechiall_w))
                    wavcull_W = np.where(badcyclechicull_w)[0]      # cycles>2, cull by voting
                    if wavcull_W.shape[0]:
                        for W,w in enumerate(wavcull_W):
                            J_I = np.array(triuidx).T[np.argsort(chi2cycle_iw[:,w])].flatten()
                            _,idx = np.unique(J_I,return_index=True)
                            Jcull = J_I[np.sort(idx)][-1]
                            jcull = jlistk[k][Jcull]
                            iscull_jw[jcull,w] = True       # for reporting
                            bpm_jSw[jcull,:,w] = 1
                    else:
                        for j in jlistk[k]:
                            iscull_jw[j] = badcyclechiall_w     # for reporting
                            bpm_jSw[j][:,badcyclechiall_w] = 1
                    for J,j in enumerate(jlistk[k]):
                        bpm_Jw[J] = bpm_jSw[j,0]

                    if debug:
                        obsname = object+"_"+config
                        ok_Jw = okchi_w[None,:] & (bpm_Jw == 0)
                        np.savetxt(obsname+"_nstokes_Jw_"+str(k)+".txt",np.vstack((wav_w,ok_Jw.astype(int), \
                            nstokes_Jw,nvar_Jw)).T, fmt="%8.2f "+cycles*"%3i "+cycles*"%10.6f "+cycles*"%10.12f ")
                        np.savetxt(obsname+"_chi2cycle_iw_"+str(k)+".txt",np.vstack((wav_w,okchi_w.astype(int), \
                            chi2cycle_iw.reshape((-1,wavs)),badcyclechi_w,ok_JJw[triuidx].reshape((-1,wavs)).sum(axis=0))).T, \
                            fmt="%8.2f %3i "+chi2cycle_iw.shape[0]*"%10.7f "+" %2i %2i")
                        np.savetxt(obsname+"_Jcull_kw_"+str(k)+".txt",np.vstack((wav_w,okchi_w.astype(int), \
                            iscull_jw[jlistk[k]].astype(int).reshape((-1,wavs)))).T, fmt="%8.2f %3i "+cycles*" %3i")

                if ((object != obsobject) | (config != obsconfig)):
                    obslist.append([k,object,config,wppat,1,pacaltype])
                    obsobject = object;  obsconfig = config
                else:
                    obslist[-1][4] += 1

                # Now combine cycles, using normalized stokes to minimize systematic errors
                # first normalize cycle members J at wavelengths where all cycles have data:
                cycles_kw[k] = (1-bpm_Jw).sum(axis=0).astype(int)
                ok_w = (cycles_kw[k] > 0)
                okall_w = (cycles_kw[k] == cycles)
                normint_J = np.array(stokes_jSw[jlistk[k],0][:,okall_w].sum(axis=1))
                normint_J /= np.mean(normint_J)
                stokes_JSw = stokes_jSw[jlistk[k]]/normint_J[:,None,None]
                var_JSw = var_jSw[jlistk[k]]/normint_J[:,None,None]**2
                covar_JSw = covar_jSw[jlistk[k]]/normint_J[:,None,None]**2
                for J in range(cycles):
                    okJ_w = ok_w & (bpm_Jw[J] == 0)
                    # average the intensity
                    stokes_kSw[k,0,okJ_w] += stokes_JSw[J,0,okJ_w]/cycles_kw[k][okJ_w]
                    var_kSw[k,0,okJ_w] += var_JSw[J,0,okJ_w]/cycles_kw[k][okJ_w]**2
                    covar_kSw[k,0,okJ_w] += covar_JSw[J,0,okJ_w]/cycles_kw[k][okJ_w]**2
                    # now the normalized stokes
                    nstokes_kw[k][okJ_w] += (stokes_JSw[J,1][okJ_w]/stokes_JSw[J,0][okJ_w])/cycles_kw[k][okJ_w]
                    nvar_kw[k][okJ_w] += (var_JSw[J,1][okJ_w]/stokes_JSw[J,0][okJ_w]**2)/cycles_kw[k][okJ_w]**2
                    ncovar_kw[k][okJ_w] += (covar_JSw[J,1][okJ_w]/stokes_JSw[J,0][okJ_w]**2)/cycles_kw[k][okJ_w]**2
                stokes_kSw[k,1] = nstokes_kw[k]*stokes_kSw[k,0]
                var_kSw[k,1] = nvar_kw[k]*stokes_kSw[k,0]**2
                covar_kSw[k,1] = ncovar_kw[k]*stokes_kSw[k,0]**2
                if debug:
                    obsname = object+"_"+config
                    np.savetxt(obsname+"_stokes_kSw_"+str(k)+".txt",np.vstack((wav_w,ok_w.astype(int), \
                        stokes_kSw[k])).T, fmt="%8.2f %3i "+2*"%12.3f ")

                # compute mean chisq for each pair having multiple cycles
                if cycles > 1:
                    nstokeserr_Jw = np.zeros((cycles,wavs))
                    nerr_Jw = np.zeros((cycles,wavs))
                    for J in range(cycles):
                        okJ_w = ok_w & (bpm_Jw[J] == 0)
                        nstokes_Jw[J][okJ_w] = stokes_JSw[J,1][okJ_w]/stokes_JSw[J,0][okJ_w]
                        nvar_Jw[J][okJ_w] = var_JSw[J,1][okJ_w]/(stokes_JSw[J,0][okJ_w])**2
                        nstokeserr_Jw[J] = (nstokes_Jw[J] - nstokes_kw[k])
                        nvar_w = nvar_Jw[J] - nvar_kw[k]
                        okall_w &= (nvar_w > 0.)
                        nerr_Jw[J,okall_w] = np.sqrt(nvar_w[okall_w])
                    if (okall_w.sum()==0):
                        print "Bad data in one of the cycles for wp pair ",comblist[k][3]
                        exit()
                    nstokessyserr_J = np.average(nstokeserr_Jw[:,okall_w],weights=1./nerr_Jw[:,okall_w],axis=1)
                    nstokeserr_Jw -= nstokessyserr_J[:,None]
                    for J,j in enumerate(jlistk[k]):
                        loc,scale = norm.fit(nstokeserr_Jw[J,okall_w]/nerr_Jw[J,okall_w])
                        chi2cycle_j[j] = scale**2
                        syserrcycle_j[j] = nstokessyserr_J[J]
                    chi2cyclenet_k[k] = chi2cycle_j[jlistk[k]].mean()
                    syserrcyclenet_k[k] = np.sqrt((syserrcycle_j[jlistk[k]]**2).sum())/len(jlistk[k])
                    if debug:
                        obsname = object+"_"+config
                        chisqanalysis(obsname,nstokeserr_Jw,nerr_Jw,okall_w)

            # for each obs combine raw stokes, apply efficiency and PA calibration as appropriate for pattern, and save
            obss = len(obslist)
            for obs in range(obss):
                k0,object,config,wppat,pairs,pacaltype = obslist[obs]
                patpairs = patternpairs[wppat]
                klist = range(k0,k0+pairs)      # entries in comblist for this obs
                jlist = sum([jlistk[k] for k in klist],[])
                telpa = angle_average(telpa_j[jlist])
                obsname = object+"_"+config
                wplist = [comblist[k][3][1:] for k in klist]
                patwplist = sorted((patpairs*"%1s%1s " % tuple(patterndict[wppat].flatten())).split())
                plist = [patwplist.index(wplist[P]) for P in range(pairs)]

                k_p = np.zeros(patpairs,dtype=int)
                k_p[plist] = klist      # idx in klist for each pair idx
                cycles_p = np.zeros_like(k_p)
                cycles_p[plist] = np.array([comblist[k][4] for k in klist])     # number of cycles in comb
                cycles_pw = np.zeros((patpairs,wavs),dtype=int)
                cycles_pw[plist] = cycles_kw[klist]     # of ok cycles for each wavelength
                havecyclechi_p = np.zeros(patpairs,dtype=bool)
                havecyclechi_p[plist] = havecyclechi_k[klist]
                havelinhichi_p = np.zeros(patpairs,dtype=bool)

                # name result to document hw cycles included
                kplist = list(k_p)
                if cycles_p.max()==cycles_p.min(): kplist = [klist[0],]
                for p in range(len(kplist)):
                    obsname += "_"
                    j0 = comblist[k_p[p]][0] - cycles_p[p] + 1
                    for j in range(j0,j0+cycles_p[p]): obsname += rawlist[j][4][-1]
                log.message("\n Observation: %s    Date: %s" % (obsname,dateobs), with_header=False)
                finstokes = patternstokes[wppat]

                if pairs != patpairs:
                    if (pairs<2):
                        log.message(' Only %1i pair, skipping observation' % pairs, with_header=False)
                        continue
                    elif ((max(plist) < 2) | (min(plist) > 1)):
                        log.message(' Pattern not usable, skipping observation', with_header=False)
                        continue

                stokes_Fw = np.zeros((finstokes,wavs))
                var_Fw = np.zeros_like(stokes_Fw)
                covar_Fw = np.zeros_like(stokes_Fw)

                # normalize pairs in obs at wavelengths _W where all pair/cycles have data:
                okall_w = okcal_w & (cycles_pw[plist] == cycles_p[plist,None]).all(axis=0)
                normint_K = stokes_kSw[klist,0][:,okall_w].sum(axis=1)
                normint_K /= np.mean(normint_K)
                stokes_kSw[klist] /= normint_K[:,None,None]
                var_kSw[klist] /= normint_K[:,None,None]**2
                covar_kSw[klist] /= normint_K[:,None,None]**2

                # first, the intensity
                stokes_Fw[0] = stokes_kSw[klist,0].sum(axis=0)/pairs
                var_Fw[0] = var_kSw[klist,0].sum(axis=0)/pairs**2
                covar_Fw[0] = covar_kSw[klist,0].sum(axis=0)/pairs**2

                # now, the polarization stokes
                if wppat.count('LINEAR'):
                    var_Fw = np.vstack((var_Fw,np.zeros(wavs)))     # add QU covariance
                    if (wppat=='LINEAR'):
                        # wavelengths with both pairs having good, calibratable data in at least one cycle
                        ok_w = okcal_w & (cycles_pw[plist] > 0).all(axis=0)
                        bpm_Fw = np.repeat((np.logical_not(ok_w))[None,:],finstokes,axis=0)
                        stokes_Fw[1:,ok_w] = stokes_kSw[klist,1][:,ok_w]*(stokes_Fw[0,ok_w]/stokes_kSw[klist,0][:,ok_w])
                        var_Fw[1:3,ok_w] = var_kSw[klist,1][:,ok_w]*(stokes_Fw[0,ok_w]/stokes_kSw[klist,0][:,ok_w])**2
                        covar_Fw[1:,ok_w] = covar_kSw[klist,1][:,ok_w]*(stokes_Fw[0,ok_w]/stokes_kSw[klist,0][:,ok_w])**2
                        if debug:
                            np.savetxt(obsname+"_stokes.txt",np.vstack((wav_w,ok_w.astype(int),stokes_Fw)).T, \
                                fmt="%8.2f "+"%2i "+3*" %10.6f")
                            np.savetxt(obsname+"_var.txt",np.vstack((wav_w,ok_w.astype(int),var_Fw)).T, \
                                fmt="%8.2f "+"%2i "+4*"%14.9f ")
                            np.savetxt(obsname+"_covar.txt",np.vstack((wav_w,ok_w.astype(int),covar_Fw)).T, \
                                fmt="%8.2f "+"%2i "+3*"%14.9f ")
                    elif wppat=='LINEAR-HI':
                        # for Linear-Hi, must go to normalized stokes in order for the pair combination to cancel systematic errors
                        # each pair p at each wavelength w is linear combination of pairs, including primary p and secondary sec_p
                        # linhi chisq is from comparison of primary and secondary
                        # evaluate wavelengths with at least both pairs 0,2 or 1,3 having good, calibratable data in at least one cycle:
                        ok_pw = okcal_w[None,:] & (cycles_pw > 0)
                        ok_w = (ok_pw[0] & ok_pw[2]) | (ok_pw[1] & ok_pw[3])
                        bpm_Fw = np.repeat((np.logical_not(ok_w))[None,:],finstokes,axis=0)
                        stokespri_pw = np.zeros((patpairs,wavs))
                        varpri_pw = np.zeros_like(stokespri_pw)
                        covarpri_pw = np.zeros_like(stokespri_pw)
                        stokespri_pw[plist] = nstokes_kw[klist]
                        varpri_pw[plist] = nvar_kw[klist]
                        covarpri_pw[plist] = ncovar_kw[klist]
                        haveraw_pw = (cycles_pw > 0)
                        pricof_ppw = np.identity(patpairs)[:,:,None]*haveraw_pw[None,:,:]

                        qq = 1./np.sqrt(2.)
                        seccofb_pp = np.array([[ 0,1, 0,-1],[1, 0,1, 0],[ 0,1, 0,1],[-1, 0,1, 0]])*qq   # both secs avail
                        seccof1_pp = np.array([[qq,1,-qq, 0],[1,qq,0, qq],[-qq,1,qq,0],[-1, qq,0,qq]])*qq   # only 1st sec
                        seccof2_pp = np.array([[qq,0, qq,-1],[0,qq,1,-qq],[ qq,0,qq,1],[ 0,-qq,1,qq]])*qq   # only 2nd sec
                        seclist_p = np.array([[1,3],[0,2],[1,3],[0,2]])
                        havesecb_pw = haveraw_pw[seclist_p].all(axis=1)
                        onlysec1_pw = (np.logical_not(havesecb_pw) & haveraw_pw[seclist_p][:,0] & havesecb_pw[seclist_p][:,1])
                        onlysec2_pw = (np.logical_not(havesecb_pw) & haveraw_pw[seclist_p][:,1] & havesecb_pw[seclist_p][:,0])
                        seccof_ppw = seccofb_pp[:,:,None]*havesecb_pw[:,None,:] + \
                            seccof1_pp[:,:,None]*onlysec1_pw[:,None,:] + \
                            seccof2_pp[:,:,None]*onlysec2_pw[:,None,:]
                        stokessec_pw = (seccof_ppw*stokespri_pw[:,None,:]).sum(axis=0)
                        varsec_pw = (seccof_ppw**2*varpri_pw[:,None,:]).sum(axis=0)
                        covarsec_pw = (seccof_ppw**2*covarpri_pw[:,None,:]).sum(axis=0)

                        havesec_pw = (havesecb_pw | onlysec1_pw | onlysec2_pw)
                        prisec_pw = (haveraw_pw & havesec_pw)
                        onlypri_pw = (haveraw_pw & np.logical_not(havesec_pw))
                        onlysec_pw = (np.logical_not(haveraw_pw) & havesec_pw)

                        cof_ppw = onlypri_pw[:,None,:]*pricof_ppw + onlysec_pw[:,None,:]*seccof_ppw + \
                            0.5*prisec_pw[:,None,:]*(pricof_ppw+seccof_ppw)

                        # now do the combination
                        stokes_pw = (cof_ppw*stokespri_pw[None,:,:]).sum(axis=1)
                        var_pw = (cof_ppw**2*varpri_pw[None,:,:]).sum(axis=1)
                        covar_pw = (cof_ppw**2*covarpri_pw[None,:,:]).sum(axis=1)
                        covarprisec_pw = 0.5*varpri_pw*np.logical_or(onlysec1_pw,onlysec2_pw)
                        covarqu_w = (cof_ppw[0]*cof_ppw[2]*varpri_pw).sum(axis=0)

                        # cull wavelengths based on chisq between primary and secondary
                        chi2linhi_pw = np.zeros((patpairs,wavs))
                        badlinhichi_w = np.zeros(wavs)
                        havelinhichi_p = prisec_pw.any(axis=1)
                        linhichis = havelinhichi_p.sum()
                        chi2linhi_pw[prisec_pw] = ((stokespri_pw[prisec_pw] - stokessec_pw[prisec_pw])**2 / \
                            (varpri_pw[prisec_pw] + varsec_pw[prisec_pw] - 2.*covarprisec_pw[prisec_pw]))
                        q3_p = np.percentile(chi2linhi_pw[:,okall_w].reshape((4,-1)),75,axis=1)
                        badlinhichi_w[ok_w] = ((chi2linhi_pw[:,ok_w] > (chifence_d[2]*q3_p)[:,None])).any(axis=0)
                        ok_w &= np.logical_not(badlinhichi_w)
                        okall_w &= np.logical_not(badlinhichi_w)
                        chi2linhi_p = np.zeros(patpairs)
                        chi2linhi_p[havelinhichi_p] = (chi2linhi_pw[havelinhichi_p][:,ok_w]).sum(axis=1)/ \
                            (prisec_pw[havelinhichi_p][:,ok_w]).sum(axis=1)
                        syserrlinhi_pw = np.zeros((patpairs,wavs))
                        varlinhi_pw = np.zeros((patpairs,wavs))
                        syserrlinhi_p = np.zeros(patpairs)
                        syserrlinhi_pw[prisec_pw] = (stokespri_pw[prisec_pw] - stokessec_pw[prisec_pw])
                        varlinhi_pw[prisec_pw] = varpri_pw[prisec_pw] + varsec_pw[prisec_pw] - 2.*covarprisec_pw[prisec_pw]
                        syserrlinhi_p[havelinhichi_p] = np.average(syserrlinhi_pw[havelinhichi_p][:,okall_w], \
                            weights=1./np.sqrt(varlinhi_pw[havelinhichi_p][:,okall_w]),axis=1)

                        if debug:
                            np.savetxt(obsname+"_have_pw.txt",np.vstack((wav_w,ok_pw.astype(int),haveraw_pw,havesecb_pw, \
                                onlysec1_pw,onlysec2_pw,havesec_pw,prisec_pw,onlypri_pw,onlysec_pw)).T, \
                                fmt="%8.2f "+9*"%2i %2i %2i %2i ")
                            np.savetxt(obsname+"_seccof_ppw.txt",np.vstack((wav_w,ok_pw.astype(int),seccof_ppw.reshape((16,-1)))).T, \
                                fmt="%8.2f "+4*"%2i "+16*" %6.3f")
                            np.savetxt(obsname+"_cof_ppw.txt",np.vstack((wav_w,ok_pw.astype(int),cof_ppw.reshape((16,-1)))).T, \
                                fmt="%8.2f "+4*"%2i "+16*" %6.3f")
                            np.savetxt(obsname+"_stokes.txt",np.vstack((wav_w,ok_pw.astype(int),stokespri_pw,stokes_pw)).T, \
                                fmt="%8.2f "+4*"%2i "+8*" %10.6f")
                            np.savetxt(obsname+"_var.txt",np.vstack((wav_w,ok_pw.astype(int),varpri_pw,var_pw)).T, \
                                fmt="%8.2f "+4*"%2i "+8*"%14.9f ")
                            np.savetxt(obsname+"_covar.txt",np.vstack((wav_w,ok_pw.astype(int),covarpri_pw,covar_pw)).T, \
                                fmt="%8.2f "+4*"%2i "+8*"%14.9f ")
                            np.savetxt(obsname+"_chi2linhi_pw.txt",np.vstack((wav_w,stokes_Fw[0],ok_pw.astype(int), \
                                chi2linhi_pw)).T, fmt="%8.2f %10.0f "+4*"%2i "+4*"%10.4f ")

                        stokes_Fw[1:] = stokes_pw[[0,2]]*stokes_Fw[0]
                        var_Fw[1:3] = var_pw[[0,2]]*stokes_Fw[0]**2
                        var_Fw[3] = covarqu_w*stokes_Fw[0]**2
                        covar_Fw[1:] = covar_pw[[0,2]]*stokes_Fw[0]**2
                        bpm_Fw = ((bpm_Fw==1) | np.logical_not(ok_w)).astype(int)

                    # document chisq results, combine flagoffs, compute mean chisq for observation, combine with final bpm
                    if (havecyclechi_p.any() | havelinhichi_p.any()):
                        chi2cyclenet = 0.
                        syserrcyclenet = 0.
                        chi2linhinet = 0.
                        syserrlinhinet = 0.
                        if havecyclechi_p.any():
                            log.message(("\n"+14*" "+"{:^"+str(5*patpairs)+"}{:^"+str(8*patpairs)+"}{:^"+str(6*patpairs)+"}") \
                                .format("culled","sys %err","mean chisq"), with_header=False)
                            log.message((9*" "+"HW "+patpairs*" %4s"+patpairs*" %7s"+patpairs*" %5s") \
                                % tuple(3*patwplist), with_header=False)
                            jlist = sum([jlistk[k] for k in klist],[])
                            Jlist = list(set(sum([Jlistk[k] for k in klist],[])))
                            Jmax = max(Jlist)
                            ok_pJ = np.zeros((patpairs,Jmax+1),dtype=bool)
                            for p in plist: ok_pJ[p][Jlistk[k_p[p]]] = True
                            syserrcycle_pJ = np.zeros((patpairs,Jmax+1))
                            syserrcycle_pJ[ok_pJ] = syserrcycle_j[jlist]
                            syserrcyclenet_p = np.zeros(patpairs)
                            syserrcyclenet_p[plist] = syserrcyclenet_k[klist]
                            syserrcyclenet = np.sqrt((syserrcyclenet_p**2).sum()/patpairs)
                            chi2cycle_pJ = np.zeros((patpairs,Jmax+1))
                            chi2cycle_pJ[ok_pJ] = chi2cycle_j[jlist]
                            chi2cyclenet_p = np.zeros(patpairs)
                            chi2cyclenet_p[plist] = chi2cyclenet_k[klist]
                            chi2cyclenet = chi2cyclenet_p.sum()/patpairs
                            culls_pJ = np.zeros((patpairs,Jmax+1),dtype=int)
                            culls_pJ[ok_pJ] = iscull_jw[jlist].sum(axis=1)
                            if cycles_p.max() > 2:
                                for J in set(Jlist):
                                    log.message((" cycle %2i: "+patpairs*"%4i "+patpairs*"%7.3f "+patpairs*"%5.2f ") % \
                                        ((J+1,)+tuple(culls_pJ[:,J])+tuple(100.*syserrcycle_pJ[:,J])+tuple(chi2cycle_pJ[:,J])), \
                                        with_header=False)
                            netculls_p = [iscull_jw[jlistk[k_p[p]]].all(axis=0).sum() for p in range(patpairs)]
                            log.message((" net : "+patpairs*"%4i "+patpairs*"%7.3f "+patpairs*"%5.2f ") % \
                                (tuple(netculls_p)+tuple(100*syserrcyclenet_p)+tuple(chi2cyclenet_p)), with_header=False)
                        if (havelinhichi_p.any()):
                            log.message(("\n"+14*" "+"{:^"+str(5*patpairs)+"}{:^"+str(8*patpairs)+"}{:^"+str(6*patpairs)+"}") \
                                .format("culled","sys %err","mean chisq"), with_header=False)
                            log.message((9*" "+"HW "+(4*patpairs/2)*" "+" all"+(4*patpairs/2)*" "+patpairs*" %7s"+patpairs*" %5s") \
                                % tuple(2*patwplist), with_header=False)
                            chicount = int(badlinhichi_w.sum())
                            chi2linhinet = chi2linhi_p.sum()/(havelinhichi_p.sum())
                            syserrlinhinet = np.sqrt((syserrlinhi_p**2).sum()/(havelinhichi_p.sum()))
                            log.message((" Linhi: "+(2*patpairs)*" "+"%3i "+(2*patpairs)*" "+patpairs*"%7.3f "+patpairs*"%5.2f ") % \
                                ((chicount,)+tuple(100.*syserrlinhi_p)+tuple(chi2linhi_p)), with_header=False)
                        chi2qudof = (chi2cyclenet+chi2linhinet)/(int(chi2cyclenet>0)+int(chi2linhinet>0))
                        syserr = np.sqrt((syserrcyclenet**2+syserrlinhinet**2)/ \
                            (int(syserrcyclenet>0)+int(syserrlinhinet>0)))
                        log.message(("\n Estimated sys %%error: %5.3f%% Mean Chisq: %6.2f") % \
                            (100.*syserr,chi2qudof), with_header=False)

                    if not HW_Cal_override:
                        # apply hw efficiency, equatorial PA rotation calibration
                        eqpar_w = hpar_w + dpa + (telpa % 180)
                        stokes_Fw[1:,ok_w] /= heff_w[ok_w]
                        var_Fw[1:,ok_w] /= heff_w[ok_w]**2
                        covar_Fw[1:,ok_w] /= heff_w[ok_w]**2
                        stokes_Fw,var_Fw,covar_Fw = specpolrotate(stokes_Fw,var_Fw,covar_Fw,eqpar_w)

                    # save final stokes fits file for this observation.  Strain out nans.
                    infile = infilelist[rawlist[comblist[k][0]][0]]
                    hduout = pyfits.open(infile)
                    hduout['SCI'].data = np.nan_to_num(stokes_Fw.reshape((3,1,-1)))
                    hduout['SCI'].header['CTYPE3'] = 'I,Q,U'
                    hduout['VAR'].data = np.nan_to_num(var_Fw.reshape((4,1,-1)))
                    hduout['VAR'].header['CTYPE3'] = 'I,Q,U,QU'
                    hduout['COV'].data = np.nan_to_num(covar_Fw.reshape((3,1,-1)))
                    hduout['COV'].header['CTYPE3'] = 'I,Q,U,QU'
                    hduout['BPM'].data = bpm_Fw.astype('uint8').reshape((3,1,-1))
                    hduout['BPM'].header['CTYPE3'] = 'I,Q,U'
                    hduout[0].header['TELPA'] = round(telpa,4)
                    hduout[0].header['WPPATERN'] = wppat
                    hduout[0].header['PATYPE'] = pacaltype
                    if len(calhistorylist):
                        for line in calhistorylist:
                            hduout[0].header.add_history(line)
                    if (havecyclechi_p.any() | havelinhichi_p.any()):
                        hduout[0].header['SYSERR'] = (100.*syserr, 'estimated % systematic error')
                    outfile = obsname+'_stokes.fits'
                    hduout.writeto(outfile,overwrite=True,output_verify='warn')
                    log.message('\n '+outfile+' Stokes I,Q,U', with_header=False)

                    # apply flux calibration, if available
                    fluxcal_w = specpolflux(outfile,logfile=logfile)
                    if fluxcal_w.shape[0]>0:
                        stokes_Fw *= fluxcal_w
                        var_Fw *= fluxcal_w**2
                        covar_Fw *= fluxcal_w**2

                    # calculate, print means (stokes averaged in unnorm space)
                    avstokes_f, avvar_f, avwav = spv.avstokes(stokes_Fw,var_Fw[:-1],covar_Fw,wav_w)
                    avstokes_F = np.insert(avstokes_f,0,1.)
                    avvar_F = np.insert(avvar_f,0,1.)
                    spv.printstokes(avstokes_F,avvar_F,avwav,tcenter=np.pi/2.,textfile='tmp.log')
                    log.message(open('tmp.log').read(), with_header=False)
                    os.remove('tmp.log')

                # elif wppat.count('CIRCULAR'):  TBS
                # elif wppat=='ALL-STOKES':  TBS
            # end of obs loop
        # end of config loop
    return
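# Illustrative sketch only (hypothetical helper, not called by the pipeline):
# the cycle combination above in miniature, assuming stokes_JSw is
# (cycles,2,wavs) and bpm_Jw is (cycles,wavs).  Intensity is averaged over the
# good cycles at each wavelength, while the polarized Stokes is combined as the
# normalized quantity S/I so that throughput differences between cycles do not
# leak into the polarization.
def _cycle_combine_sketch(stokes_JSw, bpm_Jw):
    import numpy as np
    ok_Jw = (bpm_Jw == 0)
    cycles_w = ok_Jw.sum(axis=0)                    # good cycles per wavelength
    ok_w = (cycles_w > 0)
    safe_Jw = np.where(stokes_JSw[:,0] != 0., stokes_JSw[:,0], 1.)  # guard /0
    int_w = np.where(ok_w,
        (stokes_JSw[:,0]*ok_Jw).sum(axis=0)/np.maximum(cycles_w,1), 0.)
    nstokes_w = np.where(ok_w,
        ((stokes_JSw[:,1]/safe_Jw)*ok_Jw).sum(axis=0)/np.maximum(cycles_w,1), 0.)
    return int_w, nstokes_w*int_w                   # unnormalized I, S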
def saltclean(images, outpath, obslogfile=None, gaindb=None, xtalkfile=None,
              geomfile=None, subover=True, trim=True, masbias=None,
              subbias=False, median=False, function='polynomial', order=5,
              rej_lo=3, rej_hi=3, niter=5, interp='linear', clobber=False,
              logfile='salt.log', verbose=True):
    """SALTCLEAN will provide basic CCD reductions for a set of data.  It will
       sort the data, and first process the biases, flats, and then the science
       frames.  It will record basic quality control information about each of
       the steps.
    """
    plotover = False
    debug = False       # assumed default for the logging context manager

    #start logging
    with logging(logfile, debug) as log:

        # Check the input images
        infiles = saltio.argunpack('Input', images)

        # create list of output files
        outpath = saltio.abspath(outpath)

        #does the gain database file exist
        if gaindb:
            dblist = saltio.readgaindb(gaindb)
        else:
            dblist = []

        # does crosstalk coefficient data exist
        if xtalkfile:
            xtalkfile = xtalkfile.strip()
            xdict = saltio.readxtalkcoeff(xtalkfile)
        else:
            xdict = None

        #does the mosaic file exist--raise error if no
        saltio.fileexists(geomfile)

        # Delete the obslog file if it already exists
        if os.path.isfile(obslogfile) and clobber:
            saltio.delete(obslogfile)

        #read in the observation log or create it
        if os.path.isfile(obslogfile):
            msg = 'The observing log already exists.  Please either delete it or run saltclean with clobber=yes'
            raise SaltError(msg)
        else:
            headerDict = obslog(infiles, log)
            obsstruct = createobslogfits(headerDict)
            saltio.writefits(obsstruct, obslogfile)

        #create the list of bias frames and process them
        filename = obsstruct.data.field('FILENAME')
        detmode = obsstruct.data.field('DETMODE')
        ccdtype = obsstruct.data.field('CCDTYPE')

        #set the bias list of objects
        biaslist = filename[ccdtype == 'ZERO']
        masterbias_dict = {}
        for img in infiles:
            if os.path.basename(img) in biaslist:
                #open the image
                struct = fits.open(img)
                bimg = outpath + 'bxgp' + os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing Zero frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct, createvar=False, badpixelstruct=None,
                               mult=True, dblist=dblist, xdict=xdict,
                               subover=subover, trim=trim, subbias=False,
                               bstruct=None, median=median, function=function,
                               order=order, rej_lo=rej_lo, rej_hi=rej_hi,
                               niter=niter, plotover=plotover, log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False,
                                      exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, bimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master bias list
                masterbias_dict = compareimages(struct, bimg, masterbias_dict,
                                                keylist=biasheader_list)

        #create the master bias frame
        for i in masterbias_dict.keys():
            bkeys = masterbias_dict[i][0]
            blist = masterbias_dict[i][1:]
            mbiasname = outpath + createmasterbiasname(blist, bkeys)
            bfiles = ','.join(blist)
            saltcombine(bfiles, mbiasname, method='median', reject='sigclip',
                        mask=False, weight=False, blank=0, scale=None,
                        statsec=None, lthresh=3, hthresh=3, clobber=False,
                        logfile=logfile, verbose=verbose)

        #create the list of flatfields and process them
        flatlist = filename[ccdtype == 'FLAT']
        masterflat_dict = {}
        for img in infiles:
            if os.path.basename(img) in flatlist:
                #open the image
                struct = fits.open(img)
                fimg = outpath + 'bxgp' + os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing Flat frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct, createvar=False, badpixelstruct=None,
                               mult=True, dblist=dblist, xdict=xdict,
                               subover=subover, trim=trim, subbias=False,
                               bstruct=None, median=median, function=function,
                               order=order, rej_lo=rej_lo, rej_hi=rej_hi,
                               niter=niter, plotover=plotover, log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False,
                                      exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, fimg, clobber=clobber)
                saltio.closefits(struct)

                #add files to the master flat list
                masterflat_dict = compareimages(struct, fimg, masterflat_dict,
                                                keylist=flatheader_list)

        #create the master flat frame
        for i in masterflat_dict.keys():
            fkeys = masterflat_dict[i][0]
            flist = masterflat_dict[i][1:]
            mflatname = outpath + createmasterflatname(flist, fkeys)
            ffiles = ','.join(flist)
            saltcombine(ffiles, mflatname, method='median', reject='sigclip',
                        mask=False, weight=False, blank=0, scale=None,
                        statsec=None, lthresh=3, hthresh=3, clobber=False,
                        logfile=logfile, verbose=verbose)

        #process the science data
        for img in infiles:
            nimg = os.path.basename(img)
            #print nimg, nimg in flatlist, nimg in biaslist
            if not (nimg in biaslist):
                #open the image
                struct = fits.open(img)
                simg = outpath + 'bxgp' + os.path.basename(img)

                #print the message
                if log:
                    message = 'Processing science frame %s' % img
                    log.message(message, with_stdout=verbose)

                #process the image
                struct = clean(struct, createvar=False, badpixelstruct=None,
                               mult=True, dblist=dblist, xdict=xdict,
                               subover=subover, trim=trim, subbias=False,
                               bstruct=None, median=median, function=function,
                               order=order, rej_lo=rej_lo, rej_hi=rej_hi,
                               niter=niter, plotover=plotover, log=log,
                               verbose=verbose)

                #write the file out
                # housekeeping keywords
                fname, hist = history(level=1, wrap=False,
                                      exclude=['images', 'outimages', 'outpref'])
                saltkey.housekeeping(struct[0], 'SPREPARE', 'Images have been prepared', hist)
                saltkey.new('SGAIN', time.asctime(time.localtime()),
                            'Images have been gain corrected', struct[0])
                saltkey.new('SXTALK', time.asctime(time.localtime()),
                            'Images have been xtalk corrected', struct[0])
                saltkey.new('SBIAS', time.asctime(time.localtime()),
                            'Images have been de-biased', struct[0])

                # write FITS file
                saltio.writefits(struct, simg, clobber=clobber)
                saltio.closefits(struct)

                #mosaic the files--currently not in the proper format--will update when it is
                if not saltkey.fastmode(saltkey.get('DETMODE', struct[0])):
                    mimg = outpath + 'mbxgp' + os.path.basename(img)
                    saltmosaic(images=simg, outimages=mimg, outpref='',
                               geomfile=geomfile, interp=interp, cleanup=True,
                               clobber=clobber, logfile=logfile,
                               verbose=verbose)

                    #remove the intermediate steps
                    saltio.delete(simg)
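# Illustrative sketch only: a minimal saltclean invocation.  The paths and
# geometry file name below are hypothetical placeholders.
def _saltclean_example():
    saltclean(images='raw/P*.fits', outpath='product/',
              obslogfile='obslog.fits', gaindb=None, xtalkfile=None,
              geomfile='RSSgeom.dat', clobber=True,
              logfile='salt.log', verbose=True)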
def specpolcombine(infilelist, debug_output=False):
    """combine stokes files

    Parameters
    ----------
    infilelist: list
       one or more _stokes.fits files
    """
    """
    _b: observations
    _w: wavelengths in individual observations
    _W: wavelengths in combined grid
    """
    obss = len(infilelist)
    obsdict = obslog(infilelist)

    # construct common wavelength grid _W
    grating_b = obsdict['GRATING']
    grang_b = obsdict['GR-ANGLE']
    artic_b = obsdict['CAMANG']
    dwav_b = np.empty(obss)
    wav0_b = np.empty(obss)
    wavs_b = np.empty(obss, dtype=int)
    stokeslist_sw = []
    varlist_sw = []
    oklist_sw = []
    for b in range(obss):
        hdul = pyfits.open(infilelist[b])
        dwav_b[b] = float(hdul['SCI'].header['CDELT1'])
        wav0_b[b] = float(hdul['SCI'].header['CRVAL1'])
        wavs_b[b] = int(hdul['SCI'].header['NAXIS1'])
        stokeslist_sw.append(hdul['SCI'].data[:,0,:])
        varlist_sw.append(hdul['VAR'].data[:,0,:])
        oklist_sw.append(hdul['BPM'].data[:,0,:] == 0)
    dWav = dwav_b.max()
    Wav0 = dWav*(wav0_b.min()//dWav)
    Wavs = int((dWav*((wav0_b + dwav_b*wavs_b).max()//dWav) - Wav0)/dWav)
    wav_W = np.arange(Wav0, Wav0+dWav*Wavs, dWav)
    stokess = stokeslist_sw[0].shape[0]
    vars = varlist_sw[0].shape[0]
    stokes_bsW = np.zeros((obss,stokess,Wavs))
    var_bsW = np.zeros((obss,vars,Wavs))
    ok_bsW = np.zeros((obss,stokess,Wavs)).astype(bool)

    # get data and put on common grid, combining bins if necessary
    for b in range(obss):
        if dwav_b[b] == dWav:
            W0 = int((wav0_b[b] - Wav0)/dWav)
            stokes_bsW[b,:,W0:W0+wavs_b[b]] = stokeslist_sw[b]
            var_bsW[b,:,W0:W0+wavs_b[b]] = varlist_sw[b]
            ok_bsW[b,:,W0:W0+wavs_b[b]] = oklist_sw[b]
        else:
            wbinedge_W = (wav_W - dWav/2. - (wav0_b[b] - dwav_b[b]/2.))/dwav_b[b]
            for s in range(stokess):
                stokes_bsW[b,s] = scrunch1d(stokeslist_sw[b][s], wbinedge_W)
                var_bsW[b,s] = scrunch1d(varlist_sw[b][s], wbinedge_W)
                ok_bsW[b,s] = (scrunch1d((oklist_sw[b][s]).astype(int), wbinedge_W) > 0)
            if (vars > stokess):        # extra plane is the QU covariance
                var_bsW[b,vars-1] = scrunch1d(varlist_sw[b][vars-1], wbinedge_W)
    if debug_output:
        np.savetxt("stokes_bsW.txt",np.vstack((wav_W,stokes_bsW.reshape((6,Wavs)))).T,fmt="%10.3f")

    # correct (unfluxed) intensity for grating efficiency to match observations together
    for b in range(obss):
        greff_W = greff(grating_b[b],grang_b[b],artic_b[b],wav_W)
        ok_W = (ok_bsW[b].all(axis=0) & (greff_W > 0.))
        stokes_bsW[b][:,ok_W] /= greff_W[ok_W]
        var_bsW[b][:,ok_W] /= greff_W[ok_W]**2

    # normalize at matching wavelengths _w
    # compute ratios at each wavelength, then error-weighted mean of ratio
    ismatch_W = ok_bsW.all(axis=0).all(axis=0)
    normint_bw = stokes_bsW[:,0,ismatch_W]/stokes_bsW[:,0,ismatch_W].mean(axis=0)
    varnorm_bw = var_bsW[:,0,ismatch_W]/stokes_bsW[:,0,ismatch_W].mean(axis=0)**2
    normint_b = (normint_bw/varnorm_bw).sum(axis=1)/(1./varnorm_bw).sum(axis=1)
    print normint_b
    stokes_bsW /= normint_b[:,None,None]
    var_bsW /= normint_b[:,None,None]**2

    # Do error weighted combine of observations
    stokes_sW = np.zeros((stokess,Wavs))
    var_sW = np.zeros((vars,Wavs))
    for b in range(obss):
        ok_W = ok_bsW[b].any(axis=0)
        stokes_sW[:,ok_W] += stokes_bsW[b][:,ok_W]/var_bsW[b][:stokess,ok_W]
        var_sW[:,ok_W] += 1./var_bsW[b][:,ok_W]
    ok_W = (var_sW != 0).all(axis=0)
    ok_sW = np.tile(ok_W,(stokess,1))
    var_sW[:,ok_W] = 1./var_sW[:,ok_W]
    stokes_sW[:,ok_W] *= var_sW[:stokess,ok_W]

    # Save result, name formed from unique elements of '_'-separated parts of names
    namepartlist = []
    parts = 100
    for file in infilelist:
        partlist = os.path.basename(file).split('.')[0].split('_')
        parts = min(parts,len(partlist))
        namepartlist.append(partlist)
    outfile = ''
    for part in range(parts):
        outfile += '-'.join(sorted(set(zip(*namepartlist)[part])))+'_'
    outfile = outfile[:-1]+'.fits'
    print "\n",outfile,"\n"

    hduout = hdul
    for ext in ('SCI','VAR','BPM'):
        hduout[ext].header.update('CDELT1',dWav)
        hduout[ext].header.update('CRVAL1',Wav0)
    hduout['SCI'].data = stokes_sW.astype('float32').reshape((stokess,1,-1))
    hduout['VAR'].data = var_sW.astype('float32').reshape((vars,1,-1))
    hduout['BPM'].data = (~ok_sW).astype('uint8').reshape((stokess,1,-1))
    hduout[0].header.add_history('POLCOMBINE: '+' '.join(infilelist))
    hduout.writeto(outfile,clobber=True,output_verify='warn')
    return
def flexure_rssspec(imagefits, fitslist, option=""):
    print str(datetime.now())

    if option == "filesave": prefix = raw_input("\nFile prefix: ")
    pixel = 15.             # pixel size in microns
    pix_scale = 0.125
    sexparams = ["X_IMAGE","Y_IMAGE","FLUX_ISO","FLUX_MAX","FLAGS","CLASS_STAR", \
                 "X2WIN_IMAGE","Y2WIN_IMAGE","XYWIN_IMAGE","ERRX2WIN_IMAGE"]
    np.savetxt("qred_thrufoc.param",sexparams,fmt="%s")
    fmaxcol,flagcol,xvarcol,yvarcol,xerrcol = (3,4,6,7,9)   # column nos (from 0) of data in sextractor
    imagestooclosefactor = 3.0      # too close if factor*sep < sqrt(var)
    gaptooclose = 1.25              # arcsec
    edgetooclose = 1.25             # arcsec
    rattolerance = 0.25
    toofaint = 250.                 # FMAX counts
    galaxydelta = 0.4               # arcsec
    MOSimagelimit = 1.              # arcsec
    deblend = .005                  # default

    imagehdr = pyfits.getheader(imagefits)
    if imagehdr["GR-STATE"][1] == "4":
        print "First fits file "+imagefits+" is not image of mask"
        exit()

    flexposns = len(fitslist)
    obsdict = obslog(fitslist)

    image_f = [fitslist[fpos].split(".")[0][-12:] for fpos in range(flexposns)]
    dateobs = obsdict["DATE-OBS"][0].replace("-","")
    if int(dateobs) > 20110928:
        rho_f = np.array(obsdict["TRKRHO"]).astype(float)
    else:
        rho_f = np.array(obsdict["TELRHO"]).astype(float)
    catpos = np.argmin(np.abs(rho_f))

    cbin,rbin = np.array(obsdict["CCDSUM"][catpos].split(" ")).astype(int)
    maskid = obsdict["MASKID"][catpos].strip()
    filter = obsdict["FILTER"][catpos].strip()
    grating = obsdict["GRATING"][catpos].strip()
    rows,cols = pyfits.getdata(fitslist[catpos]).shape
    isspec = (obsdict["GR-STATE"][catpos][1] == "4")
    if not isspec:
        print "Use flexure_rssimage for image flexure analysis"
        exit()
    grang = float(obsdict["GRTILT"][catpos])
    artic = float(obsdict["CAMANG"][catpos])
    lamp = obsdict["LAMPID"][catpos].strip()

    print "\nMask: ", maskid
    print "Filter: ", filter
    print "Grating: ", grating
    print "Artic (deg): ", artic
    print "Gr Angle (deg): ", grang
    print "Lamp: ", lamp

    # map the mask spots _m using the imaging fits file
    sex_js = sextract(imagefits,deblend=deblend)
    flux_s = sex_js[2]
    fluxmedian = np.median(np.sort(flux_s)[-10:])
    okm_s = (flux_s > fluxmedian/10)        # cull bogus spots
    maskholes = okm_s.sum()
    r_m = sex_js[1,okm_s]
    c_m = sex_js[0,okm_s]

    # find mask rows _R, tabulate
    histr_b, binr_b = np.histogram(r_m,bins=rows/10,range=(0,rows))
    bin0_R = np.where((histr_b[1:]>0) & (histr_b[:-1]==0))[0]
    bin1_R = np.where((histr_b[1:]==0) & (histr_b[:-1]>0))[0]
    maskRows = bin0_R.shape[0]
    bin_m = np.digitize(r_m,binr_b) - 1
    R_m = np.array([np.where((bin_m[m] >= bin0_R) & (bin_m[m] <= bin1_R))[0][0] \
                    for m in range(maskholes)])

    # find mask cols _C, tabulate
    histc_b, binc_b = np.histogram(c_m,bins=cols/10,range=(0,cols))
    bin0_C = np.where((histc_b[1:]>0) & (histc_b[:-1]==0))[0]
    bin1_C = np.where((histc_b[1:]==0) & (histc_b[:-1]>0))[0]
    maskCols = bin0_C.shape[0]
    bin_m = np.digitize(c_m,binc_b) - 1
    C_m = np.array([np.where((bin_m[m] >= bin0_C) & (bin_m[m] <= bin1_C))[0][0] \
                    for m in range(maskholes)])

    # identify mask center = optical axis
    if maskid == 'P000000N99':          # symmetric mask
        Raxis = maskRows/2
        Caxis = maskCols/2
    elif maskid == 'P000000N03':        # mask with centered cross
        Raxis = np.where((np.argmax(histr_b) >= bin0_R) & (np.argmax(histr_b) <= bin1_R))[0][0]
        Caxis = np.where((np.argmax(histc_b) >= bin0_C) & (np.argmax(histc_b) <= bin1_C))[0][0]
    else:
        print "Not a valid flexure mask"
        exit()
    maxis = np.where((R_m==Raxis) & (C_m==Caxis))[0][0]
    raxis = r_m[maxis]
    caxis = c_m[maxis]

    print "\nMask_Holes Rows Cols   r axis   c axis \n                       pixels   pixels"
    print " %5i %5i %5i %8.1f %8.1f" % (maskholes,maskRows,maskCols,raxis*rbin,caxis*cbin)
#   np.savetxt(dateobs+'_'+"mask.txt",np.vstack((r_m,c_m,sex_js[2,okm_s],R_m)).T,fmt="%10.2f")

    # get linelist, predict spots in spectral image
    wavcent = rsslam(grating, grang, artic, 0., dateobs)
    specfile = datedfile(datadir+"spectrograph/spec_yyyymmdd.txt",dateobs)
    FCampoly = np.loadtxt(specfile,usecols=(1,))[5:11]
    fcam = np.polyval(FCampoly,(wavcent/1000. - 4.))
    lampfile = iraf.osfn("pysalt$data/linelists/"+lamp+".salt")
    wav_l,int_l = np.loadtxt(lampfile,unpack=True)
    maxdalpha = -np.degrees((cols/2)*cbin*pixel/(1000.*fcam))
    maxgamma = np.degrees((rows/2)*rbin*pixel/(1000.*fcam))
    maxwav = rsslam(grating,grang,artic, cols*cbin/2,dateobs,-maxdalpha,0)
    minwav = rsslam(grating,grang,artic,-cols*cbin/2,dateobs, maxdalpha,maxgamma)
    ok_l = (wav_l >= minwav) & (wav_l <= maxwav)
    wav_l = wav_l[ok_l]
    int_l = int_l[ok_l]
    lines = wav_l.shape[0]

    col_ml = np.zeros((maskholes,lines))
    dcol_c = np.arange(-(cols*cbin/2),(cols*cbin/2))
    for m in range(maskholes):
        dalpha = -np.degrees((c_m[m]-caxis)*cbin*pixel/(1000.*fcam))
        gamma = np.degrees((r_m[m]-raxis)*rbin*pixel/(1000.*fcam))
        wav0,wav1 = rsslam(grating,grang,artic,dcol_c[[0,-1]],dateobs,dalpha,gamma=gamma)
        ok_l = ((wav_l > wav0) & (wav_l < wav1))
        colwav = interp1d(rsslam(grating,grang,artic,dcol_c, \
                          dateobs,dalpha=dalpha,gamma=gamma), dcol_c)
        col_ml[m,ok_l] = colwav(wav_l[ok_l]) + caxis*cbin
#   np.savetxt(dateobs+"_col_ml.txt",np.vstack((R_m,C_m,col_ml.T)),fmt="%8.1f")

    # identify mask hole and wavelength for spots in spec image closest to rho=0
    os.remove("sexwt.fits")
    sex_js = sextract(fitslist[catpos],"",deblend=deblend)
    r_s = sex_js[1]
    c_s = sex_js[0]
    flux_s = sex_js[2]
    spots = r_s.shape[0]
    fluxmedian = np.median(np.sort(sex_js[2])[-10:])
    ok_s = (flux_s > fluxmedian/30)     # cull bogus spots

    # find spectral bin rows RR in candidates R0, cull non-spectra
    histr_b, binr_b = np.histogram(r_s[ok_s],bins=rows/10,range=(0,rows))
    histr_b[[0,-1]] = 0
    bin0_R0 = np.where((histr_b[1:]>0) & (histr_b[:-1]==0))[0] + 1
    bin1_R0 = np.where((histr_b[1:]==0) & (histr_b[:-1]>0))[0]
    bin_s = np.digitize(r_s,binr_b) - 1
    maxcount_R0 = np.array([(histr_b[bin0_R0[R0]:bin1_R0[R0]+1]).max() \
                            for R0 in range(bin0_R0.shape[0])])
    ok_R0 = (maxcount_R0 > 3)

    # cull down to spectra RR
    specrows = ok_R0.sum()
    bin0_RR = bin0_R0[ok_R0]
    bin1_RR = bin1_R0[ok_R0]
    ok_s &= ((bin_s >= bin0_RR[:,None]) & (bin_s <= bin1_RR[:,None])).any(axis=0)
    RR_s = -np.ones(spots)
    r_RR = np.zeros(specrows)
    for RR in range(specrows):
        isRR_s = ok_s & np.in1d(bin_s,np.arange(bin0_RR[RR],bin1_RR[RR]+1))
        RR_s[isRR_s] = RR
        r_RR[RR] = r_s[isRR_s].mean()
    count_RR = (RR_s[:,None]==range(specrows)).sum(axis=0)
    if maskid == 'P000000N99':
        RRaxis = np.argmin((raxis-r_RR)**2)
    elif maskid == 'P000000N03':
        RRaxis = np.argmax(count_RR)

    # cull weak lines
    ptile = 100.*min(1.,5.*maskCols/count_RR.max())     # want like 5 brightest lines
    for RR in range(specrows):
        isRR_s = ok_s & np.in1d(bin_s,np.arange(bin0_RR[RR],bin1_RR[RR]+1))
        fluxmin = np.percentile(sex_js[2,isRR_s],100.-ptile)
        ok_s[isRR_s] &= (sex_js[2,isRR_s] > fluxmin)

    # identify with mask rows R (assuming no gaps)
    RR_m = R_m + RRaxis - Raxis

    # find approximate grating shift in dispersion direction by looking for most common id error
    histc_b = np.zeros(60)
    for RR in range(specrows):
        isRR_s = ((RR_s==RR) & ok_s)
        cerr_MS = (c_s[None,isRR_s] - col_ml[RR_m==RR].ravel()[:,None])
        histc_b += np.histogram(cerr_MS.ravel(),bins=60,range=(-150,150))[0]
    cshift = 5*np.argmax(histc_b) - 150
    col_ml += cshift

    # identify wavelength and mask column with spots in each spectrum
    isfound_s = np.zeros((spots),dtype=bool)
    bintol = 16/cbin        # 2 arcsec tolerance for line ID
    R_s = -np.ones(spots,dtype=int)
    C_s = -np.ones(spots,dtype=int)
    l_s = -np.ones(spots,dtype=int)
    m_s = -np.ones(spots,dtype=int)
    cerr_s = np.zeros(spots)
    rmscol = 0.
    for RR in range(specrows):      # _S spot in spectrum, _P (mask column, line)
        isRR_m = (RR_m==RR)
        isRR_s = ((RR_s==RR) & ok_s)
        cerr_PS = (c_s[None,isRR_s] - col_ml[isRR_m].ravel()[:,None])
        Spots = isRR_s.sum()
        Possibles = col_ml[isRR_m].size
        Cols = Possibles/lines
        P_S = np.argmin(np.abs(cerr_PS),axis=0)
        cerr_S = cerr_PS[P_S,range(isRR_s.sum())]
        isfound_S = (np.abs(cerr_S) < bintol)
        M_P,l_P = np.unravel_index(np.arange(Possibles),(Cols,lines))
        m_P = np.where(isRR_m)[0][M_P]
        m_S = m_P[P_S]
        C_P = C_m[m_P]
        C_S = C_P[P_S]
        l_S = l_P[P_S]
        s_S = np.where(isRR_s)[0]
        R_s[isRR_s] = RR + Raxis-RRaxis
        cerr_s[s_S] = cerr_S
        C_s[s_S[isfound_S]] = C_S[isfound_S]
        l_s[s_S[isfound_S]] = l_S[isfound_S]
        m_s[s_S[isfound_S]] = m_S[isfound_S]
        isfound_s[s_S] |= isfound_S
        rmscol += (cerr_S[isfound_S]**2).sum()

    # cull wavelengths to _L with < 1/2 Mask Rows or Cols
    ok_s &= isfound_s
    ok_l = np.zeros((lines),dtype=bool)
    for line in range(lines):
        lRows = np.unique(R_s[l_s==line]).shape[0]
        lCols = np.unique(C_s[l_s==line]).shape[0]
        ok_l[line] = ((lRows>=maskRows/2) & (lCols>=maskCols/2))
    l_L = np.where(ok_l)[0]
    wav_L = wav_l[l_L]
    Lines = l_L.shape[0]
    ok_s &= np.in1d(l_s,l_L)

    # tabulate good catalog spots (final _S)
    s_S = np.where(ok_s)[0]
    r_S = r_s[s_S]
    c_S = c_s[s_S]
    cerr_S = cerr_s[s_S]
    R_S = R_s[s_S]
    C_S = C_s[s_S]
    l_S = l_s[s_S]
    Spots = ok_s.sum()

    rshift = r_S[R_S==Raxis].mean() - raxis
    cshift += (c_S - col_ml[m_s[s_S],l_S]).mean()
    rmscol = np.sqrt(rmscol/Spots)

    np.savetxt("cat_S.txt",np.vstack((s_S,r_S,c_S,R_S,C_S,l_S,cerr_S)).T, \
               fmt="%5i %8.2f %8.2f %5i %5i %5i %8.2f")

    print "\nSpec_Spots Lines  rshift  cshift     rms\n                  pixels  pixels  pixels"
    print " %5i %5i %8.1f %8.1f %8.1f" % (Spots,np.unique(l_S).shape[0],rshift,cshift,rmscol)
    print "\nLineno  Wavel  spots  Rows  Cols"
    for L in range(Lines):
        line = l_L[L]
        lRows = np.unique(R_S[l_S==line]).shape[0]
        lCols = np.unique(C_S[l_S==line]).shape[0]
        lspots = (l_S==line).sum()
        print " %5i %8.2f %5i %5i %5i" % (line,wav_l[line],lspots,lRows,lCols)

    sexcols = sex_js.shape[0]
    sexdata_jfS = np.zeros((sexcols,flexposns,Spots))
    sexdata_jfS[:,catpos] = sex_js[:,ok_s]
    xcenter_L = col_ml[maxis,l_L]
    ycenter = raxis + rshift
    if option == "filesave":
        np.savetxt(prefix+"Spots.txt",sexdata_jfS[:,catpos].T, \
                   fmt=2*"%9.2f "+"%9.0f "+"%9.1f "+"%4i "+"%6.2f "+3*"%7.2f "+"%11.3e")

    # find spots in flexure series, in order of increasing abs(rho), and store sextractor output
    row_fLd = np.zeros((flexposns,Lines,2))
    col_fLd = np.zeros((flexposns,Lines,2))

    print "\n fits  rho  line  spots  rshift  cshift  rslope  cslope  rmserr "
    print "       deg   Ang         arcsec  arcsec  arcmin  arcmin    bins"

    for dirn in (1,-1):
        refpos = catpos
        posdirlist = np.argsort(dirn*rho_f)
        poslist = posdirlist[dirn*rho_f[posdirlist] > rho_f[refpos]]
        for fpos in poslist:
            col_S,row_S = sexdata_jfS[0:2,refpos,:]
            sex_js = sextract(fitslist[fpos],"sexwt.fits",deblend=deblend)
            binsqerr_sS = (sex_js[1,:,None] - row_S[None,:])**2 + (sex_js[0,:,None] - col_S[None,:])**2
            S_s = np.argmin(binsqerr_sS,axis=1)
            # First compute image shift by averaging small errors
            rowerr_s = sex_js[1] - row_S[S_s]
            colerr_s = sex_js[0] - col_S[S_s]
            hist_r,bin_r = np.histogram(rowerr_s,bins=32,range=(-2*bintol,2*bintol))
            drow = rowerr_s[(rowerr_s > bin_r[np.argmax(hist_r)]-bintol) & \
                            (rowerr_s < bin_r[np.argmax(hist_r)]+bintol)].mean()
            hist_c,bin_c = np.histogram(colerr_s,bins=32,range=(-2*bintol,2*bintol))
            dcol = colerr_s[(colerr_s > bin_c[np.argmax(hist_c)]-bintol) & \
                            (colerr_s < bin_c[np.argmax(hist_c)]+bintol)].mean()
            # Now refind the closest ID
            binsqerr_sS = (sex_js[1,:,None] - row_S[None,:] - drow)**2 + \
                          (sex_js[0,:,None] - col_S[None,:] - dcol)**2
            binsqerr_s = binsqerr_sS.min(axis=1)
            isfound_s = binsqerr_s < bintol**2
            S_s = np.argmin(binsqerr_sS,axis=1)
            isfound_s &= (binsqerr_s == binsqerr_sS[:,S_s].min(axis=0))
            isfound_S = np.array([S in S_s[isfound_s] for S in range(Spots)])
            sexdata_jfS[:,fpos,S_s[isfound_s]] = sex_js[:,isfound_s]
            drow_S = sexdata_jfS[1,fpos]-sexdata_jfS[1,catpos]
            dcol_S = sexdata_jfS[0,fpos]-sexdata_jfS[0,catpos]
#           np.savetxt("motion_"+str(fpos)+".txt",np.vstack((isfound_S,l_S,drow_S,dcol_S)).T,fmt="%3i %3i %8.2f %8.2f")

            # Compute flexure image motion parameters for each line
            for L in range(Lines):
                ok_S = ((l_S == l_L[L]) & isfound_S)
                row_fLd[fpos,L],rowchi,d,d,d = \
                    np.polyfit(sexdata_jfS[0,catpos,ok_S]-xcenter_L[L],drow_S[ok_S],deg=1,full=True)
                col_fLd[fpos,L],colchi,d,d,d = \
                    np.polyfit(sexdata_jfS[1,catpos,ok_S]-ycenter,dcol_S[ok_S],deg=1,full=True)
                rms = np.sqrt((rowchi+colchi)/(2*ok_S.sum()))

                print ("%12s %5.0f %5i %5i "+5*"%7.2f ") % (image_f[fpos], rho_f[fpos], wav_L[L], \
                    ok_S.sum(), row_fLd[fpos,L,1]*rbin*pix_scale, col_fLd[fpos,L,1]*cbin*pix_scale, \
                    60.*np.degrees(row_fLd[fpos,L,0]), -60.*np.degrees(col_fLd[fpos,L,0]), rms)
            if option == "filesave":
                np.savetxt(prefix+"flex_"+str(fpos)+".txt",np.vstack((isfound_S,drow_S,dcol_S)).T, \
                           fmt="%2i %8.3f %8.3f")
                np.savetxt(prefix+"sextr_"+str(fpos)+".txt",sexdata_jfS[:,fpos].T)
        print

    # make plots
    fig,plot_s = plt.subplots(2,1,sharex=True)
    plt.xlabel('Rho (deg)')
    plt.xlim(-120,120)
    plt.xticks(range(-120,120,30))
    fig.set_size_inches((8.5,11))
    fig.subplots_adjust(left=0.175)
    plot_s[0].set_title(str(dateobs)+[" Imaging"," Spectral"][isspec]+" Flexure")
    plot_s[0].set_ylabel('Mean Position (arcsec)')
    plot_s[0].set_ylim(-0.5,4.)
    plot_s[1].set_ylabel('Rotation (arcmin ccw)')
    plot_s[1].set_ylim(-10.,6.)
    lbl_L = [("%5.0f") % (wav_L[L]) for L in range(Lines)]
    color_L = 'bgrcmykw'
    for L in range(Lines):
        plot_s[0].plot(rho_f,row_fLd[:,L,1]*rbin*pix_scale, \
            color=color_L[L],marker='D',markersize=8,label='row '+lbl_L[L])
        plot_s[1].plot(rho_f,60.*np.degrees(row_fLd[:,L,0]), \
            color=color_L[L],marker='D',markersize=8,label='row '+lbl_L[L])
    collbl = 'col'+lbl_L[0]
    for L in range(Lines):
        plot_s[0].plot(rho_f,col_fLd[:,L,1]*cbin*pix_scale, \
            color=color_L[L],marker='s',markersize=8,label=collbl)
        plot_s[1].plot(rho_f,-60.*np.degrees(col_fLd[:,L,0]), \
            color=color_L[L],marker='s',markersize=8,label=collbl)
        collbl = ''
    plot_s[0].legend(fontsize='medium',loc='upper center')
    plotfile = str(dateobs)+['_imflex.pdf','_grflex.pdf'][isspec]
    plt.savefig(plotfile,orientation='portrait')

    if os.name=='posix':
        if os.popen('ps -C evince -f').read().count(plotfile)==0:
            os.system('evince '+plotfile+' &')
    os.remove("out.txt")
    os.remove("qred_thrufoc.param")
    os.remove("sexwt.fits")
    return
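# Illustrative sketch only (hypothetical helper): the per-line motion fit used
# above in isolation.  A degree-1 polyfit of spot motion against distance from
# the optical axis yields the rotation (slope) and mean shift (intercept).
def _flexure_fit_sketch(x_S, dy_S):
    import numpy as np
    cof, resid, rank, sv, rcond = np.polyfit(x_S, dy_S, deg=1, full=True)
    rotation, shift = cof               # slope, intercept
    rms = np.sqrt(resid[0]/x_S.shape[0]) if resid.size else 0.
    return rotation, shift, rms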
def specpolextract(infilelist, logfile='salt.log'):

    #set up the files
    obsdate = os.path.basename(infilelist[0])[8:16]
    debug = False       # assumed default for the logging context manager

    with logging(logfile, debug) as log:
        #create the observation log
        obs_dict = obslog(infilelist)

        # get rid of arcs
        for i in range(len(infilelist))[::-1]:
            if (obs_dict['OBJECT'][i].upper().strip() == 'ARC'):
                del infilelist[i]
        infiles = len(infilelist)

        # contiguous images of the same object and config are grouped together
        obs_dict = obslog(infilelist)
        confno_i,confdatlist = configmap(infilelist)
        configs = len(confdatlist)
        objectlist = list(set(obs_dict['OBJECT']))
        objno_i = np.array([objectlist.index(obs_dict['OBJECT'][i]) for i in range(infiles)],dtype=int)
        grp_i = np.zeros((infiles),dtype=int)
        grp_i[1:] = ((confno_i[1:] != confno_i[:-1]) | (objno_i[1:] != objno_i[:-1])).cumsum()

        for g in np.unique(grp_i):
            ilist = np.where(grp_i==g)[0]
            outfiles = len(ilist)
            outfilelist = [infilelist[i] for i in ilist]
            imagenolist = [int(os.path.basename(infilelist[i]).split('.')[0][-4:]) for i in ilist]
            log.message('\nExtract: '+objectlist[objno_i[ilist[0]]]+' Grating %s Grang %6.2f Artic %6.2f' % \
                        confdatlist[confno_i[ilist[0]]], with_header=False)
            log.message(' Images: '+outfiles*'%i ' % tuple(imagenolist), with_header=False)
            hdu0 = pyfits.open(outfilelist[0])
            rows,cols = hdu0['SCI'].data.shape[1:3]
            cbin,rbin = np.array(obs_dict["CCDSUM"][0].split(" ")).astype(int)

            # special version for lamp data
            lampid = obs_dict["LAMPID"][0].strip().upper()
            if lampid != "NONE":
                specpollampextract(outfilelist, logfile=logfile)
                continue

            # sum spectra to find target, background artifacts, and estimate sky flat and psf functions
            count = 0
            for i in range(outfiles):
                badbin_orc = pyfits.open(outfilelist[i])['BPM'].data > 0
                if count == 0:
                    count_orc = (~badbin_orc).astype(int)
                    image_orc = pyfits.open(outfilelist[i])['SCI'].data*count_orc
                    var_orc = pyfits.open(outfilelist[i])['VAR'].data*count_orc
                else:
                    count_orc += (~badbin_orc).astype(int)
                    image_orc += pyfits.open(outfilelist[i])['SCI'].data*(~badbin_orc).astype(int)
                    var_orc += pyfits.open(outfilelist[i])['VAR'].data*(~badbin_orc).astype(int)
                count += 1
            if count == 0:
                print 'No valid images'
                continue
            image_orc[count_orc>0] /= count_orc[count_orc>0]
            badbinall_orc = (count_orc==0) | (image_orc==0)         # bin is bad in all images
            badbinone_orc = (count_orc < count) | (image_orc==0)    # bin is bad in at least one image
            var_orc[count_orc>0] /= (count_orc[count_orc>0])**2

            wav_orc = pyfits.open(outfilelist[0])['WAV'].data
            slitid = obs_dict["MASKID"][0]
            if slitid[0] == "P":
                slitwidth = float(slitid[2:5])/10.
            else:
                slitwidth = float(slitid)

            hdusum = pyfits.PrimaryHDU(header=hdu0[0].header)
            hdusum = pyfits.HDUList(hdusum)
            header = hdu0['SCI'].header.copy()
            hdusum.append(pyfits.ImageHDU(data=image_orc, header=header, name='SCI'))
            hdusum.append(pyfits.ImageHDU(data=var_orc, header=header, name='VAR'))
            hdusum.append(pyfits.ImageHDU(data=badbinall_orc.astype('uint8'), header=header, name='BPM'))
            hdusum.append(pyfits.ImageHDU(data=wav_orc, header=header, name='WAV'))
#           hdusum.writeto("groupsum_"+str(g)+".fits",clobber=True)

            psf_orc,skyflat_orc,badbinnew_orc,isbkgcont_orc,maprow_od,drow_oc = \
                specpolsignalmap(hdusum,logfile=logfile)

            maprow_ocd = maprow_od[:,None,:] + np.zeros((2,cols,4))
            maprow_ocd[:,:,[1,2]] -= drow_oc[:,:,None]      # edge is straight, target curved

            isedge_orc = (np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]) | \
                         (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3])
            istarget_orc = (np.arange(rows)[:,None] > maprow_ocd[:,None,:,1]) & \
                           (np.arange(rows)[:,None] < maprow_ocd[:,None,:,2])
            isskycont_orc = (((np.arange(rows)[:,None] < maprow_ocd[:,None,:,0]+rows/16) | \
                              (np.arange(rows)[:,None] > maprow_ocd[:,None,:,3]-rows/16)) & ~isedge_orc)
            isbkgcont_orc &= (~badbinall_orc & ~isedge_orc & ~istarget_orc)
            badbinall_orc |= badbinnew_orc
            badbinone_orc |= badbinnew_orc
#           pyfits.PrimaryHDU(var_orc.astype('float32')).writeto('var_orc1.fits',clobber=True)
#           pyfits.PrimaryHDU(badbinnew_orc.astype('uint8')).writeto('badbinnew_orc.fits',clobber=True)
#           pyfits.PrimaryHDU(badbinall_orc.astype('uint8')).writeto('badbinall_orc.fits',clobber=True)
#           pyfits.PrimaryHDU(badbinone_orc.astype('uint8')).writeto('badbinone_orc.fits',clobber=True)

            # scrunch and normalize psf from summed images (using badbinone) for optimized extraction
            psfnormmin = 0.70       # wavelengths with less than this flux in good bins are marked bad
            wbin = wav_orc[0,rows/2,cols/2]-wav_orc[0,rows/2,cols/2-1]
            wbin = float(int(wbin/0.75))
            wmin,wmax = wav_orc.min(axis=2).max(), wav_orc.max(axis=2).min()
            wedgemin = wbin*int(wmin/wbin+0.5) + wbin/2.
            wedgemax = wbin*int(wmax/wbin-0.5) + wbin/2.
            wedge_w = np.arange(wedgemin,wedgemax+wbin,wbin)
            wavs = wedge_w.shape[0] - 1
            binedge_orw = np.zeros((2,rows,wavs+1))
            psf_orw = np.zeros((2,rows,wavs))
            specrow_or = (maprow_od[:,1:3].mean(axis=1)[:,None] + np.arange(-rows/4,rows/4)).astype(int)
#           pyfits.PrimaryHDU(var_orc.astype('float32')).writeto('var_orc2.fits',clobber=True)
            for o in (0,1):
                for r in specrow_or[o]:
                    binedge_orw[o,r] = interp1d(wav_orc[o,r],np.arange(cols))(wedge_w)
                    psf_orw[o,r] = scrunch1d(psf_orc[o,r],binedge_orw[o,r])
            psf_orw /= psf_orw.sum(axis=1)[:,None,:]
#           np.savetxt("psfnorm_ow.txt",(psf_orw*okbin_orw).sum(axis=1).T,fmt="%10.4f")
#           pyfits.PrimaryHDU(psf_orw.astype('float32')).writeto('psf_orw.fits',clobber=True)
#           pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_orw.fits',clobber=True)

            # set up optional image-dependent column shift for slitless data
            colshiftfilename = "colshift.txt"
            docolshift = os.path.isfile(colshiftfilename)
            if docolshift:
                img_I,dcol_I = np.loadtxt(colshiftfilename,dtype=float,unpack=True,usecols=(0,1))
                shifts = img_I.shape[0]
                log.message('Column shift: \n Images '+shifts*'%5i ' % tuple(img_I), with_header=False)
                log.message(' Bins '+shifts*'%5.2f ' % tuple(dcol_I), with_header=False)

            # background-subtract and extract spectra
            psfbadfrac_iow = np.zeros((outfiles,2,wavs))

            for i in range(outfiles):
                hdulist = pyfits.open(outfilelist[i])
                sci_orc = hdulist['sci'].data
                var_orc = hdulist['var'].data
                badbin_orc = (hdulist['bpm'].data > 0) | badbinnew_orc
                tnum = os.path.basename(outfilelist[i]).split('.')[0][-3:]

                # make background continuum image, smoothed over resolution element
                rblk,cblk = int(1.5*8./rbin), int(slitwidth*8./cbin)
                target_orc = np.zeros_like(sci_orc)
                for o in (0,1):
                    bkgcont_rc = blksmooth2d(sci_orc[o],isbkgcont_orc[o],rblk,cblk,0.25,mode="mean")

                    # remove sky continuum: ends of bkg continuum * skyflat
                    skycont_c = (bkgcont_rc.T[isskycont_orc[o].T]/skyflat_orc[o].T[isskycont_orc[o].T]) \
                        .reshape((cols,-1)).mean(axis=1)
                    skycont_rc = skycont_c*skyflat_orc[o]

                    # remove sky lines: image - bkg cont run through 2d sky averaging
                    obj_data = (sci_orc[o] - bkgcont_rc)/skyflat_orc[o]
                    obj_data[(badbin_orc | isedge_orc | istarget_orc)[o]] = np.nan
#                   pyfits.PrimaryHDU(obj_data.astype('float32')).writeto('obj_data.fits',clobber=True)
                    skylines_rc = make_2d_skyspectrum(obj_data,wav_orc[o],np.array([[0,rows],]))*skyflat_orc[o]
                    target_orc[o] = sci_orc[o] - skycont_rc - skylines_rc
#                   pyfits.PrimaryHDU(skylines_rc.astype('float32')).writeto('skylines_rc_'+tnum+'_'+str(o)+'.fits',clobber=True)
#                   pyfits.PrimaryHDU(skycont_rc.astype('float32')).writeto('skycont_rc_'+tnum+'_'+str(o)+'.fits',clobber=True)
                target_orc *= (~badbin_orc).astype(int)
#               pyfits.PrimaryHDU(target_orc.astype('float32')).writeto('target_'+tnum+'_orc.fits',clobber=True)

                # extract spectrum optimally (Horne, PASP 1986)
                target_orw = np.zeros((2,rows,wavs))
                var_orw = np.zeros_like(target_orw)
                badbin_orw = np.ones((2,rows,wavs),dtype='bool')
                wt_orw = np.zeros_like(target_orw)
                dcol = 0.
                if docolshift:
                    if int(tnum) in img_I:
                        dcol = dcol_I[np.where(img_I==int(tnum))]   # table has observed shift
                for o in (0,1):
                    for r in specrow_or[o]:
                        target_orw[o,r] = scrunch1d(target_orc[o,r],binedge_orw[o,r]+dcol)
                        var_orw[o,r] = scrunch1d(var_orc[o,r],binedge_orw[o,r]+dcol)
                        badbin_orw[o,r] = scrunch1d(badbin_orc[o,r].astype(float),binedge_orw[o,r]+dcol) > 0.001
                badbin_orw |= (var_orw == 0)
                badbin_orw |= ((psf_orw*(~badbin_orw)).sum(axis=1)[:,None,:] < psfnormmin)
#               pyfits.PrimaryHDU(var_orw.astype('float32')).writeto('var_'+tnum+'_orw.fits',clobber=True)
#               pyfits.PrimaryHDU(badbin_orw.astype('uint8')).writeto('badbin_'+tnum+'_orw.fits',clobber=True)

                # use master psf shifted in row to allow for guide errors
                pwidth = 2*int(1./psf_orw.max())
                ok_w = ((psf_orw*badbin_orw).sum(axis=1) < 0.03/float(pwidth/2)).all(axis=0)
                crosscor_s = np.zeros(pwidth)
                for s in range(pwidth):
                    crosscor_s[s] = (psf_orw[:,s:s-pwidth]*target_orw[:,pwidth/2:-pwidth/2]*ok_w).sum()
                smax = np.argmax(crosscor_s)
                s_S = np.arange(smax-pwidth/4,smax-pwidth/4+pwidth/2+1)
                polycof = la.lstsq(np.vstack((s_S**2,s_S,np.ones_like(s_S))).T,crosscor_s[s_S])[0]
                pshift = -(-0.5*polycof[1]/polycof[0] - pwidth/2)
                s = int(pshift+pwidth)-pwidth
                sfrac = pshift-s
                psfsh_orw = np.zeros_like(psf_orw)
                outrow = np.arange(max(0,s+1),rows-(1+int(abs(pshift)))+max(0,s+1))
                psfsh_orw[:,outrow] = (1.-sfrac)*psf_orw[:,outrow-s] + sfrac*psf_orw[:,outrow-s-1]
#               pyfits.PrimaryHDU(psfsh_orw.astype('float32')).writeto('psfsh_'+tnum+'_orw.fits',clobber=True)

                wt_orw[~badbin_orw] = psfsh_orw[~badbin_orw]/var_orw[~badbin_orw]
                var_ow = (psfsh_orw*wt_orw*(~badbin_orw)).sum(axis=1)
                badbin_ow = (var_ow == 0)
                var_ow[~badbin_ow] = 1./var_ow[~badbin_ow]
#               pyfits.PrimaryHDU(var_ow.astype('float32')).writeto('var_'+tnum+'_ow.fits',clobber=True)
#               pyfits.PrimaryHDU(target_orw.astype('float32')).writeto('target_'+tnum+'_orw.fits',clobber=True)
#               pyfits.PrimaryHDU(wt_orw.astype('float32')).writeto('wt_'+tnum+'_orw.fits',clobber=True)

                sci_ow = (target_orw*wt_orw).sum(axis=1)*var_ow

                badlim = 0.20
                psfbadfrac_iow[i] = (psfsh_orw*badbin_orw.astype(int)).sum(axis=1)/psfsh_orw.sum(axis=1)
                badbin_ow |= (psfbadfrac_iow[i] > badlim)

#               cdebug = 39
#               np.savetxt("xtrct"+str(cdebug)+"_"+tnum+".txt",np.vstack((psf_orw[:,:,cdebug],var_orw[:,:,cdebug], \
#                   wt_orw[:,:,cdebug],target_orw[:,:,cdebug])).reshape((4,2,-1)).transpose(1,0,2).reshape((8,-1)).T,fmt="%12.5e")

                # write O,E spectrum, prefix "s".  VAR, BPM for each spectrum.  y dim is virtual (length 1)
                #   for consistency with other modes
                hduout = pyfits.PrimaryHDU(header=hdulist[0].header)
                hduout = pyfits.HDUList(hduout)
                header = hdulist['SCI'].header.copy()
                header.update('VAREXT',2)
                header.update('BPMEXT',3)
                header.update('CRVAL1',wedge_w[0]+wbin/2.)
                header.update('CRVAL2',0)
                header.update('CDELT1',wbin)
                header.update('CTYPE1','Angstroms')
                hduout.append(pyfits.ImageHDU(data=sci_ow.reshape((2,1,wavs)), header=header, name='SCI'))
                header.update('SCIEXT',1,'Extension for Science Frame',before='VAREXT')
                hduout.append(pyfits.ImageHDU(data=var_ow.reshape((2,1,wavs)), header=header, name='VAR'))
                hduout.append(pyfits.ImageHDU(data=badbin_ow.astype("uint8").reshape((2,1,wavs)), header=header, name='BPM'))

                hduout.writeto('e'+outfilelist[i],clobber=True,output_verify='warn')
                log.message('Output file '+'e'+outfilelist[i], with_header=False)

#           np.savetxt("psfbadfrac_iow.txt",psfbadfrac_iow.reshape((-1,wavs)).T,fmt="%8.5f")
    return
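# Illustrative sketch only (hypothetical helper): the optimal-extraction
# weighting above (Horne, PASP 1986) in miniature for one beam.  Weights are
# psf/variance over good rows; the extracted flux at each wavelength is
# sum(w*D)/sum(w*P), with variance 1/sum(w*P).
def _horne_extract_sketch(target_rw, var_rw, psf_rw, ok_rw):
    import numpy as np
    wt_rw = np.where(ok_rw, psf_rw/np.where(var_rw > 0., var_rw, 1.), 0.)
    norm_w = (psf_rw*wt_rw*ok_rw).sum(axis=0)
    safe_w = np.where(norm_w > 0., norm_w, 1.)      # guard /0 at dead columns
    flux_w = np.where(norm_w > 0., (target_rw*wt_rw).sum(axis=0)/safe_w, 0.)
    var_w = np.where(norm_w > 0., 1./safe_w, 0.)
    return flux_w, var_w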