def test_template(data_type,data_spectrum,teff,logg,feh,start_lambda,end_lambda,shift):
    ### Import a template spectrum - test
    template_spectrum = "template_" + str(teff) + "_" + str(logg) + "_" + str(feh) + ".dat"
    if data_type == "flux":
        template_spectrum = functions.read_ascii(model_path_flux + template_spectrum)
    if data_type == "norm":
        template_spectrum = functions.read_ascii(model_path_norm + template_spectrum)
    template_spectrum = functions.read_table(template_spectrum)
    template_spectrum = transpose(array(template_spectrum))
    if data_type == "flux":
        template_spectrum = spectype_functions.normalise(template_spectrum,flux_normalise_w1,flux_normalise_w2)

    ### Chop both spectra
    data_region = spectype_numerical_functions.chop_spectrum(data_spectrum,start_lambda,end_lambda)
    template_region = spectype_numerical_functions.chop_spectrum(template_spectrum,start_lambda,end_lambda)

    ### Conform template spectrum to data spectrum -> same wavelength scale
    template_region = spectype_numerical_functions.conform_spectrum(data_region,template_region)

    ### Find shift
    data_region_shifted,template_region_shifted = spectype_functions.shift_spectrum(data_region,template_region,shift)
    chisq = spectype_numerical_functions.chisq(data_region_shifted,template_region_shifted)

    # plt.clf()
    # plt.plot(data_region_shifted[0],data_region_shifted[1])
    # plt.plot(template_region_shifted[0],template_region_shifted[1])
    # plt.title(data_type + " " + str(teff) + " " + str(logg) + " " + str(feh) + " " + str(chisq))
    # plt.show()

    return chisq
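### --- Added illustrative sketch (not part of the original script) ---
### A minimal example of how test_template could drive a brute-force grid
### search over templates; the grid values below are hypothetical and must
### match the template_<teff>_<logg>_<feh>.dat files actually on disk.
def example_grid_search(data_spectrum,start_lambda,end_lambda,shift):
    best_chisq = None
    best_params = None
    for teff in range(4000,7250,250):        # hypothetical Teff grid (K)
        for logg in [3.0,3.5,4.0,4.5,5.0]:   # hypothetical logg grid
            for feh in [-0.5,0.0,0.5]:       # hypothetical [Fe/H] grid
                chisq = test_template("flux",data_spectrum,teff,logg,feh,start_lambda,end_lambda,shift)
                if best_chisq is None or chisq < best_chisq:
                    best_chisq = chisq
                    best_params = (teff,logg,feh)
    return best_params,best_chisq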
def find_shift(data_type,data_spectrum,teff,logg,feh,start_lambda,end_lambda):
    ### Import the template spectrum
    template_spectrum = "template_" + str(teff) + "_" + str(logg) + "_" + str(feh) + ".dat"
    if data_type == "flux":
        template_spectrum = functions.read_ascii(model_path_flux + template_spectrum)
    if data_type == "norm":
        template_spectrum = functions.read_ascii(model_path_norm + template_spectrum)
    template_spectrum = functions.read_table(template_spectrum)
    template_spectrum = transpose(array(template_spectrum))
    if data_type == "flux":
        template_spectrum = spectype_functions.normalise(template_spectrum,flux_normalise_w1,flux_normalise_w2)

    ### Chop both spectra
    data_region = spectype_numerical_functions.chop_spectrum(data_spectrum,start_lambda,end_lambda)
    template_region = spectype_numerical_functions.chop_spectrum(template_spectrum,start_lambda,end_lambda)

    ### Conform template spectrum to data spectrum -> same wavelength scale
    template_region = spectype_numerical_functions.conform_spectrum(data_region,template_region)

    ### Find shift
    chisq_shift = []
    shift_limit = 20
    shift = -1 * shift_limit
    while shift <= shift_limit:
        data_region_shifted,template_region_shifted = spectype_functions.shift_spectrum(data_region,template_region,shift)
        chisq_shift.append(spectype_numerical_functions.chisq(data_region_shifted,template_region_shifted))
        shift = shift + 1

    chisq_min = spectype_functions.find_min(chisq_shift)
    best_shift = chisq_min - shift_limit
    return best_shift
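### --- Added note (sketch, not part of the original script) ---
### With shift_limit = 20 the loop evaluates 41 shifts, -20..+20, so the
### chi-squared list index i corresponds to shift i - 20. A tiny numeric
### check of that mapping, assuming find_min returns the argmin index:
def example_shift_mapping():
    shift_limit = 20
    ### Fake chi-squared values with a minimum at shift = +3
    chisq_shift = [(s - 3)**2 for s in range(-shift_limit,shift_limit+1)]
    best_index = chisq_shift.index(min(chisq_shift))
    best_shift = best_index - shift_limit
    return best_shift  # -> 3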
def query_hscand(query_entry):
    ### Write the mysql query csh script
    command = "mysql --defaults-file=/home/gzhou/hscand.cfg HSCAND -e \"" + query_entry + "\" > /home/gzhou/query_result.txt"
    mysql_query = open("mysql_query.csh","w")
    mysql_query.write("#! /bin/csh \n")
    mysql_query.write(command + "\n")
    mysql_query.close()

    ### Copy the required scripts to hatsouth@princeton
    os.system("chmod a+x mysql_query.csh")
    print "copying files to hatsouth@princeton"
    os.system("scp hscand.cfg [email protected]:/home/gzhou/")
    os.system("scp mysql_query.csh [email protected]:/home/gzhou/")

    ### Execute the program and copy the results over
    print "Executing .csh files on princeton via ssh"
    os.system("ssh [email protected] '/home/gzhou/mysql_query.csh'")
    os.system("scp [email protected]:/home/gzhou/query_result.txt .")

    ### Read query_result.txt in as a list
    query_result = functions.read_ascii("query_result.txt")
    query_result = functions.read_table(query_result)
    os.system("rm query_result.txt")
    os.system("rm mysql_query.csh")
    return query_result
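### --- Added usage sketch (hypothetical; mirrors the call made in the
### orbit-fitting script later in this repo) ---
### query_hscand takes a raw SQL string and returns the parsed result
### table; row 0 of the result is the column header line. Left commented
### out since running it would scp/ssh to the remote host.
# query_entry = "select HATSE,HATSP,HATSq from HATS where HATSname='HATS-XXX'"  # object name hypothetical
# cand_params = query_hscand(query_entry)[1]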
def main(file_path,file_name):
    file_path_reduced = file_path + "reduced/"
    file_path_temp = file_path + "temp/"

    ### Fit 2D gaussians at the master coordinates to refine the positions
    coo = functions.read_table(functions.read_ascii(file_path_temp + "master_coo"))
    spatial = loadtxt(file_path_reduced + "spatial_" + file_name + ".dat")
    coord_fit = array(fit_2dgauss(coo,spatial))
    savetxt(file_path_reduced + "coords_" + file_name + ".dat",coord_fit,fmt="%.10f")
def plot_isochrones(program_dir,style,lwidth):
    ### Read the isochrone table and overplot Teff vs logg
    isochrones = functions.read_ascii(program_dir + "isochrone.dat")
    isochrones = functions.read_table(isochrones)
    isochrones = isochrones[:len(isochrones)-1]  # drop the trailing row
    isochrones = transpose(isochrones)

    teff = 10**array(isochrones[4])  # column 4 holds log10(Teff)
    logg = array(isochrones[5])
    plt.plot(teff,logg,style,linewidth=lwidth)
def run_fxcor(input_file,input_rv,lines):
    ### Reset keyword parameters and set a square bandpass filter
    iraf.unlearn(iraf.keywpars)
    iraf.filtpars.setParam("f_type","square",check=1,exact=1)
    iraf.filtpars.setParam("cuton",50,check=1,exact=1)
    iraf.filtpars.setParam("cutoff",2000,check=1,exact=1)

    os.system("rm fxcor_shift*")
    iraf.fxcor(
        objects = input_file,
        templates = input_rv,
        apertures = "*",
        cursor = "",
        continuum = "both",
        filter = "both",
        rebin = "smallest",
        pixcorr = 0,
        osample = lines,
        rsample = lines,
        apodize = 0.2,
        function = "gaussian",
        width = 15,
        height = 0.,
        peak = 0,
        minwidth = 15,
        maxwidth = 15,
        weights = 1.,
        background = "INDEF",
        window = "INDEF",
        wincenter = "INDEF",
        output = "fxcor_shift",
        verbose = "long",
        imupdate = 0,
        graphics = "stdgraph",
        interactive = 0,
        autowrite = 1,
        ccftype = "image",
        observatory = "sso",
        continpars = "",
        filtpars = "",
        keywpars = "")

    vel_shift = functions.read_ascii("fxcor_shift.txt")
    vel_shift = functions.read_table(vel_shift)
    vel_shift = str(vel_shift[0][11])
    if vel_shift == "INDEF":
        vel_shift = 0
    print "shifting by ",vel_shift,"km/s"
    return vel_shift
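### --- Added usage sketch (not part of the original script) ---
### run_fxcor correlates an object spectrum against a template over the
### given sample region and returns the fitted velocity shift in km/s.
### File names here are hypothetical; the region string follows the
### iraf.fxcor osample syntax used elsewhere in this pipeline:
# vel = run_fxcor("object_spectrum.fits","template.fits","a5250-6815")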
def loop_input_spectrum(input_wave,input_flux,folder,teff_space,logg_space,feh_space,w1,w2,perform_normalise):
    data = []
    for teff in teff_space:
        for logg in logg_space:
            for feh in feh_space:
                template_spectrum = "template_" + str(teff) + "_" + str(logg) + "_" + str(feh) + ".dat"
                #print folder + template_spectrum
                template_spectrum = functions.read_ascii(folder + template_spectrum)
                template_spectrum = functions.read_table(template_spectrum)
                template_spectrum = transpose(array(template_spectrum))
                if folder == model_path_flux:
                    template_spectrum = spectype_functions.normalise(template_spectrum,flux_normalise_w1,flux_normalise_w2)

                ### Crop the input spectrum to the [w1,w2] window
                ### (indices assume a wavelength grid with 1 A steps starting at min(input_wave))
                i1 = w1 - min(input_wave)
                i2 = w2 - min(input_wave)
                input_wave_cropped = input_wave[i1:i2]
                input_flux_cropped = input_flux[i1:i2]

                ### Interpolate the template onto the input wavelength scale
                template_spectrum = spectype_numerical_functions.chop_spectrum(template_spectrum,w1-10,w2+10)
                template_interp = interpolate.splrep(template_spectrum[0],template_spectrum[1],s=0)
                template_flux = interpolate.splev(input_wave_cropped,template_interp,der=0)

                ### Sigma-clip the residuals and record the RMS
                sigma = 3.0
                if perform_normalise:
                    diff_flux = input_flux_cropped/median(input_flux_cropped) - template_flux/median(template_flux)
                else:
                    diff_flux = input_flux_cropped - template_flux
                diff_flux = clip(diff_flux,median(diff_flux) - sigma*std(diff_flux),median(diff_flux) + sigma*std(diff_flux))
                rms = sqrt(sum(diff_flux**2) / float(len(input_wave_cropped)))

                # plt.clf()
                # plt.plot(input_wave_cropped,input_flux_cropped/median(input_flux_cropped))
                # plt.plot(input_wave_cropped,template_flux/median(template_flux))
                # plt.show()
                # #sys.exit()
                #print rms

                data.append(rms)
    return data
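### --- Added illustrative sketch (not part of the original script) ---
### The RMS above is made robust by clipping residuals to median +/- 3*std
### before squaring. A self-contained version of that step, assuming the
### module's "from numpy import *" is in effect:
def example_clipped_rms(residuals,sigma=3.0):
    residuals = array(residuals)
    lower = median(residuals) - sigma*std(residuals)
    upper = median(residuals) + sigma*std(residuals)
    clipped = clip(residuals,lower,upper)   # limit the influence of outliers
    return sqrt(sum(clipped**2) / float(len(clipped)))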
    print RV_list[i]
RV_list = temp_list

if len(RV_list) > 0:
    ### Create string list RV_Standards to feed into iraf
    RV_Standards = ""
    for i in range(len(RV_list)):
        RV_Standards = RV_Standards + RV_list[i] + ","

    ### Append VHELIO to all RV standard stars - this requires
    ### ascii file RV_standard.dat to be present in program/
    ### Read in RV_standard.dat
    RV_standard_dat = functions.read_ascii(program_dir + "RV_standard.dat")
    RV_standard_dat = functions.read_table(RV_standard_dat)

    ### Hedit VHELIO by matching object_name to RV_standard_dat
    for i in range(len(RV_list)):
        ### Find star name
        file_location = file_path_reduced + RV_list[i]
        hdulist = pyfits.open(file_location)
        object_name = hdulist[0].header["OBJECT"]
        hdulist.close()

        ### Find corresponding RV information in database
        VHELIO = retrieve_RV(object_name,RV_standard_dat)
        append_VHELIO(file_location,str(VHELIO))

############################
### Apply header changes ###
iraf.continpars.setParam("low_reject",2.0,check=1,exact=1)
iraf.continpars.setParam("high_reject",2.0,check=1,exact=1)

### Then apply fxcor to the stellar regions for RV measurement
os.system("rm fxcor_stellar*")
#region = "*"
#region = "a5700-6100"
region = "a5250-6815"

normalise(file_name)
run_fxcor("temp.fits","mdwarf_template_norm.fits",region,"fxcor_stellar",0,False)
os.system("cat fxcor_stellar.txt")

### Now calculate RV
data = functions.read_ascii("fxcor_stellar.txt")
data = functions.read_table(data)
rv = []
rverr = []
for i in data:
    if functions.is_number(i[3]):
        hjd = i[3] + 50000
    if functions.is_number(i[12]):
        if abs(i[12]) < 500 and abs(i[13]) < 500:
            rv.append(i[12])
            rverr.append(i[13])

RV = median(rv)
RV_err = median(rverr)
def plot_spectrum(rms_data,input_spectrum):
    rms_data = functions.read_ascii(rms_data)
    rms_data = functions.read_table(rms_data)
    rms_data = transpose(rms_data)

    ### Find the grid point with minimum RMS
    for i in range(len(rms_data[0])):
        if rms_data[3][i] == min(rms_data[3]):
            teff_min = rms_data[0][i]
            logg_min = rms_data[1][i]
            feh_min = rms_data[2][i]
            break
    print teff_min,logg_min,feh_min

    teff_list = []
    logg_list = []
    rms_list = []
    for i in range(len(rms_data[0])):
        if rms_data[2][i] == feh_min:
            teff_list.append(rms_data[0][i])
            logg_list.append(rms_data[1][i])
            rms_list.append(rms_data[3][i])

    ### Top panel: RMS over the Teff-logg plane
    plt.subplot(211)
    cm = matplotlib.cm.get_cmap('jet')
    sc = plt.scatter(teff_list,logg_list,c=rms_list,vmin=min(rms_list),vmax=max(rms_list),s=70,cmap=cm,edgecolor="w")
    cbar = plt.colorbar(sc)
    cbar.ax.set_ylabel("RMS")
    plt.scatter(teff_min,logg_min,color="r",s=70,marker="+")
    spectype_functions.plot_isochrones(program_dir,"r-",1)
    plt.xlim(max(teff_list)+250,min(teff_list)-250)
    plt.ylim(max(logg_list)+0.25,min(logg_list)-0.25)
    plt.xlabel("Teff (K)")
    plt.ylabel("Logg")

    ### Bottom panel: data vs best-fitting template spectrum
    plt.subplot(212)
    data_spectrum = functions.read_ascii(input_spectrum)
    data_spectrum = functions.read_table(data_spectrum)
    data_spectrum = transpose(array(data_spectrum))
    data_spectrum = spectype_functions.normalise(data_spectrum,flux_normalise_w1,flux_normalise_w2)

    template_spectrum = "template_" + str(int(teff_min)) + "_" + str(logg_min) + "_" + str(feh_min) + ".dat"
    template_spectrum = functions.read_ascii(model_path_flux + template_spectrum)
    template_spectrum = functions.read_table(template_spectrum)
    template_spectrum = transpose(array(template_spectrum))
    template_spectrum = spectype_functions.normalise(template_spectrum,flux_normalise_w1,flux_normalise_w2)

    plt.plot(data_spectrum[0],data_spectrum[1],"b-")
    plt.plot(template_spectrum[0],template_spectrum[1],"g-")
    plt.xlim(3700,5800)
    plt.xlabel("Wavelength (A)")
    plt.ylabel("Normalised flux")
    plt.show()
def find_shift(input1,input2,i1,i2,shift_range):
    ### Ensure the correlation window is at least 300 pixels wide
    if abs(i2 - i1) < 300:
        i1 = i1 - 150
        i2 = i2 + 150
    if i1 < 0:
        i1 = 0
        i2 = 300
    if i2 > len(input1):
        i2 = len(input1)
        i1 = len(input1) - 300

    ### Use xcorr
    currdir = os.getcwd()
    os.chdir(file_path_reduced)
    os.system("rm " + file_path_reduced + "shift_spec*")

    ### Note both segments are normalised by input1's median
    input1_cropped = input1[i1:i2] / median(input1[i1:i2])
    input2_cropped = input2[i1:i2] / median(input1[i1:i2])
    wave_axis = arange(1,len(input1_cropped)+1)

    shift_spec1 = open(file_path_reduced + "shift_spec1.txt","w")
    functions.write_table(transpose([wave_axis,input1_cropped]),shift_spec1)
    shift_spec1.close()

    shift_spec2 = open(file_path_reduced + "shift_spec2.txt","w")
    functions.write_table(transpose([wave_axis,input2_cropped]),shift_spec2)
    shift_spec2.close()

    iraf.rspectext(
        input = file_path_reduced + "shift_spec1.txt",
        output = file_path_reduced + "shift_spec1.fits",
        title = "shift_spec1",
        flux = 0,
        dtype = "interp",
        crval1 = "",
        cdelt1 = "",
        fd1 = "",
        fd2 = "")

    iraf.rspectext(
        input = file_path_reduced + "shift_spec2.txt",
        output = file_path_reduced + "shift_spec2.fits",
        title = "shift_spec2",
        flux = 0,
        dtype = "interp",
        crval1 = "",
        cdelt1 = "",
        fd1 = "",
        fd2 = "")

    time.sleep(0.5)

    ### Find shift
    os.system("rm apshift*")
    ### Make sure keywpars is set at default
    iraf.unlearn(iraf.keywpars)
    cuton = len(input1_cropped) / 25.
    cutoff = len(input1_cropped) / 2.5
    iraf.filtpars.setParam("f_type","welch",check=1,exact=1)
    iraf.filtpars.setParam("cuton",cuton,check=1,exact=1)
    iraf.filtpars.setParam("cutoff",cutoff,check=1,exact=1)

    run_fxcor(file_path_reduced+"shift_spec1.fits",file_path_reduced+"shift_spec2.fits","*","apshift",0,10,"gaussian","INDEF",0)

    vel_shift = functions.read_ascii("apshift.txt")
    vel_shift = functions.read_table(vel_shift)
    vel_shift = vel_shift[0][6]
    if vel_shift == "INDEF":
        vel_shift = 0.0
    if abs(vel_shift) > shift_range:
        vel_shift = 0.0
    print "best pixel shift of ",vel_shift

    os.system("rm apshift*")
    os.system("rm " + file_path_reduced + "shift_spec*")
    os.chdir(currdir)

    #if i1 < shift_range:
    #    i1 = shift_range
    #if i2 > len(input1)-shift_range:
    #    i2 = len(input1)-shift_range

    # shift_rms = []
    # shift_list = []
    # for shift in range(-1*shift_range,shift_range+1):
    #     input1_cropped = input1[i1+shift:i2+shift]
    #     input2_cropped = input2[i1:i2]
    #     diff = input1_cropped/median(input1_cropped) * input2_cropped/median(input2_cropped)
    #     rms = sum(diff)
    #     #rms = sqrt(sum(diff**2) /float(len(diff)))
    #     shift_rms.append(rms)
    #     shift_list.append(shift)

    # for i in range(len(shift_rms)):
    #     if shift_rms[i] == max(shift_rms):
    #         break
    # print "Applying a shift of ",shift_list[i]

    # plt.clf()
    # plt.plot(input1[i1+shift_list[i]:i2+shift_list[i]]/median(input1[i1+shift_list[i]:i2+shift_list[i]]),"b-")
    # plt.plot(input1[i1:i2]/median(input1[i1:i2]),"r-")
    # plt.plot(input2[i1:i2]/median(input2[i1:i2]),"g-")
    # plt.show()
    # return shift_list[i]

    return int(round(vel_shift,0))
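### --- Added sketch (not part of the original script) ---
### A pure-numpy version of the commented-out manual approach above:
### slide one normalised segment against the other and keep the integer
### shift that maximises their product sum. Assumes "from numpy import *".
def example_pixel_shift(input1,input2,i1,i2,shift_range):
    best_shift,best_score = 0,None
    for shift in range(-shift_range,shift_range+1):
        if i1+shift < 0 or i2+shift > len(input1):
            continue   # shifted window would run off the array
        seg1 = input1[i1+shift:i2+shift]
        seg2 = input2[i1:i2]
        score = sum((seg1/median(seg1)) * (seg2/median(seg2)))
        if best_score is None or score > best_score:
            best_shift,best_score = shift,score
    return best_shift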
hsmso_connect = functions.read_config_file("HSMSO_CONNECT")
hscand_connect = functions.read_config_file("HSCAND_CONNECT")
default_teff = float(functions.read_config_file("TEFF_ESTIMATE"))
default_logg = float(functions.read_config_file("LOGG_ESTIMATE"))
teff_ini,logg_ini = functions.estimate_teff_logg(object_name,hsmso_connect,hscand_connect,default_teff,default_logg)
feh_ini = 0.0
print "Initial estimate of teff, logg: ",str(teff_ini),str(logg_ini)

### Change directory to reduced/
program_dir = os.getcwd() + "/" #Save the current working directory
os.chdir(file_path_reduced) #Change to ../reduced/ dir

### Load in spectra
flux_spectrum = functions.read_ascii("fluxcal_" + file_name + ".dat")
flux_spectrum = functions.read_table(flux_spectrum)
flux_spectrum = transpose(array(flux_spectrum))
flux_spectrum = spectype_functions.normalise(flux_spectrum,flux_normalise_w1,flux_normalise_w2)

norm_spectrum = functions.read_ascii("norm_" + file_name + ".dat")
norm_spectrum = functions.read_table(norm_spectrum)
norm_spectrum = transpose(array(norm_spectrum))

print "Using specific regions for spectral typing"

### Check the temp and define which logg sensitive regions to use
#if teff_ini > 4750 and teff_ini < 5750:
if teff_ini > 4750 and teff_ini < 6250:
    #logg_regions = [[5140,5235]]
    logg_regions = [[5100,5400]]
if teff_ini <= 4750 and teff_ini > 4250:
    logg_regions = [[5100,5400]]
### Set program dir and change working directory
program_dir = os.getcwd() + "/" #Save the current working directory
os.chdir(file_path_reduced) #Change to ../reduced/ dir

### Find info from the fits header
hdulist = pyfits.open(file_path_reduced + "spec_" + file_name)
object_name = hdulist[0].header["OBJECT"]
dateobs = hdulist[0].header["DATE-OBS"]
mjd = hdulist[0].header["MJD-OBS"]
exptime = hdulist[0].header["EXPTIME"]
comment = hdulist[0].header["NOTES"]
hdulist.close()

### Read info from text files in reduced/
spectype = functions.read_ascii("spectype.txt")
spectype = functions.read_table(spectype)
for entry in spectype:
    if entry[1] == object_name and entry[0] == file_name:
        teff = entry[2]
        logg = entry[4]
        feh = entry[6]

image_quality = functions.read_ascii("image_quality.dat")
image_quality = functions.read_table(image_quality)
sn = 0.
entry_found = False
for entry in image_quality:
    if entry[0] == file_name and entry[1] == object_name:
        sn = entry[5]
def detect_stars(input_image,se_path,no_stars):
    ### Estimate the background as the median of all pixels
    image_data = pyfits.getdata(input_image)
    oned = []
    for i in range(len(image_data)):
        for j in range(len(image_data[0])):
            oned.append(image_data[i,j])
    med = median(oned)

    run_daofind(input_image,"master_coo",1)

    ### Run SExtractor with a manual background value
    os.system("rm coords.cat")
    SEcommand = se_path + " " + input_image + " -c default.sex"
    SEcommand = SEcommand + " -BACK_TYPE MANUAL -BACK_VALUE " + str(med)
    os.system(SEcommand)
    os.system("cat coords.cat")

    SE_coo = functions.read_ascii("coords.cat")
    SE_coo = functions.read_table(SE_coo)
    temp = []
    for i in SE_coo:
        if i[0] < 36.:
            temp.append(i)
    SE_coo = temp

    phot_coo = functions.read_ascii("master_coo")
    phot_coo = functions.read_table(phot_coo)
    temp = []
    for i in phot_coo:
        if i[0] < 36.:
            temp.append(i)
    phot_coo = temp

    ### Check if the objects in phot_coo exist also in SE_coo
    confirmed_objects = []
    for phot_obj in phot_coo:
        phot_obj_x = phot_obj[0]
        phot_obj_y = phot_obj[1]
        for SE_obj in SE_coo:
            SE_obj_x = SE_obj[0]
            SE_obj_y = SE_obj[1]
            SE_obj_fwhm = SE_obj[4]
            SE_obj_fwhm = 6  # override with a fixed matching radius
            # if SE_obj_fwhm < 5. or SE_obj_fwhm > 10.0:
            #     SE_obj_fwhm = 5
            if abs(phot_obj_x-SE_obj_x) < SE_obj_fwhm and abs(phot_obj_y-SE_obj_y) < SE_obj_fwhm:
                confirmed_objects.append(phot_obj)
                break

    if len(confirmed_objects) == 0 and len(SE_coo) > 0:
        print "NO matching objects, using SE coordinates"
        confirmed_objects = []
        for SE_obj in SE_coo:
            confirmed_objects.append([SE_obj[0],SE_obj[1],"INDEF",0.5,0.5,0.5,SE_obj[0]])
    elif len(confirmed_objects) == 0 and len(phot_coo) > 0:
        print "NO matching objects, using iraf.phot coordinates"
        confirmed_objects = phot_coo
    elif len(confirmed_objects) == 0 and len(phot_coo) == 0 and len(SE_coo) == 0:
        print "NO objects detected!!!"
        sys.exit()

    ### Order by brightness
    flux_list = []
    for i in confirmed_objects:
        aperture = circle(i[1]-1,i[0]-1,2.0,image_data)
        flux = aperture*image_data - aperture*med
        flux = flux.sum()
        flux_list.append(flux)
    flux_list_sorted = sorted(flux_list,reverse=True)
    print "flux",flux_list_sorted

    temp = []
    for i in range(len(flux_list_sorted)):
        j = flux_list.index(flux_list_sorted[i])
        temp.append(confirmed_objects[j])
    confirmed_objects = temp

    ### Remove unwanted objects
    if no_stars > 0:
        confirmed_objects = confirmed_objects[:no_stars]

    master_out = open("master_coo","w")
    functions.write_table(confirmed_objects,master_out)
    master_out.close()
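### --- Added sketch of the circle() helper used above (the original
### implementation lives elsewhere in this repo; this is an assumed
### equivalent for illustration, not the author's code) ---
### Returns a 0/1 mask of pixels within radius r of (y,x), so that
### aperture*image_data sums the flux inside the aperture.
def circle_sketch(y,x,r,image_data):
    mask = zeros(image_data.shape)
    for row in range(image_data.shape[0]):
        for col in range(image_data.shape[1]):
            if (row-y)**2 + (col-x)**2 <= r**2:
                mask[row,col] = 1.0
    return mask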
########################
### Start of program ###
########################

file_path = sys.argv[1]
file_path_temp = file_path + "temp/"
file_path_reduced = file_path + "reduced/"
file_name = sys.argv[2]
print "This script uses iraf.fxcor to generate a CCF for " + file_name + " using synthetic templates"

program_dir = os.getcwd() + "/" #Save the current working directory

### Load fxcor RV measurements
fxcor_stellar = functions.read_ascii(file_path_reduced + "fxcor_stellar.txt")
fxcor_stellar = functions.read_table(fxcor_stellar)

### Load grating / camera settings
grating = functions.read_config_file("GRATING")
dichroic = functions.read_config_file("RT560")
region_w1 = functions.read_param_file(grating+"_"+dichroic+"_w1")
region_w2 = functions.read_param_file(grating+"_"+dichroic+"_w2")

### Load location of library
synthetic_library = functions.read_param_file("RV_SPECTRAL_LIBRARY")

### Load RV fxcor region
stellar_region = functions.read_param_file("STELLAR_REGION")

### Determine best aperture
object_mjd = hdulist[0].header['MJD-OBS']
hdulist.close()

camera = functions.read_config_file("CAMERA")
grating = functions.read_config_file("GRATING")
dichroic = functions.read_config_file("DICHROIC")
combine_aps = functions.read_config_file("COMBINE_APERTURES")
task = functions.read_config_file("TASK")
no_apertures = eval(functions.read_config_file("NO_APERTURES"))

print "This script applies the NeAr arc image to calibrate the object spectrum " + file_name

### Get slice numbers and arc images to use
arc_list = functions.read_ascii(file_path_temp + "arcs_to_use.txt")
coo = functions.read_ascii(file_path_temp + "master_coo")
coo = functions.read_table(coo)

### Calculate the fractional weight of each arc
arc_weight = []
for arc_name in arc_list:
    hdulist = pyfits.open(file_path + arc_name)
    arc_mjd = hdulist[0].header['MJD-OBS']
    hdulist.close()
    arc_weight.append(abs(arc_mjd - object_mjd))
arc_weight = array(arc_weight)
arc_weight = arc_weight / sum(arc_weight)

### Define linelist to use
linelist = grating + "_linelist.dat"
###################################
### Load fxcor output txt files ###
###################################

### Set file_path
file_path = sys.argv[1]
file_path_temp = file_path + "temp/"
file_path_reduced = file_path + "reduced/"
file_name = sys.argv[2]
program_dir = os.getcwd() + "/" #Save the current working directory

### Load fxcor RV measurements
fxcor_stellar = functions.read_ascii(file_path_reduced + "fxcor_stellar.txt")
fxcor_stellar = functions.read_table(fxcor_stellar)

### Load grating / camera settings
grating = functions.read_config_file("GRATING")
dichroic = functions.read_config_file("DICHROIC")
region_w1 = functions.read_param_file(grating+"_"+dichroic+"_w1")
region_w2 = functions.read_param_file(grating+"_"+dichroic+"_w2")

########################
### Start of program ###
########################

### Find weights according to the flux of each aperture
aperture_weights = find_flux_weights(file_name)
print "weights for each aperture"
    cand_txt.close()
else:
    print "ERROR entries not found for " + object_name

if len(RV_points) > 1:
    print "Calculating orbital solution"

    ### Extract candidate phase information from HSCAND
    if object_name[:4] == "HATS":
        print "Using HSCAND for candidate parameters"
        query_entry = "select HATSE,HATSP,HATSq from HATS where HATSname=\'%s\' " % object_name_query
        cand_params = mysql_query.query_hscand(query_entry)[1]
    else:
        ### Try to find it in "candidates.txt"
        candidates_txt = functions.read_ascii(plots_folder + "candidates.txt")
        candidates_txt = functions.read_table(candidates_txt)
        object_found = False
        for entry in candidates_txt:
            print entry[0]
            if entry[0] == object_name_query:
                print "Using candidates.txt for candidate parameters"
                object_found = True
                cand_params = [entry[5],entry[6],entry[7]]
                #break
        if not object_found:
            print "Using default candidate parameters"
            RV_points = transpose(RV_points)
            HJD_points = RV_points[0]
            cand_params = [min(HJD_points),max(HJD_points)-min(HJD_points),0.1]
no_trials = 0
npoints = 20
while (not good_correction) and no_trials < 5:
    os.system("rm apshift*")
    ### Make sure keywpars is set at default
    iraf.unlearn(iraf.keywpars)
    iraf.filtpars.setParam("f_type","square",check=1,exact=1)
    iraf.filtpars.setParam("cuton",50,check=1,exact=1)
    iraf.filtpars.setParam("cutoff",10000,check=1,exact=1)

    run_fxcor("norm_" + im_slice + "_" + file_name,"telluric.fits",telluric_region,"apshift",0,npoints,"gaussian")

    vel_shift = functions.read_ascii("apshift.txt")
    vel_shift = functions.read_table(vel_shift)
    vel_shift = str(vel_shift[0][11])
    if not vel_shift == "INDEF":
        good_correction = True
    else:
        print "Fit did not converge, trying again with ",npoints,"n_points"
        npoints = npoints + 10
    no_trials = no_trials + 1

### If it still doesn't work, use the centre1d function
if vel_shift == "INDEF":
    os.system("rm apshift*")
    ### Make sure keywpars is set at default
    iraf.unlearn(iraf.keywpars)
import functions
import os
import sys
from numpy import *
import matplotlib.pyplot as plt

def rms(input_list):
    input_list = array(input_list)
    rms = sqrt(sum(input_list**2) / len(input_list))
    return rms

RV = functions.read_ascii("aperture_RV_3aps.dat")
RV = functions.read_table(RV)

for i in RV:
    ap1 = i[2]
    ap1_RV = i[3]
    ap2 = i[4]
    ap2_RV = i[5]
    plt.plot(array([ap1,ap2]) - min([ap1,ap2]),[ap1_RV,ap2_RV])

plt.show()
### Set program dir and change working directory
program_dir = os.getcwd() + "/" #Save the current working directory
os.chdir(file_path_reduced) #Change to ../reduced/ dir

### Find info from the fits header
hdulist = pyfits.open(file_path_reduced + "normspec_" + file_name)
object_name = hdulist[0].header["OBJECT"]
dateobs = hdulist[0].header["DATE-OBS"]
mjd = hdulist[0].header["MJD-OBS"]
exptime = hdulist[0].header["EXPTIME"]
comment = hdulist[0].header["NOTES"]
hdulist.close()

### Read info from text files in reduced/
RV_dat = functions.read_ascii("RV.dat")
RV_dat = functions.read_table(RV_dat)
for entry in RV_dat:
    if entry[0] == object_name and entry[1] == file_name:
        if functions.is_number(entry[2]):
            hjd = entry[2] + 50000
            RV = entry[3]
            RV_err = entry[4]
            ccf_height = entry[5]

ccf_log = functions.read_ascii("ccf_log.txt")
ccf_log = functions.read_table(ccf_log)
ccf_fwhm = 0
bis = 0
bis_err = 0
program_dir = os.getcwd() + "/" #Save the current working directory
os.chdir(file_path_reduced) #Change to ../reduced/ dir

os.system("mkdir ccf_pdfs")
os.system("rm ccf_pdfs/*" + file_name + "*")

################
### Load ccf ###
################

plt.clf()
hdulist = pyfits.open(file_path_reduced + "normspec_" + file_name)
candidate = hdulist[0].header["OBJECT"]
hdulist.close()

ccf = functions.read_ascii("ccf_" + file_name + ".txt")
ccf = functions.read_table(ccf)

### Find max
max_ccf = max(transpose(ccf)[1])
max_pos = 0
for i in range(len(ccf)):
    if ccf[i][1] == max_ccf:
        max_pos = i
        break

### Keep a window of +/- 40 points around the peak
#ccf = ccf[max_pos-200:max_pos+200]
ccf = ccf[max_pos-40:max_pos+40]
ccf = transpose(ccf)

### Define plotting axes
        free_param_vals.append(temp_param_vals[i])
        free_param_range.append(temp_param_range[i])
        free_param_func.append("b")

print "FREE PARAMS"
for i in range(len(free_param_names)):
    print free_param_names[i], free_param_vals[i], free_param_range[i]

print "FIXED PARAMS"
for i in range(len(fixed_param_names)):
    print fixed_param_names[i], fixed_param_vals[i]

x0 = zeros(len(free_param_names))

### Load the best-fitting parameter values from the MCMC output
free_param_vals = [functions.read_ascii("best_param_mcmc")[1]]
free_param_vals = array(functions.read_table(free_param_vals))[0]
print free_param_vals

### Evaluate the model for each light curve and collect the points
phase, flux, err, model = [], [], [], []
for n in range(len(lclist)):
    lc = lclist[n]
    lc = loadtxt(lc)
    phase_n, flux_n, err_n, model_n = fitting_functions.lc_chisq(
        free_param_vals, free_param_names, fixed_param_names, fixed_param_vals,
        lc, False, True, cadence[n])
    phase += list(phase_n)
    flux += list(flux_n)
    err += list(err_n)
no_apertures = int(functions.read_config_file("NO_APERTURES"))
no_stars = int(functions.read_config_file("NO_STARS"))
se_path = functions.read_param_file("SE_PATH")
program_dir = os.getcwd() + "/"

### Set file_path
file_path = sys.argv[1]
file_path_temp = file_path + "temp/"
file_path_reduced = file_path + "reduced/"
file_name = sys.argv[2]

interactive = functions.read_config_file("INTERACT")

image_slices_list = functions.read_ascii(file_path_temp + "slice_" + file_name + ".txt")
image_slices_list = functions.read_table(image_slices_list)
image_slices_list = image_slices_list[1:]

hdulist = pyfits.open(file_path + file_name)
object_name = hdulist[0].header['OBJECT']
hdulist.close()

os.chdir(file_path_temp)

########################################################
### Reconstruct array by reading in each image slice ###
########################################################

spatial_image = []
for image_slice in image_slices_list:
    row_image = pyfits.getdata(image_slice[0])
    value = redden,
    R = 3.1,
    type = "E(B-V)",
    apertures = "*",
    override = 1,
    uncorrect = 0,
    mode = "al")

### Create .dat file out of fits file redden_name
os.system("rm " + redden_name + ".dat")
iraf.wspectext(redden_name + "[*,1,1]", redden_name + ".dat")

spectrum = functions.read_ascii(redden_name + ".dat")
spectrum = functions.read_table(spectrum)

### Keep only valid two-column numeric rows
temp = []
for i in spectrum:
    if len(i) == 2:
        if functions.is_number(i[0]):
            temp.append(i)
spectrum = temp
spectrum = spectrum[1:len(spectrum)-2]

output_spectrum = open(redden_name + ".dat","w")
functions.write_table(spectrum,output_spectrum)
output_spectrum.close()

os.system("mv " + redden_name + ".dat deredden")
os.system("mv " + redden_name + " deredden")
def plot_spectrum(rms_data,input_spectrum):
    print "Plotting ",input_spectrum
    rms_data = functions.read_ascii(rms_data)
    rms_data = functions.read_table(rms_data)
    rms_data = transpose(rms_data)

    ### Find min
    for i in range(len(rms_data[0])):
        if rms_data[3][i] == min(rms_data[3]):
            teff_min = rms_data[0][i]
            logg_min = rms_data[1][i]
            feh_min = rms_data[2][i]
            break
    print teff_min,logg_min,feh_min,min(rms_data[3])

    teff_list = []
    logg_list = []
    rms_list = []
    for i in range(len(rms_data[0])):
        if rms_data[2][i] == feh_min:
            teff_list.append(rms_data[0][i])
            logg_list.append(rms_data[1][i])
            rms_list.append(rms_data[3][i])

    ### Create 2D space (grid steps: 250 K in Teff, 0.5 dex in logg)
    teff_space = arange(min(teff_list),max(teff_list)+250,250)
    logg_space = arange(min(logg_list),max(logg_list)+0.5,0.5)
    rms_space = zeros([len(teff_space),len(logg_space)])
    for i in range(len(rms_list)):
        x_index = int((teff_list[i] - min(teff_list)) / 250.)
        y_index = int((logg_list[i] - min(logg_list)) / 0.5)
        rms_space[x_index,y_index] = rms_list[i]

    ### Crop 2D space to perform gaussian fit for min
    teff_space_cropped,logg_space_cropped,rms_space_cropped = spectype_functions.chop_array(teff_space,logg_space,transpose(rms_space),teff_min,logg_min,250,0.5)
    rms_space_cropped = -1*(rms_space_cropped - rms_space_cropped.max())
    print rms_space_cropped

    try:
        gauss_fit = spectype_functions.fitgaussian(rms_space_cropped)
        teff_min_fit = min(teff_space_cropped) + gauss_fit[2] * 250
        logg_min_fit = min(logg_space_cropped) + gauss_fit[1] * 0.5
    except TypeError:
        print "Bad gaussian fit, using abs min"
        teff_min_fit = teff_min
        logg_min_fit = logg_min

    ### Clamp the fitted minimum to the grid limits
    if teff_min_fit < 3500:
        teff_min_fit = 3500
    if teff_min_fit > 9000:
        teff_min_fit = 9000
    if logg_min_fit < 0.0:
        logg_min_fit = 0.0
    if logg_min_fit > 5.0:
        logg_min_fit = 5.0

    teff_min = int(spectype_functions.round_value(teff_min_fit,250.))
    logg_min = spectype_functions.round_value(logg_min_fit,0.5)
    print teff_min,logg_min

    ### Plot teff_logg space
    plt.figure(figsize=(7,5))
    plt.subplot(211)
    plt.title(object_name+" "+file_name+" "+str(int(round(teff_min_fit,0)))+" "+str(round(logg_min_fit,1))+" "+str(feh_min)+" \n RMS="+str(round(min(rms_data[3]),4)))
    v_min = min(rms_list)
    v_max = min(rms_list) + ((max(rms_list)-min(rms_list))/3.)
    #v_max = max(rms_list)
    rms_space = clip(rms_space,v_min,v_max)
    cm = matplotlib.cm.get_cmap('jet')
    sc = plt.contourf(teff_space,logg_space,transpose(rms_space),100,cmap=cm)
    #sc = plt.scatter(teff_list, logg_list, c=rms_list, vmin=min(rms_list), vmax=(max(rms_list)-min(rms_list))/3+min(rms_list), s=150, cmap=cm,edgecolor="w")
    cbar = plt.colorbar(sc)
    cbar.ax.set_ylabel("RMS")
    plt.scatter(teff_min_fit,logg_min_fit,color="r",s=70,marker="+")
    spectype_functions.plot_isochrones(program_dir,"r-",1)
    plt.xlim(max(teff_list),min(teff_list))
    plt.ylim(max(logg_list),min(logg_list))
    #plt.xlim(max(teff_list)+250,min(teff_list)-250)
    #plt.ylim(max(logg_list)+.25,min(logg_list)-0.25)
    plt.xlabel("Teff (K)")
    plt.ylabel("Logg")

    ### Plot spectrum
    plt.subplot(212)
    data_spectrum = functions.read_ascii(input_spectrum)
    data_spectrum = functions.read_table(data_spectrum)
    data_spectrum = transpose(array(data_spectrum))
    data_spectrum = spectype_functions.normalise(data_spectrum,flux_normalise_w1,flux_normalise_w2)

    template_spectrum = "template_" + str(int(teff_min)) + "_" + str(logg_min) + "_" + str(feh_min) + ".dat"
    template_spectrum = functions.read_ascii(model_path_flux + template_spectrum)
    template_spectrum = functions.read_table(template_spectrum)
    template_spectrum = transpose(array(template_spectrum))
    template_spectrum = spectype_functions.normalise(template_spectrum,flux_normalise_w1,flux_normalise_w2)

    data_wave = data_spectrum[0]
    data_flux = data_spectrum[1]
    template_wave = template_spectrum[0]
    template_flux = template_spectrum[1]

    ### Doppler-correct the data wavelengths, then interpolate both
    ### spectra onto the master wavelength grid
    c = 3.0 * 10**5  # speed of light (km/s)
    data_wave = data_wave / ((vel_shift / c) + 1)

    data_interp = interpolate.splrep(data_wave,data_flux,s=0)
    data_flux = interpolate.splev(master_flux_wave,data_interp,der=0)
    template_interp = interpolate.splrep(template_wave,template_flux,s=0)
    template_flux = interpolate.splev(master_flux_wave,template_interp,der=0)

    plt.plot(master_flux_wave,data_flux,"b-",label="data")
    plt.plot(master_flux_wave,template_flux,"g-",label="template")
    plt.xlim(3600,5800)
    ylim_range = max(template_flux) - min(template_flux)
    plt.ylim(min(template_flux)-ylim_range*0.2,max(template_flux)+ylim_range*0.2)
    plt.legend(loc="lower right",ncol=2)
    plt.xlabel("Wavelength (A)")
    plt.ylabel("Normalised flux")

    os.system("rm " + file_path_reduced + "spectype_plots/" + object_name + "_" + file_name + ".pdf")
    plt.savefig(file_path_reduced + "spectype_plots/" + object_name + "_" + file_name + ".pdf")
    #plt.show()

    return teff_min_fit,logg_min_fit,feh_min
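### --- Added sketch (an assumed equivalent of spectype_functions.fitgaussian;
### the real implementation lives in that module) ---
### Least-squares fit of a 2D gaussian to an array, seeded from image
### moments, returning (height, row_center, col_center, row_width, col_width)
### so that elements [1] and [2] are sub-pixel grid positions as used above.
import numpy
from scipy import optimize

def fitgaussian_sketch(data):
    ### Moment-based initial guess
    total = data.sum()
    X,Y = numpy.indices(data.shape)
    row = (X*data).sum() / total
    col = (Y*data).sum() / total
    width_row = numpy.sqrt(abs(((X-row)**2 * data).sum() / total))
    width_col = numpy.sqrt(abs(((Y-col)**2 * data).sum() / total))
    height = data.max()

    def gaussian(p):
        h,r0,c0,wr,wc = p
        return lambda r,c: h*numpy.exp(-(((r0-r)/wr)**2 + ((c0-c)/wc)**2)/2.)

    ### Minimise the raveled residuals between model and data
    errorfunction = lambda p: numpy.ravel(gaussian(p)(*numpy.indices(data.shape)) - data)
    p,success = optimize.leastsq(errorfunction,(height,row,col,width_row,width_col))
    return p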
#################
### Functions ###
#################

########################
### Start of program ###
########################

file_path = sys.argv[1]
file_path_temp = file_path + "temp/"
file_path_reduced = file_path + "reduced/"
file_name = sys.argv[2]

### Read in the correct image slice to analyse
slices = functions.read_ascii(file_path_temp + "stellar_apertures.txt")
slices = functions.read_table(slices)
slice_to_use = slices[0][0]
image_data = pyfits.getdata(file_path_temp + str(int(slice_to_use)) + "_" + file_name)
slice_data = image_data

### Chop out the 200 columns in the centre of the image
image_data = transpose(image_data)
image_data = image_data[len(image_data)/2 - 100:len(image_data)/2 + 100]
image_data = transpose(image_data)

median_list = []
for i in image_data:
    median_list.append(median(i))

for i in range(len(median_list)):
    if median_list[i] == max(median_list):
########################

### Set file_path
file_path = sys.argv[1]
file_path_temp = file_path + "temp/"
file_path_reduced = file_path + "reduced/"
file_name = sys.argv[2]

biassubtracted_file_name = "out_ccdproc_" + file_name

### Load in the image slices table
### This table was created by define_image_slices.py
### and contains locations of the image slices
### according to a flat field frame
image_slices = functions.read_ascii(file_path_temp + "image_slice_table.txt")
image_slices = functions.read_table(image_slices)

### Loop through and cut out each slice,
### saving each in an individual file
print "Chopping image into its image slices"
os.chdir(file_path_temp)
slices_file_list = ""
for i in range(len(image_slices)):
    start_column = int(image_slices[i][0])
    end_column = int(image_slices[i][1])
    region = '[1:4093,' + str(start_column) + ':' + str(end_column) + ']'
    print region
    os.system("rm " + str(i) + "_" + file_name)
        free_param_range.append(temp_param_range[i])
        free_param_func.append("b")

print "FREE PARAMS"
for i in range(len(free_param_names)):
    print free_param_names[i],free_param_vals[i],free_param_range[i]

print "FIXED PARAMS"
for i in range(len(fixed_param_names)):
    print fixed_param_names[i],fixed_param_vals[i]

x0 = zeros(len(free_param_names))

### Load the best-fitting parameter values from the MCMC output
free_param_vals = [functions.read_ascii("best_param_mcmc")[1]]
free_param_vals = array(functions.read_table(free_param_vals))[0]
print free_param_vals

phase,flux,err,model = fitting_functions.lc_chisq(free_param_vals,free_param_names,fixed_param_names,fixed_param_vals,lc,False,True)

### Plot data over two phases: data in black, model in red
plt.clf()
plt.scatter(phase,flux,s=1,color="k")
plt.scatter(phase+1,flux,s=1,color="k")
plt.scatter(phase,model,s=2,color="r")
plt.scatter(phase+1,model,s=2,color="r")
plt.xlim(0.995,1.005)
plt.show()
### Set program dir and change working directory
program_dir = os.getcwd() + "/" #Save the current working directory
os.chdir(file_path_reduced) #Change to ../reduced/ dir

### Find info from the fits header
hdulist = pyfits.open(file_path + file_name)
object_name = hdulist[0].header["OBJECT"]
dateobs = hdulist[0].header["DATE-OBS"]
mjd = hdulist[0].header["MJD-OBS"]
exptime = hdulist[0].header["EXPTIME"]
comment = hdulist[0].header["NOTES"]
hdulist.close()

image_quality = functions.read_ascii("image_quality.dat")
image_quality = functions.read_table(image_quality)
for entry in image_quality:
    if entry[0] == file_name and entry[1] == object_name:
        sn = entry[5]

import MySQLdb

sql_date = string.split(dateobs,"T")[0]
sql_time = string.split(dateobs,"T")[1]

print "Connecting to database"
db = MySQLdb.connect(host="marbles.anu.edu.au",user="******",passwd="h@ts0uthDB",db="daniel1")
c = db.cursor()
c.execute("""SELECT SPECid FROM SPEC WHERE SPECmjd=""" + str(mjd) + """ and SPECobject=\"%s\" """ % object_name)
#teff_regions = [[3800,5500]]
#logg_regions = [[]]
#teff_regions = [[3500,5900]]

##########################
### Start the analysis ###
##########################

### Change directory to reduced/
program_dir = os.getcwd() + "/" #Save the current working directory
os.chdir(file_path_reduced) #Change to ../reduced/ dir

### Load in spectra
norm_spectrum = functions.read_ascii("norm_" + file_name + ".dat")
norm_spectrum = functions.read_table(norm_spectrum)
norm_spectrum = transpose(array(norm_spectrum))

flux_spectrum = functions.read_ascii("fluxcal_" + file_name + ".dat")
flux_spectrum = functions.read_table(flux_spectrum)
flux_spectrum = transpose(array(flux_spectrum))
flux_spectrum = spectype_functions.normalise(flux_spectrum,flux_normalise_w1,flux_normalise_w2)

### Find shift
os.system("rm apshift*")
### Make sure keywpars is set at default
iraf.unlearn(iraf.keywpars)
iraf.filtpars.setParam("f_type","square",check=1,exact=1)