Code example #1
File: fitting_functions.py Project: georgezhou/lcfit
def calc_probability(initial_params,free_param_names,fixed_param_names,fixed_param_values,prior_params,prior_range,lc,chisq_base,plot_pdf,cadence):

    chisq = 0
    for n in range(len(lc)):
        lc_n = lc[n]
        chisq += lc_chisq(initial_params,free_param_names,fixed_param_names,fixed_param_values,lc_n,plot_pdf,False,cadence[n])

    global stellar_params,tested_params,chisq_log

    prob = (chisq_base-chisq)/2

    global period_i,t0_i,rsum_i,rratio_i,i_0_i,ld1_i,ld2_i,tdiff_i,edepth_i,planet_f_i,planet_alpha_i
    ### Give dummy values to avoid error
    [period_i,t0_i,rsum_i,rratio_i,i_0_i,ld1_i,ld2_i,tdiff_i,edepth_i,planet_f_i,planet_alpha_i] = [1,1,1,1,1,1,1,1,1,1,1]


    ### Set parameter names
    for i in range(len(free_param_names)):
        globals()[free_param_names[i]+"_i"] = initial_params[i]

    for i in range(len(fixed_param_names)):
        globals()[fixed_param_names[i]+"_i"] = fixed_param_values[i]

    i_0_i = i_0_i * pi / 180.
    
    try:
        e_0_i = (ecosw_i**2+esinw_i**2)**(0.5)
        w_0_i = arccos(ecosw_i/e_0_i)
    except ZeroDivisionError:
        e_0_i = 0
        w_0_i = pi

    a_0_i = (rsum_i)/(1+rratio_i)

    for i in range(len(free_param_names)):
        if initial_params[i] < prior_range[i][0] or initial_params[i] > prior_range[i][1]:
            print free_param_names[i],initial_params[i],prior_range[i]
            prob = NaN
            break

    if functions.isnan(prob):

        tested_params = [list(initial_params)]
        chisq_log = [[chisq]]


        f = open("test","w")
        functions.write_table(tested_params,f)
        f.close()
        os.system("cat test >> mcmc_tested_params")

        f = open("test","w")
        functions.write_table(chisq_log,f)
        f.close()
        os.system("cat test >> mcmc_chisq_log")

    print prob
    return prob
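
Every example on this page writes its results through functions.write_table(table, file_handle), a helper from the author's own functions module that is not shown here. Judging only from the call sites (a list of rows, each a list of numbers or strings, written to an already-open file), a minimal stand-in might look like the sketch below; the exact column formatting is an assumption:

def write_table(table, out_file):
    ### Hypothetical stand-in inferred from the call sites on this page;
    ### the real functions.write_table may format columns differently
    for row in table:
        out_file.write(" ".join(str(entry) for entry in row) + "\n")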
Code example #2
def calc_probability(initial_params,default_params,free_param_names,fixed_param_names,fixed_param_values,prior_params,prior_mean,prior_std,prior_func,lc,chisq_base,plot_pdf,cadence):

    initial_params = array(initial_params)*array(default_params)+array(default_params)

    chisq = 0
    for n in range(len(lc)):
        lc_n = lc[n]
        chisq += lc_chisq(initial_params,free_param_names,fixed_param_names,fixed_param_values,lc_n,plot_pdf,False,cadence[n])

    global stellar_params,tested_params,chisq_log

    prob = (chisq_base-chisq)/2

    ### Calculate stellar parameters
    global period_i,t0_i,rsum_i,rratio_i,i_0_i,ld1_i,ld2_i,edepth_i
    ### Give dummy values to avoid error
    [period_i,t0_i,rsum_i,rratio_i,i_0_i,ld1_i,ld2_i,edepth_i] = [1,1,1,1,1,1,1,1]

    ### Set parameter names
    for i in range(len(free_param_names)):
        globals()[free_param_names[i]+"_i"] = initial_params[i]

    for i in range(len(fixed_param_names)):
        globals()[fixed_param_names[i]+"_i"] = fixed_param_values[i]

    i_0_i = i_0_i * pi / 180.
    
    e_0_i = sqrt(ecosw_i**2+esinw_i**2)
    w_0_i = arccos(ecosw_i/e_0_i)

    a_0_i = (rsum_i)/(1+rratio_i)

    for i in range(len(free_param_names)):
        if prior_func[i] == "g":
            prob = prob * gaussian(initial_params[i],prior_mean[i],prior_std[i])
        if prior_func[i] == "b":
            prob = prob * box(initial_params[i],prior_mean[i],prior_std[i])
            #print prob,initial_params[i],prior_mean[i],prior_std[i]
    
    if functions.isnan(prob):

        tested_params = [list(initial_params)]
        chisq_log = [[chisq]]


        f = open("test","w")
        functions.write_table(tested_params,f)
        f.close()
        os.system("cat test >> mcmc_tested_params")

        f = open("test","w")
        functions.write_table(chisq_log,f)
        f.close()
        os.system("cat test >> mcmc_chisq_log")

    print prob
    return prob
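
Example #2 multiplies the probability by gaussian and box prior functions defined elsewhere in the project. Plausible forms consistent with the (value, mean, width) call signature above, offered as an assumption rather than the project's actual code:

from numpy import exp

def gaussian(x, mean, std):
    ### Hypothetical unnormalised Gaussian prior weight
    return exp(-0.5 * ((x - mean) / std)**2)

def box(x, mean, width):
    ### Hypothetical top-hat prior: 1 inside mean +/- width, 0 outside
    if abs(x - mean) <= width:
        return 1.0
    return 0.0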
Code example #3
def create_lightcurves(file_path,file_list,reflist,out_dir):
    jd_header = "UTMJD"


    lightcurves = []
    for i in reflist:
        lightcurves.append([])

    for input_file in file_list:

        hdulist = pyfits.open(file_path+input_file)
        jd = hdulist[0].header[jd_header]

        print input_file,jd

        phot = loadtxt(file_path+input_file+".phot")

        for star_i in range(len(reflist)):
            star_found = False
            #for star_j in phot:
            for j in range(len(phot)):
                star_j = phot[j]
                if reflist[star_i,0] == star_j[0]:

                    lightcurves[star_i].append([jd]+list(star_j[1:]))

                    star_found = True
                    break
            if not star_found:
                lightcurves[star_i].append([jd]+list(nan*zeros(len(phot[0]))))

    for i in range(len(reflist)):

        try:
            lc = array(lightcurves[i])

        except ValueError:
            print "ValueError in lightcurve",i
            lc = lightcurves[i]


        try:
            savetxt(out_dir+str(int(reflist[i,0]))+".rawlc",lc,fmt="%.5f")
        except (TypeError,ValueError):
            print "Error in writing lightcurve",i
            o = open(out_dir+str(int(reflist[i,0]))+".rawlc","w")
            functions.write_table(lc,o)
            o.close()
Code example #4
File: formatlc.py Project: georgezhou/lcfit
data_midtime = median(data[:,0])
epoch = round((data_midtime-param[0])/ param[1])
midtime = param[0]+epoch*param[1]
tdur = param[2]/24.
oot = [[midtime-tdur*4,midtime-tdur*0.6],[midtime+tdur*0.6,midtime+tdur*4]]
transit = [[midtime-tdur*1.,midtime+tdur*1.]]


# oot = cutout_regions(data,oot)
# plt.scatter(oot[:,0],oot[:,2],s=1)
# oot_fit = polyfit(oot[:,0],oot[:,2],6)
# x = arange(min(oot[:,0]),max(oot[:,0]),0.05)
# oot_plot = polyval(oot_fit,x)
# plt.plot(x,oot_plot)
# plt.show()

data = cutout_regions(data,transit)
# data[:,2] = data[:,2] - polyval(oot_fit,data[:,0])

mag = data[:,2]
flux = 10**((mag-median(mag))/-2.5)

o = open("1.dat","w")
output = [data[:,0],flux,ones(len(data))]
output = transpose(output)
functions.write_table(output,o)
o.close()

plt.scatter(data[:,0],flux)
plt.show()
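
This example and example #7 both convert magnitudes to relative flux through Pogson's relation, m1 - m2 = -2.5 log10(F1/F2), using the median magnitude as the zero point. A quick check of the sign convention:

### A star 0.01 mag fainter than the median zero point
delta_m = 0.01
flux_ratio = 10**(delta_m / -2.5)
print flux_ratio   # ~0.9908, i.e. about 0.9% fainter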
Code example #5
File: analyse_spectrum.py Project: georgezhou/hsfu23
### Calculate signal/noise
i_signal = spectrum_maximum
i_sky = 0.03
i_dark = 0.001
i_RN = 5
gain = 0.9

signal_noise = (gain*spectrum_maximum) / (sqrt((i_signal*gain) + (i_sky * exptime)**2 + (i_dark* exptime)**2 + i_RN**2))
signal_noise = signal_noise * 2.3 ### convert from sn/pixel to sn/resolution element

print "The signal to noise is ",signal_noise

### Write information to table
### Format
### file_name object_name JD exptime max S/N
info = [[file_name,object_name,JD,exptime,spectrum_maximum,signal_noise]]

### Write to log file
if os.path.exists(file_path_reduced + "image_quality.dat"):
	image_quality_file = functions.read_ascii(file_path_reduced + "image_quality.dat")
	image_quality_file = functions.read_table(image_quality_file)

	for image in image_quality_file:
		if not image[0] == file_name and not image[1] == object_name:
			info.append(image)

image_quality = open(file_path_reduced + "image_quality.dat","w")
functions.write_table(info,image_quality)
image_quality.close()
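
For comparison, the textbook CCD signal-to-noise equation adds every noise source as a variance under a single square root, whereas the snippet above squares the sky and dark terms a second time. A sketch of the standard form, assuming per-pixel rates in electrons per second:

from numpy import sqrt

def ccd_snr(signal_e, sky_rate, dark_rate, read_noise, exptime, npix=1):
    ### Poisson terms (signal, sky, dark) enter linearly as variances;
    ### read noise enters squared once per pixel
    variance = signal_e + npix*(sky_rate*exptime + dark_rate*exptime + read_noise**2)
    return signal_e / sqrt(variance)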
Code example #6
File: deredden.py Project: georgezhou/hsfu23
        type = "E(B-V)",\
        apertures = "*",\
        override = 1,\
        uncorrect = 0,\
        mode = "al")

    ### Create .dat file out of fits file redden_name
    
    os.system("rm " + redden_name + ".dat")

    iraf.wspectext(redden_name + "[*,1,1]", redden_name + ".dat")

    spectrum = functions.read_ascii(redden_name + ".dat")
    spectrum = functions.read_table(spectrum)
    temp = []
    for i in spectrum:
        if len(i) == 2:
            if functions.is_number(i[0]):
                temp.append(i)
    spectrum = temp
    spectrum = spectrum[1:len(spectrum)-2]

    output_spectrum = open(redden_name + ".dat","w")
    functions.write_table(spectrum,output_spectrum)
    output_spectrum.close()

    os.system("mv " + redden_name + ".dat deredden")
    os.system("mv " + redden_name + " deredden")

    redden = redden + redden_step
Code example #7
from numpy import *
import matplotlib.pyplot as plt
import functions

data = loadtxt("kplr006603043-2011145075126_slc.tab")

data_new = []
for i in data:
    if i[0] > 1691.5 and i[0] < 1693.1:
        data_new.append(i)
data = array(data_new)

mag = data[:, 3]
flux = 10**((mag - median(mag)) / -2.5)

o = open("lc2.dat", "w")
output = [data[:, 0], flux, data[:, 4]]
output = transpose(output)
functions.write_table(output, o)
o.close()

plt.scatter(data[:, 0], flux)
plt.show()
Code example #8
File: fake_transit.py Project: georgezhou/lcfit
    
for n in arange(1,20):

    starting_pos = random.gauss(0.6,0.1)


    if cadence == "short":
        hjd_i = arange(t0_i-1*starting_pos,t0_i+starting_pos,1.0/(24.*60.))
    if cadence == "long":
        hjd_i = arange(t0_i-1*starting_pos,t0_i+starting_pos,30.0/(24.*60.))
        #hjd_i = arange(t0_i-starting_pos,t0_i+starting_pos,1.0/(24.*60.))

    model = make_model(hjd_i)

    #bnoise = brownian(0., hjd_i, 0.5*10.**(-4))
    gnoise = gaussian(hjd_i,0.,1*10.**(-4))

    #model = model + bnoise + gnoise

    model += gnoise

    data = transpose([hjd_i,model,ones(len(hjd_i))])

    o = open("data/transit_lc_"+str(n)+"_"+str(planet_f_i)+"_"+str(planet_alpha_i),"w")
    functions.write_table(data,o)
    o.close()


    # plt.scatter(hjd_i,model,s=0.1)
    # plt.show()
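
The gaussian(hjd_i, 0., 1e-4) call here evidently returns an array of Gaussian noise, one draw per time stamp (a different signature from the prior-weight gaussian in example #2). A minimal stand-in, as an assumption:

import numpy

def gaussian(t, mean, sigma):
    ### Hypothetical noise generator matching gaussian(hjd_i, 0., 1e-4)
    return numpy.random.normal(mean, sigma, len(t))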
Code example #9
    master_flat = transpose(master_flat)

    ### Find the median value across the column
    median_values = []
    for i in range(len(master_flat)):
        median_values.append(median(master_flat[i]))

    ### Find the start and end of each image slice
    initial_value = median_values[0]
    threshold = mean(median_values)/2.
    region_table = []
    for i in range(1,len(median_values)):
        if median_values[i] > threshold and median_values[i-1] < threshold:
            region_table.append([i+1,i+38])
            #region_table.append([i+5,i+33])

    ### Write the table into a text file
    image_slice_table = open(file_path_temp + "image_slice_table.txt","w")
    functions.write_table(region_table,image_slice_table)
    image_slice_table.close()

### If no master flat exists, use default table
else:
    print "Using default image slice positions"
    camera = functions.read_config_file("CAMERA")
    if camera == "red":
        os.system("cp image_slice_table_red.txt " + file_path_temp + "image_slice_table.txt")
    if camera == "blue":
        os.system("cp image_slice_table_blue.txt " + file_path_temp + "image_slice_table.txt")

Code example #10
File: plot_cand_RV.py Project: georgezhou/hsfu23
else:
    object_name_query = object_name

print object_name_query

plots_folder = functions.read_param_file("RV_PLOTS_FOLDER")

### Extract rv points from HSMSO
query_entry = "select SPEChjd, SPECrv, SPECrv_err from SPEC where SPECtype=\"RV\" and SPECobject=\"%s\" " % object_name
RV_points = mysql_query.query_hsmso(query_entry)

print RV_points

if len(RV_points) > 0:
    cand_txt = open(plots_folder + object_name + ".txt","w")
    functions.write_table(RV_points,cand_txt)
    cand_txt.close()
else:
    print "ERROR entries not found for " + object_name

if len(RV_points) > 1:
    print "Calculating orbital solution"
    ### Extract candidate phase information from HSCAND
    if object_name[:4]=="HATS":
        print "Using HSCAND for candidate parameters"
        query_entry = "select HATSE,HATSP,HATSq from HATS where HATSname=\'%s\' " % object_name_query
        cand_params = mysql_query.query_hscand(query_entry)[1]

    else:
        ### Try to find it in "candidates.txt"
        candidates_txt = functions.read_ascii(plots_folder + "candidates.txt")
Code example #11
File: spectype_main_V4.py Project: georgezhou/hsfu23
    count = 1
    for region in teff_regions:
        rms = array(loop_input_spectrum(master_flux_wave,flux_flux,model_path_flux,teff_space,logg_space,feh_space,region[0],region[1],True))
        rms = (rms + rms_logg + rms_feh) / 3.
        rms_teff = rms_teff + rms
        count = count+1
    
    rms_teff = rms_teff / float(count)

    i = find_min_index(rms_teff)
    print teff_table_list[i],logg_table_list[i],feh_table_list[i],rms_teff[i]

    rms_red_table = transpose([teff_table_list,logg_table_list,feh_table_list,rms_teff])

    output_rms_table = open(str(reddening)+"_rms_table","w")
    functions.write_table(rms_red_table,output_rms_table)
    output_rms_table.close()

    reddening_rms.append(min(rms_teff))

###########################
### Find best reddening ###
###########################

for i in range(len(reddening_values)):
    if reddening_rms[i] == min(reddening_rms):
        best_reddening = reddening_values[i]
        print best_reddening
        break

plot_spectrum(str(best_reddening)+"_rms_table",reddening_list[0])
Code example #12
File: calculate_RV.py Project: georgezhou/hsfu23
        row_name = string.split(row_name,"_")[-1]
        if not string.split(file_name,"_")[-1] == row_name:
            temp_table.append(i)
    RV_out = temp_table
else:
    RV_out = []

### Extract the important elements from fxcor_stellar.txt
### And save those into a table "RV_out.dat"
extracted_list = extract_elements(fxcor_stellar,[0,1,2,3,4,7,8,12,13])

RV_out = RV_out + extracted_list

extract_output = open(file_path_reduced + "RV_out.dat","w")
extract_output.write("#object image reference HJD aperture stellar_height stellar_fwhm vhelio vhelio_verr \n")
functions.write_table(RV_out,extract_output)
extract_output.close()
 
#############################################################################
### Perform averaging of RV standards from RV_out.dat to estimate real RV ###
#############################################################################

### Open the image header for more info
hdulist = pyfits.open(file_path_reduced + "normspec_"+file_name)
object_name = hdulist[0].header['OBJECT']
exptime = hdulist[0].header['EXPTIME']
hdulist.close()

### Find number of RV standards
os.system("python find_rv.py "+file_path+" "+file_name)
nstandards = open(file_path_temp + "RV_Standard_list").read()
Code example #13
File: spectype_main.py Project: georgezhou/hsfu23
    def find_shift(input1,input2,i1,i2,shift_range):
        if abs(i2 - i1) < 300:
            i1 = i1 - 150
            i2 = i2 + 150

        if i1 < 0:
            i1 = 0
            i2 = 300
        if i2 > len(input1):
            i2 = len(input1)
            i1 = len(input1) - 300
            
        ### Use xcorr
        currdir = os.getcwd()
        os.chdir(file_path_reduced)
        os.system("rm "+file_path_reduced+"shift_spec*")
        input1_cropped = input1[i1:i2]/median(input1[i1:i2])
        input2_cropped = input2[i1:i2]/median(input1[i1:i2])
        wave_axis = arange(1,len(input1_cropped)+1)
        
        shift_spec1 = open(file_path_reduced+"shift_spec1.txt","w")
        functions.write_table(transpose([wave_axis,input1_cropped]),shift_spec1)
        shift_spec1.close()

        shift_spec2 = open(file_path_reduced+"shift_spec2.txt","w")
        functions.write_table(transpose([wave_axis,input2_cropped]),shift_spec2)
        shift_spec2.close()
        
        iraf.rspectext(
            input = file_path_reduced+"shift_spec1.txt",\
            output = file_path_reduced+"shift_spec1.fits",\
            title = "shift_spec1",\
            flux = 0,\
            dtype = "interp",\
            crval1 = "",\
            cdelt1 = "",\
            fd1 = "",\
            fd2 = "")

        iraf.rspectext(
            input = file_path_reduced+"shift_spec2.txt",\
            output = file_path_reduced+"shift_spec2.fits",\
            title = "shift_spec2",\
            flux = 0,\
            dtype = "interp",\
            crval1 = "",\
            cdelt1 = "",\
            fd1 = "",\
            fd2 = "")

        time.sleep(0.5) 
            
        ### Find shift
        os.system("rm apshift*")

        ### Make sure keywpars is set to its defaults
        iraf.unlearn(iraf.keywpars)

        cuton = len(input1_cropped)/25.
        cutoff = len(input1_cropped)/2.5

        iraf.filtpars.setParam("f_type","welch",check=1,exact=1)
        iraf.filtpars.setParam("cuton",cuton,check=1,exact=1)
        iraf.filtpars.setParam("cutoff",cutoff,check=1,exact=1)

        run_fxcor(file_path_reduced+"shift_spec1.fits",file_path_reduced+"shift_spec2.fits","*","apshift",0,10,"gaussian","INDEF",0)
        vel_shift = functions.read_ascii("apshift.txt")
        vel_shift = functions.read_table(vel_shift)
        vel_shift = vel_shift[0][6]
        if vel_shift == "INDEF":
            vel_shift = 0.0
        if abs(vel_shift) > shift_range:
            vel_shift = 0.0

        print "best pixel shift of ",vel_shift

        os.system("rm apshift*")
        os.system("rm "+file_path_reduced+"shift_spec*")
        os.chdir(currdir)
        
        #if i1 < shift_range:
        #    i1 = shift_range
        #if i2 > len(input1)-shift_range:
        #    i2 = len(input1)-shift_range

        # shift_rms = []
        # shift_list = []
        # for shift in range(-1*shift_range,shift_range+1):
        #     input1_cropped = input1[i1+shift:i2+shift]
        #     input2_cropped = input2[i1:i2]

        #     diff = input1_cropped/median(input1_cropped) * input2_cropped/median(input2_cropped)
        #     rms = sum(diff)
        #     #rms = sqrt(sum(diff**2) /float(len(diff)))
        #     shift_rms.append(rms)
        #     shift_list.append(shift)
            
        # for i in range(len(shift_rms)):
        #     if shift_rms[i] == max(shift_rms):
        #         break
        
        # print "Applying a shift of ",shift_list[i]
        # plt.clf()
        # plt.plot(input1[i1+shift_list[i]:i2+shift_list[i]]/median(input1[i1+shift_list[i]:i2+shift_list[i]]),"b-")
        # plt.plot(input1[i1:i2]/median(input1[i1:i2]),"r-")
        # plt.plot(input2[i1:i2]/median(input2[i1:i2]),"g-")
        # plt.show()

        # return shift_list[i]
        return int(round(vel_shift,0))
Code example #14
File: spectype_main.py Project: georgezhou/hsfu23
def calculate_spectral_params(teff_ini,logg_ini,feh_ini):

    def find_shift(input1,input2,i1,i2,shift_range):
        if abs(i2 - i1) < 300:
            i1 = i1 - 150
            i2 = i2 + 150

        if i1 < 0:
            i1 = 0
            i2 = 300
        if i2 > len(input1):
            i2 = len(input1)
            i1 = len(input1) - 300
            
        ### Use xcorr
        currdir = os.getcwd()
        os.chdir(file_path_reduced)
        os.system("rm "+file_path_reduced+"shift_spec*")
        input1_cropped = input1[i1:i2]/median(input1[i1:i2])
        input2_cropped = input2[i1:i2]/median(input1[i1:i2])
        wave_axis = arange(1,len(input1_cropped)+1)
        
        shift_spec1 = open(file_path_reduced+"shift_spec1.txt","w")
        functions.write_table(transpose([wave_axis,input1_cropped]),shift_spec1)
        shift_spec1.close()

        shift_spec2 = open(file_path_reduced+"shift_spec2.txt","w")
        functions.write_table(transpose([wave_axis,input2_cropped]),shift_spec2)
        shift_spec2.close()
        
        iraf.rspectext(
            input = file_path_reduced+"shift_spec1.txt",\
            output = file_path_reduced+"shift_spec1.fits",\
            title = "shift_spec1",\
            flux = 0,\
            dtype = "interp",\
            crval1 = "",\
            cdelt1 = "",\
            fd1 = "",\
            fd2 = "")

        iraf.rspectext(
            input = file_path_reduced+"shift_spec2.txt",\
            output = file_path_reduced+"shift_spec2.fits",\
            title = "shift_spec2",\
            flux = 0,\
            dtype = "interp",\
            crval1 = "",\
            cdelt1 = "",\
            fd1 = "",\
            fd2 = "")

        time.sleep(0.5) 
            
        ### Find shift
        os.system("rm apshift*")

        ### Make sure keywpars is set to its defaults
        iraf.unlearn(iraf.keywpars)

        cuton = len(input1_cropped)/25.
        cutoff = len(input1_cropped)/2.5

        iraf.filtpars.setParam("f_type","welch",check=1,exact=1)
        iraf.filtpars.setParam("cuton",cuton,check=1,exact=1)
        iraf.filtpars.setParam("cutoff",cutoff,check=1,exact=1)

        run_fxcor(file_path_reduced+"shift_spec1.fits",file_path_reduced+"shift_spec2.fits","*","apshift",0,10,"gaussian","INDEF",0)
        vel_shift = functions.read_ascii("apshift.txt")
        vel_shift = functions.read_table(vel_shift)
        vel_shift = vel_shift[0][6]
        if vel_shift == "INDEF":
            vel_shift = 0.0
        if abs(vel_shift) > shift_range:
            vel_shift = 0.0

        print "best pixel shift of ",vel_shift

        os.system("rm apshift*")
        os.system("rm "+file_path_reduced+"shift_spec*")
        os.chdir(currdir)
        
        #if i1 < shift_range:
        #    i1 = shift_range
        #if i2 > len(input1)-shift_range:
        #    i2 = len(input1)-shift_range

        # shift_rms = []
        # shift_list = []
        # for shift in range(-1*shift_range,shift_range+1):
        #     input1_cropped = input1[i1+shift:i2+shift]
        #     input2_cropped = input2[i1:i2]

        #     diff = input1_cropped/median(input1_cropped) * input2_cropped/median(input2_cropped)
        #     rms = sum(diff)
        #     #rms = sqrt(sum(diff**2) /float(len(diff)))
        #     shift_rms.append(rms)
        #     shift_list.append(shift)
            
        # for i in range(len(shift_rms)):
        #     if shift_rms[i] == max(shift_rms):
        #         break
        
        # print "Applying a shift of ",shift_list[i]
        # plt.clf()
        # plt.plot(input1[i1+shift_list[i]:i2+shift_list[i]]/median(input1[i1+shift_list[i]:i2+shift_list[i]]),"b-")
        # plt.plot(input1[i1:i2]/median(input1[i1:i2]),"r-")
        # plt.plot(input2[i1:i2]/median(input2[i1:i2]),"g-")
        # plt.show()

        # return shift_list[i]
        return int(round(vel_shift,0))

    def loop_input_spectrum(input_wave,input_flux,folder,teff_space,logg_space,feh_space,w1,w2,perform_normalise,shift_range,fix_feh):

        i1 = w1 - min(input_wave)
        i2 = w2 - min(input_wave)

        shift = 0

        if shift_range > 0:

            i = 0
            for teff in teff_space:
                for logg in logg_space:
                    for feh in feh_space:
                        if teff == teff_ini and logg == logg_ini and feh == feh_ini:
                            if folder == model_path_flux:
                                template_flux = spec_database[i]
                            if folder == model_path_norm:
                                template_flux = normspec_database[i]
                            
                            try:
                                shift = find_shift(input_flux,template_flux,i1,i2,shift_range)
                            except iraf.IrafError:
                                print "IRAF fxcor stupid error, setting shift to 0"
                                shift = 0
                            break
                        else:
                            i = i+1

        i = 0
        data = []
        for teff in teff_space:
            for logg in logg_space:
                for feh in feh_space:

                    if fix_feh:
                        feh_0_index = int((0 - feh)/0.5)
                    else:
                        feh_0_index = 0

                    if folder == model_path_flux:
                        template_flux = spec_database[i+feh_0_index]
                    if folder == model_path_norm:
                        template_flux = normspec_database[i+feh_0_index]

                    input_wave_cropped = input_wave[i1+shift:i2+shift]
                    input_flux_cropped = input_flux[i1+shift:i2+shift]
                    template_flux_cropped = template_flux[i1:i2]


                    sigma = 10.0

                    if perform_normalise:
                        diff_flux = input_flux_cropped/median(input_flux_cropped) - template_flux_cropped/median(template_flux_cropped)

                    else:
                        diff_flux = input_flux_cropped - template_flux_cropped

                    diff_flux = clip(diff_flux,median(diff_flux) - sigma*std(diff_flux),median(diff_flux)+sigma*std(diff_flux))

                    rms = sqrt(sum(diff_flux**2) /float(len(input_wave_cropped)))

                    # shift_rms = []
                    # shift_range = 0
                    # for shift in range(-1*shift_range,shift_range+1):
                    #     input_wave_cropped = input_wave[i1:i2]
                    #     input_flux_cropped = input_flux[i1:i2]
                    #     template_flux_cropped = template_flux[i1+shift:i2+shift]

                    #     sigma = 5.0

                    #     if perform_normalise:
                    #         diff_flux = input_flux_cropped/median(input_flux_cropped) - template_flux_cropped/median(template_flux_cropped)

                    #     else:
                    #         diff_flux = input_flux_cropped - template_flux_cropped

                    #     diff_flux = clip(diff_flux,median(diff_flux) - sigma*std(diff_flux),median(diff_flux)+sigma*std(diff_flux))

                    #     rms = sqrt(sum(diff_flux**2) /float(len(input_wave_cropped)))
                    #     shift_rms.append(rms)

                    # rms = min(shift_rms)

                    ### Weight against feh
                    feh_index = (feh - min(feh_space))/0.5
                    logg_index = (logg - min(logg_space))/0.5
                    rms = rms * feh_weights[int(feh_index)] * logg_weights[int(logg_index)]

                    # plt.clf()
                    # plt.plot(input_wave_cropped,input_flux_cropped/median(input_flux_cropped))
                    # plt.plot(input_wave_cropped,template_flux_cropped/median(template_flux_cropped))
                    # plt.plot(input_wave_cropped,diff_flux)
                    # plt.show()
                    # #sys.exit()

                    # print rms
                    data.append(rms)
                    i = i+1
        return data

    def plot_spectrum(rms_data,input_spectrum):

        print "Plotting ",input_spectrum

        rms_data = functions.read_ascii(rms_data)
        rms_data = functions.read_table(rms_data)
        rms_data = transpose(rms_data)

        ### Find min
        for i in range(len(rms_data[0])):
            if rms_data[3][i] == min(rms_data[3]):
                teff_min = rms_data[0][i]
                logg_min = rms_data[1][i]
                feh_min = rms_data[2][i]
                break

        print teff_min,logg_min,feh_min,min(rms_data[3])

        teff_list = []
        logg_list = []
        rms_list = []

        for i in range(len(rms_data[0])):
            if rms_data[2][i] == feh_min:
                teff_list.append(rms_data[0][i])
                logg_list.append(rms_data[1][i])
                rms_list.append(rms_data[3][i])

        ### Create 2D space
        teff_space = arange(min(teff_list),max(teff_list)+250,250)
        logg_space = arange(min(logg_list),max(logg_list)+0.5,0.5)

        rms_space = zeros([len(teff_space),len(logg_space)])

        for i in range(len(rms_list)):
            x_index = int((teff_list[i] - min(teff_list)) / 250.)
            y_index = int((logg_list[i] - min(logg_list)) / 0.5)
            rms_space[x_index,y_index] = rms_list[i]


        ### Crop 2D space to perform gaussian fit for min
        teff_space_cropped,logg_space_cropped,rms_space_cropped=spectype_functions.chop_array(teff_space,logg_space,transpose(rms_space),teff_min,logg_min,250,0.5)
        rms_space_cropped = -1*(rms_space_cropped - rms_space_cropped.max())
        print rms_space_cropped
        try:
            gauss_fit = spectype_functions.fitgaussian(rms_space_cropped)
            teff_min_fit = min(teff_space_cropped) + gauss_fit[2] * 250
            logg_min_fit = min(logg_space_cropped) + gauss_fit[1] * 0.5
        except TypeError:
            print "Bad gaussian fit, using abs min"
            teff_min_fit = teff_min
            logg_min_fit = logg_min

        if teff_min_fit < 3500:
            teff_min_fit = 3500
        if teff_min_fit > 9000:
            teff_min_fit = 9000

        if logg_min_fit < 0.0:
            logg_min_fit = 0.0
        if logg_min_fit > 5.0:
            logg_min_fit = 5.0

        teff_min = int(spectype_functions.round_value(teff_min_fit,250.))
        logg_min = spectype_functions.round_value(logg_min_fit,0.5)

        print teff_min,logg_min

        ### Plot teff_logg space
        plt.figure(figsize=(7,5))
        plt.subplot(211)

        plt.title(object_name+" "+file_name+" "+str(int(round(teff_min_fit,0)))+" "+str(round(logg_min_fit,1))+" "+str(feh_min)+" \n RMS="+str(round(min(rms_data[3]),4)))

        v_min = min(rms_list)
        v_max = min(rms_list)+((max(rms_list)-min(rms_list))/3.)
        #v_max = max(rms_list)
        rms_space = clip(rms_space,v_min,v_max)

        cm = matplotlib.cm.get_cmap('jet')
        sc = plt.contourf(teff_space,logg_space,transpose(rms_space),100,cmap=cm)

        #sc = plt.scatter(teff_list, logg_list, c=rms_list, vmin=min(rms_list), vmax=(max(rms_list)-min(rms_list))/3+min(rms_list), s=150, cmap=cm,edgecolor="w")
        cbar = plt.colorbar(sc)
        cbar.ax.set_ylabel("RMS")

        plt.scatter(teff_min_fit,logg_min_fit,color="r",s=70,marker="+")

        spectype_functions.plot_isochrones(program_dir,"r-",1)
        plt.xlim(max(teff_list),min(teff_list))
        plt.ylim(max(logg_list),min(logg_list))
        #plt.xlim(max(teff_list)+250,min(teff_list)-250)
        #plt.ylim(max(logg_list)+.25,min(logg_list)-0.25)

        plt.xlabel("Teff (K)")
        plt.ylabel("Logg")

        ### Plot spectrum
        plt.subplot(212)
        data_spectrum = functions.read_ascii(input_spectrum)
        data_spectrum = functions.read_table(data_spectrum)
        data_spectrum = transpose(array(data_spectrum))
        data_spectrum = spectype_functions.normalise(data_spectrum,flux_normalise_w1,flux_normalise_w2)

        template_spectrum = "template_" + str(int(teff_min)) + "_" + str(logg_min) + "_" + str(feh_min)+".dat"
        template_spectrum = functions.read_ascii(model_path_flux+template_spectrum)
        template_spectrum = functions.read_table(template_spectrum)
        template_spectrum = transpose(array(template_spectrum))
        template_spectrum = spectype_functions.normalise(template_spectrum,flux_normalise_w1,flux_normalise_w2)
    
        data_wave = data_spectrum[0]
        data_flux = data_spectrum[1]
        template_wave = template_spectrum[0]
        template_flux = template_spectrum[1]

        c = 3.0 * 10**5

        data_wave = data_wave / ((vel_shift / c) + 1)

        data_interp = interpolate.splrep(data_wave,data_flux,s=0)
        data_flux = interpolate.splev(master_flux_wave,data_interp,der=0)

        template_interp = interpolate.splrep(template_wave,template_flux,s=0)
        template_flux = interpolate.splev(master_flux_wave,template_interp,der=0)


        plt.plot(master_flux_wave,data_flux,"b-",label="data")
        plt.plot(master_flux_wave,template_flux,"g-",label="template")
        plt.xlim(3600,5800)
        ylim_range = max(template_flux)-min(template_flux)
        plt.ylim(min(template_flux)-ylim_range*0.2,max(template_flux)+ylim_range*0.2)

        plt.legend(loc="lower right",ncol=2)

        plt.xlabel("Wavelength (A)")
        plt.ylabel("Normalised flux")

        os.system("rm "+file_path_reduced+"spectype_plots/"+object_name+"_"+file_name+".pdf")
        plt.savefig(file_path_reduced+"spectype_plots/"+object_name+"_"+file_name+".pdf")
        #plt.show()

        return teff_min_fit,logg_min_fit,feh_min

    def find_min_index(input_list):
        for i in range(len(input_list)):
            if input_list[i] == min(input_list):
                return i

    #################
    ### Make axes ###
    #################

    teff_space = []
    logg_space = []
    feh_space = []

    teff_min = teff_ini - 750
    teff_max = teff_ini + 750

    if teff_min < 3500:
        teff_min = int(3500)

    if teff_max > 8000:
        teff_max = int(8000)

    ### Create grid
    teff_i = teff_min
    while teff_i <= teff_max:
        teff_space.append(teff_i)
        teff_i = teff_i + 250

    logg_i = 0.0
    while logg_i <= 5.0:
        logg_space.append(logg_i)
        logg_i = logg_i + 0.5

    feh_i = -2.5
    while feh_i <= 0.5:
        feh_space.append(feh_i)
        feh_i = feh_i + 0.5

    teff_space = array(teff_space)
    logg_space = array(logg_space)
    feh_space = array(feh_space)

    ######################
    ### Define regions ###
    ######################

    print "Using specific regions for spectral typing"
    ### Check the temp and define which logg sensitive regions to use
    if teff_ini > 4750 and teff_ini < 5750:
    #if teff_ini > 4750 and teff_ini < 6250:
        #logg_regions = [[5140,5235]]
        logg_regions = [[5100,5400]]
    if teff_ini <= 4750 and teff_ini > 4250:
        logg_regions = [[4100,4200],[5100,5400]]
    if teff_ini <= 4250:
        logg_regions = [[4720,4970]]
        #logg_regions = [[3900,4000],[4100,4200],[4300,4400],[4500,4600],[4600,4700],[4700,4900],[4720,4810],[5100,5400]]
    if teff_ini >= 5750 and teff_ini < 6250:
        #logg_regions = [[3850,4500]]
        logg_regions = [[3670,3715],[3900,4000],[5100,5400]]
    if teff_ini >= 6250:
        logg_regions = [[3670,3715],[3900,4000]]

    ### logg_regions = [[4500,5800]]
    if teff_ini <= 4000:
        feh_regions = [[4800,5000],[5000,5100],[5200,5500],[4500,5700]]
    if teff_ini <= 4750 and teff_ini > 4000:
        feh_regions = [[4450,4500],[4800,5000],[5000,5100],[5200,5500]]
    if teff_ini <= 5500 and teff_ini > 4750:
        feh_regions = [[3900,4000],[4450,4500],[5100,5400]]
    if teff_ini > 5500:
        feh_regions = [[3900,4100],[4280,4320],[5100,5200]]

    ### Define the regions used in flux spectra matching
    # if teff_ini >= 5750:
    #     teff_regions = [[4835,4885],[4315,4365],[4075,4125],[3800,3900]]
    # if teff_ini < 5750:
    #     teff_regions = [[3900,5700]]

    teff_regions = [[3800,5700]]

    feh_weights = ones(len(feh_space))
    logg_weights = ones(len(logg_space))

    ##########################
    ### Start the analysis ###
    ##########################

    ### Change directory to reduced/
    os.chdir(file_path_reduced) #Change to ../reduced/ dir

    ### Load in spectra
    norm_spectrum = functions.read_ascii("norm_" + file_name + ".dat")
    norm_spectrum = functions.read_table(norm_spectrum)
    norm_spectrum = transpose(array(norm_spectrum))

    flux_spectrum = functions.read_ascii("fluxcal_" + file_name + ".dat")
    flux_spectrum = functions.read_table(flux_spectrum)
    flux_spectrum = transpose(array(flux_spectrum))
    flux_spectrum = spectype_functions.normalise(flux_spectrum,flux_normalise_w1,flux_normalise_w2)

    ### Find shift
    os.system("rm apshift*")

    ### Make sure keywpars is set to its defaults
    iraf.unlearn(iraf.keywpars)

    iraf.filtpars.setParam("f_type","square",check=1,exact=1)
    iraf.filtpars.setParam("cuton",50,check=1,exact=1)
    iraf.filtpars.setParam("cutoff",2000,check=1,exact=1)

    os.system("cp "+model_path_norm+"fits_files/"+ini_template_spectrum+".fits .")

    run_fxcor("norm_"+ file_name,ini_template_spectrum+".fits","*","apshift",0,15,"gaussian","INDEF","INDEF")
    vel_shift = functions.read_ascii("apshift.txt")
    vel_shift = functions.read_table(vel_shift)
    vel_shift = vel_shift[0][11]
    if vel_shift == "INDEF":
        vel_shift = 0.0
    if abs(vel_shift) > 1000.:
        vel_shift = 0.0

    os.system("rm "+ini_template_spectrum+".fits")
    os.system("rm apshift*")
    print "Velocity shift of ",vel_shift

    ### Correct shift
    flux_wave = flux_spectrum[0]
    flux_flux = flux_spectrum[1]
    norm_wave = norm_spectrum[0]
    norm_flux = norm_spectrum[1]

    c = 3.0 * 10**5

    norm_wave = norm_wave / ((vel_shift / c) + 1)

    ### Interpolate onto a 1A grid
    master_flux_wave = arange(3500,5900,1.)
    master_norm_wave = arange(4550,5900,1.)

    flux_interp = interpolate.splrep(flux_wave,flux_flux,s=0)
    flux_flux = interpolate.splev(master_flux_wave,flux_interp,der=0)
    norm_interp = interpolate.splrep(norm_wave,norm_flux,s=0)
    norm_flux = interpolate.splev(master_norm_wave,norm_interp,der=0)

    ######################################
    ### Start Chi^2 array calculations ###
    ######################################

    os.system("mkdir spectype_plots")
    #os.system("rm spectype_plots/" + object_name + "*.pdf")

    #################################
    ### Read database into memory ###
    #################################
    print "Reading database into memory"

    spec_database = []
    normspec_database = []

    teff_table_list = []
    logg_table_list = []
    feh_table_list = []

    for teff in teff_space:
        print teff
        for logg in logg_space:
            for feh in feh_space:
                ### Read in spec
                template_spectrum = "template_" + str(teff) + "_" + str(logg) + "_" + str(feh)+".dat"
                #template_spectrum = functions.read_ascii(model_path_flux+template_spectrum)
                #template_spectrum = functions.read_table(template_spectrum)

                template_spectrum = loadtxt(model_path_flux+template_spectrum,comments='#')
                template_spectrum = transpose(array(template_spectrum))
                template_spectrum = spectype_functions.normalise(template_spectrum,flux_normalise_w1,flux_normalise_w2)

                template_interp = interpolate.splrep(template_spectrum[0],template_spectrum[1],s=0)
                template_flux = interpolate.splev(master_flux_wave,template_interp,der=0)

                spec_database.append(template_flux)

                ### Read in normspec
                template_spectrum = "template_" + str(teff) + "_" + str(logg) + "_" + str(feh)+".dat"
                #template_spectrum = functions.read_ascii(model_path_norm+template_spectrum)
                #template_spectrum = functions.read_table(template_spectrum)
                template_spectrum = loadtxt(model_path_norm+template_spectrum,comments='#')
                template_spectrum = transpose(array(template_spectrum))

                template_interp = interpolate.splrep(template_spectrum[0],template_spectrum[1],s=0)
                template_flux = interpolate.splev(master_norm_wave,template_interp,der=0)

                normspec_database.append(template_flux)

                teff_table_list.append(teff)
                logg_table_list.append(logg)
                feh_table_list.append(feh)


    master_weight = min(array(loop_input_spectrum(master_flux_wave,flux_flux,model_path_flux,teff_space,logg_space,feh_space,3900,5700,False,0,False)))
    master_weight = master_weight /2.
    print "Reference rms of ",master_weight 

    ### >= F and <= M stars often get fitted with lower Fe/H, so we first fix Fe/H to
    ### get an idea of logg, before going through the usual routine.
    if teff_ini >= 5000 and ini_fix_feh == "true":

        ##################################################################
        ### Perform logg-weighted spectrum calculations - WITH FEH = 0 ###
        ##################################################################
        print "Calculating [Fe/H] fixed logg weighted rms"

        rms_logg = zeros(len(teff_table_list))

        logg_regions_min = []
        logg_regions_weights = []

        count = 1
        for region in logg_regions:
            w1 = region[0]
            w2 = region[1]
            if w1 <= 4000:
                to_normalise = False
            else:
                to_normalise = True
            if w1 < 3900 or master_weight > 1.0:
                shift_range = 0
            else:
                shift_range = 10

            if w1 < 4600:
                folder = model_path_flux
                input_wave = master_flux_wave
                input_spec = flux_flux
            else:
                folder = model_path_norm
                input_wave = master_norm_wave
                input_spec = norm_flux

            rms = array(loop_input_spectrum(input_wave,input_spec,folder,teff_space,logg_space,feh_space,w1,w2,to_normalise,shift_range,True))
            i = find_min_index(rms)
            print teff_table_list[i],logg_table_list[i],feh_table_list[i],rms[i]
            #rms_logg = rms_logg + rms
            logg_regions_min.append(logg_table_list[i])
            logg_regions_weights.append(min(rms))
            rms_logg = rms_logg + (master_weight / min(rms))*rms
            count = count + 1

        rms_logg = rms_logg / float(count)
        i = find_min_index(rms_logg)
        print teff_table_list[i],logg_table_list[i],0.0,rms_logg[i]
        #logg_min = logg_table_list[i]

        print logg_regions_min

        # if teff_ini >= 5750 and teff_ini < 6250: ### There should be no giants here, so choose the largest logg measurement 
        #     logg_min = max(logg_regions_min)
        # else:
        #     logg_min = 1/array(logg_regions_weights)
        #     logg_min = logg_min / sum(logg_min)
        #     logg_min = sum(logg_min * array(logg_regions_min))
        #     #logg_min = average(logg_regions_min)

        logg_min = 1/array(logg_regions_weights)
        logg_min = logg_min / sum(logg_min)
        logg_min = sum(logg_min * array(logg_regions_min))
        #logg_min = average(logg_regions_min)

        print "[Fe/H] Fixed Logg sensitive regions identify best fit of ",logg_min
        logg_min_index = (logg_min - min(logg_space))/0.5

        for i in range(len(logg_weights)):
            logg_weights[i] = (abs(i - logg_min_index)/5.)+1.

        # rms_logg_table = transpose([teff_table_list,logg_table_list,feh_table_list,rms_logg])

        # output_rms_table = open("temp_rms_table","w")
        # functions.write_table(rms_logg_table,output_rms_table)
        # output_rms_table.close()

        # plot_spectrum("temp_rms_table","fluxcal_"+file_name+".dat")

        # sys.exit()


    ######################################################################
    ### Perform logg-weighted spectrum calculations - WITH FEH VARYING ###
    ######################################################################
    print "Calculating logg weighted rms"

    rms_logg = zeros(len(teff_table_list))

    logg_regions_min = []
    logg_regions_weights = []

    count = 1
    for region in logg_regions:
        w1 = region[0]
        w2 = region[1]
        if w1 <= 4000:
            to_normalise = False
        else:
            to_normalise = True
        if w1 < 3900 or master_weight > 1.0:
            shift_range = 0
        else:
            shift_range = 10

        if w1 < 4600:
            folder = model_path_flux
            input_wave = master_flux_wave
            input_spec = flux_flux
        else:
            folder = model_path_norm
            input_wave = master_norm_wave
            input_spec = norm_flux
            
        rms = array(loop_input_spectrum(input_wave,input_spec,folder,teff_space,logg_space,feh_space,w1,w2,to_normalise,shift_range,False))
        i = find_min_index(rms)
        print teff_table_list[i],logg_table_list[i],feh_table_list[i],rms[i]
        #rms_logg = rms_logg + rms
        logg_regions_min.append(logg_table_list[i])
        logg_regions_weights.append(min(rms))
        rms_logg = rms_logg + (master_weight / min(rms))*rms
        count = count + 1

    rms_logg = rms_logg / float(count)
    i = find_min_index(rms_logg)
    print teff_table_list[i],logg_table_list[i],feh_table_list[i],rms_logg[i]
    #logg_min = logg_table_list[i]

    logg_min = 1/array(logg_regions_weights)
    logg_min = logg_min / sum(logg_min)
    logg_min = sum(logg_min * array(logg_regions_min))
    print "Logg sensitive regions identify best fit of ",logg_min
    logg_min_index = int((logg_min - min(logg_space))/0.5)
    
    logg_weights = ones(len(logg_weights))
    for i in range(len(logg_weights)):
        logg_weights[i] = (abs(i - logg_min_index)/10.)+1.

    # rms_logg_table = transpose([teff_table_list,logg_table_list,feh_table_list,rms_logg])

    # output_rms_table = open("temp_rms_table","w")
    # functions.write_table(rms_logg_table,output_rms_table)
    # output_rms_table.close()

    # plot_spectrum("temp_rms_table","fluxcal_"+file_name+".dat")

    # sys.exit()

    ##################################################
    ### Perform feh-weighted spectrum calculations ###
    ##################################################
    print "Calculating feh weighted rms"

    rms_feh = zeros(len(teff_table_list))

    count = 1
    for region in feh_regions:
        w1 = region[0]
        w2 = region[1]
        if w1 < 4600:
            folder = model_path_flux
            input_wave = master_flux_wave
            input_spec = flux_flux
        else:
            folder = model_path_norm
            input_wave = master_norm_wave
            input_spec = norm_flux

        if w1 < 3900 or master_weight > 1.0:
            shift_range = 0
        else:
            shift_range = 10

        rms = array(loop_input_spectrum(input_wave,input_spec,folder,teff_space,logg_space,feh_space,w1,w2,True,shift_range,False))
        i = find_min_index(rms)
        print teff_table_list[i],logg_table_list[i],feh_table_list[i],rms[i]
        #rms_feh = rms_feh + rms
        rms_feh = rms_feh + (master_weight / min(rms))*rms
        count = count + 1

    rms_feh = rms_feh / float(count)
    i = find_min_index(rms_feh)
    print teff_table_list[i],logg_table_list[i],feh_table_list[i],rms_feh[i]

    feh_min = feh_table_list[i]
    feh_min_index = int((feh_min - min(feh_space))/0.5)

    for i in range(len(feh_weights)):
        feh_weights[i] = (abs(i - feh_min_index)/10.)+1.

    # rms_feh_table = transpose([teff_table_list,logg_table_list,feh_table_list,rms_feh])

    # output_rms_table = open("temp_rms_table","w")
    # functions.write_table(rms_feh_table,output_rms_table)
    # output_rms_table.close()

    # plot_spectrum("temp_rms_table","fluxcal_"+file_name+".dat")

    ###############################################################################
    ### Calculate the corresponding chisq array for a range of reddening values ###
    ###############################################################################
    print "Calculating teff weighted rms"

    ### Change directory to reduced/deredden/
    os.chdir(file_path_reduced + "deredden/") #Change to ../reduced/deredden dir

    ### Determine and create reddening loop
    os.system("ls *" + string.split(file_name,".")[0] + "*.dat > reddening_list")
    reddening_list = functions.read_ascii("reddening_list")
    os.system("rm reddening_list")
    os.system("rm ./*rms_table")

    reddening_values = []
    reddening_rms = []

    for flux_spectrum in reddening_list:

        reddening = float(string.split(flux_spectrum,"_")[1])
        reddening_values.append(reddening)

        print "Trying reddening value E(B-V) = " + str(reddening)

        ### Load in flux spectrum of different reddening
        flux_spectrum = functions.read_ascii(flux_spectrum)
        flux_spectrum = functions.read_table(flux_spectrum)
        flux_spectrum = transpose(array(flux_spectrum))
        flux_spectrum = spectype_functions.normalise(flux_spectrum,flux_normalise_w1,flux_normalise_w2)
        ### Correct shift
        flux_wave = flux_spectrum[0]
        flux_flux = flux_spectrum[1]

        c = 3.0 * 10**5

        flux_wave = flux_wave / ((vel_shift / c) + 1)

        ### Interpolate onto a 1A grid
        flux_interp = interpolate.splrep(flux_wave,flux_flux,s=0)
        flux_flux = interpolate.splev(master_flux_wave,flux_interp,der=0)

        rms_teff = zeros(len(teff_table_list))
        
        if master_weight > 1.0:
            shift_range = 0.0
        else:
            shift_range = 10.0

        if teff_ini > 6000:
            region_normalise = True
        if teff_ini <= 6000:
            region_normalise = False
            
        count = 1
        for region in teff_regions:

            rms = array(loop_input_spectrum(master_flux_wave,flux_flux,model_path_flux,teff_space,logg_space,feh_space,region[0],region[1],region_normalise,shift_range,False))
            rms = 0.6*rms + 0.2*rms_logg + 0.2*rms_feh
            #rms_teff = rms_teff + rms
            

            rms_teff = rms_teff + rms
            count = count+1

        rms_teff = rms_teff / float(count)

        reddening_weight_factor = 0.5

        ### Weight against reddening
        if reddening >= 0.0 and reddening <= max_reddening:
            reddening_weight = 1.0
        if reddening > max_reddening:
            reddening_weight = (reddening - max_reddening)/reddening_weight_factor + 1
        if reddening < 0.0:
            reddening_weight = abs(reddening)/reddening_weight_factor + 1

        #reddening_weight = 1.0

        rms_teff = rms_teff * reddening_weight

        i = find_min_index(rms_teff)
        print teff_table_list[i],logg_table_list[i],feh_table_list[i],rms_teff[i]

        rms_red_table = transpose([teff_table_list,logg_table_list,feh_table_list,rms_teff])

        output_rms_table = open(str(reddening)+"_rms_table","w")
        functions.write_table(rms_red_table,output_rms_table)
        output_rms_table.close()

        reddening_rms.append(min(rms_teff))

    ###########################
    ### Find best reddening ###
    ###########################

    for i in range(len(reddening_values)):
        if reddening_rms[i] == min(reddening_rms):
            best_reddening = reddening_values[i]
            break

    print "Best reddeining value of E(B-V):",best_reddening
    teff_min,logg_min,feh_min=plot_spectrum(str(best_reddening)+"_rms_table",reddening_list[i])
    print object_name,teff_min,logg_min,feh_min

    os.system("rm ./*_rms_table")
    os.chdir(file_path_reduced)

    return teff_min,logg_min,feh_min
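
Examples #13 and #14 move observed wavelengths to the rest frame with the non-relativistic Doppler relation wave_rest = wave_obs / (1 + v/c), with c in km/s to match the fxcor velocity. A worked check with hypothetical numbers:

c = 3.0 * 10**5              # km/s, as in the code above
vel_shift = 30.0             # km/s, a hypothetical fxcor result
wave_obs = 5000.0            # Angstroms
print wave_obs / ((vel_shift / c) + 1)   # ~4999.5 A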
Code example #15
    plt.show()


    plt.clf()
    plt.figure(figsize=(17.25,6))
    #plt.contourf(log10(spatial_image-spatial_image.min()+1))
    plt.imshow(log10(spatial_image-spatial_image.min()+1),interpolation="nearest")
    for i in range(len(coo)):
        #plt.scatter(coo[i][0]-1,coo[i][1]-1)
        chrtext = i+ord("A")
        plt.text(coo[i][0]-1,coo[i][1]-1,chr(chrtext),horizontalalignment="center",verticalalignment="center")


    os.system("rm " + file_path_reduced + "spatial_" + file_name + ".pdf")
    plt.savefig(file_path_reduced + "spatial_" + file_name + ".pdf")


    o = open("master_coo","w")
    functions.write_table(coo,o)
    o.close()

    print "Coordinate file updated"
    os.system("cat master_coo")



else:
    plt.savefig(file_path_reduced + "spatial_" + file_name + ".pdf")

Code example #16
File: find_errors.py Project: chelseah/LCFIT
        x,fit = fitsmooth(x,y,5)
    else:
        x,fit = fitsmooth(x,y,10)

    xmin,xmax = find_edges(x,fit)
    plt.axvline(x=xmin)
    plt.axvline(x=xmax)

    xmid = x[0]
    for j in range(len(x)):
        if fit[j] == max(fit):
            xmid = x[j]
            break

    #xmid = best_param[i]

    plt.axvline(x=xmid)

    print axes_tested[i],best_param[i],xmid,xmin-xmid,xmax-xmid
    #best_param.append(xmid)

    plt.plot(x,fit,"k-")    
    plt.xlabel(axes_tested[i])
    plt.show()

best_param = [axes_tested,best_param]
f = open("best_param_mcmc","w")
functions.write_table(best_param,f)
f.close()

Code example #17
File: analyse_ccf.py Project: georgezhou/hsfu23
### log format
### file_name object_name height width BIS BIS_err

new_entry = [file_name, candidate, height, width, BIS, BIS_err]

### check if log file exists
if os.path.exists(file_path_reduced + "ccf_log.txt"):
    ccf_log = functions.read_ascii(file_path_reduced + "ccf_log.txt")
    ccf_log = functions.read_table(ccf_log)

    new_log = []
    entry_exists = False

    for exposure in ccf_log:
        if exposure[0] == file_name:
            new_log.append(new_entry)
            entry_exists = True
        else:
            new_log.append(exposure)
    if not entry_exists:
        new_log.append(new_entry)

else:
    new_log = [new_entry]

ccf_out = open(file_path_reduced + "ccf_log.txt","w")
ccf_out.write("#file_name candidate height width BIS BIS_err \n")
functions.write_table(new_log,ccf_out)
ccf_out.close()
Code example #18
File: make_report.py Project: georgezhou/hsfu23
        os.system("mkdir outputs/ccf_plots/"+candidate_name)

        os.system("cp " + file_path + "reduced/ccf_pdfs/ccf_" + file_name + ".pdf outputs/ccf_plots/"+candidate_name+"/"+str(i[3])+".pdf")
        os.system("cp /priv/mulga2/george/wifes/candidates/RV_plots/"+candidate_name+".pdf outputs/RV_plots/")

    os.chdir("outputs/RV_plots/")
    os.system("gs -q -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -sOutputFile=RV_ANU23.pdf *.pdf")

    os.chdir(program_dir)
    os.chdir("outputs/")
    os.system("cp RV_plots/RV_ANU23.pdf .")
    os.system("tar -cvzpf ccf_plots.tar.gz ccf_plots/")

    RV_dat = open("temp.dat","w")
    RV_dat.write("#utdate imagename #candidate hjd RV(km/s) RVerr(km/s) ccf_height bis(km/s) bis_err(km/s)\n")
    functions.write_table(exposure_info,RV_dat)
    RV_dat.close()
    os.system("cat temp.dat | awk '{print $3,$4,$5,$6,$7,$8,$9}' > RV_ANU23.dat")
    os.system("rm temp.dat")

################ Deal with spectral typing observations
os.chdir(program_dir)

query_entry = "select SPECutdate,SPECobject,SPECteff,SPEClogg"
query_entry = query_entry + " from SPEC where SPECutdate >= \""+start_date+"\" and SPECutdate <=\""+end_date+"\" and SPECtype = \"ST\""
query_entry = query_entry + " and SPECobject like \"HATS%\""
#query_entry = query_entry + " and (SPECobject like \"KJ%\" or SPECobject like \"KS%\")"
#query_entry = query_entry + " and SPECobject like \"KJ%\""
#query_entry = query_entry + " and (SPECobject like \"KJ%\" or SPECobject like \"HATS%\")"
#query_entry = query_entry + " and (SPECobject like \"ASTEPC%\" or SPECobject like \"asud%\")"
#query_entry = query_entry + " and SPECobject like \"EP20%\""
Code example #19
File: detect_stars.py Project: georgezhou/hsfu23
def detect_stars(input_image,se_path,no_stars):
    
    image_data = pyfits.getdata(input_image)

    oned = []
    for i in range(len(image_data)):
        for j in range(len(image_data[i])):
            oned.append(image_data[i,j])

    med = median(oned)

    run_daofind(input_image,"master_coo",1)

    os.system("rm coords.cat")
    SEcommand = se_path+" "+input_image+" -c default.sex"
    SEcommand = SEcommand+" -BACK_TYPE MANUAL -BACK_VALUE "+str(med)
    os.system(SEcommand)

    os.system("cat coords.cat")

    SE_coo = functions.read_ascii("coords.cat")
    SE_coo = functions.read_table(SE_coo)

    temp = []
    for i in SE_coo:
        if i[0] < 36.:
            temp.append(i)
    SE_coo = temp

    phot_coo = functions.read_ascii("master_coo")
    phot_coo = functions.read_table(phot_coo)

    temp = []
    for i in phot_coo:
        if i[0] < 36.:
            temp.append(i)
    phot_coo  = temp

    ### Check if the objects in phot_coo exists also in SE_coo
    confirmed_objects = []

    for phot_obj in phot_coo:
        phot_obj_x = phot_obj[0]
        phot_obj_y = phot_obj[1]
        for SE_obj in SE_coo:
            SE_obj_x = SE_obj[0]
            SE_obj_y = SE_obj[1]
            SE_obj_fwhm = SE_obj[4]

            SE_obj_fwhm = 6
            
            # if SE_obj_fwhm < 5. or SE_obj_fwhm > 10.0:
            #     SE_obj_fwhm = 5

            if abs(phot_obj_x-SE_obj_x)<SE_obj_fwhm and abs(phot_obj_y-SE_obj_y)<SE_obj_fwhm:
                confirmed_objects.append(phot_obj)
                break

    if len(confirmed_objects) == 0 and len(SE_coo) > 0:
        print "NO matching objects, using SE coordinates"
        confirmed_objects = []
        for SE_obj in SE_coo:
            confirmed_objects.append([SE_obj[0],SE_obj[1],"INDEF",0.5,0.5,0.5,SE_obj[0]])

    elif len(confirmed_objects) == 0 and len(phot_coo) > 0:
        print "NO matching objects, using iraf.phot coordinates"
        confirmed_objects = phot_coo

    elif len(confirmed_objects)==0 and len(phot_coo)==0 and len(SE_coo)==0:
        print "NO objects detected!!!"
        sys.exit()


    ### Order by brightness


    flux_list = []
    for i in confirmed_objects:

        aperture = circle(i[1]-1,i[0]-1,2.0,image_data)
        flux = aperture*image_data - aperture*med
        flux = flux.sum()
        flux_list.append(flux)

    flux_list_sorted = sorted(flux_list,reverse=True)

    print "flux",flux_list_sorted

    temp = []
    for i in range(len(flux_list_sorted)):
        j = flux_list.index(flux_list_sorted[i])
        temp.append(confirmed_objects[j])
        
    confirmed_objects = temp
            
    ### remove unwanted objects
    if no_stars > 0:
        confirmed_objects = confirmed_objects[:no_stars]

    master_out = open("master_coo","w")
    functions.write_table(confirmed_objects,master_out)
    master_out.close()
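
Note that the brightness ordering above recovers each object's position with flux_list.index(...), which returns the same index for tied flux values, duplicating one object and dropping another. A tie-safe equivalent, as a sketch:

### Sort indices by flux, brightest first; stable under ties
order = sorted(range(len(flux_list)), key=lambda k: flux_list[k], reverse=True)
confirmed_objects = [confirmed_objects[k] for k in order]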
Code example #20
File: RV_rms_hist.py Project: georgezhou/hsfu23
candidates_RMS = []
for candidate in good_candidates:
    candidate_name = candidate[0]
    hsmso_q = "select SPECrv from SPEC where SPECobject=\"" + candidate_name +"\" and SPECtype=\"RV\""
    candidate_RV = mysql_query.query_hsmso(hsmso_q)

    if len(candidate_RV) > 2:
        candidates_RMS.append(rms(candidate_RV))
        if rms(candidate_RV) > 5.0:
            print candidate_name

plt.hist(candidates_RMS,bins=rms_ax,histtype="step",hatch="/",color="b")
print max(candidates_RMS),median(candidates_RMS),min(candidates_RMS)

rms_out = open("candidate_rms.txt","w")
functions.write_table([candidates_RMS],rms_out)
rms_out.close()

#########################
### Plot RV Standards ###
#########################
query = "select distinct SPECobject from SPEC where SPECtype = \"RV\" and SPECcomment=\"RV Standard\""

RV_standards = mysql_query.query_hsmso(query)

RV_standards_RMS = []
for i in RV_standards:
    object_name = i[0]
    hsmso_q = "select SPECrv from SPEC where SPECobject=\"" + object_name +"\" and SPECtype=\"RV\""
    object_RV = mysql_query.query_hsmso(hsmso_q)
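
The rms helper applied to the query results is not shown; for RV scatter it is presumably the root-mean-square deviation of the velocities about their mean. A sketch, assuming each query row holds a single SPECrv value:

from numpy import array, mean, sqrt

def rms(rv_rows):
    ### Hypothetical helper: scatter of the velocities about their mean
    v = array([row[0] for row in rv_rows], dtype=float)
    return sqrt(mean((v - mean(v))**2))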
Code example #21
File: trace.py Project: georgezhou/hsfu23
def extract(image,trace,sigma,bk):

    if sigma < 1.:
        sigma = 1.

    #sigma = sigma*2
    #print sigma,"sigma"

    original_file = pyfits.open(image)
    original_header = iraf.imheader(images=image,longheader=1,Stdout=1)

    image = pyfits.getdata(image)
    image = transpose(image)

    line = []
    peak = []

    step = 200

    i = 0
    while i < len(image):
        crop = image[i]

        bk_i = []
        signal_i = []

        ### uncomment this to see where the extraction is taking place

        # plt.plot(crop)
        # plt.axvline(x=trace[i])
        # plt.show()

        x = arange(0,len(crop)+0.1-1,0.1)
        y = interpolate.interp1d(arange(0,len(crop)),crop)
        y = y(x)
        weights = []

        for j in range(len(x)):
            for n in bk:
                if x[j] > n[0] and x[j] < n[1]:
                    bk_i.append(y[j])
            
            if x[j] > trace[i] - sigma and x[j] < trace[i] + sigma:
                signal_i.append(y[j])

                weights.append(gaussian([1,trace[i],sigma,0],x[j]))

        if len(bk_i) > 0:
            bk_i = median(bk_i)
        else:
            bk_i = 0

        if len(signal_i) > 0:
            pass
        else:
            signal_i = [0]
            weights = [1]

        signal_i = array(signal_i)
        weights = array(weights)

        line.append(i+1)
        peak.append(sum(signal_i)-bk_i*len(signal_i))
        #peak.append(sum(signal_i*weights)-sum(bk_i*weights))

        i = i + 1    

    ### Uncomment to plot spectrum
    # plt.clf()
    # plt.plot(line,peak)
    # plt.show()

    data = transpose(array([line,peak]))
    f = open("spectrum.flux","w")
    functions.write_table(data,f)
    f.close()

    os.system("rm spectrum.fits")
    iraf.rspectext(
        input = "spectrum.flux",\
        output = "spectrum.fits",\
        title = "",\
        flux = 0,\
        dtype = "interp")
    
    update_fitsheader(original_header,original_file,"spectrum.fits")
    original_file.close()
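
The gaussian used for the extraction weights takes a four-element parameter vector; judging from the call gaussian([1, trace[i], sigma, 0], x[j]), it is an (amplitude, center, width, offset) profile evaluated at a point. A plausible form, as an assumption:

from numpy import exp

def gaussian(p, x):
    ### Hypothetical profile matching gaussian([amp, center, sigma, offset], x)
    amp, center, sigma, offset = p
    return amp * exp(-0.5 * ((x - center) / sigma)**2) + offset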
Code example #22
File: run_candidate.py Project: georgezhou/hsfu23
            #     if cc[0] == candidate and cc[4] == 1:
            #         ap1_RVs.append(cc[7])
            #         hjd = cc[3]
            #     if cc[0] == candidate and cc[4] == 2:
            #         ap2_RVs.append(cc[7])

            # if len(ap1_RVs) > 0:
            #     ap1_RVs = median(ap1_RVs)
            # else:
            #     ap1_RVs = "INDEF"

            # if len(ap2_RVs) > 0:
            #     ap2_RVs = median(ap2_RVs)
            # else:
            #     ap2_RVs = "INDEF"

            # aperture_RV.append([hjd,file_name,stellar_apertures[0][0],ap1_RVs,stellar_apertures[1][0],ap2_RVs])
        
os.chdir("test_candidate")

file_out = open("RV.dat","w")
functions.write_table(RV_dat_list,file_out)
file_out.close()

#file_out = open("aperture_RV.dat","w")
#functions.write_table(aperture_RV,file_out)
#file_out.close()        
        
print RV_dat_list
#print aperture_RV