def ReadProdData(prod_fname):
    """Read a muon production comparison CSV file.

    The file is expected to have two header lines followed by comma
    separated rows of (effective depth, total production, spallation
    production, muon production).

    Returns four parallel lists (EffDepth, TotalProd, SpallationProd,
    MuonProd), or None if the file does not exist.
    """
    # See if the parameter file exists before trying to read it
    if os.access(prod_fname, os.F_OK):
        EffDepth = []
        TotalProd = []
        SpallationProd = []
        MuonProd = []

        # "with" guarantees the handle is closed (the original left it open)
        with open(prod_fname, 'r') as this_file:
            lines = this_file.readlines()

        # get rid of the two header lines
        lines.pop(0)
        lines.pop(0)

        # now parse the data rows
        for line in lines:
            this_line = LSDOst.RemoveEscapeCharacters(line)
            split_line = this_line.split(',')
            EffDepth.append(float(split_line[0]))
            TotalProd.append(float(split_line[1]))
            SpallationProd.append(float(split_line[2]))
            MuonProd.append(float(split_line[3]))

        return EffDepth, TotalProd, SpallationProd, MuonProd
def get_production_data(Directory):
    """Read "Muon_production_comparison.csv" from the given directory.

    The first three header lines of the file are skipped; every
    remaining line is parsed as comma separated floats.

    Returns a list of rows, each row a list of floats.
    """
    #Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)
    FileName = Dirname + "Muon_production_comparison.csv"

    # "with" closes the file handle (the original leaked it)
    with open(FileName, 'r') as this_file:
        lines = this_file.readlines()

    # get rid of the header
    del lines[0:3]

    values = []
    # now go through the lines, extracting the data
    for line in lines:
        # get rid of the control characters, then split on commas
        this_line = LSDost.RemoveEscapeCharacters(line)
        split_line = this_line.split(',')
        values.append([float(element) for element in split_line])

    return values
def ReadCRONUSData(self, FileName):
    """Read erosion-rate results from a CRONUS output CSV into self.CRNData.

    Columns 4, 5, 6 and 3 of each row are collected; if their length
    matches the existing 'erate_g_percm2_peryr' data the lists are
    stored in self.CRNData and self.HaveCRONUSData is set to True.
    """
    # See if the parameter file exists
    if os.access(FileName, os.F_OK):
        CRONUS_eff_erate = []
        CRONUS_erate = []
        CRONUS_ext_uncert = []
        CRONUS_in_uncert = []

        # "with" guarantees the handle is closed (original left it open)
        with open(FileName, 'r') as this_file:
            lines = this_file.readlines()

        # now get the data into the lists
        for line in lines:
            this_line = LSDOst.RemoveEscapeCharacters(line)
            split_line = this_line.split(',')
            CRONUS_eff_erate.append(float(split_line[4]))
            CRONUS_erate.append(float(split_line[5]))
            CRONUS_ext_uncert.append(float(split_line[6]))
            CRONUS_in_uncert.append(float(split_line[3]))

        # check to see if the number of data elements is the same
        ERate = self.CRNData['erate_g_percm2_peryr']
        if (len(ERate) != len(CRONUS_eff_erate)):
            # typo fixed: "doens't" -> "doesn't"
            print("CRONUS data doesn't seem to be same length as other data")
        else:
            self.CRNData['CRONUS_erate_g_percm2_peryr'] = CRONUS_eff_erate
            self.CRNData['CRONUS_erate_mm_peryr'] = CRONUS_erate
            # NOTE(review): the 'int' key receives the *ext* list and
            # vice versa below -- looks swapped, but preserved exactly
            # as in the original; confirm against the CRONUS column layout.
            self.CRNData['CRONUS_int_uncert_mm_peryr'] = CRONUS_ext_uncert
            self.CRNData['CRONUS_ext_uncert_mm_peryr'] = CRONUS_in_uncert
            self.CRNData['CRONUS_total_uncert'] = np.add(CRONUS_in_uncert,
                                                         CRONUS_ext_uncert)
            # scale by 2.65/10000 -- presumably a density-based unit
            # conversion (rho = 2.65 g/cm^3); confirm units with callers
            self.CRNData['CRONUS_total_uncert'] = np.multiply(
                self.CRNData['CRONUS_total_uncert'], (2.65 / 10000))
            self.HaveCRONUSData = True
    else:
        print("Can't open CRONUS file.")
def CollatenewCRONUScomparisonCRNData():
    """Compare CAIRN newCRONUS-emulator denudation rates with CRONUScalc.

    Reads *_CRNResults.csv files (CAIRN rates) and *Comparison.csv files
    (CRONUScalc rates) for the Palumbo and Dethier datasets, computes the
    relative error between the two methods, and saves a scatter plot to
    "CAIRN_newCRONUS_emulator.svg" in the data directory.
    """
    #Directory = "C://code//git_papers//crn_basinwide_paper//Compiled_results//Brauch_vs_newCRONUS//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//Brauch_vs_newCRONUS//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)

    Fileformat = 'svg'
    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.1

    P_erate_newCRONUS = []
    D_erate_newCRONUS = []
    P_newCRONUS = []
    D_newCRONUS = []

    # loop through the directory, getting the CAIRN results
    for fname in glob(Dirname + "*_CRNResults.csv"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)

        # Now get the prefix of the file
        splitfname = NoDirFname.split('_CRNResults.csv')
        fprefix = splitfname[0]
        print("File prefix is: " + fprefix)

        # now read in the data
        thisCRNData = CRNR.CRNResults(fname)

        # Only get the newCRONUS Dethier and Palumbo data
        if "Dethier" in fprefix:
            if "newCRONUS" in fprefix:
                D_erate_newCRONUS = \
                    thisCRNData.GetErosionRates_mmperkyr_rho2650()
        elif "Palumbo" in fprefix:
            if "newCRONUS" in fprefix:
                P_erate_newCRONUS = \
                    thisCRNData.GetErosionRates_mmperkyr_rho2650()

    # Convert the data to arrays (to calculate errors)
    P_nC_CAIRN = np.asarray(P_erate_newCRONUS)
    D_nC_CAIRN = np.asarray(D_erate_newCRONUS)

    # diagnostic listing of every csv file found
    print("Entering second glob loop")
    for fname in glob(Dirname + "*.csv"):
        # NOTE(review): this string literal was garbled in the original
        # source; reconstructed from the matching print statement in the
        # third glob loop below.
        print("I found comparison data! Name is " + fname)

    # Now get the CRONUScalc data
    print("Entering third glob loop")
    for fname in glob(Dirname + "*Comparison.csv*"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)
        print("I found comparison data! Name is " + NoDirFname)

        # Now get the prefix of the file
        splitfname = NoDirFname.split('_newCRONUSCAIRNComparison.csv')
        fprefix = splitfname[0]
        print("File prefix is: " + fprefix)
        print("I am woking with the dataset: " + fprefix)

        # See if the parameter file exists
        if os.access(fname, os.F_OK):
            # "with" guarantees the handle is closed
            with open(fname, 'r') as this_file:
                lines = this_file.readlines()

            # get rid of the first line
            lines.pop(0)

            # create the lists for populating with data
            CAIRN_erate = []
            CAIRN_uncert = []
            Report_erate = []
            Report_uncert = []
            newCRONUS_erate = []
            newCRONUS_uncert = []

            # now get the data into the lists
            for line in lines:
                this_line = LSDost.RemoveEscapeCharacters(line)
                split_line = this_line.split(',')
                CAIRN_erate.append(float(split_line[16]))
                CAIRN_uncert.append(float(split_line[17]))
                Report_erate.append(float(split_line[20]))
                Report_uncert.append(float(split_line[21]))
                newCRONUS_erate.append(float(split_line[22]))
                newCRONUS_uncert.append(float(split_line[23]))

            # assign the erosion rates to the matching dataset
            if fprefix == "Dethier":
                D_newCRONUS = newCRONUS_erate
            elif fprefix == "Palumbo":
                P_newCRONUS = newCRONUS_erate

    P_nC = np.asarray(P_newCRONUS)
    D_nC = np.asarray(D_newCRONUS)

    # relative error between CAIRN and the CRONUScalc values
    Perr = np.divide(np.subtract(P_nC_CAIRN, P_nC), P_nC)
    Derr = np.divide(np.subtract(D_nC_CAIRN, D_nC), D_nC)

    print("The errors are: ")
    print(Perr)
    print(Derr)
    print(P_nC)
    print(P_nC_CAIRN)

    # now make plots based on these data
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig1 = plt.figure(1, facecolor='white', figsize=(3.26, 3.26))

    # generate a 120,90 grid.
    gs = GridSpec(100, 75, bottom=0.13, left=0.13, right=0.95, top=0.95)
    ax = Fig1.add_subplot(gs[10:100, 5:95])

    ax.plot(P_nC, Perr, "o", markersize=5, color="maroon",
            label="Palumbo et al., 2010", markeredgewidth=1)
    ax.plot(D_nC, Derr, "ro", markersize=5, color="lawngreen",
            label="Dethier et al., 2014", markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    ax.tick_params(axis='both', width=1)

    plt.xlabel('$\epsilon_{CRCalc}$ (g cm$^{-2}$ yr$^{-1}$)',
               fontsize=axis_size)
    plt.ylabel(
        '($\epsilon_{CAIRN-CRCalc}$-$\epsilon_{CRCalc}$)/$\epsilon_{CRCalc}$',
        fontsize=axis_size)

    # single legend call; the original called plt.legend() twice and the
    # second call simply replaced the first
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints=1, loc=4, ncol=1,
               borderaxespad=1.)

    plt.savefig(Dirname + "CAIRN_newCRONUS_emulator.svg",
                format=Fileformat)
def CRBERCvsReported():
    """Plot CAIRN topographic shielding against reported shielding.

    Reads every *_CompareResults.csv file in the data directory, groups
    the rows by publication, and saves a scatter plot to
    "CAIRN_vs_Reported_toposhield.svg".
    """
    Dirname = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    #Dirname = "C://basin_data//CosmoPaper//Results//Compiled//"
    #Fname = "Palumbo*_CompareResults.csv"

    SiteNames = []
    SiteDicts = []
    PaperNames = []
    PaperColours = []

    # loop through the directory, getting the results from the data
    for fname in glob(Dirname + "*_CompareResults.csv"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)

        # Now get the prefix of the file
        splitfname = NoDirFname.split('_CompareResults.csv')
        fprefix = splitfname[0]
        print("File prefix is: " + fprefix)

        # add to the sitenames list and create a holding dictionary
        SiteNames.append(fprefix)
        thisdict = {}

        # map each file prefix onto its citation and plot colour
        if fprefix == "Bierman":
            PaperNames.append("Bierman et al., 2005")
            PaperColours.append("blue")
        elif fprefix == "Dethier":
            PaperNames.append("Dethier et al., 2014")
            PaperColours.append("lawngreen")
        elif fprefix == "Kirchner":
            PaperNames.append("Kirchner et al., 2001")
            PaperColours.append("yellow")
        elif fprefix == "Munack":
            PaperNames.append("Munack et al., 2014")
            PaperColours.append("orange")
        elif fprefix == "Scherler":
            PaperNames.append("Scherler et al., 2014")
            PaperColours.append("black")
        elif fprefix == "Safran":
            PaperNames.append("Safran et al., 2005")
            PaperColours.append("powderblue")
        elif fprefix == "Palumbo":
            PaperNames.append("Palumbo et al., 2010")
            PaperColours.append("maroon")

        # See if the parameter file exists
        if os.access(fname, os.F_OK):
            # "with" guarantees the handle is closed
            with open(fname, 'r') as this_file:
                lines = this_file.readlines()

            # get rid of the first line
            lines.pop(0)

            # create the lists for populating with data
            BERC_erate = []
            BERC_uncert = []
            Report_erate = []
            Report_uncert = []
            BERC_shield = []
            Report_shield = []

            # now get the data into the dict
            for line in lines:
                this_line = LSDost.RemoveEscapeCharacters(line)
                split_line = this_line.split(',')
                BERC_erate.append(float(split_line[4]))
                BERC_uncert.append(float(split_line[5]))
                Report_erate.append(float(split_line[6]))
                Report_uncert.append(float(split_line[7]))
                BERC_shield.append(float(split_line[2]))
                Report_shield.append(float(split_line[3]))

            thisdict["BERC_erate"] = BERC_erate
            thisdict["BERC_uncert"] = BERC_uncert
            thisdict["Report_erate"] = Report_erate
            thisdict["Report_uncert"] = Report_uncert
            thisdict["BERC_shield"] = BERC_shield
            thisdict["Report_shield"] = Report_shield
            SiteDicts.append(thisdict)

    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.02
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.05

    # now make plots based on these data
    Fig1 = plt.figure(1, facecolor='white', figsize=(3.26, 3.26))

    # generate a 120,90 grid.
    gs = GridSpec(100, 75, bottom=0.13, left=0.13, right=0.98, top=0.85)
    ax = Fig1.add_subplot(gs[10:100, 5:75])

    for index, thisdict in enumerate(SiteDicts):
        plt.plot(thisdict['BERC_shield'], thisdict['Report_shield'],
                 "o", markersize=4, color=PaperColours[index],
                 label=PaperNames[index], markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)

    # pad the ticks away from the axis so the corners don't overlap
    ax.tick_params(axis='both', width=1, pad=2)
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)

    plt.xlabel('CAIRN topographic shielding', fontsize=axis_size)
    plt.ylabel('Reported topographic shielding', fontsize=axis_size)

    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints=1,
               bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2,
               mode="expand", borderaxespad=0.)

    Fileformat = "svg"
    plt.savefig(Dirname + "CAIRN_vs_Reported_toposhield.svg",
                format=Fileformat)
def UpdateRasterWithShielding(path, prefix, Sample_names, Snowshield_values):
    """Write a copy of <prefix>_CRNRasters.csv with snow-shielding depths.

    For each row of the input file the sample name (the final
    underscore-separated token of the DEM prefix in column 0) is looked
    up in Sample_names, and column 1 is replaced by an effective snow
    depth computed from the matching Snowshield_values entry. The
    updated rows are written to <prefix>_SS_CRNRasters.csv.
    """
    # first get the directory path into the correct format
    fmt_path = LSDost.ReformatSeperators(path)
    # add the trailing seperator
    fmt_path = LSDost.AppendSepToDirectoryPath(fmt_path)

    # input and output filenames
    fname = fmt_path + prefix + "_CRNRasters.csv"
    outfname = fmt_path + prefix + "_SS_CRNRasters.csv"

    print("The sample names are")
    print(Sample_names)
    print("The snow shield values are: ")
    print(Snowshield_values)

    new_lines = []

    # See if the parameter file exists
    if os.access(fname, os.F_OK):
        # "with" guarantees the input handle is closed
        with open(fname, 'r') as this_file:
            lines = this_file.readlines()

        # now get the list of DEM prefixes
        for line in lines:
            this_line = line.split(",")
            DEM_prefix = this_line[0]
            print("The DEM prefix is: " + DEM_prefix)

            # The sample name is the final underscore-separated token
            split_dem_prefix = DEM_prefix.split("_")
            sample_name = split_dem_prefix[-1]
            print("The sample name is: " + sample_name)

            # get the index of the sample name to reference the shielding
            # NOTE(review): raises ValueError if sample_name is absent
            # from Sample_names -- confirm inputs are always consistent
            i = Sample_names.index(sample_name)
            print("the index of the sample names is: " + str(i))

            # calculate the effective depth. The 160 is the attenuation
            # thickness in g/cm^2
            this_snow_depth = -160 * np.log(Snowshield_values[i])
            print("The shielding is: " + str(Snowshield_values[i]) +
                  " and eff_depth is: " + str(this_snow_depth))

            # update the snow effective depth and rebuild the line
            this_line[1] = str(this_snow_depth)
            new_lines.append(",".join(this_line))

    # write the updated lines; "with" guarantees the data is flushed and
    # the handle closed (the original never closed the output file)
    with open(outfname, 'w') as outfile:
        for line in new_lines:
            # you have to get rid of the control characters
            this_line = LSDost.RemoveEscapeCharacters(line)
            outfile.write("%s\n" % this_line)
def CRBERCvsReported_erates():
    """Plot CAIRN denudation rates against reported rates on log axes.

    Reads every *_ErateComparisonNew.csv file in the data directory,
    groups the rows by publication, plots the data with error bars
    against a 1:1 line, and saves the figure to
    "CAIRN_vs_Reported_erates.svg".
    """
    #Dirname = "C://basin_data//CosmoPaper//Results//Compiled//"
    Dirname = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    #Fname = "Palumbo*_CompareResults.csv"

    SiteNames = []
    SiteDicts = []
    PaperNames = []
    PaperColours = []

    # loop through the directory, getting the results from the data
    for fname in glob(Dirname + "*_ErateComparisonNew.csv"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)

        # Now get the prefix of the file
        splitfname = NoDirFname.split('_ErateComparisonNew.csv')
        fprefix = splitfname[0]
        print("File prefix is: " + fprefix)

        # add to the sitenames list and create a holding dictionary
        SiteNames.append(fprefix)
        thisdict = {}

        # map each file prefix onto its citation and plot colour
        if fprefix == "Bierman":
            PaperNames.append("Bierman et al., 2005")
            PaperColours.append("blue")
        elif fprefix == "Dethier":
            PaperNames.append("Dethier et al., 2014")
            PaperColours.append("lawngreen")
        elif fprefix == "Kirchner":
            PaperNames.append("Kirchner et al., 2001")
            PaperColours.append("yellow")
        elif fprefix == "Munack":
            PaperNames.append("Munack et al., 2014")
            PaperColours.append("orange")
        elif fprefix == "Scherler":
            PaperNames.append("Scherler et al., 2014")
            PaperColours.append("black")
        elif fprefix == "Safran":
            PaperNames.append("Safran et al., 2005")
            PaperColours.append("powderblue")
        elif fprefix == "Palumbo":
            PaperNames.append("Palumbo et al., 2010")
            PaperColours.append("maroon")

        # typo fixed: "woking" -> "working"
        print("I am working with the dataset: " + fprefix)

        # starting bounds for the 1:1 line, widened below by the data
        # NOTE(review): these are reset for every file, so the final
        # bounds only reflect the last dataset -- preserved from the
        # original; confirm whether they should be hoisted above the loop
        min_erate = 5
        max_erate = 5000

        # See if the parameter file exists
        if os.access(fname, os.F_OK):
            # "with" guarantees the handle is closed
            with open(fname, 'r') as this_file:
                lines = this_file.readlines()

            # get rid of the first line
            lines.pop(0)

            # create the lists for populating with data
            BERC_erate = []
            BERC_uncert = []
            Report_erate = []
            Report_uncert = []

            # now get the data into the dict
            for line in lines:
                this_line = LSDost.RemoveEscapeCharacters(line)
                split_line = this_line.split(',')
                BERC_erate.append(float(split_line[16]))
                BERC_uncert.append(float(split_line[17]))
                Report_erate.append(float(split_line[20]))
                Report_uncert.append(float(split_line[21]))

                # track the maximum and minimum erosion rates
                if (float(split_line[16]) > max_erate):
                    max_erate = float(split_line[16])
                if (float(split_line[20]) > max_erate):
                    max_erate = float(split_line[20])
                if (float(split_line[16]) < min_erate):
                    min_erate = float(split_line[16])
                if (float(split_line[20]) < min_erate):
                    min_erate = float(split_line[20])

            thisdict["BERC_erate"] = BERC_erate
            thisdict["BERC_uncert"] = BERC_uncert
            thisdict["Report_erate"] = Report_erate
            thisdict["Report_uncert"] = Report_uncert
            SiteDicts.append(thisdict)

    label_size = 10
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.1

    # make a line between the maximum and minimum erate
    one_line = np.linspace(min_erate, max_erate, 1000)

    # now make plots based on these data
    Fig1 = plt.figure(1, facecolor='white', figsize=(4.72, 4.72))

    # generate a 120,90 grid.
    gs = GridSpec(100, 75, bottom=0.13, left=0.13, right=0.98, top=0.85)
    ax = Fig1.add_subplot(gs[10:100, 5:75])

    # plot the 1:1 line
    plt.plot(one_line, one_line, 'k-', linewidth=2)

    for index, thisdict in enumerate(SiteDicts):
        plt.errorbar(thisdict['BERC_erate'], thisdict['Report_erate'],
                     thisdict['BERC_uncert'], thisdict['Report_uncert'],
                     fmt='.', color=PaperColours[index], linewidth=1.5)
        plt.plot(thisdict['BERC_erate'], thisdict['Report_erate'], "o",
                 markersize=4, color=PaperColours[index],
                 label=PaperNames[index], markeredgewidth=1.)

    ax.annotate("1:1 line", xy=(3000, 3000), xycoords='data',
                xytext=(200, 4000), textcoords='data',
                arrowprops=dict(arrowstyle="fancy",
                                #linestyle="dashed",
                                color="0.5",
                                shrinkB=5,
                                connectionstyle="arc3,rad=-0.3",
                                ),
                )

    ax.spines['top'].set_linewidth(1.5)
    ax.spines['left'].set_linewidth(1.5)
    ax.spines['right'].set_linewidth(1.5)
    ax.spines['bottom'].set_linewidth(1.5)

    # pad the ticks away from the axis so the corners don't overlap
    ax.tick_params(axis='both', width=1.5, pad=2)
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)

    # logarithmic axes
    ax.set_yscale('log')
    ax.set_xscale('log')

    plt.xlabel('CAIRN denudation rate (mm/kyr)', fontsize=axis_size)
    plt.ylabel('Reported denudation rate (mm/kyr)', fontsize=axis_size)

    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints=1,
               bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2,
               mode="expand", borderaxespad=0.)

    Fileformat = "svg"
    plt.savefig(Dirname + "CAIRN_vs_Reported_erates.svg",
                format=Fileformat)
def GetRequiredFilesFromFolder(DataDirectory):
    """Collect makefiles and the source files they reference.

    Scans DataDirectory for *.make files, extracts the .cpp filenames
    listed between the SOURCES and OBJECTS sections of each makefile,
    resolves "../" entries one directory level up (appending the
    matching .hpp instead, as those are objects), and returns the
    de-duplicated list of required file paths.
    """
    # Append a closing slash to the data directory if one is not there
    NewDataDirectory = LSDost.ReformatSeperators(DataDirectory)
    DataDirectory = LSDost.AppendSepToDirectoryPath(NewDataDirectory)

    # master list of required files
    required_files = []

    # find all the makefiles in the directory
    for FileName in glob(DataDirectory + "*.make"):
        # "with" closes the handle automatically
        with open(FileName, 'r') as f:
            lines = f.readlines()

        # the makefile itself must be copied too
        required_files.append(FileName)

        # filenames of the .cpp sources listed in this makefile
        cppfiles = []

        # flag is raised at the SOURCES line and lowered at OBJECTS, so
        # only .cpp entries between the two sections are collected
        start_flag = 0
        for line in lines:
            if "SOURCE" in line:
                start_flag = 1
            # If there is OBJECTS in the line, stop looking for ".cpp"
            if "OBJECTS" in line:
                start_flag = 0
            if start_flag == 1 and ".cpp" in line:
                # seperate the line using spaces and find the .cpp token
                split_line = line.split(' ')
                this_item = ""
                for item in split_line:
                    if ".cpp" in item:
                        this_item = item

                # get rid of the SOURCES= prefix and escape characters
                new_this_item = this_item.replace("SOURCES=", "")
                this_file = LSDost.RemoveEscapeCharacters(new_this_item)
                cppfiles.append(this_file)

        # append the directory; "../" entries live one level up and are
        # objects, so the matching .hpp is appended instead of the .cpp
        for filename in cppfiles:
            if "../" in filename:
                thisfile = filename.replace("../", "")
                thisdirectory = LSDost.RemoveDirectoryLevel(DataDirectory)
                fullfile = thisdirectory + thisfile
                required_files.append(fullfile.replace(".cpp", ".hpp"))
            else:
                required_files.append(DataDirectory + filename)

    # thin out the required files to remove duplicates
    return list(set(required_files))
def __init__(self, FileName):
    """Load a csv point-data file.

    The first line is treated as a comma separated header of variable
    names (lower-cased); the remaining lines are parsed into
    per-variable lists, type-converted via LSDOst.ParseListToType, and
    stored in self.PointData. Latitude and longitude are then copied
    into their own data members.
    """
    # This gets the filename without the .csv
    file_prefix = LSDOst.GetFilePrefix(FileName)
    self.FilePrefix = file_prefix
    print("The object file prefix is: " + self.FilePrefix)

    # See if the parameter file exists
    if os.access(FileName, os.F_OK):
        # "with" guarantees the handle is closed (original left it open)
        with open(FileName, 'r') as this_file:
            lines = this_file.readlines()

        # get rid of the control characters in the header line
        this_line = LSDOst.RemoveEscapeCharacters(lines[0])

        # Now get a list with the names of the parameters
        self.VariableList = []
        TestList = this_line.split(',')
        for name in TestList:
            this_name = LSDOst.RemoveEscapeCharacters(name)
            self.VariableList.append(this_name.lower())

        print("Variable list is: ")
        print(self.VariableList)

        # get rid of the names
        del lines[0]

        # a dict that contains a list for each variable name
        DataDict = {}
        TypeList = []
        for name in self.VariableList:
            DataDict[name] = []

        # now get the data into the dict (as cleaned strings for now)
        for line in lines:
            this_line = LSDOst.RemoveEscapeCharacters(line)
            split_line = this_line.split(',')
            for index, name in enumerate(self.VariableList):
                this_var = LSDOst.RemoveEscapeCharacters(split_line[index])
                DataDict[name].append(this_var)

        # now go back and convert each column to its correct type
        DataDictTyped = {}
        for name in self.VariableList:
            typed_list = LSDOst.ParseListToType(DataDict[name])
            DataDictTyped[name] = typed_list
            TypeList.append(type(typed_list[0]))

        self.PointData = DataDictTyped
        self.DataTypes = TypeList
    else:
        print("Uh oh I could not open that file")
        self.VariableList = []
        self.DataTypes = []
        self.PointData = {}

    # now make sure the data has latitude and longitude entries
    if "latitude" not in self.VariableList:
        print("Something has gone wrong, latitude is not in the variable list")
        print("Here is the variable list: ")
        print(self.VariableList)
    if "longitude" not in self.VariableList:
        print("Something has gone wrong, longitude is not in the variable list")
        print("Here is the variable list: ")
        print(self.VariableList)

    # Add the latitude and longitude to their own data members
    # NOTE(review): if latitude/longitude are missing this raises a
    # KeyError right after the warnings above -- preserved from the
    # original; confirm whether a graceful fallback is wanted.
    self.Latitude = self.PointData["latitude"]
    self.Longitude = self.PointData["longitude"]
def __init__(self, FileName):
    """Load a *_CRNResults.csv file into self.CRNData.

    Skips the three header lines, reads the next line as the comma
    separated list of variable names, and parses each subsequent row;
    the 'sample_name' and 'nuclide' columns are kept as strings,
    everything else is converted to float.
    """
    # This gets the filename without the .csv
    file_prefix = LSDOst.GetFilePrefix(FileName)
    print("Loading a file, the file prefix is: " + file_prefix)

    # strip the trailing '_CRNResults' (11 characters) from the prefix
    if file_prefix.endswith('_CRNResults'):
        self.FilePrefix = file_prefix[:-11]
    else:
        self.FilePrefix = 'NULL'
    print("The object file prefix is: " + self.FilePrefix)

    # See if the parameter file exists
    if os.access(FileName, os.F_OK):
        # "with" guarantees the handle is closed (original left it open)
        with open(FileName, 'r') as this_file:
            lines = this_file.readlines()

        # get rid of the header
        del lines[0:3]

        # the next line holds the names of the parameters
        this_line = LSDOst.RemoveEscapeCharacters(lines[0])
        self.VariableList = this_line.split(',')

        # get rid of the names
        del lines[0]

        # a dict that contains a list for each variable name
        DataDict = {}
        for name in self.VariableList:
            DataDict[name] = []

        # now get the data into the dict
        for line in lines:
            this_line = LSDOst.RemoveEscapeCharacters(line)
            split_line = this_line.split(',')
            for index, name in enumerate(self.VariableList):
                # sample names and nuclides stay strings, the rest floats
                if name == 'sample_name' or name == 'nuclide':
                    DataDict[name].append(split_line[index])
                else:
                    DataDict[name].append(float(split_line[index]))

        self.CRNData = DataDict
    else:
        print("Uh oh I could not open that file")
        self.VariableList = []
        self.CRNData = {}

    # lets other modules know whether CRONUS data has been loaded
    self.HaveCRONUSData = False