def BedPlotAutomator(Dirname):
    """Generate swath plots for every bed elevation and bed thickness raster.

    Finds all files ending in ``_BedElev.asc`` and ``_BedThick.asc`` in the
    given directory and runs ElevationSwaths on each.

    Args:
        Dirname (str): Path to the directory containing the ``*.asc`` rasters.
    """
    # axis = 1 tells the swath routine we want a profile perpendicular to shore
    axis = 1

    def _swaths_for_suffix(suffix):
        # Run ElevationSwaths on every raster whose name ends with `suffix`.
        # (The original had this loop duplicated verbatim for each suffix.)
        for fname in glob(Dirname + "*" + suffix):
            # first we need the filename without the path
            NoDirFname = LSDOst.GetFileNameNoPath(fname)
            print("fname is: " + NoDirFname)
            # the file prefix is everything before the suffix
            fprefix = NoDirFname.split(suffix)[0]
            ElevationSwaths(Dirname, NoDirFname, axis, fprefix)

    _swaths_for_suffix("_BedElev.asc")   # bed elevation rasters
    _swaths_for_suffix("_BedThick.asc")  # bed thickness rasters
def GetListOfRasters(path, prefix):
    """Read the list of DEM prefixes from a ``*_CRNRasters.csv`` file.

    Args:
        path (str): Directory holding the parameter files.
        prefix (str): Parameter-file prefix; the file read is
            ``<path><prefix>_CRNRasters.csv``.

    Returns:
        list of str: The first comma-separated entry of each line (the DEM
        prefixes); empty if the file does not exist.
    """
    # first get directory path into the correct format with a trailing separator
    fmt_path = LSDost.ReformatSeperators(path)
    fmt_path = LSDost.AppendSepToDirectoryPath(fmt_path)

    # now find the correct file
    fname = fmt_path + prefix + "_CRNRasters.csv"

    DEM_names = []
    # See if the parameter file exists
    if os.access(fname, os.F_OK):
        # `with` guarantees the handle is closed (the original leaked it)
        with open(fname, 'r') as this_file:
            lines = this_file.readlines()
        # the DEM prefix is the first comma-separated entry on each line
        for line in lines:
            DEM_names.append(line.split(",")[0])
    else:
        print("*_CRNRasters.csv file not found. Are you sure it is there and you have the correct path?")
    return DEM_names
def get_production_data(Directory):
    """Read muon production comparison data from ``Muon_production_comparison.csv``.

    Skips the three header lines, then parses every remaining line into a
    vector of floats.

    Args:
        Directory (str): Directory containing the CSV file.

    Returns:
        list of list of float: One float vector per data line.
    """
    #Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)
    FileName = Dirname + "Muon_production_comparison.csv"

    # `with` closes the handle (the original left the file open)
    with open(FileName, 'r') as this_file:
        lines = this_file.readlines()

    # get rid of the three header lines
    del lines[0:3]

    values = []
    # now go through the lines, extracting the data
    for line in lines:
        # get rid of the control characters before splitting
        this_line = LSDost.RemoveEscapeCharacters(line)
        values.append([float(element) for element in this_line.split(',')])
    return values
def GDALBatchConvert(DataDirectory, raster_format, target_format):
    """Prepare a batch GDAL conversion from one raster format to another.

    Creates a subdirectory named after the target format, then runs
    ``gdalinfo`` on every raster in DataDirectory that has the source
    format's extension.

    Args:
        DataDirectory (str): Directory holding the source rasters.
        raster_format (str): Source format: "ENVI", "EHdr" or "GTiff".
        target_format (str): Target format: "ENVI", "EHdr" or "GTiff".
    """
    NewDataDirectory = LSDost.ReformatSeperators(DataDirectory)
    DataDirectory = LSDost.AppendSepToDirectoryPath(NewDataDirectory)

    # Map each supported format to its file extension; replaces the two
    # duplicated if/elif chains of the original.
    # NOTE: the target GTiff extension is ".tiff" while the source is ".tif",
    # preserving the original script's behaviour.
    target_extensions = {"ENVI": ".bil", "EHdr": ".bil", "GTiff": ".tiff"}
    source_extensions = {"ENVI": ".bil", "EHdr": ".bil", "GTiff": ".tif"}

    # Check the target format ("selcted" typo in the message fixed)
    target_extension = target_extensions.get(target_format, "NULL")
    if target_extension == "NULL":
        print("You have not selected a valid raster format!")
        print("Options are ENVI, EHdr and GTiff")

    # now make a directory for the converted rasters
    if target_extension != "NULL":
        target_directory = DataDirectory + target_format
        if not os.access(target_directory, os.F_OK):
            print("Making path: ")
            os.mkdir(target_directory)
            print("I made a directory: " + target_directory)
        else:
            print("Path: " + target_directory + " already exists.")

    # Now check the source format
    raster_extension = source_extensions.get(raster_format, "NULL")
    if raster_extension == "NULL":
        print("You have not selected a valid raster format!")
        print("Options are ENVI, EHdr and GTiff")

    # find all the datasets of the source format
    print("The data directory is: " + DataDirectory)
    print("The raster extension is: " + raster_extension)
    if raster_extension != "NULL":
        for FileName in glob(DataDirectory + "*" + raster_extension):
            print("found file: " + FileName)
            subprocess.call(['gdalinfo', FileName])
def SwathPlot(path, filename, axis):
    """Plot a swath profile of a raster: mean line plus a +/- one standard
    deviation envelope, along the chosen axis.

    Args:
        path (str): Directory containing the raster.
        filename (str): Raster filename.
        axis (int): Swath axis; 0 plots against the x location vector,
            anything else against y.
    """
    # get the path to the raster file
    NewPath = LSDOst.AppendSepToDirectoryPath(path)
    FileName = NewPath + filename

    # get the swath statistics vectors (percentiles are computed but only the
    # std-deviation envelope is plotted below)
    means, medians, std_deviations, twentyfifth_percentile, seventyfifth_percentile = LSDMap_BM.SimpleSwath( path, filename, axis)

    print "Means shape is: "
    print means.shape

    # location vectors for the plot's horizontal axis
    x_vec, y_vec = LSDMap_IO.GetLocationVectors(FileName)

    print "X shape is: "
    print x_vec.shape
    print "Y shape is: "
    print y_vec.shape

    import matplotlib.pyplot as plt
    import matplotlib.lines as mpllines
    from mpl_toolkits.axes_grid1 import AxesGrid

    label_size = 20
    #title_size = 30
    axis_size = 28

    # Set up fonts for plots
    # NOTE(review): rcParams is not imported in this function -- presumably
    # provided at module level (e.g. from matplotlib); confirm
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size

    # make a figure, sized for a ppt slide
    fig = plt.figure(1, facecolor='white', figsize=(10, 7.5))

    gs = plt.GridSpec(100, 75, bottom=0.1, left=0.1, right=0.9, top=1.0)
    ax = fig.add_subplot(gs[10:100, 10:75])

    # the distance vector depends on which axis the swath collapses
    if axis == 0:
        dir_vec = x_vec
    else:
        dir_vec = y_vec

    # envelope: mean minus/plus one standard deviation
    min_sd = np.subtract(means, std_deviations)
    plus_sd = np.add(means, std_deviations)

    ax.plot(dir_vec, means, linewidth=2, color="red")
    #ax.fill_between(dir_vec, twentyfifth_percentile, seventyfifth_percentile, facecolor='green', alpha = 0.7, interpolate=True)
    ax.fill_between(dir_vec, min_sd, plus_sd, facecolor='blue', alpha=0.5, interpolate=True)

    # clamp the x range to the extent of the swath
    ax.set_xlim(dir_vec[0], dir_vec[-1])

    plt.show()
def ReadProdData(prod_fname):
    """Read a CRN production-rate CSV file.

    The file has two header lines, then four float columns: effective depth,
    total production, spallation production and muon production.

    Args:
        prod_fname (str): Full path to the production data file.

    Returns:
        tuple: (EffDepth, TotalProd, SpallationProd, MuonProd) as lists of
        float, or None if the file does not exist (the original returned
        None silently; a warning is now printed).
    """
    # See if the parameter file exists
    if not os.access(prod_fname, os.F_OK):
        print("ReadProdData: can't find file " + prod_fname)
        return None

    # `with` guarantees the handle is closed (the original leaked it)
    with open(prod_fname, 'r') as this_file:
        lines = this_file.readlines()

    EffDepth = []
    TotalProd = []
    SpallationProd = []
    MuonProd = []

    # skip the two header lines, then parse the four float columns
    for line in lines[2:]:
        split_line = LSDOst.RemoveEscapeCharacters(line).split(',')
        EffDepth.append(float(split_line[0]))
        TotalProd.append(float(split_line[1]))
        SpallationProd.append(float(split_line[2]))
        MuonProd.append(float(split_line[3]))

    return EffDepth, TotalProd, SpallationProd, MuonProd
def GetCRNData(path, prefix):
    """Read sample names and snow shielding values from a ``*_CRNData.csv`` file.

    Args:
        path (str): Directory holding the file.
        prefix (str): File prefix; the file read is
            ``<path><prefix>_CRNData.csv``.

    Returns:
        tuple: (Sample_names, SnowShield_values). Lines with fewer than 8
        comma-separated entries have no snow shielding column and get a
        default value of 1.
    """
    # first get directory path into the correct format with a trailing separator
    fmt_path = LSDost.ReformatSeperators(path)
    fmt_path = LSDost.AppendSepToDirectoryPath(fmt_path)

    # now find the correct file
    fname = fmt_path + prefix + "_CRNData.csv"

    Sample_names = []
    SnowShield_values = []

    # See if the parameter file exists
    if os.access(fname, os.F_OK):
        # `with` guarantees the handle is closed (the original leaked it)
        with open(fname, 'r') as this_file:
            lines = this_file.readlines()

        # get rid of the first line, since this has header information
        lines.pop(0)

        for line in lines:
            this_line = line.split(",")
            SampleName = this_line[0]
            print("This line is: ")
            print(this_line)

            # the snow shield value, if present, is the 8th entry
            if len(this_line) == 8:
                Sample_names.append(SampleName)
                SnowShield_values.append(float(this_line[7]))
            else:
                print("there is no snow shielding on this line")
                Sample_names.append(SampleName)
                SnowShield_values.append(1)
    else:
        # bug fix: the message previously referred to "_CRNRData.csv",
        # which is not the filename this function actually looks for
        print("*_CRNData.csv file not found. Are you sure it is there and you have the correct path?")

    return Sample_names, SnowShield_values
def TestListParsing():
    """Smoke-test LSDost.ParseListToType on int-like, mixed and numeric lists,
    printing the type of the first parsed element of each."""
    samples = (
        ("1", ['1', '1.1', '2']),
        ("2", ['1', '1a', '2']),
        ("3", ['1', '2', '3']),
    )
    for label, str_list in samples:
        parsed = LSDost.ParseListToType(str_list)
        print("Type of " + label + " is: ")
        print(type(parsed[0]))
def TestParsing():
    """Smoke-test LSDost.ParseStringToType on int-like, float-like and plain
    text strings, printing the resulting type of each."""
    for label, raw in (("1", "13"), ("2", "24.1"), ("3", "yoyoma")):
        parsed = LSDost.ParseStringToType(raw)
        print("Type of " + label + " is: ")
        print(type(parsed))
def CopyDataAndParam(path, prefix, new_extension):
    """Duplicate a CRNData/CRNParam file pair under new names that carry
    `new_extension` in the prefix."""
    # normalise the directory path and ensure it ends with a separator
    fmt_path = LSDost.AppendSepToDirectoryPath(LSDost.ReformatSeperators(path))

    # (source, destination) pairs for the data and parameter files
    copies = (
        (fmt_path + prefix + "_CRNData.csv",
         fmt_path + prefix + "_" + new_extension + "_CRNData.csv"),
        (fmt_path + prefix + ".CRNParam",
         fmt_path + prefix + "_" + new_extension + ".CRNParam"),
    )
    for src, dst in copies:
        shutil.copyfile(src, dst)
def TestOSTools():
    """Smoke-test the LSDost path helpers, printing each input and result."""
    path1 = "C://basin_data//Chile//lat26p0//"
    path2 = "M:/Yo/ma/yoyo.ma"
    path3 = "/home/smudd/devel_projects/LSDTopoTools/branches/LSDModel"
    path4 = "C:\\basin_data\\Chile\\lat26p0\\heyJude_DEM.flt"

    # separator reformatting on each sample path
    for old_path in (path1, path2, path3, path4):
        print("Old path: " + old_path)
        print("New path: " + LSDost.ReformatSeperators(old_path))

    # appending a trailing separator
    print("\n\n")
    newpath = LSDost.AppendSepToDirectoryPath(path1)
    print("Sep appended path is: " + newpath)
    print("\n\n")
    newpath = LSDost.AppendSepToDirectoryPath(path3)
    print("Sep appended path is: " + newpath)

    # extracting the file prefix
    fprefix = LSDost.GetFilePrefix(path4)
    print("\n\n")
    print("File prefix is: " + fprefix)

    # removing one directory level
    print("\n\n")
    print("Removing a directory level from: " + newpath)
    newnewpath = LSDost.RemoveDirectoryLevel(newpath)
    print("The new directory is: " + newnewpath)

    # fetching the last directory name
    print("\n\n")
    print("The last directory name in: " + newnewpath)
    name = LSDost.GetLastDirectoryLevel(newnewpath)
    print("is: " + name)
def ConvertAllCSVToGeoJSON(path):
    """Convert every CSV point file in `path` into a reduced GeoJSON file."""
    # make sure the directory name carries a trailing separator
    formatted_path = LSDOst.AppendSepToDirectoryPath(path)
    print("The formatted path is: " + formatted_path)

    for csv_name in glob(formatted_path + "*.csv"):
        print("filename is: " + csv_name)
        LSDMPD.LSDMap_PointData(csv_name).TranslateToReducedGeoJSON(csv_name)
def CopyRequiredFilesToGitRepository(ObjectsDirectory, DriverDirectory, TargetDirectory):
    """Copy the source files a driver depends on into a target git repository.

    Files at the driver directory's path level are copied into the target
    driver directory; everything else goes into the target root.

    Args:
        ObjectsDirectory (str): Directory holding the object files.
        DriverDirectory (str): Directory holding the driver files.
        TargetDirectory (str): Root of the git repository to copy into.
    """
    # Ensure the directories exist and derive the target driver directory
    ObjectsDirectory, DriverDirectory, TargetDirectory, TargetDriverDirectory = CheckFileStructuresForCopy(ObjectsDirectory, DriverDirectory, TargetDirectory)

    # Now get the required files
    print("\n\n\n=================================")
    required_files_noduplicates = GetRequiredFilesFromFolder(DriverDirectory)
    print("The required files are: ")
    print(required_files_noduplicates)
    print("=================================")

    # loop through these files, collecting the filenames and directory names;
    # first you need to know what directory level the driver files are in
    print("\n\n\n======================================")
    n_level_of_driver_directory = LSDost.GetPathLevel(DriverDirectory)

    for FileName in required_files_noduplicates:
        # you need to know what level the file is
        ThisLevel = LSDost.GetPathLevel(LSDost.GetPath(FileName))

        # The original's two branches differed only in the destination:
        # same level as the driver directory => it is a driver file
        if ThisLevel == n_level_of_driver_directory:
            CopyDirectory = TargetDriverDirectory
        else:
            CopyDirectory = TargetDirectory
        CopyFileNameWithPath = CopyDirectory + LSDost.GetFileNameNoPath(FileName)

        print("The filename is: " + FileName)
        print("The copy filename is: " + CopyFileNameWithPath)
        shutil.copy(FileName, CopyFileNameWithPath)

    # now the files have been copied over
    print("==============================================")
def SpawnFoldersFromDEMList(DEM_names):
    """Create a directory for each DEM name that does not already have one."""
    for name in DEM_names:
        # the directory path is the DEM name with a trailing separator
        path = LSDost.AppendSepToDirectoryPath(name)
        if os.access(path, os.F_OK):
            print("Path: " + path + " already exists.")
        else:
            print("Making path: ")
            os.mkdir(path)
            print(path)
def RasterMeanValue(path, file1):
    """Return the mean pixel value of a raster (sum over all pixels divided
    by the pixel count)."""
    # build the full path to the raster
    raster_file1 = LSDOst.AppendSepToDirectoryPath(path) + file1

    n_pixels = LSDMap_IO.GetNPixelsInRaster(raster_file1)
    raster = LSDMap_IO.ReadRasterArrayBlocks(raster_file1, raster_band=1)

    return np.sum(raster) / float(n_pixels)
def SimpleSwath(path, file1, axis):
    """Compute swath statistics of a raster collapsed along one axis.

    Nodata values are masked before the statistics are computed.

    Args:
        path (str): Directory containing the raster.
        file1 (str): Raster filename.
        axis (int): Axis along which the statistics are collapsed.

    Returns:
        tuple: (means, medians, std_deviations, twentyfifth_percentile,
        seventyfifth_percentile) along the chosen axis.
    """
    # make sure names are in correct format
    NewPath = LSDOst.AppendSepToDirectoryPath(path)
    raster_file1 = NewPath + file1

    # get some information about the raster
    NDV, xsize, ysize, GeoT, Projection, DataType = LSDMap_IO.GetGeoInfo(raster_file1)
    print("NDV is: ")
    print(NDV)
    if NDV is None:  # bug fix: was `NDV == None`; identity test is correct for None
        NDV = -9999
        print("No NDV defined")

    Raster1 = LSDMap_IO.ReadRasterArrayBlocks(raster_file1, raster_band=1)

    # mask the nodata so it is excluded from the statistics
    masked_Raster1 = np.ma.masked_values(Raster1, NDV)

    # np.mean and np.std dispatch to the masked-array methods and honour the mask
    means = np.mean(masked_Raster1, axis)
    # bug fix: np.median ignores the mask on masked arrays, so nodata values
    # contaminated the medians; np.ma.median excludes them correctly
    medians = np.ma.median(masked_Raster1, axis)
    std_deviations = np.std(masked_Raster1, axis)
    # NOTE(review): np.percentile also ignores the mask, so nodata values can
    # still contaminate these percentiles; fixing that needs a per-row
    # compressed computation -- left as in the original for now
    twentyfifth_percentile = np.percentile(masked_Raster1, 25, axis)
    seventyfifth_percentile = np.percentile(masked_Raster1, 75, axis)

    return means, medians, std_deviations, twentyfifth_percentile, seventyfifth_percentile
def ReadCRONUSData(self, FileName):
    """Read CRONUS 2.2 erosion-rate results from a CSV file and attach them
    to self.CRNData, provided the row count matches the existing erate data.

    Columns read per line: index 4 (effective erate), 5 (erate),
    6 (external uncertainty), 3 (internal uncertainty).

    Args:
        FileName (str): Full path to the CRONUS results CSV.
    """
    #See if the parameter files exist
    if os.access(FileName, os.F_OK):
        # NOTE(review): file handle is never closed
        this_file = open(FileName, 'r')
        lines = this_file.readlines()

        CRONUS_eff_erate = []
        CRONUS_erate = []
        CRONUS_ext_uncert = []
        CRONUS_in_uncert = []

        # now get the data into the dict
        for line in lines:
            this_line = LSDOst.RemoveEscapeCharacters(line)
            split_line = this_line.split(',')
            CRONUS_eff_erate.append(float(split_line[4]))
            CRONUS_erate.append(float(split_line[5]))
            CRONUS_ext_uncert.append(float(split_line[6]))
            CRONUS_in_uncert.append(float(split_line[3]))

        #print "I got the erate from CRONUS, here is the data: "
        #print CRONUS_eff_erate

        # check to see if number of data elements are the same
        ERate = self.CRNData['erate_g_percm2_peryr']
        if (len(ERate) != len(CRONUS_eff_erate)):
            print "CRONUS data doens't seem to be same length as other data"
        else:
            # NOTE(review): the int/ext uncertainty assignments below look
            # swapped (ext data stored under the int key and vice versa) --
            # confirm against the CRONUS file's column meanings before relying
            # on these keys
            self.CRNData['CRONUS_erate_g_percm2_peryr'] = CRONUS_eff_erate
            self.CRNData['CRONUS_erate_mm_peryr'] = CRONUS_erate
            self.CRNData['CRONUS_int_uncert_mm_peryr'] = CRONUS_ext_uncert
            self.CRNData['CRONUS_ext_uncert_mm_peryr'] = CRONUS_in_uncert
            # total uncertainty = int + ext, then scaled by 2.65/10000
            # (presumably a rock-density unit conversion -- confirm)
            self.CRNData['CRONUS_total_uncert'] = np.add(CRONUS_in_uncert, CRONUS_ext_uncert)
            self.CRNData['CRONUS_total_uncert'] = np.multiply(self.CRNData['CRONUS_total_uncert'], (2.65 / 10000))
            self.HaveCRONUSData = True
    else:
        print "Can't open CRONUS file."
def BasicMassBalance(path, file1, file2):
    """Return the mass balance between two rasters, computed as
    sum(raster2 - raster1) multiplied by the pixel area."""
    # make sure the directory name carries a trailing separator
    formatted_path = LSDOst.AppendSepToDirectoryPath(path)
    raster_file1 = formatted_path + file1
    raster_file2 = formatted_path + file2

    pixel_area = LSDMap_IO.GetPixelArea(raster_file1)
    print("PixelArea is: " + str(pixel_area))
    print("The formatted path is: " + formatted_path)

    first = LSDMap_IO.ReadRasterArrayBlocks(raster_file1, raster_band=1)
    second = LSDMap_IO.ReadRasterArrayBlocks(raster_file2, raster_band=1)

    # the linear difference is the total elevation change before area scaling
    linear_dif = np.sum(np.subtract(second, first))
    print("linear dif " + str(linear_dif))

    return linear_dif * pixel_area
def CollateCRNData():
    """Collate all ``*_CRNResults.csv`` files in a hard-coded directory and
    produce four SVG comparison figures of CAIRN vs CRONUS/Cosmocalc errors.

    Outputs (written to the data directory):
        Production_vs_error.svg, Erosion_vs_error.svg,
        COSMOCALC_vs_error.svg, COSMOCALC_CRem_vs_error.svg
    """
    #Directory = "C://basin_data//CosmoPaper//Results//Compiled//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)
    Fileformat = 'svg'

    # This list will store the crn data
    CRNDataList = []
    CRNprefixes = []
    PaperNames = []

    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.1

    # loop through the directory, getting the results from the data
    for fname in glob(Dirname+"*_CRNResults.csv"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)
        # Now get the prefix of the file
        splitfname = NoDirFname.split('_CRNResults.csv')
        fprefix = splitfname[0]
        # now produce the cronus name from this prefix
        CRONUS_name = Dirname+fprefix+"_CRONUS.csv"
        print "File prefix is: " + fprefix
        print "Cronus_name is: " + CRONUS_name
        # now read in the data
        thisCRNData = CRNR.CRNResults(fname)
        # read in the Cronus data and get the errors
        thisCRNData.ReadCRONUSData(CRONUS_name)
        thisCRNData.GetErrorsBetweenMethods()
        thisCRNData.GetErrorsBetweenCRONUS()
        CRNDataList.append(thisCRNData)
        CRNprefixes.append(fprefix)
        # map each known file prefix to its full paper citation for the legend
        if fprefix == "Bierman":
            PaperNames.append("Bierman et al., 2005")
        elif fprefix == "Dethier":
            PaperNames.append("Dethier et al., 2014")
        elif fprefix == "Kirchner":
            PaperNames.append("Kirchner et al., 2001")
        elif fprefix == "Munack":
            PaperNames.append("Munack et al., 2014")
        elif fprefix == "Scherler":
            PaperNames.append("Scherler et al., 2014")
        elif fprefix == "Safran":
            PaperNames.append("Safran et al., 2005")
        elif fprefix == "Palumbo":
            PaperNames.append("Palumbo et al., 2010")

    #===========================================================================
    # Figure 1: production factor vs CRONUS2.2/CAIRN relative error
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig1 = plt.figure(1, facecolor='white',figsize=(3.26,3.26))

    # generate a 120,90 grid.
    gs = GridSpec(100,75,bottom=0.13,left=0.13,right=0.98,top=0.85)
    ax = Fig1.add_subplot(gs[10:100,5:75])

    # this gets the colors to map to specific sites
    cmap = plt.cm.jet
    colo = 0
    for index,CRNObj in enumerate( CRNDataList):
        # step the colour through the jet colormap, one step per site
        colo = colo + (1.000/len(CRNprefixes))
        ax.plot(CRNObj.GetAverageCombinedScaling(),CRNObj.GetError_CR(), "o", markersize=4, color=cmap(colo), label = PaperNames[index],markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    ax.tick_params(axis='both', width=1)
    plt.xlabel('Production factor ($S_{tot}$)', fontsize = axis_size)
    plt.ylabel('($\epsilon_{CR2.2}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
    plt.savefig(Dirname+"Production_vs_error.svg",format = Fileformat)
    #Fig1.show()
    Fig1.clf()

    # These don't work in my version of matplotlib...I must update (!)
    # have found a workaround below
    #plt.rcParams['xtick.major.linewidth'] = 4
    #plt.rcParams['xtick.minor.width'] = 2

    #===========================================================================
    # Figure 2: erosion rate (log x) vs CRONUS2.2/CAIRN relative error
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig2 = plt.figure(1, facecolor='white',figsize=(3.26,3.5))

    # generate a 120,90 grid.
    gs = GridSpec(100,75,bottom=0.14,left=0.13,right=0.98,top=0.85)
    ax = Fig2.add_subplot(gs[10:100,5:75])
    plt.rcParams['xtick.major.size'] = 4
    plt.rcParams['xtick.minor.size'] = 3
    plt.rcParams['ytick.major.size'] = 4

    # this gets the colors to map to specific sites
    cmap = plt.cm.jet
    colo = 0
    for index,CRNObj in enumerate( CRNDataList):
        colo = colo + (1.000/len(CRNprefixes))
        ax.plot(CRNObj.GetErosionRates(),CRNObj.GetError_CR(), "o", markersize=4, color=cmap(colo), label = PaperNames[index],markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    #ax.tick_params(axis='both', width=2.5)
    ax.set_xscale('log')

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks
    ax.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)
    #for tick in ax.xaxis.get_minor_ticks():
    #    tick.tick_params(width = 2.5)

    plt.xlabel('$\epsilon_{CAIRN}$ (g cm$^{-2}$ yr$^{-1}$)', fontsize = axis_size)
    plt.ylabel('($\epsilon_{CR2.2}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
    #plt.show()
    plt.savefig(Dirname+"Erosion_vs_error.svg",format = Fileformat)
    Fig2.clf()

    #===========================================================================
    # Figure 3: production factor vs Cosmocalc/CAIRN relative error
    Fig3 = plt.figure(1, facecolor='white',figsize=(3.26,3.5))

    # generate a 120,90 grid.
    gs = GridSpec(100,75,bottom=0.14,left=0.14,right=0.97,top=0.85)
    ax = Fig3.add_subplot(gs[10:100,5:75])
    plt.rcParams['xtick.major.size'] = 4
    plt.rcParams['ytick.major.size'] = 4

    # this gets the colors to map to specific sites
    cmap = plt.cm.jet
    colo = 0
    for index,CRNObj in enumerate( CRNDataList):
        colo = colo + (1.000/len(CRNprefixes))
        ax.plot(CRNObj.GetAverageCombinedScaling(),CRNObj.GetError_CC(), "o", markersize=4, color=cmap(colo), label = PaperNames[index],markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    #ax.tick_params(axis='both', width=2.5)

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks
    ax.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)
    #for tick in ax.xaxis.get_minor_ticks():
    #    tick.tick_params(width = 2.5)

    plt.xlabel('Production factor ($S_{CCtot}$)', fontsize = axis_size)
    plt.ylabel('($\epsilon_{CC}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
    #plt.show()
    plt.savefig(Dirname+"COSMOCALC_vs_error.svg",format = Fileformat)
    Fig3.clf()

    #===========================================================================
    # Figure 4: production factor vs Cosmocalc-CR emulator/CAIRN relative error
    Fig4 = plt.figure(1, facecolor='white',figsize=(3.26,3.5))

    # generate a 120,90 grid.
    gs = GridSpec(100,75,bottom=0.14,left=0.14,right=0.97,top=0.85)
    ax = Fig4.add_subplot(gs[10:100,5:75])
    plt.rcParams['xtick.major.size'] = 4

    # this gets the colors to map to specific sites
    cmap = plt.cm.jet
    colo = 0
    for index,CRNObj in enumerate( CRNDataList):
        colo = colo + (1.000/len(CRNprefixes))
        ax.plot(CRNObj.GetAverageCombinedScaling(),CRNObj.GetError_CR_em(), "o", markersize=4, color=cmap(colo), label = PaperNames[index],markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    #ax.tick_params(axis='both', width=2.5)

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks
    ax.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)
    #for tick in ax.xaxis.get_minor_ticks():
    #    tick.tick_params(width = 2.5)

    plt.xlabel('Production factor ($S_{CRShield}$*$S_{effp}$)', fontsize = axis_size)
    plt.ylabel('($\epsilon_{CC-CR}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
    #plt.show()
    plt.savefig(Dirname+"COSMOCALC_CRem_vs_error.svg",format = Fileformat)
    Fig4.clf()
def CollatenewCRONUScomparisonCRNData():
    """Compare CAIRN newCRONUS-style erosion rates against CRONUScalc results
    for the Dethier and Palumbo datasets, plotting the relative errors.

    Output (written to the hard-coded data directory):
        CAIRN_newCRONUS_emulator.svg
    """
    #Directory = "C://code//git_papers//crn_basinwide_paper//Compiled_results//Brauch_vs_newCRONUS//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//Brauch_vs_newCRONUS//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)
    Fileformat = 'svg'

    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.1

    # CAIRN (erate) and CRONUScalc (newCRONUS) rates for the two datasets
    P_erate_newCRONUS = []
    D_erate_newCRONUS = []
    P_newCRONUS = []
    D_newCRONUS = []

    # loop through the directory, getting the results from the data
    for fname in glob(Dirname + "*_CRNResults.csv"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)
        # Now get the prefix of the file
        splitfname = NoDirFname.split('_CRNResults.csv')
        fprefix = splitfname[0]
        print "File prefix is: " + fprefix
        # now read in the data
        thisCRNData = CRNR.CRNResults(fname)
        # Only get the newCRONUS Dethier and Palumbo data
        if "Dethier" in fprefix:
            if "newCRONUS" in fprefix:
                D_erate_newCRONUS = thisCRNData.GetErosionRates_mmperkyr_rho2650()
        elif "Palumbo" in fprefix:
            if "newCRONUS" in fprefix:
                P_erate_newCRONUS = thisCRNData.GetErosionRates_mmperkyr_rho2650()

    # Convert the data to arrays (to calculate errors)
    P_nC_CAIRN = np.asarray(P_erate_newCRONUS)
    D_nC_CAIRN = np.asarray(D_erate_newCRONUS)

    #print "P CAIRN is: "
    #print P_nC_CAIRN
    #print "D CAIRN is: "
    #print D_nC_CAIRN

    # Now get the CRONUScalc data
    # NOTE(review): this loop only prints the filenames it finds -- it appears
    # to be diagnostic output only; the parsing happens in the next loop
    print "Entering second glob loop"
    for fname in glob(Dirname + "*.csv"):
        print "I found comparison data! Name is " + fname

    # Now get the CRONUScalc data
    print "Entering third glob loop"
    for fname in glob(Dirname + "*Comparison.csv*"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)
        print "I found comparison data! Name is " + NoDirFname
        # Now get the prefix of the file
        splitfname = NoDirFname.split('_newCRONUSCAIRNComparison.csv')
        fprefix = splitfname[0]
        print "File prefix is: " + fprefix
        print "I am woking with the dataset: " + fprefix
        #See if the parameter files exist
        if os.access(fname, os.F_OK):
            this_file = open(fname, 'r')
            lines = this_file.readlines()
            # get rid fo the first line
            lines.pop(0)
            # create the lists for populating with data
            CAIRN_erate = []
            CAIRN_uncert = []
            Report_erate = []
            Report_uncert = []
            newCRONUS_erate = []
            newCRONUS_uncert = []
            # now get the data into the lists; columns 16/17 are CAIRN,
            # 20/21 the reported values, 22/23 the newCRONUS values
            for line in lines:
                this_line = LSDost.RemoveEscapeCharacters(line)
                split_line = this_line.split(',')
                #print split_line[22]+" " +split_line[23]
                #print split_line
                CAIRN_erate.append(float(split_line[16]))
                CAIRN_uncert.append(float(split_line[17]))
                Report_erate.append(float(split_line[20]))
                Report_uncert.append(float(split_line[21]))
                newCRONUS_erate.append(float(split_line[22]))
                newCRONUS_uncert.append(float(split_line[23]))
            # now assign the data to the correct dataset
            if fprefix == "Dethier":
                D_newCRONUS = newCRONUS_erate
            elif fprefix == "Palumbo":
                P_newCRONUS = newCRONUS_erate

    #print "P_newCRONUS is: "
    #print P_newCRONUS
    #print "D_newCRONUS is: "
    #print D_newCRONUS

    # relative error of CAIRN against the CRONUScalc values
    P_nC = np.asarray(P_newCRONUS)
    D_nC = np.asarray(D_newCRONUS)
    Perr = np.divide(np.subtract(P_nC_CAIRN, P_nC), P_nC)
    Derr = np.divide(np.subtract(D_nC_CAIRN, D_nC), D_nC)

    print "The errors are: "
    print Perr
    print Derr
    print P_nC
    print P_nC_CAIRN

    # okay, now you should have the errors
    #===========================================================================
    # now make plots based on these data
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig1 = plt.figure(1, facecolor='white', figsize=(3.26, 3.26))

    # generate a 120,90 grid.
    gs = GridSpec(100, 75, bottom=0.13, left=0.13, right=0.95, top=0.95)
    ax = Fig1.add_subplot(gs[10:100, 5:95])

    ax.plot(P_nC, Perr, "o", markersize=5, color="maroon", label="Palumbo et al., 2010", markeredgewidth=1)
    ax.plot(D_nC, Derr, "ro", markersize=5, color="lawngreen", label="Dethier et al., 2014", markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    ax.tick_params(axis='both', width=1)
    #ax.set_ylim([0.02,0.06])
    #plt.title('We are not using this in the paper!! Use CRNCAIRNvsnewCRONUS_erates.py instead!')
    plt.xlabel('$\epsilon_{CRCalc}$ (g cm$^{-2}$ yr$^{-1}$)', fontsize=axis_size)
    plt.ylabel('($\epsilon_{CAIRN-CRCalc}$-$\epsilon_{CRCalc}$)/$\epsilon_{CRCalc}$', fontsize=axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles, labels, numpoints=1, loc=4, ncol=1, borderaxespad=1.)
    plt.savefig(Dirname + "CAIRN_newCRONUS_emulator.svg", format=Fileformat)
def CRBERCvsReported():
    """Plot CAIRN (BERC) topographic shielding against the values reported in
    each source publication, one colour per study.

    Reads every ``*_CompareResults.csv`` in a hard-coded directory and writes
    CAIRN_vs_Reported_toposhield.svg alongside the data.
    """
    Dirname = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    #Dirname = "C://basin_data//CosmoPaper//Results//Compiled//"
    #Fname = "Palumbo*_CompareResults.csv"

    SiteNames = []
    SiteDicts = []
    PaperNames = []
    PaperColours = []

    # loop through the directory, getting the results from the data
    for fname in glob(Dirname + "*_CompareResults.csv"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)
        # Now get the prefix of the file
        splitfname = NoDirFname.split('_CompareResults.csv')
        fprefix = splitfname[0]
        print "File prefix is: " + fprefix

        # add to the sitenames list and create a holding dictionary
        SiteNames.append(fprefix)
        thisdict = {}

        # map each known file prefix to its citation and plotting colour
        if fprefix == "Bierman":
            PaperNames.append("Bierman et al., 2005")
            PaperColours.append("blue")
        elif fprefix == "Dethier":
            PaperNames.append("Dethier et al., 2014")
            PaperColours.append("lawngreen")
        elif fprefix == "Kirchner":
            PaperNames.append("Kirchner et al., 2001")
            PaperColours.append("yellow")
        elif fprefix == "Munack":
            PaperNames.append("Munack et al., 2014")
            PaperColours.append("orange")
        elif fprefix == "Scherler":
            PaperNames.append("Scherler et al., 2014")
            PaperColours.append("black")
        elif fprefix == "Safran":
            PaperNames.append("Safran et al., 2005")
            PaperColours.append("powderblue")
        elif fprefix == "Palumbo":
            PaperNames.append("Palumbo et al., 2010")
            PaperColours.append("maroon")

        #See if the parameter files exist
        if os.access(fname, os.F_OK):
            this_file = open(fname, 'r')
            lines = this_file.readlines()
            # get rid fo the first line
            lines.pop(0)
            # create the lists for populating with data
            BERC_erate = []
            BERC_uncert = []
            Report_erate = []
            Report_uncert = []
            BERC_shield = []
            Report_shield = []
            # now get the data into the dict; columns 4/5 are BERC erate and
            # uncertainty, 6/7 reported erate/uncertainty, 2/3 the shielding
            for line in lines:
                this_line = LSDost.RemoveEscapeCharacters(line)
                split_line = this_line.split(',')
                BERC_erate.append(float(split_line[4]))
                BERC_uncert.append(float(split_line[5]))
                Report_erate.append(float(split_line[6]))
                Report_uncert.append(float(split_line[7]))
                BERC_shield.append(float(split_line[2]))
                Report_shield.append(float(split_line[3]))
            thisdict["BERC_erate"] = BERC_erate
            thisdict["BERC_uncert"] = BERC_uncert
            thisdict["Report_erate"] = Report_erate
            thisdict["Report_uncert"] = Report_uncert
            thisdict["BERC_shield"] = BERC_shield
            thisdict["Report_shield"] = Report_shield
            SiteDicts.append(thisdict)

    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.02
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.05

    # now make plots based on these data
    Fig1 = plt.figure(1, facecolor='white', figsize=(3.26, 3.26))

    # generate a 120,90 grid.
    gs = GridSpec(100, 75, bottom=0.13, left=0.13, right=0.98, top=0.85)
    ax = Fig1.add_subplot(gs[10:100, 5:75])

    #cmap = plt.cm.jet
    #colo = 0
    # one scatter series per site, coloured by its paper colour
    for index, thisdict in enumerate(SiteDicts):
        #colo = colo + (1.000/len(SiteDicts))
        plt.plot(thisdict['BERC_shield'], thisdict['Report_shield'], "o", markersize=4, color=PaperColours[index], label=PaperNames[index], markeredgewidth=1)
        #plt.plot(self.CRNData['AvgProdScaling'],self.CRNData['Error_CR'],color=cmap(self.CRNData['basin_relief']),"o", markersize=8 )
        #plt.errorbar(datazz['erate_cosmocalc']*10, datazz['erate_cmperkyr']*10, xerr=datazz['error_cosmocalc'], yerr=datazz['error_newcode'], fmt='o',color = cmap(colo))

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    ax.tick_params(axis='both', width=1, pad=2)
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)

    plt.xlabel('CAIRN topographic shielding', fontsize=axis_size)
    plt.ylabel('Reported topographic shielding', fontsize=axis_size)

    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints=1, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
    #plt.show()
    Fileformat = "svg"
    plt.savefig(Dirname + "CAIRN_vs_Reported_toposhield.svg", format=Fileformat)
def CRONUS_diff_params():
    """Compare CAIRN denudation rates against three CRONUS 2.2 emulator runs.

    Reads Scherler_CRNResults.csv from a hard-coded results directory plus
    three _CRONUSEmulator*.csv files (default parameters, updated spallation,
    and updated spallation+muons), computes the fractional difference between
    the CRONUS and CAIRN erosion rates for each run, and saves a one-column
    scatter figure (CRONUS_update_spallation_and_muons.svg) of that
    difference against the total production factor.

    NOTE(review): the input directory is hard-coded to a Windows drive
    letter; the function takes no arguments and returns nothing.
    """
    #Directory = "C://basin_data//CosmoPaper//Results//Compiled//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)

    Fileformat = 'svg'
    label_size = 8
    axis_size = 12

    # Set up fonts for plots (mutates matplotlib's global rcParams)
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.1

    fname = Dirname + "Scherler_CRNResults.csv"

    # get only the file without the data directory
    NoDirFname = LSDost.GetFileNameNoPath(fname)

    # Now get the prefix of the file
    splitfname = NoDirFname.split('_CRNResults.csv')
    fprefix = splitfname[0]

    # now produce the cronus names from this prefix:
    # default CRONUS 2.2, updated spallation, updated spallation + muons
    CRONUS_name1 = Dirname + fprefix + "_CRONUSEmulator.csv"
    CRONUS_name2 = Dirname + fprefix + "_CRONUSEmulator_Modified.csv"
    CRONUS_name3 = Dirname + fprefix + "_CRONUSEmulator_Modified2.csv"

    print "File prefix is: " + fprefix
    print "Cronus_name is: " + CRONUS_name1

    # now read in the data. We need 3 objects for 3 different results
    thisCRNData1 = CRNR.CRNResults(fname)
    thisCRNData2 = CRNR.CRNResults(fname)
    thisCRNData3 = CRNR.CRNResults(fname)

    # read in the Cronus data and get the errors for each emulator run
    thisCRNData1.ReadCRONUSData(CRONUS_name1)
    thisCRNData1.GetErrorsBetweenMethods()
    thisCRNData1.GetErrorsBetweenCRONUS()

    thisCRNData2.ReadCRONUSData(CRONUS_name2)
    thisCRNData2.GetErrorsBetweenMethods()
    thisCRNData2.GetErrorsBetweenCRONUS()

    thisCRNData3.ReadCRONUSData(CRONUS_name3)
    thisCRNData3.GetErrorsBetweenMethods()
    thisCRNData3.GetErrorsBetweenCRONUS()

    #===========================================================================
    # now make plots based on these data
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig1 = plt.figure(1, facecolor='white', figsize=(3.26, 3.26))

    # generate a grid for the single axis
    gs = GridSpec(100, 100, bottom=0.06, left=0.1, right=1.0, top=1.0)
    ax = Fig1.add_subplot(gs[10:90, 10:95])

    # one scatter series per emulator configuration
    ax.plot(thisCRNData1.GetAverageCombinedScaling(), thisCRNData1.GetError_CR(), "o", markersize=4, color="black", label="CRONUS2.2 default", markeredgewidth=1)
    ax.plot(thisCRNData2.GetAverageCombinedScaling(), thisCRNData2.GetError_CR(), "o", markersize=4, color="grey", label="Updated spallation", markeredgewidth=1)
    ax.plot(thisCRNData3.GetAverageCombinedScaling(), thisCRNData3.GetError_CR(), "o", markersize=4, color="white", label="Updated spallation, muons", markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    ax.tick_params(axis='both', width=1)

    plt.xlabel('Production factor ($S_{tot}$)', fontsize=axis_size)
    plt.ylabel('($\epsilon_{CR2.2}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize=axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)

    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles, labels, numpoints=1, loc=1, ncol=1, borderaxespad=1.)
    #plt.show()

    plt.savefig(Dirname + "CRONUS_update_spallation_and_muons.svg", format=Fileformat)
def ElevationSwaths(path, filename, axis, fprefix): Fileformat = 'png' # get the path to the raster file NewPath = LSDOst.AppendSepToDirectoryPath(path) FileName = NewPath+filename # get the data vectors means,medians,std_deviations,twentyfifth_percentile,seventyfifth_percentile = LSDP.SimpleSwath(path, filename, axis) print "Means shape is: " print means.shape x_vec,y_vec = LSDP.GetLocationVectors(FileName) print "X shape is: " print x_vec.shape print "Y shape is: " print y_vec.shape import matplotlib.pyplot as plt import matplotlib.lines as mpllines from mpl_toolkits.axes_grid1 import AxesGrid label_size = 20 #title_size = 30 axis_size = 28 # Set up fonts for plots rcParams['font.family'] = 'sans-serif' rcParams['font.sans-serif'] = ['arial'] rcParams['font.size'] = label_size # make a figure, sized for a ppt slide fig = plt.figure(1, facecolor='white',figsize=(10,7.5)) gs = plt.GridSpec(100,75,bottom=0.1,left=0.1,right=0.9,top=1.0) ax = fig.add_subplot(gs[10:100,10:75]) if axis == 0: dir_vec = x_vec else: dir_vec = y_vec # get the distance from shore dist_from_shore = np.subtract(dir_vec[-1],dir_vec) min_sd = np.subtract(means,std_deviations) plus_sd = np.add(means,std_deviations) ax.plot(dist_from_shore,means, linewidth = 2.5, color = "black") #ax.fill_between(dist_from_shore, twentyfifth_percentile, seventyfifth_percentile, facecolor='green', alpha = 0.7, interpolate=True) ax.fill_between(dist_from_shore, min_sd, plus_sd, facecolor='blue', alpha = 0.25, interpolate=True) ax.set_xlim(dist_from_shore[0],dist_from_shore[-1]) ax.annotate('Standard deviation envelope', xy=(dist_from_shore[10],plus_sd[10]), xycoords='data', xytext=(0.1, 0.8), textcoords='axes fraction', size=label_size, # bbox=dict(boxstyle="round", fc="0.8"), arrowprops=dict(arrowstyle="simple", fc="0.6", ec="none", connectionstyle="arc3,rad=0.3"), ) ax.spines['top'].set_linewidth(2) ax.spines['left'].set_linewidth(2) ax.spines['right'].set_linewidth(2) ax.spines['bottom'].set_linewidth(2) 
#ax.tick_params(axis='both', width=1) plt.xlabel('Distance from shore (m)', fontsize = axis_size) plt.ylabel('Bed elevation relative to MSL (m)', fontsize = axis_size) plt.title(fprefix) # This gets all the ticks, and pads them away from the axis so that the corners don't overlap ax.tick_params(axis='both', width=2, pad = 10) for tick in ax.xaxis.get_major_ticks(): tick.set_pad(10) #plt.show() plt.savefig(NewPath+fprefix+"_BedElev.png",format = Fileformat) plt.clf()
def ManageShieldingComputation(path, prefix, NJobs): # Do some housekeeping with the path names LSDost.ReformatSeperators(path) LSDost.AppendSepToDirectoryPath(path) # Now open the csv files LSDRasters_fname = path + prefix + "_CRNRasters.csv" LSDData_fname = path + prefix + "_CRNData.csv" LSDParams_fname = path + prefix + ".CRNParam" # And a command prompt fname this can be cut and paste into the command prompt LSDCommandPromt_fname = path + prefix + "_ShieldCommandPrompt.txt" # now go through the rasters listed, getting the n_nodes in each raster if os.access(LSDRasters_fname, os.F_OK): # this is the total lnumber of pixels to be analysed Ntotal_pixels = 0 # open the file and get the data Raster_file = open(LSDRasters_fname, 'r') lines = Raster_file.readlines() npixels_in_DEMs = [] n_cum_pixels = [] # loop through the file, collecting basin data for line in lines: split_line = line.split(",") basin_fname = split_line[0] # Add to the total number of pixels in the file # IMPORTANT: it is assumed all files are in ENVI bil format npixels_in_DEMs.append( LSDmt.GetNPixelsInRaster(basin_fname + ".bil")) Ntotal_pixels = Ntotal_pixels + npixels_in_DEMs[-1] n_cum_pixels.append(Ntotal_pixels) print "The total number of pixels are: " + str(Ntotal_pixels) # now go back through the loop, setting the seperation TargetPixels = Ntotal_pixels / NJobs #Next_Target = TargetPixels curr_line = 0 pixels_so_far = 0 breaks = [] # these are the indices into the breaks for line in lines: pixels_so_far = pixels_so_far + npixels_in_DEMs[curr_line] # if the number of pixels exceeds the target pixels, # this raster is not included if pixels_so_far > TargetPixels: breaks.append(curr_line + 1) pixels_so_far = 0 curr_line = curr_line + 1 # now print out the details print "The target pixels are: " + str(TargetPixels) print "the pixels are: " print npixels_in_DEMs print "\nThe breaks are: " if len(breaks) > NJobs: breaks.pop() if len(breaks) < NJobs: breaks.append(len(lines)) print breaks # now 
spawn the files Param_file = open(LSDParams_fname, 'r') Plines = Param_file.readlines() Param_file.close() Data_file = open(LSDData_fname, 'r') Dlines = Data_file.readlines() Data_file.close() # make the command prompt file CP_file = open(LSDCommandPromt_fname, 'w') CP_file.write( "Commands for running shielding calculations from command line.\n") CP_file.write( "To be used with the University of Edinburgh's basinwide CRN programs.\n" ) CP_file.write( "These are designed for use on a cluster without a job management system.\n" ) CP_file.write( "If you want to use qsub you will need to write your own script!\n" ) CP_file.write("Copy and paste these into the command line.\n") bb = 1 last_brk = 0 for brk in breaks: new_param_name = path + prefix + "_brk" + str(bb) + ".CRNParam" new_data_name = path + prefix + "_brk" + str(bb) + "_CRNData.csv" new_raster_name = path + prefix + "_brk" + str( bb) + "_CRNRasters.csv" New_Param_file = open(new_param_name, 'w') New_Param_file.writelines(Plines) New_Param_file.close() New_Data_file = open(new_data_name, 'w') New_Data_file.writelines(Dlines) New_Data_file.close() print "Last break: " + str(last_brk) + " and this break: " + str( brk) New_Raster_file = open(new_raster_name, 'w') thispx = npixels_in_DEMs[last_brk:brk] New_Raster_file.writelines(lines[last_brk:brk]) New_Raster_file.close() last_brk = brk print "these pixels are: " print thispx print "and sum is: " + str(sum(thispx)) CP_file.write("nohup nice ./Shielding_for_CRN.exe " + path + " " + prefix + "_brk" + str(bb) + "\n") bb = bb + 1 CP_file.close()
def UpdateRasterWithShielding(path, prefix, Sample_names, Snowshield_values): #first get directory path into the correct format fmt_path = LSDost.ReformatSeperators(path) # add the trailing seperator fmt_path = LSDost.AppendSepToDirectoryPath(fmt_path) # now find the correct file fname = fmt_path + prefix + "_CRNRasters.csv" # also make the outfile outfname = fmt_path + prefix + "_SS_CRNRasters.csv" outfile = open(outfname, 'w') new_lines = [] print "The sample names are" print Sample_names print "The snow shield values are: " print Snowshield_values #See if the parameter files exist if os.access(fname, os.F_OK): this_file = open(fname, 'r') lines = this_file.readlines() # now get the list of DEM prefixes for line in lines: this_line = line.split(",") DEM_prefix = this_line[0] print "The DEM prefix is: " + DEM_prefix # Now get the sample name split_dem_prefix = DEM_prefix.split("_") sample_name = split_dem_prefix[-1] print "The sample name is: " + sample_name # get the index of the sample name to reference the shielding value i = Sample_names.index(sample_name) print "the index of the sample names is: " + str(i) # calculate the effective depth. The 160 is the attenuation thickness in g/cm^2 this_snow_depth = -160 * np.log(Snowshield_values[i]) print "The shielding is: " + str( Snowshield_values[i]) + " and eff_depth is: " + str( this_snow_depth) # update the snow effective depth this_line[1] = str(this_snow_depth) # update the line this_new_line = ",".join(this_line) new_lines.append(this_new_line) # this will get printed to file for line in new_lines: # you have to get rid of the control characters this_line = LSDost.RemoveEscapeCharacters(line) outfile.write("%s\n" % this_line)
def CheckFileStructuresForCopy(ObjectsDirectory,DriverDirectory,TargetDirectory): # Format the target directories Td = LSDost.ReformatSeperators(TargetDirectory) TargetDirectory = LSDost.AppendSepToDirectoryPath(Td) TDd = TargetDirectory + DriverDirectory TargetDriverDirectory = LSDost.AppendSepToDirectoryPath(TDd) # Format the source directories Od = LSDost.ReformatSeperators(ObjectsDirectory) ObjectsDirectory = LSDost.AppendSepToDirectoryPath(Od) Dd = ObjectsDirectory+DriverDirectory DriverDirectory = LSDost.AppendSepToDirectoryPath(Dd) # Check if the source directories exist if not os.access(ObjectsDirectory,os.F_OK): print "The object directory for the code doesn't exist!" print "You wanted this directory: " + ObjectsDirectory return 0 if not os.access(ObjectsDirectory,os.F_OK): print "The driver directory for the code doesn't exist!" print "You wanted this directory: " + DriverDirectory return 0 if not os.access(ObjectsDirectory+"TNT"+os.sep,os.F_OK): print "The TNT directory for the code doesn't exist!" print "You wanted this directory: " + ObjectsDirectory+"TNT"+os.sep return 0 # check if the target object directory exists if not os.access(TargetDirectory,os.F_OK): print "The target directory for the code doesn't exist!" print "You wanted this directory: " + TargetDirectory print "I am making that now, along with the driver directory" os.mkdir(TargetDirectory) if not os.access(TargetDirectory,os.F_OK): print "WTF the directory was not made??!" os.mkdir(TargetDriverDirectory) # check just the driver directory if not os.access(TargetDriverDirectory,os.F_OK): print "The target driver directory for the code doesn't exist!" print "You wanted this directory: " + TargetDriverDirectory print "I am making that now" os.mkdir(TargetDriverDirectory) # Check if the TNT directory exists. 
If it does, remove and replace it # If it doesn't , just copy it across TNTTargetDirectory = TargetDirectory+'TNT'+os.sep TNTSourceDirectory = ObjectsDirectory+'TNT'+os.sep if not os.access(TNTTargetDirectory,os.F_OK): print "The target TNT directory for the code doesn't exist!" print "You wanted this directory: " + TargetDriverDirectory print "I am making that now" shutil.copytree(TNTSourceDirectory,TNTTargetDirectory) else: print "There is a TNT directory here already. Removing and replacing" shutil.rmtree(TNTTargetDirectory) shutil.copytree(TNTSourceDirectory,TNTTargetDirectory) print "=========================" print "DriverDirectory: " + DriverDirectory print "ObjectsDirectory: " + ObjectsDirectory print "TargetDirectory: " + TargetDirectory print "TargetDriverDirectory: " + TargetDriverDirectory print "=========================" return ObjectsDirectory,DriverDirectory,TargetDirectory,TargetDriverDirectory
def GetRequiredFilesFromFolder(DataDirectory):
    """Collect the makefiles in DataDirectory and every file they depend on.

    Each *.make file is scanned between its SOURCES and OBJECTS entries for
    .cpp filenames.  Sources one directory up (written with "../") contribute
    their matching .hpp header from the parent directory; plain sources
    contribute the .cpp in DataDirectory itself.  The makefiles themselves
    are included in the result.

    Args:
        DataDirectory: folder to scan for *.make files.

    Returns:
        A de-duplicated (unordered) list of full paths.
    """
    # normalise the directory and guarantee a trailing separator
    formatted_dir = LSDost.ReformatSeperators(DataDirectory)
    DataDirectory = LSDost.AppendSepToDirectoryPath(formatted_dir)

    required_files = []

    for makefile_name in glob(DataDirectory + "*.make"):
        with open(makefile_name, 'r') as makefile:
            makefile_lines = makefile.readlines()

        # the makefile itself must be copied too
        required_files.append(makefile_name)

        cpp_sources = []
        in_sources_section = False

        for text_line in makefile_lines:
            # the SOURCES entry opens the section; OBJECTS closes it
            if "SOURCE" in text_line:
                in_sources_section = True
            if "OBJECTS" in text_line:
                in_sources_section = False

            if in_sources_section and ".cpp" in text_line:
                # keep the last whitespace-separated token containing ".cpp"
                candidate = ""
                for token in text_line.split(' '):
                    if ".cpp" in token:
                        candidate = token

                # strip the assignment prefix and any control characters
                cleaned = candidate.replace("SOURCES=", "")
                cpp_sources.append(LSDost.RemoveEscapeCharacters(cleaned))

        # turn each source into a full path; "../" sources become headers in
        # the parent directory
        for source_name in cpp_sources:
            if "../" in source_name:
                stripped_name = source_name.replace("../", "")
                parent_dir = LSDost.RemoveDirectoryLevel(DataDirectory)
                header_path = (parent_dir + stripped_name).replace(".cpp", ".hpp")
                required_files.append(header_path)
            else:
                required_files.append(DataDirectory + source_name)

    # remove duplicates (order is not preserved)
    return list(set(required_files))
def plot_production_rates():
    """Plot muon and total CRN production rates versus depth for three schemes.

    Loads Muon_production_comparison.csv (via get_production_data) from a
    hard-coded results directory and draws a two-panel figure: muon-only
    production (top) and total production (bottom) against depth in g/cm^2,
    comparing the CRONUS, Braucher and "new CRONUS" schemes.  Saves
    MuonProductionComparison.svg and shows the figure.

    NOTE(review): column indices into the csv (0,1,4,5,6,9,10) are assumed
    from the file layout — confirm against the csv header.
    """
    #Directory = "C://basin_data//CosmoPaper//Results//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)

    # get the data and slice out the columns of interest
    prod_data = get_production_data(Directory)
    pdata = np.asarray(prod_data)

    depth = pdata[:,0]
    mu_CRONUS = pdata[:,1]
    mu_Braucher = pdata[:,4]
    mu_newCRONUS = pdata[:,5]
    total_CRONUS = pdata[:,6]
    total_Braucher = pdata[:,9]
    total_newCRONUS = pdata[:,10]

    # set up figure formatting
    Fileformat = 'svg'
    label_size = 10
    axis_size = 12

    # Set up fonts for plots (mutates matplotlib's global rcParams)
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] =0.1
    rcParams['legend.columnspacing'] =0.1

    # 3.26 inches = 83 mm, the size of a 1 column figure
    #Fig1 = plt.figure(1, facecolor='white',figsize=(3.26,3.26))
    Fig1 = plt.figure(1, facecolor='white',figsize=(3.26,6))

    # generate a grid; top panel occupies rows 2-42, bottom rows 57-97
    gs = GridSpec(100,100,bottom=0.1,left=0.1,right=1.0,top=1.0)
    ax = Fig1.add_subplot(gs[2:42,10:95])

    # top panel: muon-only production
    ax.plot(mu_CRONUS,depth, "r-", label = "CRONUS",linewidth = 2)
    ax.plot(mu_Braucher,depth, "k-", label = "Braucher",linewidth = 2)
    ax.plot(mu_newCRONUS,depth, "b--", label = "new CRONUS",linewidth = 2)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    #ax.tick_params(axis='both', width=2.5)

    # log depth axis, increasing downwards
    ax.set_yscale('log')
    ax.set_ylim([0,10**5])
    ax.invert_yaxis()

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks
    ax.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)

    plt.xlabel('Muon production (atoms g$^{-1}$ yr$^{-1}$)', fontsize = axis_size)
    plt.ylabel('Depth (g cm$^{-2}$)', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)

    handles, labels = ax.get_legend_handles_labels()
    #plt.legend(handles, labels, numpoints = 3, bbox_to_anchor=(0., 1.02, 1., .102),
    #           loc=4, ncol=1, mode="expand", borderaxespad=0.0)
    #plt.legend(handles, labels, numpoints = 1, loc=1, ncol=3, mode="expand", borderaxespad=0.0)
    plt.legend(handles, labels, numpoints = 3, loc=4, ncol=1, borderaxespad=0.5)
    #plt.show()

    # bottom panel: total production
    #gs2 = GridSpec(100,75,bottom=0.13,left=0.13,right=0.90,top=0.95)
    ax2 = Fig1.add_subplot(gs[57:97,10:95])

    ax2.plot(total_CRONUS,depth, "r-", label = "CRONUS",linewidth = 2)
    ax2.plot(total_Braucher,depth, "k-", label = "Braucher",linewidth = 2)
    ax2.plot(total_newCRONUS,depth, "b--", label = "new CRONUS",linewidth = 2)

    ax2.spines['top'].set_linewidth(1)
    ax2.spines['left'].set_linewidth(1)
    ax2.spines['right'].set_linewidth(1)
    ax2.spines['bottom'].set_linewidth(1)
    #ax.tick_params(axis='both', width=2.5)

    # log depth axis, increasing downwards (shallower range than top panel)
    ax2.set_yscale('log')
    ax2.set_ylim([0,10**3])
    ax2.invert_yaxis()

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks
    ax2.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax2.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax2.yaxis.get_major_ticks():
        tick.set_pad(3)

    plt.xlabel('Total production (atoms g$^{-1}$ yr$^{-1}$)', fontsize = axis_size)
    plt.ylabel('Depth (g cm$^{-2}$)', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)

    handles, labels = ax2.get_legend_handles_labels()
    #plt.legend(handles, labels, numpoints = 3, bbox_to_anchor=(0., 1.02, 1., .102),
    #           loc=4, ncol=1, mode="expand", borderaxespad=0.2, fontsize = 8)
    plt.legend(handles, labels, numpoints = 3, loc=4, ncol=1, borderaxespad=0.5)

    plt.savefig(Dirname+"MuonProductionComparison.svg",format = Fileformat)
    plt.show()
def CRBERCvsReported_erates():
    """Plot CAIRN (BERC) denudation rates against previously reported rates.

    Loops over *_ErateComparisonNew.csv files in a hard-coded results
    directory, reads the CAIRN and reported denudation rates plus their
    uncertainties for each published study, and saves a log-log scatter
    plot with a 1:1 reference line to CAIRN_vs_Reported_erates.svg.

    NOTE(review): min_erate/max_erate are re-initialised inside the file
    loop, so the 1:1 line span reflects only the last file processed (plus
    the 5-5000 mm/kyr defaults) — confirm this is intended.
    """
    #Dirname = "C://basin_data//CosmoPaper//Results//Compiled//"
    Dirname = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    #Fname = "Palumbo*_CompareResults.csv"

    SiteNames = []
    SiteDicts = []
    PaperNames = []
    PaperColours = []

    # loop through the directory, getting the results from the data
    for fname in glob(Dirname+"*_ErateComparisonNew.csv"):
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)

        # Now get the prefix of the file
        splitfname = NoDirFname.split('_ErateComparisonNew.csv')
        fprefix = splitfname[0]
        print "File prefix is: " + fprefix

        # add to the sitenames list and create a holding dictionary
        SiteNames.append(fprefix)
        thisdict = {}

        # map the file prefix onto a citation label and a plot colour
        if fprefix == "Bierman":
            PaperNames.append("Bierman et al., 2005")
            PaperColours.append("blue")
        elif fprefix == "Dethier":
            PaperNames.append("Dethier et al., 2014")
            PaperColours.append("lawngreen")
        elif fprefix == "Kirchner":
            PaperNames.append("Kirchner et al., 2001")
            PaperColours.append("yellow")
        elif fprefix == "Munack":
            PaperNames.append("Munack et al., 2014")
            PaperColours.append("orange")
        elif fprefix == "Scherler":
            PaperNames.append("Scherler et al., 2014")
            PaperColours.append("black")
        elif fprefix == "Safran":
            PaperNames.append("Safran et al., 2005")
            PaperColours.append("powderblue")
        elif fprefix == "Palumbo":
            PaperNames.append("Palumbo et al., 2010")
            PaperColours.append("maroon")

        print "I am woking with the dataset: " + fprefix

        # default span (mm/kyr) for the 1:1 line
        min_erate = 5
        max_erate = 5000

        #See if the parameter files exist
        if os.access(fname,os.F_OK):
            this_file = open(fname, 'r')
            lines = this_file.readlines()

            # get rid of the first line (header)
            lines.pop(0)

            # create the lists for populating with data
            BERC_erate = []
            BERC_uncert = []
            Report_erate = []
            Report_uncert = []

            # now get the data into the dict; columns 16/17 are the CAIRN
            # rate and uncertainty, 20/21 the reported ones
            for line in lines:
                # get rid of the control characters
                this_line = LSDost.RemoveEscapeCharacters(line)
                split_line = this_line.split(',')
                #print split_line

                BERC_erate.append(float(split_line[16]))
                BERC_uncert.append(float(split_line[17]))
                Report_erate.append(float(split_line[20]))
                Report_uncert.append(float(split_line[21]))

                # get the maximum and minimum erosion rates
                if (float(split_line[16]) > max_erate):
                    max_erate = float(split_line[16])
                if (float(split_line[20]) > max_erate):
                    max_erate = float(split_line[20])
                if (float(split_line[16]) < min_erate):
                    min_erate = float(split_line[16])
                if (float(split_line[20]) < min_erate):
                    min_erate = float(split_line[20])

            thisdict["BERC_erate"] = BERC_erate
            thisdict["BERC_uncert"] = BERC_uncert
            thisdict["Report_erate"] = Report_erate
            thisdict["Report_uncert"] = Report_uncert

        # appended once per file so indices stay aligned with PaperNames
        SiteDicts.append(thisdict)

    label_size = 10
    axis_size = 12

    # Set up fonts for plots (mutates matplotlib's global rcParams)
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] =0.1
    rcParams['legend.columnspacing'] =0.1

    # make a line between the maximum and minimum erate
    one_line = np.linspace(min_erate,max_erate,1000)

    # now make plots based on these data
    Fig1 = plt.figure(1, facecolor='white',figsize=(4.72,4.72))

    # generate a 120,90 grid.
    gs = GridSpec(100,75,bottom=0.13,left=0.13,right=0.98,top=0.85)
    ax = Fig1.add_subplot(gs[10:100,5:75])
    #cmap = plt.cm.jet
    #colo = 0

    # plot the 1:1 line
    plt.plot(one_line,one_line,'k-',linewidth = 2)

    # one errorbar + scatter series per study
    for index,thisdict in enumerate(SiteDicts):
        #colo = colo + (1.000/len(SiteDicts))
        # NOTE(review): errorbar's 3rd/4th positional args are yerr, xerr —
        # here BERC_uncert is the yerr for the Report axis; confirm pairing.
        plt.errorbar(thisdict['BERC_erate'], thisdict['Report_erate'], thisdict['BERC_uncert'], thisdict['Report_uncert'], fmt='.',color = PaperColours[index], linewidth = 1.5)
        plt.plot(thisdict['BERC_erate'], thisdict['Report_erate'], "o", markersize=4, color=PaperColours[index], label = PaperNames[index],markeredgewidth=1.)

    ax.annotate("1:1 line", xy=(3000, 3000), xycoords='data',
                xytext=(200, 4000), textcoords='data',
                arrowprops=dict(arrowstyle="fancy",
                                #linestyle="dashed",
                                color="0.5",
                                shrinkB=5,
                                connectionstyle="arc3,rad=-0.3",
                                ),
                )
    #plt.plot(self.CRNData['AvgProdScaling'],self.CRNData['Error_CR'],color=cmap(self.CRNData['basin_relief']),"o", markersize=8 )
    #plt.errorbar(datazz['erate_cosmocalc']*10, datazz['erate_cmperkyr']*10, xerr=datazz['error_cosmocalc'], yerr=datazz['error_newcode'], fmt='o',color = cmap(colo))

    ax.spines['top'].set_linewidth(1.5)
    ax.spines['left'].set_linewidth(1.5)
    ax.spines['right'].set_linewidth(1.5)
    ax.spines['bottom'].set_linewidth(1.5)

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    ax.tick_params(axis='both', width=1.5, pad = 2)
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)
    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)

    # logarithmic axes
    ax.set_yscale('log')
    ax.set_xscale('log')

    plt.xlabel('CAIRN denudation rate (mm/kyr)', fontsize = axis_size)
    plt.ylabel('Reported denudation rate (mm/kyr)', fontsize = axis_size)

    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
    #plt.show()

    Fileformat = "svg"
    plt.savefig(Dirname+"CAIRN_vs_Reported_erates.svg",format = Fileformat)
def prepare_CRNRasters_file(path, prefix): print "\n\nHello! I am here to help you format your LSDRasters file." print "I will look in your directories for DEMs and shielding rasters, and write them into your LSDRasters file." print "It is my pleasure to perform this service for you!" # first, make sure path is working path = LSDOst.ReformatSeperators(path) path = LSDOst.AppendSepToDirectoryPath(path) # initiate some empty lists raster_names = [] toposhield_names = [] basin_names = [] self_shield_names = [] snow_shield_names = [] csv_strings = [] # now check to see if the path exists if not os.access(path, os.F_OK): print "The path you have chosen: " + path + " does not exist. Try with a new path." else: # now find all the rasters. These all have *.bil in them for FileName in glob(path + "*.bil"): # now remove the extension from the file print "The filename is: " + FileName print "Removing the bil extension" Prefix = FileName[:-4] print "new_filename is" + Prefix # now we see if the files have shielding rasters if Prefix[-3:] == "_SH": print "This is a shielding raster" toposhield_names.append(Prefix) elif Prefix[-7:] == "_snowBL": print "This is a snow raster" snow_shield_names.append(Prefix) elif Prefix[-7:] == "_SnowBL": print "This is a snow raster" snow_shield_names.append(Prefix) elif Prefix[-9:] == "_snowclip": print "This is a snow raster" snow_shield_names.append(Prefix) elif Prefix[-9:] == "_selfclip": print "This is a self raster" self_shield_names.append(Prefix) elif Prefix[-7:] == "_BASINS": print "This is a basins raster" basin_names.append(Prefix) elif Prefix[-7:] == "_HS": print "This is a hillshade raster" else: print "No matching pattern for shielding (topo, self or snow) so I am assuming this is a DEM\n" raster_names.append(Prefix) # now print these to a rasters csv file for raster_name in raster_names: this_ts_name = "NULL" this_sns_name = "NULL" this_slfs_name = "NULL" # search for toposhield # There is probably a more efficient way to do this 
but it gets the job done # Loop through all the toposhield names and find the one that contains the # raster name for ts in toposhield_names: if raster_name in ts: this_ts_name = ts # now the snow shield for sns in snow_shield_names: if raster_name in sns: this_sns_name = sns # now the snow shield for slfs in self_shield_names: if raster_name in slfs: this_slfs_name = slfs if (this_ts_name == "NULL" and this_sns_name == "NULL" and this_slfs_name == "NULL"): print "I have a DEM: " + raster_name + " only (no snow, self or toposhield rasters)." this_csv_line = raster_name else: this_csv_line = raster_name + "," if this_sns_name == "NULL": this_csv_line = this_csv_line + "0," else: this_csv_line = this_csv_line + this_sns_name + "," if this_slfs_name == "NULL": this_csv_line = this_csv_line + "0" else: this_csv_line = this_csv_line + this_slfs_name if not this_ts_name == "NULL": this_csv_line = this_csv_line + "," + this_ts_name # low append to the list csv_strings.append(this_csv_line) # now print the file fname_csv = path + prefix + "_CRNRasters.csv" f = open(fname_csv, 'w') for item in csv_strings: print >> f, item f.close()