def TestOSTools():

    path1 = "C://basin_data//Chile//lat26p0//"
    path2 = "M:/Yo/ma/yoyo.ma"
    path3 = "/home/smudd/devel_projects/LSDTopoTools/branches/LSDModel"
    path4 = "C:\\basin_data\\Chile\\lat26p0\\heyJude_DEM.flt"

    newpath1 = LSDost.ReformatSeperators(path1)
    print "Old path: " + path1
    print "New path: " + newpath1

    newpath2 = LSDost.ReformatSeperators(path2)
    print "Old path: " + path2
    print "New path: " + newpath2

    newpath3 = LSDost.ReformatSeperators(path3)
    print "Old path: " + path3
    print "New path: " + newpath3

    newpath4 = LSDost.ReformatSeperators(path4)
    print "Old path: " + path4
    print "New path: " + newpath4

    # test the directory adder
    # test the directory adder
    print "\n\n"
    newpath = LSDost.AppendSepToDirectoryPath(path1)
    print "Sep appended path is: " + newpath

    print "\n\n"
    newpath = LSDost.AppendSepToDirectoryPath(path3)
    print "Sep appended path is: " + newpath

    # Test the file prefix grabber
    fprefix = LSDost.GetFilePrefix(path4)
    print "\n\n"
    print "File prefix is: " + fprefix

    # Test the remove path level
    print "\n\n"
    print "Removing a directory level from: " + newpath
    newnewpath = LSDost.RemoveDirectoryLevel(newpath)
    print "The new directory is: " + newnewpath

    # Test the last directory name function
    print "\n\n"
    print "The last directory name in: " + newnewpath
    name = LSDost.GetLastDirectoryLevel(newnewpath)
    print "is: " + name
def get_production_data(Directory):
    """Read Muon_production_comparison.csv from Directory.

    The directory separators are normalised and a trailing separator
    appended before the file is opened. The first three header lines
    are discarded; every remaining line is split on commas and each
    entry converted to float.

    Returns a list of rows, each row a list of floats.
    """
    # Normalise the separators and make sure there is a trailing one
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)
    FileName = Dirname + "Muon_production_comparison.csv"

    # read the file; `with` guarantees the handle is closed
    # (the original leaked the open file handle)
    with open(FileName, 'r') as this_file:
        lines = this_file.readlines()

    # get rid of the header
    del lines[0:3]

    values = []

    # now go through the lines, extracting the data
    for line in lines:

        # get rid of the control characters
        this_line = LSDost.RemoveEscapeCharacters(line)

        # Split the data and convert each entry to a float
        float_vec = [float(element) for element in this_line.split(',')]

        values.append(float_vec)

    return values
# Exemple #3
# 0
def GetListOfRasters(path, prefix):

    #first get directory path into the correct format
    fmt_path = LSDost.ReformatSeperators(path)

    # add the trailing seperator
    fmt_path = LSDost.AppendSepToDirectoryPath(fmt_path)

    # now find the correct file
    fname = fmt_path + prefix + "_CRNRasters.csv"

    DEM_names = []

    #See if the parameter files exist
    if os.access(fname, os.F_OK):
        this_file = open(fname, 'r')
        lines = this_file.readlines()

        # now get the list of DEM prefixes
        for line in lines:
            this_line = line.split(",")
            DEM_prefix = this_line[0]
            DEM_names.append(DEM_prefix)

    else:
        print "*_CRNRasters.csv file not found. Are you sure it is there and you have the correct path?"

    return DEM_names
# Exemple #4
# 0
def SwathPlot(path, filename, axis):
    """Plot a swath profile of the raster at path/filename.

    The swath mean along the chosen axis is drawn as a red line with a
    blue +/- one standard deviation envelope, then shown interactively.
    axis == 0 plots against the x location vector, any other value
    plots against y.

    NOTE(review): rcParams and np are used below but not imported in
    this function -- presumably module-level imports; verify.
    """
    # get the path to the raster file
    NewPath = LSDOst.AppendSepToDirectoryPath(path)
    FileName = NewPath + filename

    # get the data vectors (statistics across the swath axis)
    means, medians, std_deviations, twentyfifth_percentile, seventyfifth_percentile = LSDMap_BM.SimpleSwath(
        path, filename, axis)

    print "Means shape is: "
    print means.shape

    # location vectors supply the distance coordinate for each row/column
    x_vec, y_vec = LSDMap_IO.GetLocationVectors(FileName)

    print "X shape is: "
    print x_vec.shape

    print "Y shape is: "
    print y_vec.shape

    import matplotlib.pyplot as plt
    import matplotlib.lines as mpllines
    from mpl_toolkits.axes_grid1 import AxesGrid

    label_size = 20
    #title_size = 30
    axis_size = 28

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size

    # make a figure, sized for a ppt slide
    fig = plt.figure(1, facecolor='white', figsize=(10, 7.5))

    gs = plt.GridSpec(100, 75, bottom=0.1, left=0.1, right=0.9, top=1.0)
    ax = fig.add_subplot(gs[10:100, 10:75])

    # choose the location vector that matches the requested swath axis
    if axis == 0:
        dir_vec = x_vec
    else:
        dir_vec = y_vec

    # one-standard-deviation envelope around the mean
    min_sd = np.subtract(means, std_deviations)
    plus_sd = np.add(means, std_deviations)

    ax.plot(dir_vec, means, linewidth=2, color="red")
    #ax.fill_between(dir_vec, twentyfifth_percentile, seventyfifth_percentile, facecolor='green', alpha = 0.7, interpolate=True)
    ax.fill_between(dir_vec,
                    min_sd,
                    plus_sd,
                    facecolor='blue',
                    alpha=0.5,
                    interpolate=True)

    ax.set_xlim(dir_vec[0], dir_vec[-1])

    plt.show()
def ConvertAllCSVToGeoJSON(path):

    # make sure names are in correct format
    NewPath = LSDOst.AppendSepToDirectoryPath(path)

    print "The formatted path is: " + NewPath

    for FileName in glob(NewPath + "*.csv"):
        print "filename is: " + FileName

        thisPointData = LSDMPD.LSDMap_PointData(FileName)
        thisPointData.TranslateToReducedGeoJSON(FileName)
# Exemple #6
# 0
def SpawnFoldersFromDEMList(DEM_names):

    # loop through the files checking if there are directories for the
    # DEMs. If not, make them
    for name in DEM_names:
        path = LSDost.AppendSepToDirectoryPath(name)

        if not os.access(path, os.F_OK):
            print "Making path: "
            os.mkdir(path)
            print path
        else:
            print "Path: " + path + " already exists."
def RasterMeanValue(path, file1):
    """Return the mean pixel value of the raster file1 inside path."""
    # build the full raster file name from the formatted directory
    FormattedPath = LSDOst.AppendSepToDirectoryPath(path)
    raster_file1 = FormattedPath + file1

    # number of pixels, used to normalise the sum
    NPixels = LSDMap_IO.GetNPixelsInRaster(raster_file1)

    # pull the raster data into an array
    data = LSDMap_IO.ReadRasterArrayBlocks(raster_file1, raster_band=1)

    # mean = sum over all pixels divided by the pixel count
    return np.sum(data) / float(NPixels)
# Exemple #8
# 0
def GDALBatchConvert(DataDirectory, raster_format, target_format):

    NewDataDirectory = LSDost.ReformatSeperators(DataDirectory)
    DataDirectory = LSDost.AppendSepToDirectoryPath(NewDataDirectory)

    # Check the target format
    if target_format == "ENVI":
        target_extension = ".bil"
    elif target_format == "EHdr":
        target_extension = ".bil"
    elif target_format == "GTiff":
        target_extension = ".tiff"
    else:
        print "You have not selcted a valid raster format!"
        print "Options are ENVI, EHdr and GTiff"
        target_extension = "NULL"

    # now make a directory
    if target_extension != "NULL":

        target_directory = DataDirectory + target_format

        if not os.access(target_directory, os.F_OK):
            print "Making path: "
            os.mkdir(target_directory)
            print "I made a directory: " + target_directory
        else:
            print "Path: " + target_directory + " already exists."

    # Now check the source format
    if raster_format == "ENVI":
        raster_extension = ".bil"
    elif raster_format == "EHdr":
        raster_extension = ".bil"
    elif raster_format == "GTiff":
        raster_extension = ".tif"
    else:
        print "You have not selcted a valid raster format!"
        print "Options are ENVI, EHdr and GTiff"
        raster_extension = "NULL"

    # find all the dataset of the source format
    print "The data directory is: " + DataDirectory
    print "The raster extension is: " + raster_extension
    if raster_extension != "NULL":
        for FileName in glob(DataDirectory + "*" + raster_extension):
            print "found file: " + FileName
            subprocess.call(['gdalinfo', FileName])
def SimpleSwath(path, file1, axis):

    # make sure names are in correct format
    NewPath = LSDOst.AppendSepToDirectoryPath(path)

    raster_file1 = NewPath + file1

    # get some information about the raster
    NDV, xsize, ysize, GeoT, Projection, DataType = LSDMap_IO.GetGeoInfo(
        raster_file1)

    print "NDV is: "
    print NDV

    if NDV == None:
        NDV = -9999
        print "No NDV defined"

    Raster1 = LSDMap_IO.ReadRasterArrayBlocks(raster_file1, raster_band=1)

    #nan_raster = Raster1[Raster1==NDV]=np.nan
    #print nan_raster

    #now mask the nodata
    masked_Raster1 = np.ma.masked_values(Raster1, NDV)

    means = np.mean(masked_Raster1, axis)
    medians = np.median(masked_Raster1, axis)
    std_deviations = np.std(masked_Raster1, axis)
    twentyfifth_percentile = np.percentile(masked_Raster1, 25, axis)
    seventyfifth_percentile = np.percentile(masked_Raster1, 75, axis)

    # This stuff only works with numpy 1.8 or later, wich we don't have
    #means = np.nanmean(nan_raster, axis)
    #medians = np.nanmedian(nan_raster, axis)
    #std_deviations = np.nanstd(nan_raster, axis)
    #twentyfifth_percentile = np.nanpercentile(nan_raster, 25, axis)
    #seventyfifth_percentile = np.nanpercentile(nan_raster, 75, axis)

    #print means
    #print medians
    #print std_deviations
    #print twentyfifth_percentile
    #print seventyfifth_percentile

    return means, medians, std_deviations, twentyfifth_percentile, seventyfifth_percentile
# Exemple #10
# 0
def GetCRNData(path, prefix):

    #first get directory path into the correct format
    fmt_path = LSDost.ReformatSeperators(path)

    # add the trailing seperator
    fmt_path = LSDost.AppendSepToDirectoryPath(fmt_path)

    # now find the correct file
    fname = fmt_path + prefix + "_CRNData.csv"

    Sample_names = []
    SnowShield_values = []

    #See if the parameter files exist
    if os.access(fname, os.F_OK):
        this_file = open(fname, 'r')
        lines = this_file.readlines()

        # get rid of the first line, since this has header information
        lines.pop(0)

        # now get the list of DEM prefixes
        for line in lines:
            this_line = line.split(",")
            SampleName = this_line[0]

            print "This line is: "
            print this_line

            # check to see if there is a snow shield value
            N_entries = len(this_line)
            if (N_entries == 8):
                SnowShield = float(this_line[7])
                Sample_names.append(SampleName)
                SnowShield_values.append(SnowShield)
            else:
                print "there is no snow shielding on this line"
                SnowShield_values.append(1)
                Sample_names.append(SampleName)

    else:
        print "*_CRNRData.csv file not found. Are you sure it is there and you have the correct path?"

    return Sample_names, SnowShield_values
# Exemple #11
# 0
def CopyDataAndParam(path, prefix, new_extension):
    """Duplicate the CRNData csv and CRNParam files under a new extension tag."""
    # put the directory path into the correct format, trailing separator included
    DirName = LSDost.ReformatSeperators(path)
    DirName = LSDost.AppendSepToDirectoryPath(DirName)

    # names of the original data and parameter files
    src_data = DirName + prefix + "_CRNData.csv"
    src_param = DirName + prefix + ".CRNParam"

    # names of the copies, tagged with the new extension
    dst_data = DirName + prefix + "_" + new_extension + "_CRNData.csv"
    dst_param = DirName + prefix + "_" + new_extension + ".CRNParam"

    # copy the files
    shutil.copyfile(src_data, dst_data)
    shutil.copyfile(src_param, dst_param)
def BasicMassBalance(path, file1, file2):

    # make sure names are in correct format
    NewPath = LSDOst.AppendSepToDirectoryPath(path)

    raster_file1 = NewPath + file1
    raster_file2 = NewPath + file2

    PixelArea = LSDMap_IO.GetPixelArea(raster_file1)
    print "PixelArea is: " + str(PixelArea)

    print "The formatted path is: " + NewPath
    Raster1 = LSDMap_IO.ReadRasterArrayBlocks(raster_file1, raster_band=1)
    Raster2 = LSDMap_IO.ReadRasterArrayBlocks(raster_file2, raster_band=1)

    NewRaster = np.subtract(Raster2, Raster1)

    mass_balance = np.sum(NewRaster) * PixelArea

    print "linear dif " + str(np.sum(NewRaster))

    return mass_balance
def ElevationSwaths(path, filename, axis, fprefix):
    """Plot a bed elevation swath profile against distance from shore.

    The swath mean is drawn as a black line with a +/- one standard
    deviation envelope, annotated, and saved to
    <path>/<fprefix>_BedElev.png.

    NOTE(review): rcParams and np are used below but not imported in
    this function -- presumably module-level imports; verify.
    """

    Fileformat = 'png'

    # get the path to the raster file
    NewPath = LSDOst.AppendSepToDirectoryPath(path)
    FileName = NewPath+filename

    # get the data vectors (swath statistics along the chosen axis)
    means,medians,std_deviations,twentyfifth_percentile,seventyfifth_percentile = LSDP.SimpleSwath(path, filename, axis)

    print "Means shape is: "
    print means.shape

    # location vectors give the distance coordinate for each row/column
    x_vec,y_vec = LSDP.GetLocationVectors(FileName)


    print "X shape is: "
    print x_vec.shape

    print "Y shape is: "
    print y_vec.shape

    import matplotlib.pyplot as plt
    import matplotlib.lines as mpllines
    from mpl_toolkits.axes_grid1 import AxesGrid

    label_size = 20
    #title_size = 30
    axis_size = 28

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size

    # make a figure, sized for a ppt slide
    fig = plt.figure(1, facecolor='white',figsize=(10,7.5))

    gs = plt.GridSpec(100,75,bottom=0.1,left=0.1,right=0.9,top=1.0)
    ax = fig.add_subplot(gs[10:100,10:75])

    # choose the location vector that matches the requested swath axis
    if axis == 0:
        dir_vec = x_vec
    else:
        dir_vec = y_vec

    # get the distance from shore (measured back from the last coordinate)
    dist_from_shore = np.subtract(dir_vec[-1],dir_vec)

    # one-standard-deviation envelope around the mean
    min_sd = np.subtract(means,std_deviations)
    plus_sd = np.add(means,std_deviations)

    ax.plot(dist_from_shore,means, linewidth = 2.5, color = "black")
    #ax.fill_between(dist_from_shore, twentyfifth_percentile, seventyfifth_percentile, facecolor='green', alpha = 0.7, interpolate=True)
    ax.fill_between(dist_from_shore, min_sd, plus_sd, facecolor='blue', alpha = 0.25, interpolate=True)

    ax.set_xlim(dist_from_shore[0],dist_from_shore[-1])

    # label the envelope, with the arrow anchored at the 10th data point
    ax.annotate('Standard deviation envelope', xy=(dist_from_shore[10],plus_sd[10]), xycoords='data',
                xytext=(0.1, 0.8), textcoords='axes fraction',
                size=label_size,
                # bbox=dict(boxstyle="round", fc="0.8"),
                arrowprops=dict(arrowstyle="simple",
                                fc="0.6", ec="none",
                                connectionstyle="arc3,rad=0.3"),
                )


    ax.spines['top'].set_linewidth(2)
    ax.spines['left'].set_linewidth(2)
    ax.spines['right'].set_linewidth(2)
    ax.spines['bottom'].set_linewidth(2)
    #ax.tick_params(axis='both', width=1)

    plt.xlabel('Distance from shore (m)', fontsize = axis_size)
    plt.ylabel('Bed elevation relative to MSL (m)', fontsize = axis_size)
    plt.title(fprefix)

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    ax.tick_params(axis='both', width=2, pad = 10)
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(10)

    #plt.show()
    plt.savefig(NewPath+fprefix+"_BedElev.png",format = Fileformat)
    plt.clf()
def ManageShieldingComputation(path, prefix, NJobs):

    # Do some housekeeping with the path names
    LSDost.ReformatSeperators(path)
    LSDost.AppendSepToDirectoryPath(path)

    # Now open the csv files
    LSDRasters_fname = path + prefix + "_CRNRasters.csv"
    LSDData_fname = path + prefix + "_CRNData.csv"
    LSDParams_fname = path + prefix + ".CRNParam"

    # And a command prompt fname this can be cut and paste into the command prompt
    LSDCommandPromt_fname = path + prefix + "_ShieldCommandPrompt.txt"

    # now go through the rasters listed, getting the n_nodes in each raster
    if os.access(LSDRasters_fname, os.F_OK):

        # this is the total lnumber of pixels to be analysed
        Ntotal_pixels = 0

        # open the file and get the data
        Raster_file = open(LSDRasters_fname, 'r')
        lines = Raster_file.readlines()

        npixels_in_DEMs = []
        n_cum_pixels = []

        # loop through the file, collecting basin data
        for line in lines:
            split_line = line.split(",")
            basin_fname = split_line[0]

            # Add to the total number of pixels in the file
            # IMPORTANT: it is assumed all files are in ENVI bil format
            npixels_in_DEMs.append(
                LSDmt.GetNPixelsInRaster(basin_fname + ".bil"))
            Ntotal_pixels = Ntotal_pixels + npixels_in_DEMs[-1]
            n_cum_pixels.append(Ntotal_pixels)

        print "The total number of pixels are: " + str(Ntotal_pixels)

        # now go back through the loop, setting the seperation
        TargetPixels = Ntotal_pixels / NJobs
        #Next_Target = TargetPixels
        curr_line = 0
        pixels_so_far = 0
        breaks = []  # these are the indices into the breaks
        for line in lines:
            pixels_so_far = pixels_so_far + npixels_in_DEMs[curr_line]

            # if the number of pixels exceeds the target pixels,
            # this raster is not included
            if pixels_so_far > TargetPixels:
                breaks.append(curr_line + 1)
                pixels_so_far = 0

            curr_line = curr_line + 1

        # now print out the details
        print "The target pixels are: " + str(TargetPixels)
        print "the pixels are: "
        print npixels_in_DEMs

        print "\nThe breaks are: "
        if len(breaks) > NJobs:
            breaks.pop()

        if len(breaks) < NJobs:
            breaks.append(len(lines))

        print breaks

        # now spawn the files
        Param_file = open(LSDParams_fname, 'r')
        Plines = Param_file.readlines()
        Param_file.close()

        Data_file = open(LSDData_fname, 'r')
        Dlines = Data_file.readlines()
        Data_file.close()

        # make the command prompt file
        CP_file = open(LSDCommandPromt_fname, 'w')
        CP_file.write(
            "Commands for running shielding calculations from command line.\n")
        CP_file.write(
            "To be used with the University of Edinburgh's basinwide CRN programs.\n"
        )
        CP_file.write(
            "These are designed for use on a cluster without a job management system.\n"
        )
        CP_file.write(
            "If you want to use qsub you will need to write your own script!\n"
        )
        CP_file.write("Copy and paste these into the command line.\n")

        bb = 1
        last_brk = 0
        for brk in breaks:
            new_param_name = path + prefix + "_brk" + str(bb) + ".CRNParam"
            new_data_name = path + prefix + "_brk" + str(bb) + "_CRNData.csv"
            new_raster_name = path + prefix + "_brk" + str(
                bb) + "_CRNRasters.csv"

            New_Param_file = open(new_param_name, 'w')
            New_Param_file.writelines(Plines)
            New_Param_file.close()

            New_Data_file = open(new_data_name, 'w')
            New_Data_file.writelines(Dlines)
            New_Data_file.close()

            print "Last break: " + str(last_brk) + " and this break: " + str(
                brk)

            New_Raster_file = open(new_raster_name, 'w')
            thispx = npixels_in_DEMs[last_brk:brk]
            New_Raster_file.writelines(lines[last_brk:brk])
            New_Raster_file.close()

            last_brk = brk

            print "these pixels are: "
            print thispx
            print "and sum is: " + str(sum(thispx))

            CP_file.write("nohup nice ./Shielding_for_CRN.exe " + path + " " +
                          prefix + "_brk" + str(bb) + "\n")
            bb = bb + 1

        CP_file.close()
# Exemple #15
# 0
def UpdateRasterWithShielding(path, prefix, Sample_names, Snowshield_values):

    #first get directory path into the correct format
    fmt_path = LSDost.ReformatSeperators(path)

    # add the trailing seperator
    fmt_path = LSDost.AppendSepToDirectoryPath(fmt_path)

    # now find the correct file
    fname = fmt_path + prefix + "_CRNRasters.csv"

    # also make the outfile
    outfname = fmt_path + prefix + "_SS_CRNRasters.csv"
    outfile = open(outfname, 'w')

    new_lines = []

    print "The sample names are"
    print Sample_names

    print "The snow shield values are: "
    print Snowshield_values

    #See if the parameter files exist
    if os.access(fname, os.F_OK):
        this_file = open(fname, 'r')
        lines = this_file.readlines()

        # now get the list of DEM prefixes
        for line in lines:
            this_line = line.split(",")
            DEM_prefix = this_line[0]

            print "The DEM prefix is: " + DEM_prefix

            # Now get the sample name
            split_dem_prefix = DEM_prefix.split("_")
            sample_name = split_dem_prefix[-1]

            print "The sample name is: " + sample_name

            # get the index of the sample name to reference the shielding value
            i = Sample_names.index(sample_name)

            print "the index of the sample names is: " + str(i)

            # calculate the effective depth. The 160 is the attenuation thickness in g/cm^2
            this_snow_depth = -160 * np.log(Snowshield_values[i])
            print "The shielding is: " + str(
                Snowshield_values[i]) + " and eff_depth is: " + str(
                    this_snow_depth)

            # update the snow effective depth
            this_line[1] = str(this_snow_depth)

            # update the line
            this_new_line = ",".join(this_line)
            new_lines.append(this_new_line)

    # this will get printed to file
    for line in new_lines:
        # you have to get rid of the control characters
        this_line = LSDost.RemoveEscapeCharacters(line)
        outfile.write("%s\n" % this_line)
def plot_production_rates():
    """Plot muon and total production rates against depth for three schemes.

    Reads Muon_production_comparison.csv via get_production_data and
    draws a two-panel figure -- muon production (top) and total
    production (bottom) vs depth on a log axis -- for the CRONUS,
    Braucher and new CRONUS schemes, saving to
    MuonProductionComparison.svg in the hard-coded results directory.

    NOTE(review): rcParams, GridSpec, plt and np are used below but not
    imported locally -- presumably module-level imports; verify.
    """

    #Directory = "C://basin_data//CosmoPaper//Results//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)

    # get the data
    prod_data = get_production_data(Directory)
    pdata = np.asarray(prod_data)

    # csv columns: 0 = depth; 1/4/5 = muon production per scheme;
    # 6/9/10 = total production per scheme
    depth = pdata[:,0]
    mu_CRONUS = pdata[:,1]
    mu_Braucher = pdata[:,4]
    mu_newCRONUS = pdata[:,5]

    total_CRONUS = pdata[:,6]
    total_Braucher = pdata[:,9]
    total_newCRONUS = pdata[:,10]

    # set up figure formatting
    Fileformat = 'svg'

    label_size = 10
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] =0.1
    rcParams['legend.columnspacing'] =0.1

    # 3.26 inches = 83 mm, the size of a 1 column figure
    #Fig1 = plt.figure(1, facecolor='white',figsize=(3.26,3.26))
    Fig1 = plt.figure(1, facecolor='white',figsize=(3.26,6))

    # generate a grid; the top panel occupies rows 2-42
    gs = GridSpec(100,100,bottom=0.1,left=0.1,right=1.0,top=1.0)
    ax = Fig1.add_subplot(gs[2:42,10:95])

    ax.plot(mu_CRONUS,depth, "r-", label = "CRONUS",linewidth = 2)
    ax.plot(mu_Braucher,depth, "k-", label = "Braucher",linewidth = 2)
    ax.plot(mu_newCRONUS,depth, "b--", label = "new CRONUS",linewidth = 2)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    #ax.tick_params(axis='both', width=2.5)
    ax.set_yscale('log')
    # NOTE(review): a lower limit of 0 is not representable on a log axis
    # (matplotlib clips it) -- confirm the intended lower bound
    ax.set_ylim([0,10**5])
    ax.invert_yaxis()

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks
    ax.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)

    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)

    plt.xlabel('Muon production (atoms g$^{-1}$ yr$^{-1}$)', fontsize = axis_size)
    plt.ylabel('Depth (g cm$^{-2}$)', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    #plt.legend(handles, labels, numpoints = 3, bbox_to_anchor=(0., 1.02, 1., .102),
    #           loc=4, ncol=1, mode="expand", borderaxespad=0.0)
    #plt.legend(handles, labels, numpoints = 1, loc=1, ncol=3, mode="expand", borderaxespad=0.0)
    plt.legend(handles, labels, numpoints = 3, loc=4, ncol=1, borderaxespad=0.5)

    #plt.show()

    # 3.26 inches = 83 mm, the size of a 1 column figure
    #Fig2 = plt.figure(1, facecolor='white',figsize=(3.26,3.26))

    # the bottom panel occupies rows 57-97 of the same grid
    #gs2 = GridSpec(100,75,bottom=0.13,left=0.13,right=0.90,top=0.95)
    ax2 = Fig1.add_subplot(gs[57:97,10:95])

    ax2.plot(total_CRONUS,depth, "r-", label = "CRONUS",linewidth = 2)
    ax2.plot(total_Braucher,depth, "k-", label = "Braucher",linewidth = 2)
    ax2.plot(total_newCRONUS,depth, "b--", label = "new CRONUS",linewidth = 2)

    ax2.spines['top'].set_linewidth(1)
    ax2.spines['left'].set_linewidth(1)
    ax2.spines['right'].set_linewidth(1)
    ax2.spines['bottom'].set_linewidth(1)
    #ax.tick_params(axis='both', width=2.5)
    ax2.set_yscale('log')
    ax2.set_ylim([0,10**3])
    ax2.invert_yaxis()

    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks
    ax2.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax2.xaxis.get_major_ticks():
        tick.set_pad(3)

    for tick in ax2.yaxis.get_major_ticks():
        tick.set_pad(3)

    plt.xlabel('Total production (atoms g$^{-1}$ yr$^{-1}$)', fontsize = axis_size)
    plt.ylabel('Depth (g cm$^{-2}$)', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax2.get_legend_handles_labels()
    #plt.legend(handles, labels, numpoints = 3, bbox_to_anchor=(0., 1.02, 1., .102),
    #           loc=4, ncol=1, mode="expand", borderaxespad=0.2, fontsize = 8)
    plt.legend(handles, labels, numpoints = 3, loc=4, ncol=1, borderaxespad=0.5)

    plt.savefig(Dirname+"MuonProductionComparison.svg",format = Fileformat)
    plt.show()
def prepare_CRNRasters_file(path, prefix):

    print "\n\nHello! I am here to help you format your LSDRasters file."
    print "I will look in your directories for DEMs and shielding rasters, and write them into your LSDRasters file."
    print "It is my pleasure to perform this service for you!"

    # first, make sure path is working
    path = LSDOst.ReformatSeperators(path)
    path = LSDOst.AppendSepToDirectoryPath(path)

    # initiate some empty lists
    raster_names = []
    toposhield_names = []
    basin_names = []
    self_shield_names = []
    snow_shield_names = []
    csv_strings = []

    # now check to see if the path exists
    if not os.access(path, os.F_OK):
        print "The path you have chosen: " + path + " does not exist. Try with a new path."
    else:

        # now find all the rasters. These all have *.bil in them
        for FileName in glob(path + "*.bil"):

            # now remove the extension from the file
            print "The filename is: " + FileName
            print "Removing the bil extension"
            Prefix = FileName[:-4]
            print "new_filename is" + Prefix

            # now we see if the files have shielding rasters
            if Prefix[-3:] == "_SH":
                print "This is a shielding raster"
                toposhield_names.append(Prefix)
            elif Prefix[-7:] == "_snowBL":
                print "This is a snow raster"
                snow_shield_names.append(Prefix)
            elif Prefix[-7:] == "_SnowBL":
                print "This is a snow raster"
                snow_shield_names.append(Prefix)
            elif Prefix[-9:] == "_snowclip":
                print "This is a snow raster"
                snow_shield_names.append(Prefix)
            elif Prefix[-9:] == "_selfclip":
                print "This is a self raster"
                self_shield_names.append(Prefix)
            elif Prefix[-7:] == "_BASINS":
                print "This is a basins raster"
                basin_names.append(Prefix)
            elif Prefix[-7:] == "_HS":
                print "This is a hillshade raster"
            else:
                print "No matching pattern for shielding (topo, self or snow) so I am assuming this is a DEM\n"
                raster_names.append(Prefix)

        # now print these to a rasters csv file
        for raster_name in raster_names:

            this_ts_name = "NULL"
            this_sns_name = "NULL"
            this_slfs_name = "NULL"

            # search for toposhield
            # There is probably a more efficient way to do this but it gets the job done
            # Loop through all the toposhield names and find the one that contains the
            # raster name
            for ts in toposhield_names:
                if raster_name in ts:
                    this_ts_name = ts

            # now the snow shield
            for sns in snow_shield_names:
                if raster_name in sns:
                    this_sns_name = sns

            # now the snow shield
            for slfs in self_shield_names:
                if raster_name in slfs:
                    this_slfs_name = slfs

            if (this_ts_name == "NULL" and this_sns_name == "NULL"
                    and this_slfs_name == "NULL"):
                print "I have a DEM: " + raster_name + " only (no snow, self or toposhield rasters)."
                this_csv_line = raster_name
            else:
                this_csv_line = raster_name + ","

                if this_sns_name == "NULL":
                    this_csv_line = this_csv_line + "0,"
                else:
                    this_csv_line = this_csv_line + this_sns_name + ","

                if this_slfs_name == "NULL":
                    this_csv_line = this_csv_line + "0"
                else:
                    this_csv_line = this_csv_line + this_slfs_name

                if not this_ts_name == "NULL":
                    this_csv_line = this_csv_line + "," + this_ts_name

            # low append to the list
            csv_strings.append(this_csv_line)

        # now print the file
        fname_csv = path + prefix + "_CRNRasters.csv"
        f = open(fname_csv, 'w')

        for item in csv_strings:
            print >> f, item

        f.close()
# Exemple #18
# 0
def CRONUS_diff_params():

    #Directory = "C://basin_data//CosmoPaper//Results//Compiled//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)

    Fileformat = 'svg'

    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.1

    fname = Dirname + "Scherler_CRNResults.csv"

    # get only the file without the data directory
    NoDirFname = LSDost.GetFileNameNoPath(fname)

    # Now get the prefix of the file
    splitfname = NoDirFname.split('_CRNResults.csv')
    fprefix = splitfname[0]

    # now produce the cronus name from this prefix
    CRONUS_name1 = Dirname + fprefix + "_CRONUSEmulator.csv"
    CRONUS_name2 = Dirname + fprefix + "_CRONUSEmulator_Modified.csv"
    CRONUS_name3 = Dirname + fprefix + "_CRONUSEmulator_Modified2.csv"

    print "File prefix is: " + fprefix
    print "Cronus_name is: " + CRONUS_name1

    # now read in the data. We need 3 objects for 3 different results
    thisCRNData1 = CRNR.CRNResults(fname)
    thisCRNData2 = CRNR.CRNResults(fname)
    thisCRNData3 = CRNR.CRNResults(fname)

    # read in the Cronus data and get the errors
    thisCRNData1.ReadCRONUSData(CRONUS_name1)
    thisCRNData1.GetErrorsBetweenMethods()
    thisCRNData1.GetErrorsBetweenCRONUS()

    thisCRNData2.ReadCRONUSData(CRONUS_name2)
    thisCRNData2.GetErrorsBetweenMethods()
    thisCRNData2.GetErrorsBetweenCRONUS()

    thisCRNData3.ReadCRONUSData(CRONUS_name3)
    thisCRNData3.GetErrorsBetweenMethods()
    thisCRNData3.GetErrorsBetweenCRONUS()

    #===========================================================================
    # now make plots based on these data
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig1 = plt.figure(1, facecolor='white', figsize=(3.26, 3.26))

    # generate a 120,90 grid.
    # gendepth a grid.
    gs = GridSpec(100, 100, bottom=0.06, left=0.1, right=1.0, top=1.0)
    ax = Fig1.add_subplot(gs[10:90, 10:95])

    ax.plot(thisCRNData1.GetAverageCombinedScaling(),
            thisCRNData1.GetError_CR(),
            "o",
            markersize=4,
            color="black",
            label="CRONUS2.2 default",
            markeredgewidth=1)
    ax.plot(thisCRNData2.GetAverageCombinedScaling(),
            thisCRNData2.GetError_CR(),
            "o",
            markersize=4,
            color="grey",
            label="Updated spallation",
            markeredgewidth=1)
    ax.plot(thisCRNData3.GetAverageCombinedScaling(),
            thisCRNData3.GetError_CR(),
            "o",
            markersize=4,
            color="white",
            label="Updated spallation, muons",
            markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    ax.tick_params(axis='both', width=1)

    plt.xlabel('Production factor ($S_{tot}$)', fontsize=axis_size)
    plt.ylabel('($\epsilon_{CR2.2}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$',
               fontsize=axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles, labels, numpoints=1, loc=1, ncol=1, borderaxespad=1.)

    #plt.show()
    plt.savefig(Dirname + "CRONUS_update_spallation_and_muons.svg",
                format=Fileformat)
def GetRequiredFilesFromFolder(DataDirectory):
    """Scan a folder's *.make files and collect the files needed to build them.

    For every makefile in DataDirectory, read its SOURCES list, pull out
    the .cpp filenames, and resolve them to full paths. Entries that start
    with "../" live one directory level up and also pull in the matching
    .hpp header. Returns a de-duplicated list of full paths, including the
    makefiles themselves.
    """
    #print "Current directory is: " + os.getcwd()   
    
    #Append a closing slash to the data directory if one is not already there
    NewDataDirectory = LSDost.ReformatSeperators(DataDirectory)   
    DataDirectory = LSDost.AppendSepToDirectoryPath(NewDataDirectory)
    
    #print "DataDirectory is (2): " + DataDirectory    

    # Start with a master list of required files
    required_files = []

    # find all the makefiles in the directory
    for FileName in glob(DataDirectory+"*.make"):
        #print "FileName is: " + FileName
        
        # Now you need to find all the sources in the makefile
        f = open(FileName,'r')  # open file
        lines = f.readlines()   # read in the data
        f.close()
        
        # Initiate an empty list that will contain the filenames
        cppfiles = []
        
        # add the makefile to the files to be copied
        required_files.append(FileName)
        
        # loop through the lines, flag a start where SOURCES line starts
        # (start_flag == 1 means we are between SOURCES and OBJECTS)
        start_flag = 0
        for line in lines:
            if "SOURCE" in line:
                start_flag = 1
            
            # If there is OBJECTS in the line, stop looking for ".cpp"
            if "OBJECTS" in line:
                start_flag = 0
            
            # Look for .cpp between SOURCES and OBJECTS
            if start_flag == 1:
                if ".cpp" in line:
                    # seperate the line using spaces
                    split_line = line.split(' ')
                    this_item = ""
                    
                    # Go through the split line looking for .cpp
                    # NOTE(review): if no space-separated token contains
                    # ".cpp", new_this_item below would be unbound (or stale
                    # from a previous line) — assumes well-formed makefiles.
                    for item in split_line:
                        if ".cpp" in item:
                            this_item = item
                            
                            # get rid of the SOURCES
                            new_this_item = this_item.replace("SOURCES=","")
                            
                    #print "This cpp file is: " + new_this_item
                    
                    # get rid of stupid escape characters
                    this_file = LSDost.RemoveEscapeCharacters(new_this_item)
                    
                    cppfiles.append(this_file)
                    
        # now print to screen the files required for this makefile
        #print "The files required for this makefile are: "
        #print cppfiles
        
        # now append to directory...this requires some logic because of the ../ seperators
        for filename in cppfiles:
            
            #print "Filename is: " + filename            
            
            # special logic for the ../ seperator            
            if "../" in filename:
                #print "There is a lower level in this filename, this means it is an object"
                thisfile =  filename.replace("../","")
                thisdirectory = LSDost.RemoveDirectoryLevel(DataDirectory)
                fullfile = thisdirectory+thisfile
                
                # object files one level up need their header too
                fullfile2 = fullfile.replace(".cpp",".hpp")
                required_files.append(fullfile2)
            else:
                fullfile = DataDirectory+filename  
        
            # append to the required files list
            # (deliberately outside the if/else: the ../ branch appends the
            # .hpp above and then falls through to append the .cpp here)
            required_files.append(fullfile)
                
    # now thin out the required files to remove duplicates
    nd = set(required_files)
    required_files_noduplicates = list(nd)

    #print "/n/n================================="    
    #print "Required files are: " 
    #print required_files
    #print "--------"
    #print "And removing duplicates:"
    #print required_files_noduplicates
    #print "====================================="
    
    return required_files_noduplicates
def CheckFileStructuresForCopy(ObjectsDirectory,DriverDirectory,TargetDirectory):
    # Format the target directories
    Td = LSDost.ReformatSeperators(TargetDirectory)   
    TargetDirectory = LSDost.AppendSepToDirectoryPath(Td)  
    TDd = TargetDirectory + DriverDirectory
    TargetDriverDirectory = LSDost.AppendSepToDirectoryPath(TDd)

    # Format the source directories
    Od = LSDost.ReformatSeperators(ObjectsDirectory)   
    ObjectsDirectory = LSDost.AppendSepToDirectoryPath(Od)
    Dd = ObjectsDirectory+DriverDirectory
    DriverDirectory = LSDost.AppendSepToDirectoryPath(Dd)
            
    # Check if the source directories exist
    if not os.access(ObjectsDirectory,os.F_OK):
        print "The object directory for the code doesn't exist!"
        print "You wanted this directory: " + ObjectsDirectory
        return 0
    if not os.access(ObjectsDirectory,os.F_OK):
        print "The driver directory for the code doesn't exist!"
        print "You wanted this directory: " + DriverDirectory
        return 0        
    if not os.access(ObjectsDirectory+"TNT"+os.sep,os.F_OK):
        print "The TNT directory for the code doesn't exist!"
        print "You wanted this directory: " + ObjectsDirectory+"TNT"+os.sep
        return 0 
     
    # check if the target object directory exists
    if not os.access(TargetDirectory,os.F_OK):
        print "The target directory for the code doesn't exist!"
        print "You wanted this directory: " + TargetDirectory
        print "I am making that now, along with the driver directory"
        os.mkdir(TargetDirectory)
        if not os.access(TargetDirectory,os.F_OK):
            print "WTF the directory was not made??!"
        os.mkdir(TargetDriverDirectory)
        
    # check just the driver directory
    if not os.access(TargetDriverDirectory,os.F_OK):
        print "The target driver directory for the code doesn't exist!"
        print "You wanted this directory: " + TargetDriverDirectory
        print "I am making that now"
        os.mkdir(TargetDriverDirectory)    
        
    # Check if the TNT directory exists. If it does, remove and replace it
    # If it doesn't , just copy it across
    TNTTargetDirectory = TargetDirectory+'TNT'+os.sep
    TNTSourceDirectory = ObjectsDirectory+'TNT'+os.sep
    if not os.access(TNTTargetDirectory,os.F_OK):
        print "The target TNT directory for the code doesn't exist!"
        print "You wanted this directory: " + TargetDriverDirectory
        print "I am making that now"
        shutil.copytree(TNTSourceDirectory,TNTTargetDirectory)
    else:
        print "There is a TNT directory here already. Removing and replacing"
        shutil.rmtree(TNTTargetDirectory)
        shutil.copytree(TNTSourceDirectory,TNTTargetDirectory)

    print "========================="
    print "DriverDirectory: " + DriverDirectory 
    print "ObjectsDirectory: " + ObjectsDirectory
    print "TargetDirectory: " + TargetDirectory
    print "TargetDriverDirectory: " + TargetDriverDirectory
    print "========================="
        
    return ObjectsDirectory,DriverDirectory,TargetDirectory,TargetDriverDirectory 
# Exemple #21
# 0
def GDALBatchMerge(DataDirectory, merge_subfolder_name, merge_filename,
                   raster_format, target_format):

    NewDataDirectory = LSDost.ReformatSeperators(DataDirectory)
    DataDirectory = LSDost.AppendSepToDirectoryPath(NewDataDirectory)

    # get the name of the data directory into which the file should be merged
    merge_DataDirectory = DataDirectory + merge_subfolder_name
    mDataDriectory = LSDost.AppendSepToDirectoryPath(merge_DataDirectory)

    # make the directory
    if not os.access(mDataDriectory, os.F_OK):
        print "Making path: "
        os.mkdir(mDataDriectory)
        print "I made a directory: " + mDataDriectory
    else:
        print "Path: " + mDataDriectory + " already exists."

    # Check the source format
    if raster_format == "ENVI":
        raster_extension = ".bil"
    elif raster_format == "EHdr":
        raster_extension = ".bil"
    elif raster_format == "GTiff":
        raster_extension = ".tif"
    else:
        print "You have not selcted a valid raster format!"
        print "Options are ENVI, EHdr and GTiff"
        raster_extension = "NULL"

    # Check the target format. Default is geotiff
    if target_format == "ENVI":
        target_extension = ".bil"
    elif target_format == "EHdr":
        target_extension = ".bil"
    elif target_format == "GTiff":
        target_extension = ".tif"
    else:
        print "You have not selcted a valid raster format!"
        print "Defaulting to GTiff"
        target_format == "GTiff"
        target_extension = ".tif"

    # set the name of the target file
    target_FileName = mDataDriectory + merge_filename + target_extension

    # find all the dataset of the source format
    print "The data directory is: " + DataDirectory
    print "The raster extension is: " + raster_extension
    if raster_extension != "NULL":

        # Set up the list for holding command prompt commands
        command_prompt = []
        command_prompt.append("gdal_merge.py")
        command_prompt.append("-of")
        command_prompt.append(target_format)
        command_prompt.append("-o")
        command_prompt.append(target_FileName)

        for FileName in glob(DataDirectory + "*" + raster_extension):
            print "found file: " + FileName
            command_prompt.append(FileName)

        print "The subprocess call is: "
        print command_prompt
        subprocess.call(command_prompt)
def CollatenewCRONUScomparisonCRNData():

    #Directory = "C://basin_data//CosmoPaper//Results//Compiled//Brauch_vs_newCRONUS//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//Brauch_vs_newCRONUS//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)

    Fileformat = 'svg'

    # This list will store the crn data
    CRNDataList = []
    CRNprefixes = []
    PaperNames = []
    PaperColours = []
    ScalingNames = []

    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.1

    # loop through the directory, getting the results from the data
    for fname in glob(Dirname + "*_CRNResults.csv"):

        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)

        # Now get the prefix of the file
        splitfname = NoDirFname.split('_CRNResults.csv')
        fprefix = splitfname[0]

        print "File prefix is: " + fprefix

        # now read in the data
        thisCRNData = CRNR.CRNResults(fname)

        CRNDataList.append(thisCRNData)
        CRNprefixes.append(fprefix)

        # now get the prefixes
        if "Dethier" in fprefix:
            PaperNames.append("Dethier et al., 2014")
        elif "Palumbo" in fprefix:
            PaperNames.append("Palumbo et al., 2010")

        # now get lists based on this data and place into a dictionary
        if "newCRONUS" in fprefix:
            #print "I found newCRONUS in the file prefix: " + fprefix
            ScalingNames.append("newCRONUS")
        else:
            #print "Sorry, I didn't find the file prefix newCRONUS in " + fprefix
            ScalingNames.append("Braucher")

    # now get the errors
    for index, CRNObj in enumerate(CRNDataList):
        print "Looking for scaling and paper names"
        print "paper name is: " + PaperNames[index]
        print "scaling name is: " + ScalingNames[index]
        if "Dethier" in PaperNames[index]:
            if "newCRONUS" in ScalingNames[index]:
                dethier_index_newCRONUS = index

            else:
                dethier_index_braucher = index
        elif "Palumbo" in PaperNames[index]:
            if "newCRONUS" in ScalingNames[index]:
                palumbo_index_newCRONUS = index
                #print "I got the palumbo newcronus index"
            else:
                palumbo_index_braucher = index

    P_erate_brauch = CRNDataList[palumbo_index_braucher].GetErosionRates()
    print "Braucher erate palumbo: "
    print P_erate_brauch
    P_erate_newCRONUS = CRNDataList[palumbo_index_newCRONUS].GetErosionRates()
    print "newCRONUS erate palumbo: "
    print P_erate_newCRONUS
    P_erate_newCRONUS

    P_B = np.asarray(P_erate_brauch)
    P_nC = np.asarray(P_erate_newCRONUS)

    P_err = np.divide(np.subtract(P_nC, P_B), P_B)
    print "P_err: "
    print P_err

    D_erate_brauch = CRNDataList[dethier_index_braucher].GetErosionRates()
    D_erate_newCRONUS = CRNDataList[dethier_index_newCRONUS].GetErosionRates()

    D_B = np.asarray(D_erate_brauch)
    D_nC = np.asarray(D_erate_newCRONUS)

    D_err = np.divide(np.subtract(D_nC, D_B), D_B)
    print "D_err: "
    print D_err

    #print "The palumbo error is: "
    #print P_err

    #print "The dethier error is: "
    #print D_err

    #===========================================================================
    # now make plots based on these data
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig1 = plt.figure(1, facecolor='white', figsize=(3.26, 3.26))

    # generate a 120,90 grid.
    gs = GridSpec(100, 75, bottom=0.13, left=0.13, right=0.98, top=0.85)
    ax = Fig1.add_subplot(gs[10:100, 5:75])

    ax.plot(P_B,
            P_err,
            "o",
            markersize=4,
            color="magenta",
            label="Palumbo et al., 2010",
            markeredgewidth=1)
    ax.plot(D_B,
            D_err,
            "ro",
            markersize=4,
            label="Dethier et al., 2014",
            markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    ax.tick_params(axis='both', width=1)
    #ax.set_ylim([0.02,0.06])

    plt.title(
        'We are not using this in the paper!! Use CRNCAIRNvsnewCRONUS_erates.py instead!'
    )
    plt.xlabel('CAIRN denudation rate (g cm$^{-2}$ yr$^{-1}$)',
               fontsize=axis_size)
    plt.ylabel('($\epsilon_{CRCalc}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$',
               fontsize=axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles,
               labels,
               numpoints=1,
               bbox_to_anchor=(0., 1.02, 1., .102),
               loc=3,
               ncol=2,
               mode="expand",
               borderaxespad=0.)

    plt.savefig(Dirname + "CAIRNvsnewCRONUSapprox_erate.svg",
                format=Fileformat)

    Fig1.show()
# Exemple #23
# 0
def CollatenewCRONUScomparisonCRNData():

    #Directory = "C://code//git_papers//crn_basinwide_paper//Compiled_results//Brauch_vs_newCRONUS//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//Brauch_vs_newCRONUS//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)

    Fileformat = 'svg'

    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] = 0.1
    rcParams['legend.columnspacing'] = 0.1

    P_erate_newCRONUS = []
    D_erate_newCRONUS = []
    P_newCRONUS = []
    D_newCRONUS = []

    # loop through the directory, getting the results from the data
    for fname in glob(Dirname + "*_CRNResults.csv"):

        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)

        # Now get the prefix of the file
        splitfname = NoDirFname.split('_CRNResults.csv')
        fprefix = splitfname[0]

        print "File prefix is: " + fprefix

        # now read in the data
        thisCRNData = CRNR.CRNResults(fname)

        # Only get the newCRONUS dethier and Palumbo data
        # now get the prefixes
        if "Dethier" in fprefix:
            if "newCRONUS" in fprefix:
                D_erate_newCRONUS = thisCRNData.GetErosionRates_mmperkyr_rho2650(
                )
        elif "Palumbo" in fprefix:
            if "newCRONUS" in fprefix:
                P_erate_newCRONUS = thisCRNData.GetErosionRates_mmperkyr_rho2650(
                )

    # Convert the data to arrays (to calculate errors)
    P_nC_CAIRN = np.asarray(P_erate_newCRONUS)
    D_nC_CAIRN = np.asarray(D_erate_newCRONUS)

    #print "P CAIRN is: "
    #print P_nC_CAIRN

    #print "D CAIRN is: "
    #print D_nC_CAIRN

    # Now get the CRONUScalc data
    print "Entering second glob loop"
    for fname in glob(Dirname + "*.csv"):
        print "I found comparison data! Name is " + fname

    # Now get the CRONUScalc data
    print "Entering third glob loop"
    for fname in glob(Dirname + "*Comparison.csv*"):

        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)

        print "I found comparison data! Name is " + NoDirFname

        # Now get the prefix of the file
        splitfname = NoDirFname.split('_newCRONUSCAIRNComparison.csv')
        fprefix = splitfname[0]

        print "File prefix is: " + fprefix

        print "I am woking with the dataset: " + fprefix

        #See if the parameter files exist
        if os.access(fname, os.F_OK):
            this_file = open(fname, 'r')
            lines = this_file.readlines()

            # get rid fo the first line
            lines.pop(0)

            # create the lists for populating with data
            CAIRN_erate = []
            CAIRN_uncert = []
            Report_erate = []
            Report_uncert = []
            newCRONUS_erate = []
            newCRONUS_uncert = []

            # now get the data into the dict
            for line in lines:
                this_line = LSDost.RemoveEscapeCharacters(line)
                split_line = this_line.split(',')

                #print split_line[22]+" " +split_line[23]

                #print split_line

                CAIRN_erate.append(float(split_line[16]))
                CAIRN_uncert.append(float(split_line[17]))
                Report_erate.append(float(split_line[20]))
                Report_uncert.append(float(split_line[21]))
                newCRONUS_erate.append(float(split_line[22]))
                newCRONUS_uncert.append(float(split_line[23]))

        # now get the prefixes
        if fprefix == "Dethier":
            D_newCRONUS = newCRONUS_erate
        elif fprefix == "Palumbo":
            P_newCRONUS = newCRONUS_erate

    #print "P_newCRONUS is: "
    #print  P_newCRONUS
    #print "D_newCRONUS is: "
    #print  D_newCRONUS

    P_nC = np.asarray(P_newCRONUS)
    D_nC = np.asarray(D_newCRONUS)

    Perr = np.divide(np.subtract(P_nC_CAIRN, P_nC), P_nC)
    Derr = np.divide(np.subtract(D_nC_CAIRN, D_nC), D_nC)

    print "The errors are: "
    print Perr
    print Derr

    print P_nC
    print P_nC_CAIRN

    # okay, now you should have the errors

    #===========================================================================
    # now make plots based on these data
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig1 = plt.figure(1, facecolor='white', figsize=(3.26, 3.26))

    # generate a 120,90 grid.
    gs = GridSpec(100, 75, bottom=0.13, left=0.13, right=0.95, top=0.95)
    ax = Fig1.add_subplot(gs[10:100, 5:95])

    ax.plot(P_nC,
            Perr,
            "o",
            markersize=5,
            color="maroon",
            label="Palumbo et al., 2010",
            markeredgewidth=1)
    ax.plot(D_nC,
            Derr,
            "ro",
            markersize=5,
            color="lawngreen",
            label="Dethier et al., 2014",
            markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1)
    ax.tick_params(axis='both', width=1)
    #ax.set_ylim([0.02,0.06])

    #plt.title('We are not using this in the paper!! Use CRNCAIRNvsnewCRONUS_erates.py instead!')
    plt.xlabel('$\epsilon_{CRCalc}$ (g cm$^{-2}$ yr$^{-1}$)',
               fontsize=axis_size)
    plt.ylabel(
        '($\epsilon_{CAIRN-CRCalc}$-$\epsilon_{CRCalc}$)/$\epsilon_{CRCalc}$',
        fontsize=axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles, labels, numpoints=1, loc=4, ncol=1, borderaxespad=1.)

    plt.savefig(Dirname + "CAIRN_newCRONUS_emulator.svg", format=Fileformat)
def CollateCRNData():
    
    #Directory = "C://basin_data//CosmoPaper//Results//Compiled//"
    Directory = "T://Papers_LaTeX//crn_basinwide_paper//Compiled_results//"
    Dirname = LSDost.ReformatSeperators(Directory)
    Dirname = LSDost.AppendSepToDirectoryPath(Dirname)
    
    Fileformat = 'svg'
    
    # This list will store the crn data
    CRNDataList = []  
    CRNprefixes = []
    PaperNames = []
    
    label_size = 8
    axis_size = 12

    # Set up fonts for plots
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = label_size
    rcParams['xtick.major.size'] = 4    
    rcParams['ytick.major.size'] = 4
    rcParams['legend.fontsize'] = label_size
    rcParams['legend.handletextpad'] = 0.05
    rcParams['legend.labelspacing'] =0.1
    rcParams['legend.columnspacing'] =0.1
    
       
    # loop through the directory, getting the results from the data    
    for fname in glob(Dirname+"*_CRNResults.csv"):
        
        # get only the file without the data directory
        NoDirFname = LSDost.GetFileNameNoPath(fname)
        
        # Now get the prefix of the file
        splitfname = NoDirFname.split('_CRNResults.csv')
        fprefix = splitfname[0]
        
        # now produce the cronus name from this prefix
        CRONUS_name = Dirname+fprefix+"_CRONUS.csv"
        
        print "File prefix is: " + fprefix 
        print "Cronus_name is: " + CRONUS_name
        
        # now read in the data
        thisCRNData = CRNR.CRNResults(fname)        

        # read in the Cronus data and get the errors
        thisCRNData.ReadCRONUSData(CRONUS_name)
        thisCRNData.GetErrorsBetweenMethods()
        thisCRNData.GetErrorsBetweenCRONUS()
        
        CRNDataList.append(thisCRNData)
        CRNprefixes.append(fprefix)
        
        # now get the prefixes
        if fprefix == "Bierman":
            PaperNames.append("Bierman et al., 2005")
        elif fprefix == "Dethier":
            PaperNames.append("Dethier et al., 2014")
        elif fprefix == "Kirchner":
            PaperNames.append("Kirchner et al., 2001")                
        elif fprefix == "Munack":
            PaperNames.append("Munack et al., 2014")            
        elif fprefix == "Scherler":
            PaperNames.append("Scherler et al., 2014")
        elif fprefix == "Safran":
            PaperNames.append("Safran et al., 2005") 
        elif fprefix == "Palumbo":
            PaperNames.append("Palumbo et al., 2010")             
            
    #===========================================================================    
    # now make plots based on these data
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig1 = plt.figure(1, facecolor='white',figsize=(3.26,3.26))  

    # generate a 120,90 grid. 
    gs = GridSpec(100,75,bottom=0.13,left=0.13,right=0.98,top=0.85) 
    ax = Fig1.add_subplot(gs[10:100,5:75])
    
    # this gets the colors to map to specific sites
    cmap = plt.cm.jet    
    colo = 0       
    
    
    for index,CRNObj in enumerate( CRNDataList):
        colo = colo + (1.000/len(CRNprefixes))
        ax.plot(CRNObj.GetAverageCombinedScaling(),CRNObj.GetError_CR(), "o",
                markersize=4, color=cmap(colo), label = PaperNames[index],markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1) 
    ax.tick_params(axis='both', width=1) 
    
 
    
    
    plt.xlabel('Production factor ($S_{tot}$)', fontsize = axis_size)
    plt.ylabel('($\epsilon_{CR2.2}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), 
               loc=3, ncol=2, mode="expand", borderaxespad=0.)
        
    plt.savefig(Dirname+"Production_vs_error.svg",format = Fileformat)
    
    #Fig1.show()
     
    Fig1.clf()     



    # These don't work in my version of matplotlib...I must update (!)
    # have found a workaround below    
    #plt.rcParams['xtick.major.linewidth'] = 4
    #plt.rcParams['xtick.minor.width'] = 2
 

    #===========================================================================   
    # now make plots based on these data
    # 3.26 inches = 83 mm, the size of a 1 column figure
    Fig2 = plt.figure(1, facecolor='white',figsize=(3.26,3.5))  

    # generate a 120,90 grid. 
    gs = GridSpec(100,75,bottom=0.14,left=0.13,right=0.98,top=0.85) 
    ax = Fig2.add_subplot(gs[10:100,5:75])

    plt.rcParams['xtick.major.size'] = 4    
    plt.rcParams['xtick.minor.size'] = 3
    plt.rcParams['ytick.major.size'] = 4
    
    # this gets the colors to map to specific sites
    cmap = plt.cm.jet    
    colo = 0       

    for index,CRNObj in enumerate( CRNDataList):
        colo = colo + (1.000/len(CRNprefixes))
        ax.plot(CRNObj.GetErosionRates(),CRNObj.GetError_CR(), "o", markersize=4, 
                color=cmap(colo), label = PaperNames[index],markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1) 
    #ax.tick_params(axis='both', width=2.5)
    ax.set_xscale('log') 
    
    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks 
    ax.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)   

    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)  

    #for tick in ax.xaxis.get_minor_ticks():
    #    tick.tick_params(width = 2.5)  
        
        
    plt.xlabel('$\epsilon_{CAIRN}$ (g cm$^{-2}$ yr$^{-1}$)', fontsize = axis_size)
    plt.ylabel('($\epsilon_{CR2.2}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), 
               loc=3, ncol=2, mode="expand", borderaxespad=0.)    
    #plt.show()    
    plt.savefig(Dirname+"Erosion_vs_error.svg",format = Fileformat)    
    Fig2.clf()    
        
    #===========================================================================   
    # now make plots based on these data
    # --- Figure 3: COSMOCALC combined production/scaling factor (x) vs. the
    # relative difference between COSMOCALC and CAIRN erosion rates (y),
    # one marker per site, coloured by site and labelled with PaperNames.
    # NOTE(review): plt.figure(1) reuses figure number 1 (same as the earlier
    # figures in this function); this only works because each figure is
    # clf()'d after saving -- confirm before reordering these blocks.
    Fig3 = plt.figure(1, facecolor='white',figsize=(3.26,3.5))  

    # generate a 120,90 grid. 
    gs = GridSpec(100,75,bottom=0.14,left=0.14,right=0.97,top=0.85) 
    ax = Fig3.add_subplot(gs[10:100,5:75])

    # Global tick-length settings (mutates rcParams for all later figures too).
    plt.rcParams['xtick.major.size'] = 4    
    plt.rcParams['ytick.major.size'] = 4
    
    # this gets the colors to map to specific sites
    cmap = plt.cm.jet    
    colo = 0       

    # colo steps through (0, 1] so the jet colormap gives each site a distinct
    # colour; assumes len(CRNDataList) == len(CRNprefixes) -- TODO confirm.
    for index,CRNObj in enumerate( CRNDataList):
        colo = colo + (1.000/len(CRNprefixes))
        ax.plot(CRNObj.GetAverageCombinedScaling(),CRNObj.GetError_CC(), "o",
                markersize=4, color=cmap(colo), label = PaperNames[index],markeredgewidth=1)

    # Thin (1 pt) axis spines to match the journal figure style.
    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1) 
    #ax.tick_params(axis='both', width=2.5)
    
    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks 
    ax.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)   

    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)  

    #for tick in ax.xaxis.get_minor_ticks():
    #    tick.tick_params(width = 2.5)  
        
        
    plt.xlabel('Production factor ($S_{CCtot}$)', fontsize = axis_size)
    plt.ylabel('($\epsilon_{CC}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    # Legend laid out above the axes, expanded to the full axes width.
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), 
               loc=3, ncol=2, mode="expand", borderaxespad=0.)    
        
    #plt.show()    
    # NOTE(review): filename hard-codes ".svg" but format comes from
    # Fileformat -- assumes Fileformat == "svg"; verify against the caller.
    plt.savefig(Dirname+"COSMOCALC_vs_error.svg",format = Fileformat)        
    Fig3.clf() 

        
    #===========================================================================   
    # now make plots based on these data
    Fig4 = plt.figure(1, facecolor='white',figsize=(3.26,3.5))  

    # generate a 120,90 grid. 
    gs = GridSpec(100,75,bottom=0.14,left=0.14,right=0.97,top=0.85) 
    ax = Fig4.add_subplot(gs[10:100,5:75])

    plt.rcParams['xtick.major.size'] = 4   
    
    # this gets the colors to map to specific sites
    cmap = plt.cm.jet    
    colo = 0       

    for index,CRNObj in enumerate( CRNDataList):
        colo = colo + (1.000/len(CRNprefixes))
        ax.plot(CRNObj.GetAverageCombinedScaling(),CRNObj.GetError_CR_em(), "o",
                markersize=4, color=cmap(colo), label = PaperNames[index],markeredgewidth=1)

    ax.spines['top'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(1)
    ax.spines['bottom'].set_linewidth(1) 
    #ax.tick_params(axis='both', width=2.5)
    
    # This gets all the ticks, and pads them away from the axis so that the corners don't overlap
    # the which command tells the program to get major and minor ticks 
    ax.tick_params(axis='both', width=1, pad = 1, which = 'both')
    for tick in ax.xaxis.get_major_ticks():
        tick.set_pad(3)   

    for tick in ax.yaxis.get_major_ticks():
        tick.set_pad(3)  

    #for tick in ax.xaxis.get_minor_ticks():
    #    tick.tick_params(width = 2.5)  
        
        
    plt.xlabel('Production factor ($S_{CRShield}$*$S_{effp}$)', fontsize = axis_size)
    plt.ylabel('($\epsilon_{CC-CR}$-$\epsilon_{CAIRN}$)/$\epsilon_{CAIRN}$', fontsize = axis_size)
    #plt.title('Cosmocalc / New_code',fontsize = label_size+6)
    handles, labels = ax.get_legend_handles_labels()
    plt.legend(handles, labels, numpoints = 1, bbox_to_anchor=(0., 1.02, 1., .102), 
               loc=3, ncol=2, mode="expand", borderaxespad=0.)    
        
    #plt.show()    
    plt.savefig(Dirname+"COSMOCALC_CRem_vs_error.svg",format = Fileformat)           
    Fig4.clf()