Example #1
def Y3Zo(ages, errors, sigma=2):
    """
    Calculates the weighted mean age of the youngest three zircons that overlap within uncertainty at the chosen sigma level (default is 2-sigma) (see discussion in Coutts et al. (2019): Geoscience Frontiers)

    Parameters
    ----------
    ages : a 2-D array of ages, len(ages)=number of samples or sample groups
    errors : a 2-D array of 1-sigma errors for each sample or sample group, len(errors)=number of samples or sample groups
    sigma : (optional) level of uncertainty to evaluate overlap (default is 2-sigma)

    Returns
    -------
    Y3Zo : [the weighted mean age, the 2-sigma uncertainty, and the MSWD], such that len(Y3Zo) = len(ages)

    """

    # Check to see if ages is a list of arrays or just a single list of ages
    if not hasattr(ages[0], '__len__'):
        ages = [ages]
        errors = [errors]

    Y3Zo = []

    for i in range(len(ages)):

        if sigma == 1:
            data_err = list(zip(ages[i], errors[i]))
        elif sigma == 2:
            # Cast errors to an array so that list inputs are scaled element-wise
            data_err = list(zip(ages[i], np.asarray(errors[i]) * 2))
        else:
            raise ValueError('sigma must be 1 or 2')
        data_err.sort(key=lambda d: d[0] + d[1])  # Sort based on age + error at the chosen sigma level

        Y3Zo_cluster, Y3Zo_imax = find_youngest_cluster(data_err, 3)
        if sigma == 1:
            Y3Zo_WM, Y3Zo_WM_err2s, Y3Zo_WM_MSWD = dFunc.weightedMean(
                np.array([d[0] for d in Y3Zo_cluster[:3]]),
                np.array([d[1] for d in Y3Zo_cluster[:3]]))
        elif sigma == 2:
            Y3Zo_WM, Y3Zo_WM_err2s, Y3Zo_WM_MSWD = dFunc.weightedMean(
                np.array([d[0] for d in Y3Zo_cluster[:3]]),
                np.array([d[1] / 2 for d in Y3Zo_cluster[:3]]))

        # Return NaN if Y3Zo did not find a cluster
        if Y3Zo_WM == 0.0:
            Y3Zo.append([np.nan, np.nan, np.nan])
        else:
            Y3Zo.append([Y3Zo_WM, Y3Zo_WM_err2s, Y3Zo_WM_MSWD])

    return Y3Zo
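A minimal usage sketch (not part of the original snippet): it assumes Y3Zo is defined in a module where np, dFunc, and find_youngest_cluster are already available, as in the code above, and the ages and 1-sigma errors below are made up purely for illustration.

import numpy as np

# Hypothetical ages (Ma) and 1-sigma errors for a single sample
ages = np.array([101.4, 102.0, 103.1, 115.6, 240.2])
errors = np.array([1.2, 1.5, 1.0, 2.0, 3.1])

# One [weighted mean age, 2-sigma uncertainty, MSWD] entry per sample group
print(Y3Zo(ages, errors, sigma=2))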
Example #2
def Y3Za(ages, errors):
    """
    Calculates the weighted mean age of the youngest three zircons, regardless of whether they overlap within uncertainty (see discussion in Coutts et al. (2019): Geoscience Frontiers)

    Parameters
    ----------
    ages : a 2-D array of ages, len(ages)=number of samples or sample groups
    errors : a 2-D array of 1-sigma errors for each sample or sample group, len(errors)=number of samples or sample groups

    Returns
    -------
    Y3Za : [the weighted mean age, the 2-sigma uncertainty, and the MSWD], such that len(Y3Za) = len(ages)

    """

    # Check to see if ages is a list of arrays or just a single list of ages
    if not hasattr(ages[0], '__len__'):
        ages = [ages]
        errors = [errors]

    Y3Za = []

    for i in range(len(ages)):
        if len(ages[i]) < 3: # Return nulls if the sample has fewer than 3 analyses
            Y3Za.append([np.nan, np.nan, np.nan])
            continue
        data_err1s_ageSort = list(zip(ages[i], errors[i]))
        data_err1s_ageSort.sort(key=lambda d: d[0]) # Sort based on age
        Y3Za_WM, Y3Za_WMerr2s, Y3Za_WM_MSWD = dFunc.weightedMean(
            np.array([x[0] for x in data_err1s_ageSort[:3]]),
            np.array([x[1] for x in data_err1s_ageSort[:3]]))
        Y3Za.append([Y3Za_WM, Y3Za_WMerr2s, Y3Za_WM_MSWD])

    return Y3Za
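A hypothetical call with two sample groups, again assuming the module context shown above (np and dFunc importable); the values are illustrative only.

import numpy as np

sample_A = np.array([98.7, 100.2, 101.5, 150.0])
errors_A = np.array([1.1, 1.4, 1.3, 2.5])
sample_B = np.array([250.0, 252.3])  # fewer than 3 analyses, so this group returns NaNs
errors_B = np.array([3.0, 2.8])

# Returns [[WM age, 2-sigma uncertainty, MSWD], [nan, nan, nan]]
print(Y3Za([sample_A, sample_B], [errors_A, errors_B]))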
Example #3
def YC2s(ages, errors, min_cluster_size=3):
    """
    Calculates the youngest grain cluster that overlaps within 2-sigma uncertainty (see Dickinson and Gehrels (2009): Earth and Planetary Science Letters and Sharman et al. (2018): The Depositional Record for an explanation).

    Parameters
    ---------
    ages : a 2-D array of ages, len(ages)=number of samples or sample groups
    errors : a 2-D array of 1-sigma errors for each sample or sample group, len(errors)=number of samples or sample groups
    min_cluster_size : (optional) minimum number of grains in the cluster (default = 3)

    Returns
    -------
    YC2s : [the weighted mean age of the youngest cluster, the 2-sigma uncertainty of the weighted mean age of the youngest cluster, the MSWD of the youngest cluster, the number of analyses in the youngest cluster] such that len(YC2s) = len(ages)

    """

    # Check to see if ages is a list of arrays or just a single list of ages
    if not hasattr(ages[0], '__len__'):
        ages = [ages]
        errors = [errors]

    YC2s = []

    for i in range(len(ages)):

        # Cast errors to an array so that list inputs are scaled element-wise
        data_err2s = list(zip(ages[i], np.asarray(errors[i]) * 2))
        data_err2s.sort(key=lambda d: d[0] + d[1])  # Sort based on age + 2s error

        YC2s_cluster, YC2s_imax = find_youngest_cluster(
            data_err2s, min_cluster_size)
        YC2s_WM = dFunc.weightedMean(np.array([d[0] for d in YC2s_cluster]),
                                     np.array([d[1] for d in YC2s_cluster]))

        # Return NaN if YC2s did not find a cluster
        if YC2s_WM[0] == 0.0:
            YC2s.append([np.nan, np.nan, np.nan, np.nan])
        else:
            YC2s.append(
                [YC2s_WM[0], YC2s_WM[1], YC2s_WM[2],
                 len(YC2s_cluster)])

    return YC2s
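A hedged usage sketch showing the optional min_cluster_size argument (same assumed module context as above; the data are illustrative only).

import numpy as np

ages = np.array([75.2, 75.9, 76.4, 77.0, 120.5, 300.0])
errors = np.array([0.8, 1.0, 0.9, 1.1, 2.0, 4.0])

# Require at least 4 overlapping grains in the youngest cluster
result = YC2s(ages, errors, min_cluster_size=4)
print(result)  # [[WM age, 2-sigma uncertainty, MSWD, n grains]] or NaNs if no such cluster exists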
Example #4
def tauMethod(ages, errors, min_cluster_size=3, thres=0.01, minDist=1, xdif=1, chartOutput = False, x1=0, x2=4000):
    """
    Calculates the tau parameter, which is the weighted mean age of analyses that fall between probability minima (troughs) of a PDP plot (after Barbeau et al. (2009): EPSL)

    Parameters
    ----------
    ages : a 2-D array of ages, len(ages)=number of samples or sample groups
    errors : a 2-D array of 1-sigma errors for each sample or sample group, len(errors)=number of samples or sample groups
    min_cluster_size : (optional) the minimum number of analyses required to calculate a weighted mean age (default = 3)
    thres : (optional) threshold for what constitutes a peak (from 0 to 1). Default = 0.01
    minDist : (optional) minimum distance (Myr) between adjacent peaks. Default = 1
    xdif : (optional) bin size to compute PDP (default = 1 Ma)
    chartOutput : (optional) set to True to create plots
    x1 : (optional) minimum x-axis value (default = 0 Ma)
    x2 : (optional) maximum x-axis value (default = 4000 Ma)

    Returns
    -------
    tauMethod : [the weighted mean age in Ma, the 2-sigma uncertainty of the weighted mean age, the MSWD of the weighted mean age, and the number of analyses included in the weighted mean age], such that len(tauMethod) = len(ages)

    """

    import peakutils

    # Check to see if ages is a list of arrays or just a single list of ages
    if not hasattr(ages[0], '__len__'):
        ages = [ages]
        errors = [errors]

    # Calculate the PDP - note that a small xdif may be desired for increased precision
    PDP_age, PDP = dFunc.PDPcalcAges(ages, errors, xdif)

    tauMethod = []
    for i in range(len(ages)):  

        # Calculate peak indexes
        peakIndexes = list(peakutils.indexes(PDP[i], thres=thres, min_dist=minDist))
        # Peak ages
        peakAges = PDP_age[peakIndexes]
        # Number of grains per peak
        peakAgeGrain = dFunc.peakAgesGrains([peakAges], [ages[i]], [errors[i]])[0]

        # Calculate trough indexes
        troughIndexes = list(peakutils.indexes(PDP[i]*-1, thres=thres, min_dist=minDist))
        # Trough ages
        troughAges = [0] + list(PDP_age[troughIndexes]) + [4500] # Add 0 and 4500 Ma as bounding values because there is no trough on the young side of the youngest peak or on the old side of the oldest peak

        # Zip peak ages and grains per peak
        peakAgesGrains = list(zip(peakAges, peakAgeGrain))
        # Filter out peaks with less than min_cluster_size grains (default is 3, following Barbeau et al., 2009: EPSL)
        peakAgesGrainsFiltered = list(filter(lambda x: x[1] >= min_cluster_size, peakAgesGrains))

        # Stop the loop if no peaks are present with the min_cluster_size
        if peakAgesGrainsFiltered == []:
            tauMethod.append([np.nan, np.nan, np.nan, np.nan])
            continue

        # Select the nearest trough that is younger than the youngest peak with at least min_cluster_size analyses
        troughYoung = np.max(list(filter(lambda x: x < peakAgesGrainsFiltered[0][0], troughAges)))

        # Select the nearest trough that is older than the youngest peak with at least min_cluster_size analyses
        troughOld = np.min(list(filter(lambda x: x > peakAgesGrainsFiltered[0][0], troughAges)))

        # Select ages and errors that fall between troughYoung and troughOld
        ages_errors1s = list(zip(ages[i], errors[i]))
        ages_errors1s_filtered = list(filter(lambda x: x[0] < troughOld and x[0] > troughYoung, ages_errors1s))

        tauMethod_WM, tauMethod_WM_err2s, tauMethod_WM_MSWD = dFunc.weightedMean(np.array([d[0] for d in ages_errors1s_filtered]), np.array([d[1] for d in ages_errors1s_filtered]))

        tauMethod.append([tauMethod_WM, tauMethod_WM_err2s, tauMethod_WM_MSWD, len(ages_errors1s_filtered)])

        if chartOutput:
            # Create a plot output to check the results
            fig, ax = plt.subplots(1)
            ax.plot(PDP_age, PDP[i])
            ax.plot(PDP_age[peakIndexes], PDP[i][peakIndexes], 'o')
            ax.plot(PDP_age[troughIndexes], PDP[i][troughIndexes], 'o')
            ax.plot(tauMethod_WM, 0, 's')
            ax.plot([d[0] for d in ages_errors1s_filtered],
                    np.zeros(len(ages_errors1s_filtered)), 's')
            ax.set_xlim(x1, x2)  # Use the x1 and x2 parameters instead of a hard-coded range

    return tauMethod
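A hypothetical call (module context assumed as above, including dFunc.PDPcalcAges and the peakutils package; matplotlib is only needed when chartOutput=True). The ages and errors are made up for illustration.

import numpy as np

ages = np.array([65.1, 66.0, 66.8, 67.5, 90.2, 210.7])
errors = np.array([0.9, 1.1, 1.0, 1.2, 1.8, 3.5])

# chartOutput=True would also draw the PDP with its picked peaks and troughs
result = tauMethod(ages, errors, min_cluster_size=3, thres=0.01,
                   minDist=1, xdif=1, chartOutput=False)
print(result)  # [[WM age, 2-sigma uncertainty, MSWD, n analyses]]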
Example #5
def YSP(ages, errors, min_cluster_size=2, MSWD_threshold=1):
    """
    Calculates the youngest statistical population after Coutts et al. (2019): Geoscience Frontiers. The YSP is the weighted mean of the youngest group of 2 or more analyses whose MSWD is closest to the MSWD_threshold (default = 1),
    provided that the MSWD of the youngest two analyses is less than the MSWD_threshold. The algorithm first considers the youngest two analyses; if their MSWD is below the threshold, a third grain is added, and so forth.
    The final analysis to be included in the weighted mean is the one that brings the MSWD closest to the MSWD_threshold (default of 1).

    Parameters
    ----------
    ages : a 2-D array of ages, len(ages)=number of samples or sample groups
    errors : a 2-D array of 1-sigma errors for each sample or sample group, len(errors)=number of samples or sample groups
    min_cluster_size : (optional) the minimum number of analyses required to calculate an MSWD (default = 2)
    MSWD_threshold : (optional) the MSWD threshold used to select analyses (default = 1)

    Returns
    -------
    YSP : [the weighted mean age in Ma, the 2-sigma uncertainty of the weighted mean age, the MSWD of the weighted mean age, and the number of analyses included in the weighted mean age], such that len(YSP) = len(ages)

    """ 

    # Check to see if ages is a list of arrays or just a single list of ages
    if not hasattr(ages[0], '__len__'):
        ages = [ages]
        errors = [errors]   

    YSP = []
    for i in range(len(ages)): # One loop for each sample or sample group
        
        # Zip ages and errors and sort by age
        data_err1s_ageSort = list(zip(ages[i], errors[i]))
        data_err1s_ageSort.sort(key=lambda d: d[0]) # Sort based on age
        for j in range(len(data_err1s_ageSort)): # One loop for each analysis. The loop repeats, dropping the youngest analysis, if the MSWD of the youngest pair is not below MSWD_threshold.

            # Create a list of MSWD values
            MSWD = []
            for k in range(len(data_err1s_ageSort)):
                MSWD.append(dFunc.weightedMean(np.array([d[0] for d in data_err1s_ageSort[:(k+2)]]), np.array([d[1] for d in data_err1s_ageSort[:(k+2)]]))[2])

            # Add the MSWD to the (age, error) tuples
            data_err1s_MSWD = []
            for k in range(len(data_err1s_ageSort)):
                if k == 0: # Assign the first age an MSWD of 0 so it is always included in the selection
                    data_err1s_MSWD.append((data_err1s_ageSort[k][0], data_err1s_ageSort[k][1], 0))
                else: # Assign each analysis the MSWD of the group that ends with it (the previous MSWD entry), so that the filtering below returns the correct analyses
                    data_err1s_MSWD.append((data_err1s_ageSort[k][0], data_err1s_ageSort[k][1], MSWD[k-1]))

            # Exit if no YSP is found after trying every starting analysis
            if j == len(ages[i])-1:
                YSP.append([np.nan, np.nan, np.nan, np.nan])
                break

            # Find the index of the analysis with an MSWD closest to 1
            idx = (np.abs(np.array([d[2] for d in data_err1s_MSWD][1:])-MSWD_threshold)).argmin()+1 # Need to add 1 because we excluded the first one that had an assigned MSWD of 0

            # Keep analyses up to and including the one whose MSWD is closest to MSWD_threshold
            agesFiltered = data_err1s_MSWD[0:idx+1]

            YSP_WM, YSP_WM_err2s, YSP_WM_MSWD = dFunc.weightedMean(np.array([d[0] for d in agesFiltered]), np.array([d[1] for d in agesFiltered]))

            if agesFiltered[1][2] < MSWD_threshold: # The first entry is excluded because its MSWD was set to 0. The MSWD of the youngest two analyses must be below MSWD_threshold to proceed
                YSP.append([YSP_WM, YSP_WM_err2s, YSP_WM_MSWD, len(agesFiltered)])
                break
            else:
                del data_err1s_ageSort[0] # Delete the youngest analysis and try again

    return YSP
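A hedged example of adjusting MSWD_threshold (same assumptions about the module context as above; the values are purely illustrative).

import numpy as np

ages = np.array([33.2, 33.9, 34.5, 35.1, 60.0])
errors = np.array([0.5, 0.6, 0.4, 0.7, 1.5])

# Select the youngest group whose MSWD falls closest to 0.8 rather than 1
print(YSP(ages, errors, min_cluster_size=2, MSWD_threshold=0.8))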