Code example #1
def best_fit_lines_Zijderveld(sc):
    """
    Function to calculate the direction of the best-fit lines for the Zijderveld diagram, using the free-floating direction

    input: free-floating direction (Mdec_free, Minc_free) and the center-of-mass vector (CMvec), used to determine the size of the plot
    output: the (x, y) coordinates of the horizontal (line_H_) and vertical (line_V_) lines of the Zijderveld diagram, for the two plot options, Up-North (UpN) and Up-West (UpW)
    """
    # input:    directional_statistics/ mean_dir_stat[CMvec, Mdec_free, Minc_free ]
    # output:   best_fit_lines/         best_fit_lines_Zijderveld[line_H_UpN, line_V_UpN, line_H_UpW, line_V_UpW]

    CMvec = sc["directional_statistics"]["mean_dir_stat"]["CMvec"]
    Mdec_free = sc["directional_statistics"]["mean_dir_stat"]["Mdec_free"]
    Minc_free = sc["directional_statistics"]["mean_dir_stat"]["Minc_free"]

    # first find the order of magnitude for the end points of the lines; they should lie outside the plotted area of the Zijderveld diagram
    # take the maximum absolute value of the center-of-mass vector and go one order of magnitude bigger, to be sure the points fall outside the plot
    Abs_CMvec = [abs(c) for c in CMvec]

    M = max(Abs_CMvec) * 10

    # get the direction vector with the correct magnitude M
    Dvec = helpers.dir2cart(Mdec_free, Minc_free, M)

    # Center of mass - Direction vector is the first point
    P1 = helpers.list_min_list(CMvec, Dvec)

    # Center of mass + Direction vector is the second point
    P2 = helpers.list_plus_list(CMvec, Dvec)

    # North Up
    line_H_UpN = [[P1[1], P1[0]], [P2[1], P2[0]]]
    line_V_UpN = [[P1[1], -1 * P1[2]], [P2[1], -1 * P2[2]]]

    # West Up
    line_H_UpW = [[P1[0], -1 * P1[1]], [P2[0], -1 * P2[1]]]
    line_V_UpW = [[P1[0], -1 * P1[2]], [P2[0], -1 * P2[2]]]

    sc["best_fit_lines"]["best_fit_lines_Zijderveld"][
        "line_H_UpN"] = line_H_UpN
    sc["best_fit_lines"]["best_fit_lines_Zijderveld"][
        "line_V_UpN"] = line_V_UpN

    sc["best_fit_lines"]["best_fit_lines_Zijderveld"][
        "line_H_UpW"] = line_H_UpW
    sc["best_fit_lines"]["best_fit_lines_Zijderveld"][
        "line_V_UpW"] = line_V_UpW
    return sc
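
The routine above relies on a small helpers module (dir2cart, list_plus_list, list_min_list). As a minimal sketch of how those helpers could look, assuming they operate on plain 3-element [x, y, z] lists and that dir2cart follows the usual paleomagnetic convention (the real helpers module may differ):

import math

def dir2cart(dec, inc, M=1.0):
    # declination/inclination in degrees and magnitude M -> [x, y, z]
    dec, inc = math.radians(dec), math.radians(inc)
    return [M * math.cos(inc) * math.cos(dec),
            M * math.cos(inc) * math.sin(dec),
            M * math.sin(inc)]

def list_plus_list(a, b):
    # element-wise sum of two equal-length lists
    return [ai + bi for ai, bi in zip(a, b)]

def list_min_list(a, b):
    # element-wise difference a - b
    return [ai - bi for ai, bi in zip(a, b)]

# e.g. the two end points of a best-fit line through the center of mass
CMvec = [1.0, 2.0, -0.5]
Dvec = dir2cart(40.0, -10.0, 35.0)
P1, P2 = list_min_list(CMvec, Dvec), list_plus_list(CMvec, Dvec)
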
Code example #2
    def params_noCorr_corr(c_str, alpha, m0, m1, m2, m3, m4):
        Q_DB = []
        Q_DSC = []
        mu_ds = []
        H_max = []
        H_est = []
        Eps_alt = []
        Eps_alt_abs = []
        Err_alt = []
        Err_ds = []
        Err_total = []
        Err_alt_abs = []

        num_specimens = len(m0)
        for i in range(num_specimens):

            name = m0[i]["specimen"]  # specimen name is the same for m0-m4
            # the x-axis coordinate is the AF lab field of the m1-m4 steps
            H_lab = m1[i]["lab_field"]

            # do the calculations
            m_m0 = m0[i]["total_m"]
            m_m1 = m1[i]["total_m"]
            m_m2 = m2[i]["total_m"]
            m_m3 = m3[i]["total_m"]
            m_m4 = m4[i]["total_m"]

            if (m_m2 is None) or (m_m3 is None) or (m_m4 is None):
                Q_DB.append([name, H_lab, (m_m1 - m_m0) / m_m0])
                Q_DSC.append([name, None, None])
                mu_ds.append([name, None])
                H_max.append([name, None])
                H_est.append([name, None])
                Eps_alt.append([name, None])
                Eps_alt_abs.append([name, None])
                Err_alt.append([name, None])
                Err_ds.append([name, None])
                Err_total.append([name, None])

            else:  # calculate also the Q_DSC ratio and all the parameters

                Q_DB.append([name, H_lab, (m_m1 - m_m0) / m_m0])  # no corrected version, only "normal version"

                # check whether the corrected or the uncorrected version should be used for the Q_DSC & parameter calculations
                if (c_str == "_corr"):
                    m0M = [m0[i]["x"], m0[i]["y"], m0[i]["z"]]
                    m1M = [m1[i]["x"], m1[i]["y"], m1[i]["z"]]
                    m2M = [m2[i]["x"], m2[i]["y"], m2[i]["z"]]
                    m3M = [m3[i]["x"], m3[i]["y"], m3[i]["z"]]
                    m4M = [m4[i]["x"], m4[i]["y"], m4[i]["z"]]

                    NRMrem = helpers.list_mult_num(
                        helpers.list_plus_list(m1M, m2M), 0.5)

                    m1pTRM = helpers.list_min_list(m1M, NRMrem)
                    m2pTRM = helpers.list_min_list(m2M, NRMrem)
                    m3pTRM = helpers.list_min_list(m3M, NRMrem)
                    m4pTRM = helpers.list_min_list(m4M, NRMrem)

                    m_m0 = m0[i]["total_m"]  # m_m0_corr
                    m_m1 = helpers.norm(NRMrem) + helpers.norm(m1pTRM)  # m_m1_corr
                    m_m2 = helpers.norm(NRMrem) - helpers.norm(m2pTRM)  # exception to the rule
                    m_m3 = helpers.norm(NRMrem) + helpers.norm(m3pTRM)
                    m_m4 = helpers.norm(NRMrem) + helpers.norm(m4pTRM)

                Q_DSC.append([
                    name, H_lab,
                    2 * ((1 + alpha) * m_m1 - m_m0 - alpha * m_m3) /
                    (2 * m_m0 - m_m1 - m_m2)
                ])
                mu_ds.append(
                    [name, (m_m1 - m_m3) / (m_m3 - 0.5 * (m_m1 + m_m2))])
                H_max.append(
                    [name, (2 * m_m0 - m_m1 - m_m2) / (m_m1 - m_m2) * H_lab])
                H_est.append([
                    name, (2 * m_m0 - m_m1 - m_m2) /
                    ((1 + 2 * alpha) * m_m1 - 2 * alpha * m_m3 - m_m2) * H_lab
                ])

                Eps = (m_m4 - m_m1) / m_m1

                Eps_alt.append([name, Eps])
                Eps_alt_abs.append([name, abs((m_m1 - m_m4) / m_m1)])

                # calculate the error estimates
                # numerator & denominator of Q_DSC
                num = 2 * ((1 + alpha) * m_m1 - m_m0 - alpha * m_m3)
                den = (2 * m_m0) - m_m1 - m_m2

                # partial derivatives of Q_DSC
                d_num_m1 = 2 * (1 + alpha)
                d_num_m2 = 0
                d_num_m3 = -2 * alpha

                d_den_m1 = -1
                d_den_m2 = -1
                d_den_m3 = 0

                # terms
                Term_1 = m_m1 * ((den * d_num_m1) - (num * d_den_m1)) / den**2
                Term_2 = m_m2 * ((den * d_num_m2) - (num * d_den_m2)) / den**2
                Term_3 = m_m3 * ((den * d_num_m3) - (num * d_den_m3)) / den**2

                dQ_DSC_alt = Eps**2 * (Term_1**2 + Term_2**2 + Term_3**2)

                dQ_DSC_ds = (((m_m3 - m_m1) / den)**2) / 3

                Err_alt.append([name, dQ_DSC_alt])
                Err_ds.append([name, dQ_DSC_ds])
                Err_total.append([name, dQ_DSC_alt + dQ_DSC_ds])

        return [
            Q_DB, Q_DSC, mu_ds, H_max, H_est, Eps_alt, Eps_alt_abs, Err_alt,
            Err_ds, Err_total
        ]
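
To make the ratios above concrete, a short worked example of the Q_DB and Q_DSC expressions with hypothetical moments and a hypothetical alpha (the numbers are for illustration only and do not come from real data):

# hypothetical total moments of one specimen for the m0-m3 steps (arbitrary units)
m_m0, m_m1, m_m2, m_m3 = 1.00, 1.10, 0.85, 1.15
alpha = 0.5
H_lab = 40.0  # hypothetical lab field

Q_DB = (m_m1 - m_m0) / m_m0  # = 0.10
Q_DSC = 2 * ((1 + alpha) * m_m1 - m_m0 - alpha * m_m3) / (2 * m_m0 - m_m1 - m_m2)
# = 2 * (1.65 - 1.00 - 0.575) / (2.00 - 1.10 - 0.85) = 0.15 / 0.05 = 3.0
H_est = (2 * m_m0 - m_m1 - m_m2) / ((1 + 2 * alpha) * m_m1 - 2 * alpha * m_m3 - m_m2) * H_lab
# = 0.05 / 0.20 * 40.0 = 10.0
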
Code example #3
    def specimen_fail_pass(Q_str, c_str, site, Boot_min, Boot_max):

        # split in measurements m0 m1 m2 m3 m4 with multiple specimens per list
        m0 = list(filter(lambda m: m['type'] == 0, site))
        m1 = list(filter(lambda m: m['type'] == 1, site))
        m2 = list(filter(lambda m: m['type'] == 2, site))
        m3 = list(filter(lambda m: m['type'] == 3, site))
        m4 = list(filter(lambda m: m['type'] == 4, site))

        # determine whether the specimen lies above or below the bootstrap interval and mark it FAIL or PASS

        num_specimens = len(m0)

        # calculate original Q_DB & Q_DSC (also corrected) for ALL specimens
        Hlab = []
        Q_DB = []
        Q_DSC = []
        B_specimen_pass_fail = []
        for i in range(num_specimens):
            # get labfield and name specimen
            Hlab = m1[i]["lab_field"]
            name = m0[i]["specimen"]  # specimen name is the same for m0-m4

            m_m0 = m0[i]["total_m"]
            m_m1 = m1[i]["total_m"]
            m_m2 = m2[i]["total_m"]
            m_m3 = m3[i]["total_m"]
            m_m4 = m4[i]["total_m"]

            if (m_m2 is None) or (m_m3 is None) or (m_m4 is None):
                Q_DB.append([name, Hlab, (m_m1 - m_m0) / m_m0])
                Q_DSC.append([name, None, None])
            else:
                # MSP-DSC calculate  Q_DB & Q_DSC (check for corr)

                Q_DB.append([name, Hlab, (m_m1 - m_m0) / m_m0])  # no Corr

                if (c_str == "_corr"):
                    m0M = [m0[i]["x"], m0[i]["y"], m0[i]["z"]]
                    m1M = [m1[i]["x"], m1[i]["y"], m1[i]["z"]]
                    m2M = [m2[i]["x"], m2[i]["y"], m2[i]["z"]]
                    m3M = [m3[i]["x"], m3[i]["y"], m3[i]["z"]]
                    m4M = [m4[i]["x"], m4[i]["y"], m4[i]["z"]]

                    NRMrem = helpers.list_mult_num(
                        helpers.list_plus_list(m1M, m2M), 0.5)

                    m1pTRM = helpers.list_min_list(m1M, NRMrem)
                    m2pTRM = helpers.list_min_list(m2M, NRMrem)
                    m3pTRM = helpers.list_min_list(m3M, NRMrem)
                    m4pTRM = helpers.list_min_list(m4M, NRMrem)

                    m_m0 = m0[i]["total_m"]  # m_m0_corr
                    m_m1 = helpers.norm(NRMrem) + helpers.norm(m1pTRM)  # m_m1_corr
                    m_m2 = helpers.norm(NRMrem) - helpers.norm(m2pTRM)  # exception to the rule
                    m_m3 = helpers.norm(NRMrem) + helpers.norm(m3pTRM)
                    m_m4 = helpers.norm(NRMrem) + helpers.norm(m4pTRM)

                Q_DSC.append([
                    name, Hlab,
                    2 * ((1 + alpha) * m_m1 - m_m0 - alpha * m_m3) /
                    (2 * m_m0 - m_m1 - m_m2)
                ])

        # loop over all specimens and find the closest lab fields in Boot_min and Boot_max
        for i in range(num_specimens):
            Hsam = Q_DB[i][1]
            if (Q_str == "DB"):  # get the y for specimen for Q_DB
                ysam = Q_DB[i][2]
            elif (Q_str == "DSC"):  # get the y for specimen for Q_DSC
                ysam = Q_DSC[i][2]

            name = m0[i]["specimen"]  # specimen name is the same for m0-m4

            if ysam is None:
                B_specimen_pass_fail.append([name, "None"])
            else:
                # find the interval of the bootstrap bounds that brackets Hsam
                ind = None
                for j in range(len(Boot_min) - 1):
                    if (Boot_min[j][0] <= Hsam) and (Boot_min[j + 1][0] > Hsam):
                        ind = j

                if ind is None:
                    # Hsam lies outside the bootstrapped field range, no interpolation possible
                    B_specimen_pass_fail.append([name, "None"])
                    continue

                # linearly interpolate the lower and upper bound at Hsam
                a_min = (Boot_min[ind + 1][1] - Boot_min[ind][1]) / (
                    Boot_min[ind + 1][0] - Boot_min[ind][0])
                ycalc_min = (Hsam - Boot_min[ind][0]) * a_min + Boot_min[ind][1]

                a_max = (Boot_max[ind + 1][1] - Boot_max[ind][1]) / (
                    Boot_max[ind + 1][0] - Boot_max[ind][0])
                ycalc_max = (Hsam - Boot_max[ind][0]) * a_max + Boot_max[ind][1]

                if (ysam > ycalc_max) or (ysam < ycalc_min):
                    B_specimen_pass_fail.append([name, "fail"])
                else:
                    B_specimen_pass_fail.append([name, "pass"])

        return [B_specimen_pass_fail]
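
Boot_min and Boot_max are lists of [lab_field, Q] pairs sorted by field, and the pass/fail test linearly interpolates them at the specimen's lab field Hsam. A minimal standalone sketch of that interpolation, with hypothetical envelope points:

def interp_envelope(H, env):
    # env: list of [H, y] pairs sorted by H; return y at H by linear
    # interpolation between the two bracketing points, or None outside the range
    for j in range(len(env) - 1):
        if env[j][0] <= H < env[j + 1][0]:
            a = (env[j + 1][1] - env[j][1]) / (env[j + 1][0] - env[j][0])
            return (H - env[j][0]) * a + env[j][1]
    return None

Boot_min = [[0.0, -0.5], [10.0, -0.2], [20.0, 0.1], [30.0, 0.4]]  # hypothetical lower bound
print(interp_envelope(15.0, Boot_min))  # ≈ -0.05
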
Code example #4
    def boostrap(Q_str, c_str, site, selection, alpha, NumCycles, Confidence):
        # split in measurements m0 m1 m2 m3 m4 with multiple specimens per list
        m0 = list(filter(lambda m: m['type'] == 0, selection))
        m1 = list(filter(lambda m: m['type'] == 1, selection))
        m2 = list(filter(lambda m: m['type'] == 2, selection))
        m3 = list(filter(lambda m: m['type'] == 3, selection))
        m4 = list(filter(lambda m: m['type'] == 4, selection))

        m1_all = list(filter(lambda m: m['type'] == 1, site))

        # get the steps for the lab-field array; this is done by looking at all the data from one site and finding the minimum and maximum lab fields used

        fields = []
        num_specimens = len(m1_all)  # all the data and not only the selection
        for j in range(num_specimens):
            fields.append(m1_all[j]["lab_field"])  # append all used labfields

        # find min and max labfield used and determine the step
        minField = min(fields)
        maxField = max(fields)
        numsteps = 11  # Moster et al., 2015 shows that 11 lab steps give the best results
        step = (minField + maxField) / (numsteps - 1.)  # this is (Hmin + Hmax) / 10

        # append the step to a list of labfields -> Hlist
        Hlist = []
        for i in range(numsteps):
            Hlist.append(i * step)

        # set minimum standard deviation for Hlab
        stdevH_min = 10

        N2 = []
        stdevHl = []
        aa = []
        bb = []
        intercept = []

        H0 = []
        H1 = []
        H2 = []
        H3 = []
        H4 = []
        H5 = []
        H6 = []
        H7 = []
        H8 = []
        H9 = []
        H10 = []

        m = 0
        killCounter = 0
        while m < (NumCycles) and killCounter < (NumCycles * 5):

            Hlab_DB = []
            Hlab_DSC = []
            Q_DB_error = []
            Q_DSC_error = []

            num_specimens = len(m0)

            for j in range(num_specimens):  # get N times a random specimen

                # get the index of a random specimen
                i = int(helpers.rand_num() * num_specimens)  # random index between 0 and N-1

                # get moment per random specimen
                m_m0 = m0[i]["total_m"]
                m_m1 = m1[i]["total_m"]

                # get corresponding error for that specimen, for Q_DB only m0 & m1
                e_m0 = m0[i]["error"]
                e_m1 = m1[i]["error"]

                # calculate new m0_err and m1_err to calculate new Q_DB_error
                frac_m0 = helpers.rand_num() * (0.02 * e_m0) + 1 - 0.01 * e_m0
                m0_err = frac_m0 * m_m0

                frac_m1 = helpers.rand_num() * (0.02 * e_m1) + 1 - 0.01 * e_m1
                m1_err = frac_m1 * m_m1

                Q_DB_error.append((m1_err - m0_err) / m0_err)
                Hlab_DB.append(m1[i]["lab_field"])

                if Q_str == "DSC":
                    if m2[i]["total_m"] is not None:

                        m_m2 = m2[i]["total_m"]
                        m_m3 = m3[i]["total_m"]
                        m_m4 = m4[i]["total_m"]

                        e_m2 = m2[i]["error"]
                        e_m3 = m3[i]["error"]
                        e_m4 = m4[i]["error"]

                        # and check for the corrected version, if so replace the moments
                        if (c_str == "_corr"):
                            m0M = [m0[i]["x"], m0[i]["y"], m0[i]["z"]]
                            m1M = [m1[i]["x"], m1[i]["y"], m1[i]["z"]]
                            m2M = [m2[i]["x"], m2[i]["y"], m2[i]["z"]]
                            m3M = [m3[i]["x"], m3[i]["y"], m3[i]["z"]]
                            m4M = [m4[i]["x"], m4[i]["y"], m4[i]["z"]]

                            NRMrem = helpers.list_mult_num(
                                helpers.list_plus_list(m1M, m2M), 0.5)

                            m1pTRM = helpers.list_min_list(m1M, NRMrem)
                            m2pTRM = helpers.list_min_list(m2M, NRMrem)
                            m3pTRM = helpers.list_min_list(m3M, NRMrem)
                            m4pTRM = helpers.list_min_list(m4M, NRMrem)

                            m_m0 = m0[i]["total_m"]  # m_m0_corr
                            m_m1 = helpers.norm(NRMrem) + helpers.norm(
                                m1pTRM)  # m_m1_corr
                            m_m2 = helpers.norm(NRMrem) - helpers.norm(
                                m2pTRM)  # exception to the rule
                            m_m3 = helpers.norm(NRMrem) + helpers.norm(m3pTRM)
                            m_m4 = helpers.norm(NRMrem) + helpers.norm(m4pTRM)

                        frac_m0 = helpers.rand_num() * (0.02 * e_m0) + 1 - 0.01 * e_m0
                        m0_err = frac_m0 * m_m0

                        frac_m1 = helpers.rand_num() * (0.02 * e_m1) + 1 - 0.01 * e_m1
                        m1_err = frac_m1 * m_m1

                        frac_m2 = helpers.rand_num() * (0.02 * e_m2) + 1 - 0.01 * e_m2
                        m2_err = frac_m2 * m_m2

                        frac_m3 = helpers.rand_num() * (0.02 * e_m3) + 1 - 0.01 * e_m3
                        m3_err = frac_m3 * m_m3

                        Q_DSC_error.append(
                            2 *
                            ((1 + alpha) * m1_err - m0_err - alpha * m3_err) /
                            (2 * m0_err - m1_err - m2_err))
                        Hlab_DSC.append(m2[i]["lab_field"])

            if (Q_str == "DB"):
                Q_error = Q_DB_error
                Hlab = Hlab_DB
            elif (Q_str == "DSC"):
                Q_error = Q_DSC_error
                Hlab = Hlab_DSC

            N = len(Q_error)

            if N > 1:
                avgH = sum(Hlab) / N

                # calculate standard deviation on Hlab, and determine x and y
                stdevH1 = []
                x = []
                y = []
                for k in range(N):
                    stdevH1.append((Hlab[k] - avgH)**2)
                    x.append(Hlab[k])
                    y.append(Q_error[k])
                stdevH = math.sqrt(sum(stdevH1) / (N - 1))

                # calculate Sx, Sy, Sxx, Syy, Sxy
                Sx = sum(x)
                Sy = sum(y)
                Sxy = helpers.dot_product(x, y)
                Sxx = helpers.dot_product(x, x)

                # calculate the linear fit if not all measurements are at the same Hlab
                if stdevH > stdevH_min:
                    b = (N * Sxy - Sx * Sy) / (N * Sxx - Sx**2)
                    a = Sy / N - b * Sx / N

                    PI = -1 * a / b

                    N2.append(N)
                    stdevHl.append(stdevH)
                    aa.append(a)
                    bb.append(b)
                    intercept.append(PI)

                    H0.append(a + b * Hlist[0])
                    H1.append(a + b * Hlist[1])
                    H2.append(a + b * Hlist[2])
                    H3.append(a + b * Hlist[3])
                    H4.append(a + b * Hlist[4])
                    H5.append(a + b * Hlist[5])
                    H6.append(a + b * Hlist[6])
                    H7.append(a + b * Hlist[7])
                    H8.append(a + b * Hlist[8])
                    H9.append(a + b * Hlist[9])
                    H10.append(a + b * Hlist[10])

                    # end of a successful bootstrap cycle: increment m (this must stay inside the if statement)
                    m += 1
            killCounter += 1

        # sort columns and apply cut-off
        cutOffValue = 0.01 * (100 - Confidence) / 2
        cutOff = int(NumCycles * cutOffValue)

        H0.sort()
        H1.sort()
        H2.sort()
        H3.sort()
        H4.sort()
        H5.sort()
        H6.sort()
        H7.sort()
        H8.sort()
        H9.sort()
        H10.sort()

        Q_Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9, H10]

        # determine the average of the bootstrap over the 11 labfields
        # take the average of each of the labfield specified in Q_Hlist

        Boot_int_min = []
        Boot_int_max = []
        Boot_avg = []

        if len(Q_Hlist[0]) != 0:
            h = 0
            for el in Q_Hlist:
                Boot_avg.append([Hlist[h], sum(el) / len(el)])
                h += 1

            F = cutOff  # the minimum value F first
            L = m - cutOff - 1  # the maximum value L last ( -1 because python counts from 0)

            y_min = []
            y_max = []
            for w in range(len(Q_Hlist)):
                y_min.append(Q_Hlist[w][F])
                y_max.append(Q_Hlist[w][L])

            for w in range(len(Hlist)):
                Boot_int_min.append([Hlist[w], y_min[w]])
                Boot_int_max.append([Hlist[w], y_max[w]])

            # determine the x axis intercept for lower bound
            ind_min = 999
            for i in range(len(y_min) - 1):
                if (y_min[i] < 0) and (y_min[i + 1] > 0):
                    ind_min = i

            if ind_min == 999:
                ictLow = None
            else:
                slope_min = (y_min[ind_min + 1] - y_min[ind_min]) / (
                    Hlist[ind_min + 1] - Hlist[ind_min])
                ictLow = -1 * (y_min[ind_min] -
                               Hlist[ind_min] * slope_min) / slope_min

            # determine the x axis intercept for upper bound
            ind_max = 999
            for j in range(len(y_max) - 1):
                if (y_max[j] < 0) and (y_max[j + 1] > 0):
                    ind_max = j

            if ind_max == 999:
                ictHigh = None
            else:
                slope_max = (y_max[ind_max + 1] - y_max[ind_max]) / (
                    Hlist[ind_max + 1] - Hlist[ind_max])
                ictHigh = -1 * (y_max[ind_max] -
                                Hlist[ind_max] * slope_max) / slope_max

            # write corresponding PI min and max values, these are the intercepts of the bootstrap intervals
            PI_min = ictHigh
            PI_max = ictLow
        else:
            PI_min = None
            PI_max = None

        return [PI_min, PI_max, Boot_int_min, Boot_int_max, Boot_avg]
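
Each bootstrap cycle above fits a straight line y = a + b * H through the resampled (Hlab, Q_error) points, and the paleointensity estimate is the x-axis intercept -a / b. The same least-squares formulas (Sx, Sy, Sxy, Sxx) in isolation, on hypothetical points:

# hypothetical (H_lab, Q) points, for illustration only
x = [10.0, 20.0, 30.0, 40.0]
y = [-3.0, -1.0, 1.0, 3.0]

N = len(x)
Sx, Sy = sum(x), sum(y)
Sxy = sum(xi * yi for xi, yi in zip(x, y))
Sxx = sum(xi * xi for xi in x)

b = (N * Sxy - Sx * Sy) / (N * Sxx - Sx**2)  # slope of the linear fit
a = Sy / N - b * Sx / N                      # intercept
PI = -a / b  # x-axis intercept = paleointensity estimate, ≈ 25.0 for these points
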
Code example #5
def dpal_ptrm_check_stat(sc):
    """
    Function to calculate the d_pal statistic: a measure of cumulative alteration, determined by the difference between the alteration-corrected intensity estimate (Valet et al., 1996) and the uncorrected estimate, normalized by the uncorrected estimate (Leonhardt et al., 2004a).

    input: slope of the best-fit line, ptrm_checks, ptrm, y_nrm, yBar
    output: d_pal
    """
    # input:    preprocessed/       checks[ptrm_check]
    #           preprocessed/       basics[ptrm, y_nrm, yBar]
    #           arai_statistics/    PI_est[b_slope]
    # output:   check_statistics/   dpal_ptrm_check_stat[d_pal]

    ptrm_check = sc["preprocessed"]["checks"]["ptrm_check"]
    ptrm = sc["preprocessed"]["basics"]["ptrm"]
    y_nrm = sc["preprocessed"]["basics"]["y_nrm"]
    yBar = sc["preprocessed"]["basics"]["yBar"]
    b_slope = sc["arai_statistics"]["PI_est"]["b_slope"]

    d_pal = []
    dT = []

    # first check if there are ptrm_checks in the data; if not, do nothing
    if len(ptrm_check) != 0:
        # first determine d_pTRM, the vector difference ptrm - ptrm_check for each step; if no ptrm check was performed this is [0, 0, 0]
        s = 0
        sprev = 0
        for T in ptrm:  # loop through the calculated ptrm-gained specimen data
            for C in ptrm_check:
                if (T["step"] == C["step"]):
                    dT.append(
                        [T["x"] - C["x"], T["y"] - C["y"], T["z"] - C["z"]])
                    s += 1
            if s <= sprev:
                dT.append([0, 0, 0])
            sprev = s

        # make the cumulative sum vector C
        C = []
        Ci = [0, 0, 0]
        for i in range(len(ptrm)):
            Ci = helpers.list_plus_list(Ci, dT[i])
            C.append(Ci)

        ptrm_list = []
        # add C to TRM
        for T in ptrm:
            ptrm_list.append([T["x"], T["y"], T["z"]])

        TRM_star = []
        for j in range(len(ptrm_list)):
            TRM_star.append(helpers.list_plus_list(ptrm_list[j], C[j]))

        x_ptrm_star = []
        for i in range(len(TRM_star)):
            x_ptrm_star.append(helpers.norm(TRM_star[i]))

        # calculate the "new slope"
        # copied from sc_arai_statistics
        n = len(x_ptrm_star)
        xBar_star = sum(x_ptrm_star) / len(x_ptrm_star)

        # Part (1) of b_slope equation
        sum_xy = 0
        for i in range(0, len(x_ptrm_star)):
            sum_xy += (x_ptrm_star[i] - xBar_star) * (y_nrm[i] - yBar)

        if sum_xy < 0:
            sign = -1
        elif sum_xy > 0:
            sign = 1
        else:
            sign = 0

        # part (2) of b_slope equation: sumx and sumy
        sumx = 0
        sumy = 0
        for i in range(0, len(x_ptrm_star)):
            sumx += (x_ptrm_star[i] - xBar_star)**2
            sumy += (y_nrm[i] - yBar)**2

        # part(1) * part(2) gives b_slope
        b_slope_star = sign * math.sqrt(sumy / sumx)

        ## stop copy
        d_pal = abs((b_slope - b_slope_star) / b_slope) * 100

    sc["check_statistics"]["dpal_ptrm_check_stat"]["d_pal"] = d_pal
    return sc
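
The core of d_pal is the cumulative correction vector C built from the ptrm / ptrm_check differences, followed by a relative slope change expressed in percent. A minimal sketch of both steps with hypothetical numbers (illustration only):

# hypothetical per-step vector differences (ptrm - ptrm_check);
# [0, 0, 0] for steps without a check
dT = [[0, 0, 0], [0.01, 0.0, -0.02], [0, 0, 0], [0.03, -0.01, 0.0]]

# running (cumulative) sum, as in the C vector above
C, Ci = [], [0, 0, 0]
for d in dT:
    Ci = [ci + di for ci, di in zip(Ci, d)]
    C.append(Ci)
# C[-1] ≈ [0.04, -0.01, -0.02]

# d_pal compares the corrected slope with the original Arai-plot slope
b_slope = -0.52       # hypothetical original slope
b_slope_star = -0.47  # hypothetical slope after adding C to the pTRM vectors
d_pal = abs((b_slope - b_slope_star) / b_slope) * 100  # ≈ 9.6 %
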
Code example #6
    def params_noCorr_corr(c_str, alpha, m0, m1, m2, m3, m4):
        Q_DB = []
        Q_DSC = []
        mu_ds = []
        H_max = []
        H_est = []
        Eps_alt = []
        Eps_alt_abs = []
        Err_alt = []
        Err_ds = []
        Err_total = []
        Err_alt_abs = []

        num_specimens = len(m0)
        for i in range(num_specimens):

            name = m0[i]["specimen"]  # specimen name is the same for m0-m4
            # the x-axis coordinate is the AF lab field of the m1-m4 steps
            H_lab = m1[i]["lab_field"]

            # do the calculations
            m_m0 = m0[i]["total_m"]
            m_m1 = m1[i]["total_m"]
            m_m2 = m2[i]["total_m"]
            m_m3 = m3[i]["total_m"]
            m_m4 = m4[i]["total_m"]

            if (m_m2 is None) or (m_m3 is None) or (m_m4 is None):
                Q_DB.append([name, H_lab, (m_m1 - m_m0) / m_m0])
                Q_DSC.append([name, None, None])
                Eps_alt.append([name, None])

            else:  # calculate also the Q_DSC ratio and all the parameters
                Q_DB.append([name, H_lab, (m_m1 - m_m0) / m_m0])  # no corrected version, only "normal version"

                # first check whether the corrected or the uncorrected version should be used for the Q_DSC & parameter calculations
                if (c_str == "_corr"):
                    # calculate corrections
                    m0M = [m0[i]["x"], m0[i]["y"], m0[i]["z"]]
                    m1M = [m1[i]["x"], m1[i]["y"], m1[i]["z"]]
                    m2M = [m2[i]["x"], m2[i]["y"], m2[i]["z"]]
                    m3M = [m3[i]["x"], m3[i]["y"], m3[i]["z"]]
                    m4M = [m4[i]["x"], m4[i]["y"], m4[i]["z"]]

                    NRMrem = helpers.list_mult_num(
                        helpers.list_plus_list(m1M, m2M), 0.5)

                    m1pTRM = helpers.list_min_list(m1M, NRMrem)
                    m2pTRM = helpers.list_min_list(m2M, NRMrem)
                    m3pTRM = helpers.list_min_list(m3M, NRMrem)
                    m4pTRM = helpers.list_min_list(m4M, NRMrem)

                    m_m0 = m0[i]["total_m"]  # m_m0_corr
                    m_m1 = helpers.norm(NRMrem) + helpers.norm(m1pTRM)  # m_m1_corr
                    m_m2 = helpers.norm(NRMrem) - helpers.norm(m2pTRM)  # exception to the rule
                    m_m3 = helpers.norm(NRMrem) + helpers.norm(m3pTRM)
                    m_m4 = helpers.norm(NRMrem) + helpers.norm(m4pTRM)

                Q_DSC.append([
                    name, H_lab,
                    2 * ((1 + alpha) * m_m1 - m_m0 - alpha * m_m3) /
                    (2 * m_m0 - m_m1 - m_m2)
                ])

                Eps = (m_m4 - m_m1) / m_m1
                Eps_alt.append([name, Eps])

        sc["MSP_Q_calc" + c_str]["Q_DB"] = Q_DB
        sc["MSP_Q_calc" + c_str]["Q_DSC"] = Q_DSC
        sc["MSP_Q_calc" + c_str]["Eps_alt"] = Eps_alt