Example 1
def fft_psd(
    fft_data,
    recharge,
    threshold=1,
    aquifer_thickness=None,
    aquifer_length=None,
    distance_to_river=None,
    path_to_project="no_path_given",
    single_file="no_path_given",
    method="scipyffthalf",
    fit=False,
    savefig=False,
    saveoutput=True,
    dupuit=False,
    a_l=None,
    t_l=None,
    a_d=None,
    t_d=None,
    weights_l=[1, 1, 1, 1, 1, 1],
    weights_d=[1, 1, 1, 1, 1, 1],
    o_i="oi",
    time_step_size=None,
    windows=10,
    wiener_window=100,
    obs_point="no_obs_given",
    comment="",
):

    print(
        "LAST ENTRY OF HEAD AND RECHARGE TIME SERIES WAS SET EQUAL TO THE PREVIOUS ONE DUE TO UNREASONABLE RESULTS"
    )
    fft_data[-1] = fft_data[-2]
    recharge[-1] = recharge[-2]

    o_i_txt = ""
    threshold_txt = ""
    fit_txt = ""

    len_input = len(recharge)
    len_output = len(fft_data)

    # check if recharge and fft_data have the same length and truncate the longer series
    if len(recharge) > len(fft_data):
        print(
            "The input data is longer than the output data. Equalizing by deleting the last entries of the input data."
        )
        recharge = recharge[:len(fft_data)]
    elif len(recharge) < len(fft_data):
        print(
            "The output data is longer than the input data. Equalizing by deleting the last entries of the output data."
        )
        fft_data = fft_data[:len(recharge)]

    # define the sampling frequency/time step
    # -------------------------------------------------------------------------
    sampling_frequency = 1.0 / time_step_size  # [Hz] second: 1, day: 1.1574074074074E-5

    # detrend input and output signal
    # -------------------------------------------------------------------------
    recharge_detrend = signal.detrend(recharge, type="linear")
    fft_data_detrend = signal.detrend(fft_data, type="linear")

    # different methodologies for power spectral density
    # -------------------------------------------------------------------------

    if method == "scipyfftnormt":
        # =========================================================================
        # method x: Periodogram: Power Spectral Density: abs(X(w))^2 / T
        #           http://staff.utia.cas.cz/barunik/files/QFII/04%20-%20Seminar/04-qf.html
        # =========================================================================
        power_spectrum_input = (abs(
            fftpack.fft(recharge_detrend)[:len(fft_data_detrend) // 2])**
                                2) / (len(fft_data) * time_step_size)
        power_spectrum_output = (abs(
            fftpack.fft(fft_data_detrend)[:len(fft_data_detrend) // 2])**
                                 2) / (len(fft_data) * time_step_size)
        power_spectrum_result = power_spectrum_output / power_spectrum_input
        frequency_input = abs(
            fftpack.fftfreq(len(fft_data_detrend),
                            time_step_size))[:len(fft_data_detrend) // 2]
        if o_i == "i":
            power_spectrum_result = power_spectrum_input
        elif o_i == "o":
            power_spectrum_result = power_spectrum_output

    if method == "scipyfftnormn":
        # =========================================================================
        # method x: Periodogram: Power Spectral Density: abs(X(w))^2 / N
        #           http://staff.utia.cas.cz/barunik/files/QFII/04%20-%20Seminar/04-qf.html
        # =========================================================================
        power_spectrum_input = (abs(
            fftpack.fft(recharge_detrend)[:len(fft_data_detrend) // 2])**
                                2) / len(fft_data)
        power_spectrum_output = (abs(
            fftpack.fft(fft_data_detrend)[:len(fft_data_detrend) // 2])**
                                 2) / len(fft_data)
        power_spectrum_result = power_spectrum_output / power_spectrum_input
        frequency_input = abs(
            fftpack.fftfreq(len(fft_data_detrend),
                            time_step_size))[:len(fft_data_detrend) // 2]
        if o_i == "i":
            power_spectrum_result = power_spectrum_input
        elif o_i == "o":
            power_spectrum_result = power_spectrum_output

    if method == "scipyfftdouble":
        # =========================================================================
        # method x: Periodogram: Power Spectral Density: abs(X(w))^2
        #           http://staff.utia.cas.cz/barunik/files/QFII/04%20-%20Seminar/04-qf.html
        # =========================================================================
        power_spectrum_input = (abs(
            fftpack.fft(recharge_detrend)[:len(fft_data_detrend) // 2])**2) * 2
        power_spectrum_output = (abs(
            fftpack.fft(fft_data_detrend)[:len(fft_data_detrend) // 2])**2) * 2
        power_spectrum_result = power_spectrum_output / power_spectrum_input
        frequency_input = abs(
            fftpack.fftfreq(len(fft_data_detrend),
                            time_step_size))[:len(fft_data_detrend) // 2]
        if o_i == "i":
            power_spectrum_result = power_spectrum_input
        elif o_i == "o":
            power_spectrum_result = power_spectrum_output

    if method == "scipyrfft":
        # =========================================================================
        # method x: Periodogram: Power Spectral Density: abs(X(w))^2
        #           http://staff.utia.cas.cz/barunik/files/QFII/04%20-%20Seminar/04-qf.html
        # =========================================================================
        power_spectrum_input = (abs(fftpack.rfft(recharge_detrend,
                                                 len_input))**2)[1:]
        power_spectrum_output = (abs(fftpack.rfft(fft_data_detrend,
                                                  len_output))**2)[1:]
        power_spectrum_result = power_spectrum_output / power_spectrum_input
        frequency_input = (abs(fftpack.rfftfreq(len_output,
                                                time_step_size)))[1:]

    if method == "scipyrffthalf":
        # =========================================================================
        # method x: Periodogram: Power Spectral Density: abs(X(w))^2
        #           http://staff.utia.cas.cz/barunik/files/QFII/04%20-%20Seminar/04-qf.html
        # =========================================================================
        power_spectrum_input = ((abs(fftpack.rfft(
            recharge_detrend, len_input))[:len_output // 2])**2)[1:]
        power_spectrum_output = ((abs(
            fftpack.rfft(fft_data_detrend,
                         len_output))[:len_output // 2])**2)[1:]
        power_spectrum_result = power_spectrum_output / power_spectrum_input
        frequency_input = (abs(fftpack.rfftfreq(
            len_output, time_step_size))[:len_output // 2])[1:]

    if method == "scipyfft":
        # =========================================================================
        # method x: Periodogram: Power Spectral Density: abs(X(w))^2
        #           http://staff.utia.cas.cz/barunik/files/QFII/04%20-%20Seminar/04-qf.html
        # =========================================================================
        power_spectrum_input = (abs(fftpack.fft(recharge_detrend,
                                                len_input))**2)[1:]
        power_spectrum_output = (abs(fftpack.fft(fft_data_detrend,
                                                 len_output))**2)[1:]
        power_spectrum_result = power_spectrum_output / power_spectrum_input
        frequency_input = (abs(fftpack.fftfreq(len_output,
                                               time_step_size)))[1:]

    if method == "scipyffthalf":
        # =========================================================================
        # method x: Periodogram: Power Spectral Density: abs(X(w))^2
        #           http://staff.utia.cas.cz/barunik/files/QFII/04%20-%20Seminar/04-qf.html
        # =========================================================================
        power_spectrum_input = (abs(
            fftpack.fft(recharge_detrend, len_input)[:len_output // 2])**2)[1:]
        power_spectrum_output = (abs(
            fftpack.fft(fft_data_detrend, len_output)[:len_output // 2])**2)[1:]
        power_spectrum_result = power_spectrum_output / power_spectrum_input
        frequency_input = (abs(fftpack.fftfreq(
            len_output, time_step_size))[:len_output // 2])[1:]

        if o_i == "i":
            power_spectrum_result = power_spectrum_input
            o_i_txt = "in_"
        elif o_i == "o":
            power_spectrum_result = power_spectrum_output
            o_i_txt = "out_"

    if method == "scipywelch":
        # =========================================================================
        # method x: scipy.signal.welch
        #           https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.welch.html#r145
        # =========================================================================
        frequency_input, power_spectrum_input = signal.welch(
            recharge_detrend,
            sampling_frequency,
            nperseg=16000,
            window="hamming")
        frequency_output, power_spectrum_output = signal.welch(
            fft_data_detrend,
            sampling_frequency,
            nperseg=16000,
            window="hamming")
        frequency_output = frequency_output[1:]
        frequency_input = frequency_input[1:]
        power_spectrum_result = (abs(
            (power_spectrum_output / power_spectrum_input))**2)[1:]
        if o_i == "i":
            power_spectrum_result = power_spectrum_input[1:]
        elif o_i == "o":
            power_spectrum_result = power_spectrum_output[1:]

    if method == "pyplotwelch":
        # =========================================================================
        # method x: Pyplot PSD by Welch
        #           https://matplotlib.org/api/_as_gen/matplotlib.pyplot.psd.html
        # =========================================================================
        power_spectrum_input, frequency_input = plt.psd(recharge_detrend,
                                                        Fs=sampling_frequency)
        power_spectrum_output, frequency_output = plt.psd(
            fft_data_detrend, Fs=sampling_frequency)
        # delete first value (which is 0) because it makes trouble with fitting
        frequency_output = frequency_output  # [1:]
        frequency_input = frequency_input  # [1:]
        power_spectrum_result = power_spectrum_output / power_spectrum_input  # [1:]
        if o_i == "i":
            power_spectrum_result = power_spectrum_input[1:]
        elif o_i == "o":
            power_spectrum_result = power_spectrum_output[1:]

    if method == "scipyperio":
        # =========================================================================
        # method x: Scipy.signal.periodogram
        #           https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.signal.periodogram.html
        # =========================================================================
        frequency_input, power_spectrum_input = signal.periodogram(
            recharge_detrend, fs=sampling_frequency)
        frequency_output, power_spectrum_output = signal.periodogram(
            fft_data_detrend, fs=sampling_frequency)
        frequency_output = frequency_output[1:]
        frequency_input = frequency_input[1:]
        power_spectrum_result = (power_spectrum_output /
                                 power_spectrum_input)[1:]
        if o_i == "i":
            power_spectrum_result = power_spectrum_input[1:]
        elif o_i == "o":
            power_spectrum_result = power_spectrum_output[1:]

    if method == "spectrumperio":
        # =========================================================================
        # method x: Spectrum.periodogram
        #           http://thomas-cokelaer.info/software/spectrum/html/user/ref_fourier.html#spectrum.periodogram.Periodogram
        # =========================================================================
        from spectrum import WelchPeriodogram

        power_spectrum_input, empty = WelchPeriodogram(recharge_detrend, 256)
        plt.close()
        frequency_input = power_spectrum_input[1]
        frequency_input = frequency_input[1:]
        power_spectrum_input = power_spectrum_input[0]
        power_spectrum_output, empty = WelchPeriodogram(fft_data_detrend, 256)
        plt.close()
        frequency_output = power_spectrum_output[1]
        frequency_output = frequency_output[1:]
        power_spectrum_output = power_spectrum_output[0]
        power_spectrum_result = (power_spectrum_output /
                                 power_spectrum_input)[1:]
        if o_i == "i":
            power_spectrum_result = power_spectrum_input[1:]
        elif o_i == "o":
            power_spectrum_result = power_spectrum_output[1:]
    """
    Further methods, not working or still under construction
    elif method == 'spectrum_sperio':    
        # =========================================================================
        # method x: Spectrum.speriodogram
        #           http://thomas-cokelaer.info/software/spectrum/html/user/ref_fourier.html#spectrum.periodogram.Periodogram
        # =========================================================================          
        from spectrum import speriodogram
        power_spectrum_input = speriodogram(recharge_detrend,
                                            detrend = False,
                                            sampling = sampling_frequency)
        power_spectrum_output = speriodogram(fft_data_recharge,
                                            detrend = False,
                                            sampling = sampling_frequency)
        power_spectrum_result = power_spectrum_output / power_spectrum_input

    elif method == 'correlation':    
        # =========================================================================
        # method x: CORRELOGRAMPSD.periodogram
        #           http://thomas-cokelaer.info/software/spectrum/html/user/ref_fourier.html#spectrum.periodogram.Periodogram
        # =========================================================================      
        from spectrum import CORRELOGRAMPSD
        tes = CORRELOGRAMPSD(recharge_detrend, recharge_detrend, lag=15)
        psd = tes[len(tes)/2:]
    """

    # cut off all frequencies above the given threshold
    for i, value in enumerate(frequency_input):
        if value > threshold:
            cutoff_index = i
            print("PSD was cut by threshold. Remaining data points: " +
                  str(cutoff_index))
            frequency_input = frequency_input[:cutoff_index]
            power_spectrum_result = power_spectrum_result[:cutoff_index]
            break

    # plot the resulting power spectrum
    # -------------------------------------------------------------------------
    fig = plt.figure(figsize=(16, 7))
    ax = fig.add_subplot(1, 1, 1)
    plt.subplots_adjust(left=None,
                        bottom=0.25,
                        right=None,
                        top=None,
                        wspace=None,
                        hspace=None)
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_xlabel("1/s")
    # ax.set_ylim(1e-3,1e6)
    # ax.plot(freq_month[ind],psd)
    ax.plot(frequency_input, power_spectrum_result, label="PSD")
    ax.set_title("Power Spectral Density for observation point " +
                 str(obs_point) + "\n" + "method: " + str(method))
    ax.grid(color="grey", linestyle="--", linewidth=0.5, which="both")

    # =========================================================================
    # Fit the power spectrum
    # =========================================================================

    if fit:

        # employ a filter on the spectrum to optimize the fit
        # ---------------------------------------------------------------------
        # method a: savgol
        # window_size = np.around((len(power_spectrum_result)/windows),0)
        # if window_size % 2 == 0:
        #    window_size = window_size + 1
        # elif window_size < 2:
        #    window_size = 2
        # power_spectrum_result_filtered = signal.savgol_filter(power_spectrum_result, window_size, 2)
        # method b: wiener
        # power_spectrum_result_filtered = signal.wiener(power_spectrum_result, wiener_window)
        power_spectrum_result_filtered = running_mean(power_spectrum_result,
                                                      10)
        # ax.plot(frequency_input[:len(power_spectrum_result_filtered)], power_spectrum_result_filtered, label='filtered PSD')

        # =====================================================================
        # linear model
        # =====================================================================
        # least squares automatic fit for linear aquifer model (Gelhar, 1993):
        # abs(H_h(w))**2 = 1 / (a**2 * ( 1 + ((t_l**2) * (w**2))))
        # ---------------------------------------------------------------------

        if a_l is None and t_l is None:
            # make an initial guess for a_l, and t_l
            initial_guess = np.array([1e-15, 40000])

            # generate a weighing array
            # ---------------------------------------------------------------------
            # based on dividing the data into segments
            sigma_l = []
            data_per_segment = len(power_spectrum_result) // len(weights_l)
            for weight_l in weights_l:
                sigma_l = np.append(sigma_l, np.full(data_per_segment,
                                                     weight_l))
            if len(power_spectrum_result) % len(weights_l) != 0:
                for residual in range(
                        len(power_spectrum_result) % len(weights_l)):
                    sigma_l = np.append(sigma_l, weights_l[-1])

            # define the function to fit (linear aquifer model):
            def linear_fit(w_l, a_l, t_l):
                return 1.0 / (a_l**2 * (1 + ((t_l**2) * (w_l**2))))  # method 1
                # return (1. / (a_l * ( 1 + ((t_l**2) * (w_l**2)))))                 # method 2
                # return (1. / (a_l**2 * ( 1 + ((t_l**2) * ((w_l/2./np.pi)**2)))))   # method 3
                # return (1. / (a_l * ( 1 + ((t_l**2) * ((w_l/2./np.pi)**2)))))      # method 4

            try:
                # perform the fit
                popt_l, pcov_l = optimization.curve_fit(
                    linear_fit,
                    frequency_input,
                    power_spectrum_result,
                    p0=initial_guess,
                    sigma=sigma_l,
                )
                # abs to avoid negative values from optimization
                t_l = abs(popt_l[1])
                a_l = abs(popt_l[0])

                # Plot the linear fit model
                # ---------------------------------------------------------------------
                linear_model = []
                # fitting model for the linear reservoir (Gelhar, 1993)
                for i in range(0, len(frequency_input)):
                    line = 1.0 / (a_l**2 *
                                  (1 + ((t_l**2) *
                                        (frequency_input[i]**2))))  # method 1
                    # line = 1 / (a_l * ( 1 + ((t_l**2) * (frequency_input[i]**2))))                  # method 2
                    # line = 1 / (a_l**2 * ( 1 + ((t_l**2) * ((frequency_input[i]/2./np.pi)**2))))    # method 3
                    # line = 1 / (a_l * ( 1 + ((t_l**2) * ((frequency_input[i]/2./np.pi)**2))))       # method 4
                    linear_model.append(line)
                ax.plot(frequency_input, linear_model, label="linear model")

                # calculate aquifer parameters
                # ---------------------------------------------------------------------
                T_l = a_l * aquifer_length**2 / 3.0
                kf_l = T_l / aquifer_thickness
                S_l = a_l * t_l
                Ss_l = S_l / aquifer_thickness
                D_l = aquifer_length**2 / (3.0 * t_l)
                # D_l = aquifer_length**2 * 4 / (np.pi**2 * t_l)
                print("T_l = ", a_l, "*", aquifer_length, "**2 / 3.")
                print("'T_l = ', a_l, '*', aquifer_length, '**2 / 3.'")
                print("kf_l = ", T_l, "/", aquifer_thickness)
                print("'kf_l = ', T_l, '/', aquifer_thickness")
                print("S_l = ", a_l, "*", t_l)
                print("'S_l = ', a_l, '*', t_l")
                print("Ss_l = ", S_l, "/", aquifer_thickness)
                print("'Ss_l = ', S_l, '/', aquifer_thickness")
                print("D_l = ", aquifer_length, "**2 / (3 * ", t_l, ")")
                print("'D_l = ', aquifer_length, '**2 / (3 * ', t_l,')'")
                output_l = ("Linear model:\n " + "T [m2/s]: " + "%0.4e" % T_l +
                            "\n  " + "Ss [1/m]: " + "%0.4e" % Ss_l + "\n  " +
                            "kf [m/s]: " + "%0.4e" % kf_l + "\n  " +
                            "D [m2/s]: " + "%0.4e" % D_l + "\n  " + "a: " +
                            "%0.4e" % a_l + "\n  " + "t_c [s]: " +
                            "%0.4e" % t_l)
                print(output_l)
                fig_txt = tw.fill(output_l, width=250)
            except RuntimeError:

                print(
                    "Automatic linear model fit failed... Provide a_l and t_l manually."
                )
                # calculate aquifer parameters
                # ---------------------------------------------------------------------
                T_l = np.nan
                kf_l = np.nan
                S_l = np.nan
                Ss_l = np.nan
                D_l = np.nan
                t_l = np.nan
                a_l = np.nan
                # D_l = aquifer_length**2 * 4 / (np.pi**2 * t_l)
                output_l = ""
                print(output_l)
        else:
            # Plot the linear fit model
            # ---------------------------------------------------------------------
            linear_model = []
            # fitting model for the linear reservoir (Gelhar, 1993)
            for i in range(0, len(frequency_input)):
                line = 1.0 / (a_l**2 * (1 +
                                        ((t_l**2) *
                                         (frequency_input[i]**2))))  # method 1
                # line = 1 / (a_l * ( 1 + ((t_l**2) * (frequency_input[i]**2))))                  # method 2
                # line = 1 / (a_l**2 * ( 1 + ((t_l**2) * ((frequency_input[i]/2./np.pi)**2))))    # method 3
                # line = 1 / (a_l * ( 1 + ((t_l**2) * ((frequency_input[i]/2./np.pi)**2))))       # method 4
                linear_model.append(line)
            ax.plot(frequency_input, linear_model, label="linear model")

            # calculate aquifer parameters
            # ---------------------------------------------------------------------
            T_l = a_l * aquifer_length**2 / 3.0
            kf_l = T_l / aquifer_thickness
            S_l = a_l * t_l
            Ss_l = S_l / aquifer_thickness
            D_l = aquifer_length**2 / (3 * t_l)
            # D_l = aquifer_length**2 * 4 / (np.pi**2 * t_l)
            print("T_l = ", a_l, "*", aquifer_length, "**2 / 3.")
            print("'T_l = ', a_l, '*', aquifer_length, '**2 / 3.'")
            print("kf_l = ", T_l, "/", aquifer_thickness)
            print("'kf_l = ', T_l, '/', aquifer_thickness")
            print("S_l = ", a_l, "*", t_l)
            print("'S_l = ', a_l, '*', t_l")
            print("Ss_l = ", S_l, "/", aquifer_thickness)
            print("'Ss_l = ', S_l, '/', aquifer_thickness")
            print("D_l = ", aquifer_length, "**2 / (3 * ", t_l, ")")
            print("'D_l = ', aquifer_length, '**2 / (3 * ', t_l,')'")
            output_l = ("Linear model:\n " + "T [m2/s]: " + "%0.4e" % T_l +
                        "\n  " + "Ss [1/m]: " + "%0.4e" % Ss_l + "\n  " +
                        "kf [m/s]: " + "%0.4e" % kf_l + "\n  " + "D [m2/s]: " +
                        "%0.4e" % D_l + "\n  " + "a: " + "%0.4e" % a_l +
                        "\n  " + "t_c [s]: " + "%0.4e" % t_l)
            print(output_l)
            fig_txt = tw.fill(output_l, width=250)

        # =====================================================================
        # Dupuit Model
        # =====================================================================
        # Step 5: least squares automatic fit for Dupuit-Aquifer model
        # (e.g. Gelhar and Wilson, 1974):
        # abs(H_h(w))**2 = (b/E)**2 * ( (1/O)*tanh)((1+j)*sqrt(1/2*O))*tanh((1-j)*sqrt(1/2*O))
        # O = td * w
        # E = x - x_o    distance from river
        # ---------------------------------------------------------------------
        if a_d is None and t_d is None and dupuit:
            # make an initial guess for a_l, and t_l
            initial_guess = np.array([0.98e-15, 2000000])

            # generate a weighing array
            # ---------------------------------------------------------------------
            # based on dividing the data into segments
            sigma_d = []
            # weights = [1,1,1] # give the weights for each segment, amount of values specifies the amount of segments
            data_per_segment = len(power_spectrum_result) // len(weights_d)
            for weight_d in weights_d:
                sigma_d = np.append(sigma_d, np.full(data_per_segment,
                                                     weight_d))
            if len(power_spectrum_result) % len(weights_d) != 0:
                for residual in range(
                        len(power_spectrum_result) % len(weights_d)):
                    sigma_d = np.append(sigma_d, weights_d[-1])

            # define the function to fit (linear aquifer model):
            def dupuit_fit(w_d, a_d, t_d):
                return (1.0 / a_d)**2 * ((1.0 / (t_d * w_d)) * np.tanh(
                    (1 + 1j) * np.sqrt(1.0 / 2 * t_d * w_d)).real * np.tanh(
                        (1 - 1j) * np.sqrt(1.0 / 2 * t_d * w_d)).real)

            # perform the fit
            popt_d, pcov_d = optimization.curve_fit(
                dupuit_fit,
                frequency_input,
                power_spectrum_result,
                p0=initial_guess,
                sigma=sigma_d,
            )
            # changed optimization from 2 to 1 variable to optimize
            # abs to avoid negative values
            a_d = popt_d[0]
            t_d = popt_d[1]

            # assign NaN to all parameters if the Dupuit model is not used
        else:
            T_d, kf_d, Ss_d, D_d = np.nan, np.nan, np.nan, np.nan

        # Plot the Dupuit model
        # ---------------------------------------------------------------------
        try:
            dupuit_model = []
            # fitting model for the linear reservoir (Gelhar, 1993)
            for i in range(0, len(frequency_input)):
                line = ((1.0 / a_d)**2 * (
                    (1.0 / (t_d * frequency_input[i])) * np.tanh(
                        (1 + 1j) * np.sqrt(1.0 / 2 * t_d * frequency_input[i]))
                    * np.tanh(
                        (1 - 1j) *
                        np.sqrt(1.0 / 2 * t_d * frequency_input[i])))).real
                dupuit_model.append(line)
            ax.plot(frequency_input, dupuit_model, label="Dupuit model")

            # calculate aquifer parameters
            # ---------------------------------------------------------------------

            T_d = a_d * aquifer_thickness * distance_to_river
            kf_d = T_d / aquifer_thickness
            S_d = t_d * T_d / aquifer_length**2
            Ss_d = S_d / aquifer_thickness
            D_d = T_d / S_d
            print("T_d = ", a_d, "*", aquifer_thickness, "*",
                  distance_to_river)
            print(
                "'T_d = ', a_d, '*', aquifer_thickness, '*', distance_to_river"
            )
            print("kf_d = ", T_d, "/", aquifer_thickness)
            print("'kf_d = ', T_d, '/', aquifer_thickness")
            print("S_d = ", t_d, "*", T_d, "/", aquifer_length, "**2")
            print("'S_d = ', t_d, '*', T_d, '/', aquifer_length, '**2'")
            print("Ss_d = ", S_d, "/", aquifer_thickness)
            print("'Ss_d = ', S_d, '/', aquifer_thickness")
            print("D_d = ", T_d, "/", S_d)
            print("'D_d = ', T_d, '/', S_d")
            output_d = ("Dupuit model: \n" + "T [m2/s]: " + "%0.4e" % T_d +
                        "\n  " + "Ss [1/m]: " + "%0.4e" % Ss_d + "\n  " +
                        "kf [m/s]: " + "%0.4e" % kf_d + "\n  " + "D [m2/s]: " +
                        "%0.4e" % D_d + "\n  " + "a: " + "%0.4e" % a_d +
                        "\n  " + "t_c [s]: " + "%0.4e" % t_d)
            print(output_d)
            fig_txt = tw.fill(str(output_l) + "\n" + str(output_d), width=145)

        except TypeError:
            print(
                "Automatic Dupuit-model fit failed... Provide a_d and t_d manually."
            )
            T_d, kf_d, Ss_d, D_d = np.nan, np.nan, np.nan, np.nan
            fig_txt = tw.fill(str(output_l), width=200)

        # annotate the figure
        # fig_txt = tw.fill(tw.dedent(output), width=120)
        plt.figtext(
            0.5,
            0.05,
            fig_txt,
            horizontalalignment="center",
            bbox=dict(boxstyle="square",
                      facecolor="#F2F3F4",
                      ec="1",
                      pad=0.8,
                      alpha=1),
        )

    plt.legend(loc="best")
    # plt.show()
    if savefig:
        if fit:
            fit_txt = "fit_"
        if threshold != 1:
            threshold_txt = str(threshold) + "_"
        path_name_of_file_plot = (
            str(path_to_project) + "/" + str(comment) + "PSD_" + fit_txt +
            o_i_txt + threshold_txt + str(method) + "_" +
            str(os.path.basename(str(path_to_project)[:-1])) + "_" +
            str(obs_point) + ".png")
        print("Saving figure " + str(path_name_of_file_plot[-30:]))
        fig.savefig(path_name_of_file_plot)
    fig.clf()
    plt.close(fig)

    path_name_of_file_plot = (
        str(path_to_project) + "/" + str(comment) + "PSD_" + fit_txt +
        o_i_txt + threshold_txt + str(method) + "_" +
        str(os.path.basename(str(path_to_project)[:-1])) + "_" +
        str(obs_point) + ".png")
    if not fit:
        T_l = np.nan
        kf_l = np.nan
        S_l = np.nan
        Ss_l = np.nan
        D_l = np.nan
        t_l = np.nan
        a_l = np.nan
        T_d = np.nan
        kf_d = np.nan
        S_d = np.nan
        Ss_d = np.nan
        D_d = np.nan
        t_d = np.nan
        a_d = np.nan

    if fit and saveoutput:
        with open(str(path_to_project) + "/PSD_output.txt", "a") as file:
            file.write(
                str(datetime.datetime.now()) + " " + method + " " + str(T_l) +
                " " + str(kf_l) + " " + str(Ss_l) + " " + str(D_l) + " " +
                str(a_l) + " " + str(t_l) + " " + str(T_d) + " " + str(kf_d) +
                " " + str(Ss_d) + " " + str(D_d) + " " + str(a_d) + " " +
                str(t_d) + " " + str(path_name_of_file_plot) + "\n")
    print(
        "###################################################################")
    return (
        T_l,
        kf_l,
        Ss_l,
        D_l,
        a_l,
        t_l,
        T_d,
        kf_d,
        Ss_d,
        D_d,
        a_d,
        t_d,
        power_spectrum_output,
    )
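
A minimal usage sketch for fft_psd, assuming synthetic daily recharge and head series and assuming the module-level imports the function relies on (numpy, scipy, matplotlib, textwrap, running_mean) are available; all series, geometry values, and labels below are illustrative assumptions, not project data.

# Hypothetical call of fft_psd with synthetic daily data (time_step_size in seconds).
import numpy as np

n_days = 4096
t = np.arange(n_days)
recharge_series = 1e-8 * (1.0 + 0.5 * np.sin(2.0 * np.pi * t / 365.0))  # recharge [m/s]
head_series = 10.0 + 0.2 * np.sin(2.0 * np.pi * t / 365.0 + 0.8)        # hydraulic head [m]

results = fft_psd(
    head_series,
    recharge_series,
    time_step_size=86400,        # daily sampling
    aquifer_thickness=30.0,      # illustrative geometry, not real site values
    aquifer_length=1000.0,
    distance_to_river=500.0,
    method="scipyffthalf",
    fit=True,
    savefig=False,
    saveoutput=False,
    obs_point="obs_1",
)
T_l, kf_l, Ss_l, D_l, a_l, t_l = results[:6]
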
data2 = np.genfromtxt(file1)
data3 = np.genfromtxt(file1)
data4 = np.genfromtxt(file1)
data_tmp = np.zeros([data1.shape[0], 2])
data_tmp[:, 0] = data1[:, 1]
data_tmp[:, 1] = data2[:, 1]
# data_tmp[:,2] = data3[:,1]
# data_tmp[:,3] = data4[:,1]
data = np.mean(data_tmp, axis=1)
data_final = data1.copy()
volcanic_data = data_final


smoothing_years = 20.0

volcanic_data[:, 1] = running_mean.running_mean(data_final[:, 1], 36.0 * smoothing_years)
volcanic_data[:, 1] = np.log(volcanic_data[:, 1])
volcanic_data[:, 1][np.where(np.isnan(volcanic_data[:, 1]))] = np.nanmean(volcanic_data[:, 1])
volcanic_data[:, 1][np.where(volcanic_data[:, 0] > 1950)] = np.nanmean(volcanic_data[:, 1])

plt.close("all")
# fig0 = plt.figure(figsize=(6, 4), dpi=80)
# plt.plot(volcanic_data[:,0],volcanic_data[:,1],'r')
# plt.show(block=False)


# solar_data

file1 = "/home/ph290/data0/misc_data/last_millenium_solar/tsi_SBF_11yr.txt"
data1 = np.genfromtxt(file1, skip_header=4)
solar_year = data1[:, 0]
model_names = ['year']
for model in models:
	model_names.append(str(model)+' 26N')
	
for model in models:
	model_names.append(str(model)+' 45N')
	
model_names.append('mpi 26')
model_names.append('mpi 45')
	
import running_mean
plt.close('all')
plt.figure(figsize = (15,5),dpi = 75)
for i in [1,2,3,4,7]:
	plt.plot(output[:,0],running_mean.running_mean(output[:,i]/np.mean(output[:,i]),20),linewidth = 3,alpha = 0.5,label = model_names[i])
	
plt.plot(output[:,0],running_mean.running_mean(mean_srm_fun_26/np.mean(mean_srm_fun_26),20),'k',linewidth = 3,alpha = 0.5,label = 'mean')

leg = plt.legend()
leg.get_frame().set_alpha(0.5)

plt.show(block = False)



# plt.close('all')
# plt.plot(cmip5_year[0],running_mean.running_mean(cmip5_max_strmfun_26[0],20))
# plt.plot(model_years[-1],running_mean.running_mean(strm_fun_26[2]/4,20))
# plt.show(block = False)
###
#plot pmip3 surface temperature
###

mean_data = np.zeros([1+end_year-start_year,np.size(all_models)])

for i,model in enumerate(all_models):
	tmp = pmip3_tas[model]
	loc = np.where((np.logical_not(np.isnan(tmp))) & (pmip3_year_tas[model] <= end_year) & (pmip3_year_tas[model] >= start_year))
	tmp = tmp[loc]
	yrs = pmip3_year_tas[model][loc]
	data2 = scipy.signal.filtfilt(b, a, tmp)
	x = data2
	data3 = (x-np.min(x))/(np.max(x)-np.min(x))
	l1 = ax11.plot(yrs,rm.running_mean(data3,smoothing_val),'b',alpha = 0.1,linewidth=wdth,label = 'CMIP5/PMIP3 ensemble member')
	mean_data[:,i] = data3
	
mean_data2 = np.mean(mean_data, axis = 1)
l2 = ax11.plot(yrs,mean_data2,'b',linewidth=wdth,alpha=0.9,label = 'CMIP5/PMIP3 ensemble mean')
ax11.set_ylabel('Normalised atlantic temperature anomaly')

###
#plot Mann AMO
###

ax12 = ax11.twinx()
l3 = ax12.plot(amo_yr,amo_data,'k',linewidth=wdth,alpha=0.7,label = 'AMV index (Mann et al., 2009)')
ax12.set_ylim([-0.4,1.2])
ax12.set_ylabel('Normalised AMV index')
	cube = iris.load_cube(files)[:,0,:,:]
	try:
		loc = np.where((cube.coord('grid_latitude').points >= 30) & (cube.coord('grid_latitude').points <= 50))
		lat = cube.coord('grid_latitude').points[loc]
		sub_cube = cube.extract(iris.Constraint(grid_latitude = lat))
		stream_function_tmp = sub_cube.collapsed(['depth','grid_latitude'],iris.analysis.MAX)
	except:
		loc = np.where((cube.coord('latitude').points >= 30) & (cube.coord('latitude').points <= 50))
		lat = cube.coord('latitude').points[loc]	
		sub_cube = cube.extract(iris.Constraint(latitude = lat))
		stream_function_tmp = sub_cube.collapsed(['depth','latitude'],iris.analysis.MAX)
	coord = stream_function_tmp.coord('time')
	dt = coord.units.num2date(coord.points)
	year_tmp = np.array([coord.units.num2date(value).year for value in coord.points])
	#tmp = running_mean.running_mean(signal.detrend(stream_function_tmp.data/1.0e9),1)
	tmp = running_mean.running_mean(stream_function_tmp.data/1.0e9,smoothing_val)
	cmip5_max_strmfun.append(np.ma.masked_invalid(tmp))
	cmip5_year.append(np.ma.masked_invalid(year_tmp))
	
	try:
		file2 = '/media/usb_external1/cmip5/tas_regridded/'+model+'_tas_past1000_regridded.nc'
		cube2 = iris.load_cube(file2)
	except:
		file2 = '/media/usb_external1/cmip5/last1000/'+model+'_thetao_past1000_regridded.nc'
		cube2 = iris.load_cube(file2)
		cube2 = cube2.extract(iris.Constraint(depth = 5))
		
	lon_west = -20.0
	lon_east = -10
	lat_south = 67
	lat_north = 75.0 
Example 6
def test_running_mean(input_argument, expected_return):
    ret = list(running_mean(input_argument))
    assert ret == expected_return
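
The test above expects its two arguments to be injected by the test runner. Below is a sketch of how it could be driven with pytest.mark.parametrize, together with a hypothetical cumulative-mean generator used only to make the example self-contained; the project's real running_mean and fixture values may differ.

import pytest

def running_mean(values):
    # Hypothetical reference implementation: yield the mean of all values seen so far.
    total = 0.0
    for count, value in enumerate(values, start=1):
        total += value
        yield total / count

@pytest.mark.parametrize(
    "input_argument, expected_return",
    [
        ([1, 1, 1, 1], [1.0, 1.0, 1.0, 1.0]),   # constant series keeps its mean
        ([1, 2, 3], [1.0, 1.5, 2.0]),           # cumulative means of 1, 2, 3
    ],
)
def test_running_mean(input_argument, expected_return):
    ret = list(running_mean(input_argument))
    assert ret == expected_return
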
iceland_e_europe_data[:,1] = reynolds_data
iceland_e_europe_data = np.mean(iceland_e_europe_data,axis=1)

colarado_canada_data = np.zeros([np.size(c_usa_data),2])
colarado_canada_data[:,0] = c_usa_data
colarado_canada_data[:,1] = east_canada_data
colarado_canada_data = np.mean(colarado_canada_data,axis = 1)

smoothing = 30
alph_val = 0.75

plt.close('all')

fig = plt.figure(figsize = [12,8])
ax1 = fig.add_subplot(411)
ax1.plot(e_europe_yr,rm.running_mean(e_europe_data,smoothing),'b',linewidth = 3,alpha=alph_val)
ax1.plot(reynolds_yr,rm.running_mean(reynolds_data,smoothing),'b--',linewidth = 3,alpha=alph_val)

ax2 = fig.add_subplot(412)
ax2.plot(c_usa_yr,rm.running_mean(c_usa_data,smoothing),'y',linewidth = 3,alpha=alph_val)
ax2.plot(east_canada_yr,rm.running_mean(east_canada_data,smoothing),'y--',linewidth = 3,alpha=alph_val)

ax3 = fig.add_subplot(413)
#ax3.plot(amo_yr,amo_data,'k',linewidth = 3,alpha=alph_val)
ax4 = ax3.twinx()
ax4.plot(e_europe_yr,rm.running_mean(iceland_e_europe_data,smoothing),'b',linewidth = 3,alpha=alph_val)
ax4.plot(east_canada_yr,rm.running_mean(colarado_canada_data,smoothing),'y',linewidth = 3,alpha=alph_val)

ax4 = fig.add_subplot(414)
ax4.plot(amo_yr,amo_data,'k',linewidth = 3,alpha=alph_val)
ax5 = ax4.twinx()
end_year = 1850
start_year = 850
expected_years = np.arange(850,1850)

mean_data = np.zeros([1+end_year-start_year,np.size(all_models)])
mean_data[:] = np.nan

for i,model in enumerate(all_models):
	tmp = pmip3_tas[model]
	loc = np.where((np.logical_not(np.isnan(tmp))) & (pmip3_year_tas[model] <= end_year) & (pmip3_year_tas[model] >= start_year))
	tmp = tmp[loc]
	yrs = pmip3_year_tas[model][loc]
	data2 = high_pass( tmp,smoothing_window)
	x = data2
	data3 = (x-np.min(x))/(np.max(x)-np.min(x))
	l1 = ax11.plot(yrs,rm.running_mean(data3,smoothing_val),'b',alpha = 0.1,linewidth=wdth,label = 'CMIP5/PMIP3 ensemble member')
	for index,y in enumerate(expected_years):
		loc2 = np.where(yrs == y)
		if np.size(loc2) != 0:
			mean_data[index,i] = data3[loc2]
	

mean_data2 = np.mean(mean_data, axis = 1)
l2 = ax11.plot(yrs,mean_data2,'b',linewidth=wdth,alpha=0.9,label = 'CMIP5/PMIP3 ensemble mean')
ax11.set_ylabel('Normalised atlantic temperature anomaly')

###
#plot Mann AMO
###

ax12 = ax11.twinx()
		except:
			print('nowt to remove')


	for model in models_unique:
		print(model)
		files = np.array(glob.glob('/media/usb_external1/cmip5/msftmyz/piControl/*'+model+'_*.nc'))
		cube = iris.load_cube(files)[:,0,:,:]
		loc = np.where(cube.coord('grid_latitude').points >= 26.0)[0]
		lat = cube.coord('grid_latitude').points[loc[0]]
		sub_cube = cube.extract(iris.Constraint(grid_latitude = lat))
		stream_function_tmp = sub_cube.collapsed('depth',iris.analysis.MAX)
		coord = stream_function_tmp.coord('time')
		dt = coord.units.num2date(coord.points)
		year_tmp = np.array([coord.units.num2date(value).year for value in coord.points])
		tmp = running_mean.running_mean(signal.detrend(stream_function_tmp.data/1.0e9),40)
		cmip5_max_strmfun.append(tmp[np.logical_not(np.isnan(tmp))])
		cmip5_year.append(year_tmp[np.logical_not(np.isnan(tmp))])

	cmip5_max_strmfun = np.array(cmip5_max_strmfun)
	cmip5_year = np.array(cmip5_year)


	#read in variable

	cmip5_max_variable_high = []
	cmip5_max_variable_low = []
	cmip5_max_variable_diff = []
	digital_high_low = np.empty([np.size(models_unique),180,360])

	for i,model in enumerate(models_unique):
mpl.rcdefaults()
mpl.rc('font', **font)

font_size = 14
font_weight = 'bold'

plt.close('all')
plt.figure()
ax1 = plt.subplot(1,1,1)
ax1.fill_between([2000,2012,2012,2000], [-100,-100,100,160], y2=0, where=None,alpha= 0.25,color='gray')
for i,model in enumerate(models_final):
    if not model == 'MRI-ESM1':
        try:
            yrs = model_spg_cube1_mean_acc[i].coord('year').points
            #plt.plot(yrs,running_mean.running_mean(model_spg_cube1_mean_acc[i].data,5),linewidth = 5,linestyle=linestyles[i],  color=colors[i],label=model,alpha= 0.5)
            y = running_mean.running_mean(model_spg_cube1_mean_acc[i].data,10)
            colour = 'g'
            if min(y[150:-31]) < min(y[-30:-1]): colour = 'b'
            if min(y[0:150]) < min(y[151:-1]): colour = 'r'
            ax1.plot(yrs,(y-scipy.stats.nanmean(y[0:20]))*-1.0,linewidth = 6,linestyle=linestyles[i],  color=colour,label=model,alpha= 0.4)
        except:
            print('model: ' + model + ' failed')


results_box = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/rcp85_spg_box_model_qump_results_3.csv',delimiter = ',')
ax1.plot(results_box[:,0],(results_box[:,1]-results_box[0,1])*(44/12.0),'k',linewidth = 6,alpha= 0.4,linestyle = '-')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('Am I right in changing the units here? I think I need to convert all of the box model output - or all of the other data - to the same units!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')

fig = plt.figure(figsize= (12,6))
fig.add_subplot(121) 
for i,data in enumerate(consolidated_lines):
    if np.size(data) > 0:
        if np.size(np.where(data <= -0.5)) == 0:
            if np.size(np.where(data[1::]-data[0:-1] > 0.5)) == 0:
                plt.plot(consolidated_years[i],data,'k',alpha = alpha_val,linewidth=lw)
                plt.xlabel('year')
                plt.ylabel('air-sea CO$_2$ flux anomaly (mol-C m$^{-2}$ yr$^{-1}$)')
                plt.title('Global flux')

for i,data in enumerate(consolidated_lines):
    if np.size(data) > 0:
        if np.size(np.where(data <= -0.5)) == 0:
            if np.size(np.where(data[1::]-data[0:-1] > 0.5)) == 0:
                plt.plot(consolidated_years[i],run_mean.running_mean(data-np.mean(data[0:20]),smoothing),alpha = 0.5,linewidth=lw)

fig.add_subplot(122)
for i,data in enumerate(consolidated_lines_spg):
    if np.size(data) > 0:
        #if np.size(np.where(data <= -0.5)) == 0:
        #if np.size(np.where(data[1::]-data[0:-1] > 0.5)) == 0:
        plt.plot(consolidated_years[i],data,'k',alpha = alpha_val,linewidth=lw)
        plt.xlabel('year')
        plt.title('Subpolar flux')
        #plt.ylabel('Subpolar Gyre air-sea CO$_2$ flux anomaly (mol-C m$^{-2}$ yr$^{-1}$)')

for i,data in enumerate(consolidated_lines_spg):
    #if np.size(data) > 0:
    #    if np.size(np.where(data <= -0.5)) == 0:
    #        if np.size(np.where(data[1::]-data[0:-1] > 0.5)) == 0:
###
#plotting
###

linestyles = ['-','--','-.']

smoothing_val = 10

plt.close('all')
fig = plt.figure(figsize = (6,20))

for i,model in enumerate(models):
        ax1 = fig.add_subplot(np.size(models),1,i+1)
        tmp = amo_box_tas[i].data
        tmp = running_mean.running_mean(tmp,smoothing_val)
        loc = np.where((np.logical_not(np.isnan(tmp))) & (model_years_tas[i] <= 1850))
        tmp = tmp[loc]
        ax1.plot(model_years_tas[i][loc],signal.detrend(tmp),'r',linewidth=2,alpha = 0.5,linestyle = linestyles[0])
        loc = np.where((amo_yr >= 850) & (amo_yr <= 1850))
        ax1.plot(amo_yr[loc],signal.detrend(amo_data[loc]),'k',linewidth=2,alpha = 0.5)
        ax2 = ax1.twinx()
        tmp = max_strm_fun[i]
        tmp = running_mean.running_mean(tmp,smoothing_val)
        loc = np.where((np.logical_not(np.isnan(tmp))) & (model_years[i] <= 1850))
        tmp = tmp[loc]
        ax2.plot(model_years[i][loc],signal.detrend(tmp),'b',linewidth=2,alpha = 0.5,linestyle = linestyles[0])
        ax3 = ax2.twinx()
        ax3.plot(voln_n[:,0],voln_n[:,1],'k',linewidth=2,alpha = 0.2)
        #ax3.plot(voln_s[:,0],voln_n[:,1],'b',linewidth=2,alpha = 0.2)
        ax3.set_ylim([0,0.8])

#AVERAGE TOGETHER!

smoothing_val = 5

all_years = np.linspace(850,1840,(1841-850))
average_tas = np.empty([np.size(all_years),np.size(models)-5])
average_tas[:] = np.nan

counter = -1
for i,model in enumerate(models):
    if model not in ['xxxxxx']:
        counter += 1
        tmp = amo_box_tas[i].data
        tmp = rm.running_mean(tmp,smoothing_val)
        loc = np.where((np.logical_not(np.isnan(tmp))) & (model_years_tas[i] <= 1840) & (model_years_tas[i] >= 850))
        tmp = tmp[loc]
        yrs = model_years_tas[i][loc]
        data = signal.detrend(tmp)
        for j,yr in enumerate(all_years):
                try:
                        loc2 = np.where(yrs == yr)
                        average_tas[j,counter] = data[loc2]
                except:
                        None
        

	
average_tas2 = np.mean(average_tas,axis = 1)
mean_temperature = mean_density.copy()
mean_salinity = mean_density.copy()
temperature_meaned_density = mean_density.copy()
salinity_meaned_density = mean_density.copy()
precipitation = mean_density.copy()
psl_arctic = mean_density.copy()
psl_spg = mean_density.copy()
psl_diff = mean_density.copy()
evap = mean_density.copy()

years = range(min_yr,max_yr+1)
for i,model in enumerate(density_data.keys()):
	tmp_yrs = density_data[model]['years']
	data1 = density_data[model]['density']
	data1 = scipy.signal.filtfilt(b, a, data1)
	data1 = rm.running_mean(data1,smoothing_val)
	x = data1
	x=(x-np.nanmin(x))/(np.nanmax(x)-np.nanmin(x))
	data1 = x
	data2 = density_data[model]['temperature']
	data2 = scipy.signal.filtfilt(b, a, data2)
	data2 = rm.running_mean(data2,smoothing_val)
	x = data2
	x=(x-np.nanmin(x))/(np.nanmax(x)-np.nanmin(x))
	data2 = x
	data3 = density_data[model]['salinity']
	data3 = scipy.signal.filtfilt(b, a, data3)
	data3 = rm.running_mean(data3,smoothing_val)
	x = data3
	x=(x-np.nanmin(x))/(np.nanmax(x)-np.nanmin(x))
	data3 = x
	plt_x = strmfun_common_yrs2[i]/x
	y = np.mean(gs_common_yrs[np.where(model_no == model_no[i])])
	plt_y = gs_common_yrs[i]/y
	plt.scatter(plt_x,plt_y,color = colours[model_no[i]])

plt.xlabel('normalised max. atlantic stream function')
plt.ylabel('normalised latitude of gulf stream')
plt.title('diff models diff colours')
plt.savefig('/home/ph290/Documents/figures/gulfstream_analysis/amoc_v_gulf_stream_rcp85.png')
#plt.show()
plt.close("all")

fig = plt.figure()
l = []
for i,tmp in enumerate(models_gs_lat):
    l_tmp = plt.plot(models_years[i],running_mean.running_mean(tmp,10),label=models[i])
    l += l_tmp

plt.xlabel('year')
plt.ylabel('NAC latitude')
plt.legend(loc =  'lower right')
plt.savefig('/home/ph290/Documents/figures/gulfstream_analysis/gulf_stream_rcp85.png')
plt.close("all")
#plt.show()

fig = plt.figure()
l = []
for i,tmp in enumerate(cmip5_max_strmfun):
    l_tmp = plt.plot(tmp,label=models[i])
    l += l_tmp
plt.gca().xaxis.set_major_locator(MaxNLocator(nbins=10))
plt.show(block = False)



colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k','b', 'g', 'r', 'c', 'm', 'y', 'k','b', 'g', 'r', 'c', 'm', 'y', 'k','b', 'g', 'r', 'c', 'm', 'y', 'k')

'''
NOTE - I'm just processing this more neatly for Andy's analysis - i.e. no gap between hist and future.
This will be in /data/temp/ph290/andy_w_analysis/processed
'''


plt.figure()
for i,model in enumerate(models_final):
    if not model == 'MRI-ESM1':
        try:
            yrs = model_spg_cube1_mean_acc[i].coord('year').points
            plt.plot(yrs,running_mean.running_mean(model_spg_cube1_mean_acc[i].data,10),linewidth = 2,  color=colors[i],label=model)
            yrs = model_spg_cube2_mean_acc[i].coord('year').points
            plt.plot(yrs,running_mean.running_mean(model_spg_cube2_mean_acc[i].data,10),linewidth = 2,  color=colors[i])
        except:
            print('model: ' + model + ' failed')

plt.legend(loc = 3,prop={'size':8})
plt.gca().xaxis.set_major_locator(MaxNLocator(nbins=10))
plt.xlim([1860,2100])
plt.title('SPG air-sea difference 10yr running mean')
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/cmip5_spg_smoothed.png')
#plt.show(block = False)
Example 17
def DP_simp(r, AA0, nstd, sm=5):
    import pandas as pd
    import numpy as np
    from running_mean import running_mean

    AA = running_mean(AA0, sm)  #[sm-1:]
    rr = r[(sm - 1) // 2:-(sm - 1) // 2]  #pd.rolling_mean(r,sm)[sm-1:]

    Er = AA - AA0[(sm - 1) // 2:-(sm - 1) // 2]  #[sm-1:]
    n = np.arange(0, np.size(AA))
    flag = np.zeros(shape=np.size(AA))
    flag[0] = 1
    flag[-1] = 1
    flag2 = np.zeros(shape=np.size(AA))
    flag2[0] = 1
    flag2[-1] = 1

    end = False
    nlevels = 0
    while not end:

        nlevels = nlevels + 1

        if nlevels > np.size(r) * 2: end = True

        pix = np.squeeze(np.where(flag2 == 1))

        p1 = AA[pix[0]]
        p2 = AA[pix[1]]
        n1 = n[pix[0]]
        n2 = n[pix[1]]

        L = ((p2 - p1) / (n2 - n1)) * (n - n1) + p1

        d = abs(AA - L)
        d[0:n1 + 1] = 0
        d[n2:] = 0
        d[np.where(AA < 0)] = 0
        eps_mean = np.nanmean(Er[n1:n2 + 1])
        eps_std = nstd * np.nanstd(Er[n1:n2 + 1])
        eps = eps_mean + eps_std

        if ((np.size(d) > 1) & (np.max(d) > 0)):
            nm = np.where(d == np.max(d))
            dm = d[nm]

            if dm > eps:
                #print dm, eps, eps_mean,eps_std , nm
                flag[nm] = 1
                flag2[nm] = 1
            else:
                #print "menor que el umbral"
                for i in range(n1, n2):
                    flag2[i] = 1

                for i in range(np.size(flag) - 1):
                    if ((flag2[i] == 1) & (flag2[i + 1] == 1)):
                        flag2[i] = 2
                        #print "1"
        else:
            for i in range(np.size(flag) - 1):
                if ((flag2[i] == 1) & (flag2[i + 1] == 1)):
                    flag2[i] = 2
            #print "terminado"
        if flag2[-2] == 2: end = True
    return [
        rr[np.squeeze(np.where(flag == 1))],
        AA[np.squeeze(np.where(flag == 1))], rr, AA
    ]


#print nm,dm,eps,np.squeeze(np.where(flag2 == 1))
#pylab.plot(n,AA)
#pylab.scatter(n[np.squeeze(np.where(flag == 1))],AA[np.squeeze(np.where(flag == 1))])
#print flag2
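
A short usage sketch for DP_simp on a synthetic noisy profile, assuming running_mean returns a valid-mode moving average of length len(AA0) - sm + 1; the radius grid, profile shape, and noise level are illustrative assumptions only.

import numpy as np

# Hypothetical input: a smooth bump plus weak noise on a radial grid.
r = np.linspace(0.0, 10.0, 201)
profile = np.exp(-0.5 * ((r - 4.0) / 1.5) ** 2) + 0.02 * np.random.randn(r.size)

# nstd sets how many noise standard deviations a point must deviate from the
# local trend before it is kept as a breakpoint; sm is the smoothing window.
r_keep, a_keep, r_smooth, a_smooth = DP_simp(r, profile, nstd=2.0, sm=5)
print("kept", np.size(r_keep), "of", np.size(r_smooth), "smoothed points")
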