def bearish_hidden_divergence(data):

    h = data["Close"]
    rsi = data["RSI"]

    dx = 1  # interval is always fixed

    d_dx = FinDiff(0, dx, 1)  # first derivative
    d1 = d_dx(h)

    d2_dx = FinDiff(0, dx, 2)  # second derivative
    d2 = d2_dx(h)

    prices_max = get_extrema_y(False, d1, d2, h)
    prices_max_index = get_extrema_x(False, d1, d2, h)

    datetime_list = []

    if (h[len(h) - 2] == prices_max[len(prices_max) - 2]):
        current_index = len(h) - 2
        current_price = h[len(h) - 2]
        current_rsi = rsi[len(rsi) - 2]

        for i in range(0, len(prices_max) - 1):
            if (prices_max[i] < current_price
                    and current_rsi < rsi[prices_max_index[i]]):
                start = data["DateTime"][prices_max_index[i]]
                end = data["DateTime"][current_index]
                datetime_list.append([start, end])

    return datetime_list
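A minimal usage sketch (illustrative only): it assumes data is a pandas DataFrame carrying "Close", "RSI" and "DateTime" columns, and that the get_extrema_x/get_extrema_y helpers are defined elsewhere in the project (get_extrema_x appears in Example #20 below).

import pandas as pd

data = pd.read_csv("prices.csv")  # hypothetical file with Close, RSI, DateTime
for start, end in bearish_hidden_divergence(data):
    print("hidden bearish divergence from", start, "to", end)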
Example #2
    def setGrid(self, lb, ub, h):
        # Box
        self.lb = lb
        self.ub = ub
        self.h = h
        self.n_points = int((ub - lb) / h) + 1
        # Spatial grid [lb, lb+h, ..., ub]
        self.grid = np.linspace(lb, ub, self.n_points)

        # Derivative operators
        self.d_dx = FinDiff(0, self.h, 1, acc=4)
        self.d_d2x = FinDiff(0, self.h, 2, acc=4)

        # Tabulate rho and its derivatives
        self.tab_rho = self.rho(self.grid)
        self.d1_rho = self.d_dx(self.tab_rho)
        self.d2_rho = self.d_d2x(self.tab_rho)
        # Integration factor: 4 pi r^2 rho^2
        self.rho_r2 = 4. * np.pi * self.tab_rho * np.power(self.grid, 2)
        # C0,C1,C2
        self.C0, self.C1, self.C2 = self._getCFunctions()

        # Total n. variables
        self.n_variables = self.n_orbitals * self.n_points
        # N. constraints (density + orthonormality)
        self.n_constr = self.n_points + len(self.pairs)
Example #3
    def test_matrix_representation_doesnt_work_for_order_greater_2_issue_24(
            self):
        x = np.zeros(10)
        d3_dx3 = FinDiff((0, 1, 3))
        mat = d3_dx3.matrix(x.shape)
        self.assertAlmostEqual(-2.5, mat[0, 0])
        self.assertAlmostEqual(-2.5, mat[1, 1])
        self.assertAlmostEqual(-0.5, mat[2, 0])
Example #4
    def momentum(self):

        dx = 1  #1 day interval
        d_dx = FinDiff(0, dx, 1)
        d2_dx2 = FinDiff(0, dx, 2)
        clarr = np.asarray(self.df[self.col])
        mom = d_dx(clarr)
        momacc = d2_dx2(clarr)

        return mom, momacc
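The same two operators outside the class, as a self-contained sketch (the price values are made up):

import numpy as np
from findiff import FinDiff

close = np.array([10., 11., 13., 12., 11., 12.])
mom = FinDiff(0, 1, 1)(close)     # first derivative: momentum
momacc = FinDiff(0, 1, 2)(close)  # second derivative: acceleration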
Example #5
def gradient_calc(z_grid, x_grid, y_grid, n):
    # calculates the x and y components of the nth derivative of z (del rho)
    dx = np.transpose(x_grid)[0][1] - np.transpose(x_grid)[0][0]
    dy = y_grid[0][1] - y_grid[0][0]

    dn_dxn = FinDiff(0, dx, n)
    dn_dyn = FinDiff(1, dy, n)

    dnz_dxn = dn_dxn(z_grid)
    dnz_dyn = dn_dyn(z_grid)
    return [dnz_dxn, dnz_dyn]
Example #6
def leewave_data():
    """Session-wide fixture containing lee wave data with an overlaid moving eddy and mean flow.

    The eddy is advected by the mean flow hourly for two weeks.

    """

    # lee wave velocity dataset
    d = xr.open_dataset("test/data/lee_wave.nc")

    # mean flow velocity
    U = 0.2
    # eddy size
    es = 10e3

    # grid
    dx = 200
    dy = 1000
    dt = 3600
    x = np.arange(0, 350e3 + 1, dx)
    y = np.arange(0, 10e3 + 1, dy)
    t = np.arange(0, 2 * 7 * 24 * 3600 + 1, dt)
    T, Y, X = np.meshgrid(t, y, x, indexing="ij")

    # finite difference operators
    d_dx = FinDiff(2, dx)
    d_dy = FinDiff(1, dy)

    # eddy centre through advection
    xc = U * T
    # eddy field
    psit1 = 0.05 * es * np.exp(-((X - xc)**2 + (Y - 50e3)**2) / es**2)
    psit2 = 0.05 * es * np.exp(-((X - (xc + x[-1]))**2 +
                                 (Y - 50e3)**2) / es**2)
    psit = psit1 + psit2
    VM = -d_dx(psit)
    UM = d_dy(psit)

    Utot = U + UM + d.U.data[None, None, :]
    Vtot = VM + d.V.data[None, None, :]

    return xr.Dataset(
        {
            "U": (["t", "y", "x"], Utot.data),
            "V": (["t", "y", "x"], Vtot.data),
            "U_orig": (["x"], d.U.data),
            "V_orig": (["x"], d.V.data),
        },
        coords={
            "x": x,
            "y": y,
            "t": t
        },
    )
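Note that FinDiff(axis, spacing) with no order argument defaults to a first derivative, so VM = -dψ/dx and UM = dψ/dy recover the velocities from the streamfunction. A quick check of that default:

import numpy as np
from findiff import FinDiff

x = np.linspace(0, 1, 101)
dx = x[1] - x[0]
psi = x**2
print(np.allclose(FinDiff(0, dx)(psi), FinDiff(0, dx, 1)(psi)))  # True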
Example #7
def calc_support_resistance(prices):
    dx = 1
    d_dx = FinDiff(0, dx, 1)
    d2_dx2 = FinDiff(0, dx, 2)
    clarr = np.asarray(prices)
    mom = d_dx(clarr)
    momacc = d2_dx2(clarr)

    minimaIdxs = get_extrema(prices, True, mom, momacc)
    maximaIdxs = get_extrema(prices, False, mom, momacc)
    return minimaIdxs, maximaIdxs
    return minimaIdxs, maximaIdxs
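A usage sketch with synthetic prices, assuming a get_extrema(prices, isMin, mom, momacc) helper along the lines of the closure in Example #28 below:

import numpy as np

prices = np.sin(np.linspace(0, 20, 200)) + np.linspace(0, 2, 200)
minimaIdxs, maximaIdxs = calc_support_resistance(prices)
print(minimaIdxs, maximaIdxs)  # indices of local lows and highs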
Example #8
    def fit(self, data, _dt, poly_degree=2, cut_off=1e-3, deriv_acc=2):
        """
        :param data: dynamics data to be processed
        :param _dt: float, represents grid spacing
        :param poly_degree: degree of polynomials to be included in theta matrix
        :param cut_off: the threshold cutoff value for sparsity
        :param deriv_acc: (positive) integer, derivative accuracy
        :return: a SINDy model
        """
        if len(data.shape) == 1:
            data = data[np.newaxis, :]

        len_t = data.shape[-1]

        if len(data.shape) > 2:
            data = data.reshape((-1, len_t))
            print(
                "The array is converted to 2D automatically: in SINDy, "
                "every dimension except time (by default the last dimension) "
                "is treated equally.")

        # compute time derivative
        d_dt = FinDiff(data.ndim - 1, _dt, 1, acc=deriv_acc)
        x_dot = d_dt(data).T

        # prepare for the library
        lib, self._desp = self.polynomial_expansion(data.T, degree=poly_degree)

        # sparse regression
        self._coef, _ = self.sparsify_dynamics(lib, x_dot, cut_off)

        return self
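The derivative step above in isolation, with illustrative values (a single variable, laid out in the same (n_vars, len_t) convention):

import numpy as np
from findiff import FinDiff

dt = 0.01
t = np.arange(0, 1, dt)
data = np.sin(t)[np.newaxis, :]                        # shape (n_vars, len_t)
x_dot = FinDiff(data.ndim - 1, dt, 1, acc=2)(data).T   # shape (len_t, n_vars)
print(np.max(np.abs(x_dot[:, 0] - np.cos(t))))         # small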
Example #9
    def test_findiff_should_raise_exception_when_applied_to_unevaluated_function(
            self):
        def f(x, y):
            return 5 * x**2 - 5 * x + 10 * y**2 - 10 * y

        d_dx = FinDiff(1, 0.01)
        self.assertRaises(ValueError, lambda ff: d_dx(ff), f)
Example #10
def compute_deriv_alphas(cosmo, BAO_only=False):

    from scipy.interpolate import RegularGridInterpolator, splev

    order = 4
    nmu = 100
    dk = 0.0001
    mu = np.linspace(0.0, 1.0, nmu)

    pkarray = np.empty((2 * order + 1, len(cosmo.k)))
    for i in range(-order, order + 1):
        kinterp = cosmo.k + i * dk
        if BAO_only:
            pkarray[i + order] = splev(kinterp, cosmo.pk[0]) / splev(
                kinterp, cosmo.pksmooth[0])
        else:
            pkarray[i + order] = splev(kinterp, cosmo.pk[0])
    derPk = FinDiff(0, dk, acc=4)(pkarray)[order]
    derPalpha = [
        np.outer(derPk * cosmo.k, (mu**2 - 1.0)),
        -np.outer(derPk * cosmo.k, (mu**2))
    ]
    derPalpha_interp = [
        RegularGridInterpolator([cosmo.k, mu], derPalpha[i]) for i in range(2)
    ]

    return derPalpha_interp
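The pkarray construction is worth spelling out: the function tabulates P at k + i·dk for i = -order..order, differentiates along that stacking axis, and reads the centre row, which holds dP/dk at the original k values. A sketch of the same trick with a known function:

import numpy as np
from findiff import FinDiff

order, dk = 4, 1e-4
k = np.linspace(0.1, 1.0, 50)
farray = np.empty((2 * order + 1, len(k)))
for i in range(-order, order + 1):
    farray[i + order] = np.sin(k + i * dk)
dfdk = FinDiff(0, dk, acc=4)(farray)[order]
print(np.max(np.abs(dfdk - np.cos(k))))  # tiny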
Example #11
    def fit(self, data, _dt, poly_degree=2, deriv_acc=2,
            lmbda=5e-2, max_iter=1000, tol=2e-3):
        """
        :param data: dynamics data to be processed
        :param _dt: float, represents grid spacing
        :param poly_degree: degree of polynomials to be included in theta matrix
        :param deriv_acc: (positive) integer, derivative accuracy
        :param lmbda: threshold for doing adm
        :param max_iter: max iteration number for adm
        :param tol: tolerance for stopping criteria for adm
        :return: an implicit SINDy model
        """
        if len(data.shape) == 1:
            data = data[np.newaxis, :]

        if len(data.shape) == 2:
            num_of_var, len_t = data.shape

        if len(data.shape) > 2:
            len_t = data.shape[-1]
            data = data.reshape((-1, len_t))
            print("The array is converted to 2D automatically: in Implicit-SINDy, "
                  "each dimension except for the time (default the last dimension) "
                  "are treated equally.")
            num_of_var = data.shape[0]


        # compute time derivative
        d_dt = FinDiff(data.ndim - 1, _dt, 1, acc=deriv_acc)
        x_dot = d_dt(data).T

        # pre-process dxt
        for i in range(x_dot.shape[1]):
            x_dot[:, i] = ISINDy.smoothing(x_dot[:, i])

        # polynomial expansion of original data
        var_names = ['u%d' % i for i in np.arange(num_of_var)]

        extended_data, extended_desp = np.array(self.polynomial_expansion(data.T,
                                                                          degree=poly_degree,
                                                                          var_names=var_names))

        # set the descriptions
        self._desp = self.expand_descriptions(extended_desp, 'uk_{t}')

        # compute sparse coefficients

        self._coef = np.zeros((len(self._desp), num_of_var))

        for k in np.arange(num_of_var):
            # formulate theta1, theta2, theta3 ...
            theta_k = self.build_theta_matrix(extended_data, x_dot[:, k])
            # compute null spaces
            null_space_k = null_space(theta_k)
            # ADM
            self._coef[:, k] = ISINDy.adm_initvary(null_space_k, lmbda, max_iter, tol)
        return self
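The ADM step searches the null space of each theta matrix for a sparse coefficient vector; scipy.linalg.null_space, called above, behaves like this:

import numpy as np
from scipy.linalg import null_space

theta = np.array([[1., 2., 3.],
                  [2., 4., 6.]])     # rank 1, so a 2-dimensional null space
ns = null_space(theta)
print(ns.shape)                      # (3, 2)
print(np.allclose(theta @ ns, 0))    # True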
Example #12
def get_started_plot_1():
    x = np.linspace(-np.pi, np.pi, 31)
    dx = x[1] - x[0]
    f = np.sin(x)
    d_dx = FinDiff(0, dx)
    df_dx = d_dx(f)

    plt.xlabel('x')
    plt.ylabel('f, df_dx')
    plt.plot(x, f, '-o', label='f=sin(x)')
    plt.plot(x, df_dx, '-o', label='df_dx')
    plt.grid()
    plt.legend()

    plt.savefig('get_started_plot_1.png')
Example #13
    def fit_1d(self, acc):
        nx_list = [30, 100, 300, 1000]
        Lx = 10

        log_err_list = []
        log_dx_list = []

        for nx in nx_list:
            x = np.linspace(0, Lx, nx)
            dx = x[1] - x[0]
            f = np.sin(x)
            d_dx = FinDiff((0, dx), acc=acc)
            fx = d_dx(f)
            fxe = np.cos(x)
            err = np.max(np.abs(fxe - fx))
            log_dx_list.append(log(dx))
            log_err_list.append(log(err))

        fit = np.polyfit(log_dx_list, log_err_list, deg=1)
        return fit[0]
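The same convergence check as a standalone script; the fitted log-log slope should land near the requested accuracy order:

import numpy as np
from math import log
from findiff import FinDiff

log_dx, log_err = [], []
for nx in [30, 100, 300, 1000]:
    x = np.linspace(0, 10, nx)
    dx = x[1] - x[0]
    fx = FinDiff(0, dx, acc=2)(np.sin(x))
    log_dx.append(log(dx))
    log_err.append(log(np.max(np.abs(np.cos(x) - fx))))
print(np.polyfit(log_dx, log_err, deg=1)[0])  # ~ 2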
Example #14
    def test_identity_2d(self):

        x = np.linspace(-1, 1, 100)
        y = np.linspace(-1, 1, 100)

        X, Y = np.meshgrid(x, y, indexing='ij')
        u = X**2 + Y**2
        identity = Identity()

        np.testing.assert_array_equal(u, identity(u))

        twice_id = Coefficient(2) * Identity()
        np.testing.assert_array_equal(2 * u, twice_id(u))

        x_id = Coefficient(X) * Identity()
        np.testing.assert_array_equal(X * u, x_id(u))

        dx = x[1] - x[0]
        d_dx = FinDiff(0, dx)
        linop = d_dx + 2 * Identity()
        np.testing.assert_array_almost_equal(2 * X + 2 * u, linop(u))
Example #15
    def orders_to_op(order, _dx, acc):
        """
        :param order: orders of the derivative
        :param _dx: a float or a list of floats, grid spacings
        :param acc: a positive integer, accuracy of the derivatives
        :return: a FinDiff operator for the requested partial derivatives
        """
        if not isinstance(order, list):
            raise ValueError(
                "order argument must be a list of positive integers!")

        if isinstance(_dx, list):
            assert len(order) == len(_dx), \
                "lengths of order and _dx are not the same!"
        elif isinstance(_dx, float):
            _dx = [_dx] * len(order)
        else:
            raise ValueError("dx must be a float or a list of floats, "
                             "specifying the grid spacing information!")

        args = [(int(i), _dx[i], order[i]) for i in np.arange(len(order))
                if order[i] != 0]
        return FinDiff(*args, acc=acc)
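An illustration of the operator this helper builds (values are made up): order=[1, 2] with _dx=0.1 expands to the argument list [(0, 0.1, 1), (1, 0.1, 2)], i.e. the mixed operator d^3/(dx dy^2); zero entries in order are skipped.

import numpy as np
from findiff import FinDiff

op = FinDiff((0, 0.1, 1), (1, 0.1, 2), acc=2)  # same as orders_to_op([1, 2], 0.1, 2)
x = y = np.linspace(0, 1, 11)
X, Y = np.meshgrid(x, y, indexing='ij')
f = X * Y**2
print(np.allclose(op(f), 2.0))  # d^3(x*y^2)/(dx dy^2) = 2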
Example #16
    def fit_2d(self, acc):
        nx_list = [10, 30, 100, 300]
        ny_list = [10, 30, 100, 300]
        Lx, Ly = 3, 3

        log_err_list = []
        log_dx_list = []

        for nx, ny in zip(nx_list, ny_list):
            x = np.linspace(0, Lx, nx)
            y = np.linspace(0, Ly, ny)
            dx, dy = x[1] - x[0], y[1] - y[0]
            X, Y = np.meshgrid(x, y, indexing='ij')
            f = np.sin(X) * np.sin(Y)
            d_dx = FinDiff((0, dx), acc=acc)
            fx = d_dx(f)
            fxe = np.cos(X) * np.sin(Y)
            err = np.max(np.abs(fxe - fx))
            log_dx_list.append(log(dx))
            log_err_list.append(log(err))

        fit = np.polyfit(log_dx_list, log_err_list, deg=1)
        return fit[0]
Example #17
#list_of_indexs = [[20],[40],[60],[80]]

#list_of_indexs = [[60]]
list_of_indexs = big_data_set['idx']

if diff_check:
    id_no = 42
    driver_time = big_data_set['idx'][id_no][0]
    h_data = big_data_set['dfs'][id_no]['Height [Mm]'].dropna()
    t_data = big_data_set['dfs'][id_no]['time [s]'].dropna()
    time_stop_index = np.argmin(abs(t_data - driver_time))
    x = h_data.index.values
    dx = x[1] - x[0]

    d_dx = FinDiff(0, dx)
    d2_dx2 = FinDiff(0, dx, 2)

    dh_dx = d_dx(h_data)
    d2h_dx2 = d2_dx2(h_data)

    mean = d2h_dx2[:time_stop_index].mean()
    std = d2h_dx2[:time_stop_index].std()
    sigma = 1

    range_of_values = [mean - sigma * std, mean + sigma * std]
    test = d2h_dx2 - mean
    step = np.hstack((np.ones(len(test)), -1 * np.ones(len(test))))
    dary_step = np.convolve(test, step, mode='valid')
    step_indx = np.argmax(dary_step)
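The convolution trick at the end, in isolation: correlating the mean-subtracted signal with a [+1, ..., +1, -1, ..., -1] kernel peaks at an upward step. A sketch with synthetic data:

import numpy as np

signal = np.hstack((np.zeros(50), np.ones(50)))
test = signal - signal.mean()
step = np.hstack((np.ones(len(test)), -np.ones(len(test))))
print(np.argmax(np.convolve(test, step, mode='valid')))  # ~ 50, the step location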
Example #18
    def fit(self,
            data,
            dt,
            _dx,
            poly_degree=2,
            space_deriv_order=2,
            cut_off=1e-3,
            deriv_acc=2,
            sample_rate=1.):
        """
        :param data: a numpy array or a dict of arrays, dynamics data to be processed
        :param dt: float, for temporal grid spacing
        :param _dx: float or list of floats, for spatial grid spacing
        :param poly_degree: degree of polynomials to be included in theta matrix
        :param space_deriv_order: maximum order of derivatives applied on spatial dimensions
        :param cut_off: the threshold cutoff value for sparsity
        :param deriv_acc: (positive) integer, derivative accuracy
        :param sample_rate: float, proportion of the data to use
        :return: a SINDyPDE model
        """
        if isinstance(data, np.ndarray):
            data = {'u': data}

        array_shape = data[list(data.keys())[0]].shape

        for i, k in enumerate(data.keys()):
            if len(data[k].shape) not in [2, 3, 4]:
                raise ValueError(
                    "SINDyPDE supports 2D, 3D and 4D arrays only, "
                    "with the last dimension be the time dimension.")
            if data[k].shape != array_shape:
                raise ValueError(
                    "The arrays that you provide should have the same shapes!")

        if not isinstance(dt, float):
            raise ValueError(
                "dt should of type float, specifying the temporal grids ...")

        if isinstance(_dx, list):
            if len(_dx) != len(array_shape) - 1:
                raise ValueError(
                    "The length of _dx does not match the shape of the array!")
        elif isinstance(_dx, float):
            _dx = [_dx] * (len(array_shape) - 1)
        else:
            raise ValueError(
                "_dx could either be float or a list of floats ...")

        # compute time derivative
        d_dt = FinDiff(len(array_shape) - 1, dt, 1, acc=deriv_acc)
        time_deriv = np.zeros((np.prod(array_shape), len(data.keys())))
        for i, k in enumerate(data.keys()):
            time_deriv[:, i] = d_dt(data[k]).flatten()
        print("Progress: finished computing time derivatives  ...")

        # compute spatial derivatives
        if space_deriv_order < 1 or not isinstance(space_deriv_order, int):
            raise ValueError(
                'Order of the spatial derivative should be a positive integer!'
            )

        space_deriv, space_deriv_desp = self.compute_spatial_derivatives(
            data, _dx, space_deriv_order, deriv_acc)
        print("Progress: finished computing spatial derivatives  ...")

        # prepare the library
        all_data, data_var_names = self.dict_to_2darray(data)
        extended_data, extended_data_desp = self.polynomial_expansion(
            all_data, degree=poly_degree, var_names=data_var_names)
        lib, self._desp = self.product_expansion(extended_data, space_deriv,
                                                 extended_data_desp,
                                                 space_deriv_desp)

        # sparse regression
        if not isinstance(sample_rate, float) or sample_rate < 0 or sample_rate > 1:
            raise ValueError("sample_rate must be a float between 0 and 1!")
        idxs = np.random.choice(lib.shape[0],
                                int(sample_rate * lib.shape[0]),
                                replace=False)
        self._coef, _ = self.sparsify_dynamics(lib[idxs, :],
                                               time_deriv[idxs, :],
                                               cut_off,
                                               normalize=0)
        print("Progress: finished sparse regression  ...")

        return self
Example #19
    def test_order_as_numpy_integer(self):

        order = np.ones(3, dtype=np.int32)[0]
        d_dx = FinDiff(0, 0.1, order)  # raised an AssertionError with the bug

        np.testing.assert_allclose(d_dx(np.linspace(0, 1, 11)), np.ones(11))
Example #20
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from findiff import FinDiff

# Extracting Data for plotting
x = input("Enter  file name :")
x = x + ".csv"
data = pd.read_csv(x)
rows, columns = data.shape

h = np.array(data["Close"])  # h as a one dimensional function ie.f(h)

dx = 1  #interval always fixed

d_dx = FinDiff(0, dx, 1)  #first differntial
d1 = d_dx(h)

d2_dx = FinDiff(0, dx, 2)  #second differntial
d2 = d2_dx(h)


def get_extrema_x(isMin):
    return [
        x for x in range(len(d1)) if (d2[x] > 0 if isMin else d2[x] < 0) and (
            d1[x] == 0 or  # slope is 0
            (
                x != len(d1) - 1 and  # check next day
                (d1[x] > 0 and d1[x + 1] < 0 and h[x] >= h[x + 1]
                 or d1[x] < 0 and d1[x + 1] > 0 and h[x] <= h[x + 1])
                or x != 0 and  # check prior day
                (d1[x - 1] > 0 and d1[x] < 0 and h[x - 1] < h[x]
                 or d1[x - 1] < 0 and d1[x] > 0 and h[x - 1] > h[x])))
    ]
Example #21
    def __init__(self,
                 density,
                 y,
                 orders,
                 density_interp,
                 std_n,
                 ls_n,
                 std_s,
                 ls_s,
                 ref_n,
                 ref_s,
                 breakdown,
                 err_y=0,
                 derivs=(0, 1, 2),
                 include_3bf=True,
                 verbose=False,
                 rho=None):
        self.density = density
        self.Density = Density = density[:, None]
        self.kf = None
        self.Kf = None

        self.density_interp = density_interp
        self.Density_interp = Density_interp = density_interp[:, None]
        self.kf_interp = None
        self.Kf_interp = None
        self.X_interp = Density_interp

        self.y = y
        self.N_interp = N_interp = len(density_interp)
        err_y = np.broadcast_to(err_y,
                                y.shape[0])  # Turn to vector if not already
        self.err_y = err_y
        self.Sigma_y = np.diag(err_y**2)  # Make a diagonal covariance matrix
        self.derivs = derivs

        self.gps_interp = {}
        self.gps_trunc = {}

        self._y_interp_all_derivs = {}
        self._cov_interp_all_derivs = {}
        self._y_interp_vecs = {}
        self._std_interp_vecs = {}
        self._cov_interp_blocks = {}

        self._dy_dn = {}
        self._d2y_dn2 = {}
        self._dy_dk = {}
        self._d2y_dk2 = {}
        self._y_dict = {}

        d_dn = FinDiff(0, density, 1)
        d2_dn2 = FinDiff(0, density, 2, acc=2)
        # d_dk = FinDiff(0, kf, 1)
        # d2_dk2 = FinDiff(0, kf, 2, acc=2)

        self._cov_total_all_derivs = {}
        self._cov_total_blocks = {}
        self._std_total_vecs = {}

        # The priors on the interpolator parameters
        self.mean0 = 0
        self.cov0 = 0
        self._best_max_orders = {}
        self._start_poly_order = 2

        self.ref_n = ref_n
        self.ref_s = ref_s

        kf_conversion = 2**(1 / 3.)

        if rho is not None:
            ls_s = ls_n / kf_conversion
        else:
            ls_s_scaled = kf_conversion * ls_s

        from functools import partial
        # transform_n = partial(fermi_momentum, degeneracy=2)
        # transform_s = partial(fermi_momentum, degeneracy=4)

        self.coeff_kernel_n = gptools.SquaredExponentialKernel(
            initial_params=[std_n, ls_n], fixed_params=[True, True])
        # Assumes the symmetric nuclear matter kernel takes kf_s as an argument, so use ls_s
        self.coeff_kernel_s = gptools.SquaredExponentialKernel(
            initial_params=[std_s, ls_s], fixed_params=[True, True])

        if rho is not None:
            # only use ls_n, and assume rho is the correlation of the off-diagonal
            std_off = np.sqrt(std_s * std_n) * rho
            ls_off = ls_n
        else:
            # But the off-diagonal will take kf_n as an argument, so use scaled length scale
            std_off = np.sqrt(
                std_s * std_n) * (2 * ls_n * ls_s_scaled /
                                  (ls_n**2 + ls_s_scaled**2))**0.25
            ls_off = np.sqrt((ls_s_scaled**2 + ls_n**2) / 2)
        ref_off = np.sqrt(ref_s * ref_n)
        self.coeff_kernel_off = gptools.SquaredExponentialKernel(
            initial_params=[std_off, ls_off], fixed_params=[True, True])

        print(ls_n, ls_s, ls_off)

        for i, n in enumerate(orders):
            first_omitted = n + 1
            if first_omitted == 1:
                first_omitted += 1  # the Q^1 contribution is zero, so bump to Q^2
            _kern_lower_n = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref_n,
                                  lowest_order=0,
                                  highest_order=n,
                                  include_3bf=include_3bf))
            _kern_lower_s = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref_s,
                                  lowest_order=0,
                                  highest_order=n,
                                  include_3bf=include_3bf))
            _kern_lower_ns = CustomKernel(
                ConvergenceKernel(
                    breakdown=breakdown,
                    ref=ref_off,
                    lowest_order=0,
                    highest_order=n,
                    include_3bf=include_3bf,
                    k_f1_scale=1,
                    k_f2_scale=1. / kf_conversion,  # Will turn kf_n to kf_s
                    # off_diag=True
                ))
            _kern_lower_sn = CustomKernel(
                ConvergenceKernel(
                    breakdown=breakdown,
                    ref=ref_off,
                    lowest_order=0,
                    highest_order=n,
                    include_3bf=include_3bf,
                    k_f1_scale=1. / kf_conversion,
                    k_f2_scale=1,  # Will turn kf_n to kf_s
                    # off_diag=True
                ))
            kern_interp_n = _kern_lower_n * self.coeff_kernel_n
            kern_interp_s = _kern_lower_s * self.coeff_kernel_s
            kern_interp_ns = _kern_lower_ns * self.coeff_kernel_off
            kern_interp_sn = _kern_lower_sn * self.coeff_kernel_off
            kern_interp = SymmetryEnergyKernel(
                kernel_n=kern_interp_n,
                kernel_s=kern_interp_s,
                kernel_ns=kern_interp_ns,
                kernel_sn=kern_interp_sn,
            )

            _kern_upper_n = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref_n,
                                  lowest_order=first_omitted,
                                  include_3bf=include_3bf))
            _kern_upper_s = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref_s,
                                  lowest_order=first_omitted,
                                  include_3bf=include_3bf))
            _kern_upper_ns = CustomKernel(
                ConvergenceKernel(
                    breakdown=breakdown,
                    ref=ref_off,
                    lowest_order=first_omitted,
                    include_3bf=include_3bf,
                    k_f1_scale=1,
                    k_f2_scale=1 / kf_conversion,
                    # off_diag=True
                ))
            _kern_upper_sn = CustomKernel(
                ConvergenceKernel(
                    breakdown=breakdown,
                    ref=ref_off,
                    lowest_order=first_omitted,
                    include_3bf=include_3bf,
                    k_f1_scale=1 / kf_conversion,
                    k_f2_scale=1,
                    # off_diag=True
                ))
            kern_trunc_n = _kern_upper_n * self.coeff_kernel_n
            kern_trunc_s = _kern_upper_s * self.coeff_kernel_s
            kern_trunc_ns = _kern_upper_ns * self.coeff_kernel_off
            kern_trunc_sn = _kern_upper_sn * self.coeff_kernel_off
            kern_trunc = SymmetryEnergyKernel(
                kernel_n=kern_trunc_n,
                kernel_s=kern_trunc_s,
                kernel_ns=kern_trunc_ns,
                kernel_sn=kern_trunc_sn,
            )

            y_n = y[:, i]
            self._y_dict[n] = y_n

            # Interpolating processes
            # mu_n = gptools.ConstantMeanFunction(initial_params=[np.mean(y_n)])
            # mu_n = gptools.ConstantMeanFunction(initial_params=[np.max(y_n)+20])
            mu_n = gptools.ConstantMeanFunction(initial_params=[0])
            gp_interp = gptools.GaussianProcess(kern_interp, mu=mu_n)
            gp_interp.add_data(Density, y_n, err_y=err_y)
            # gp_interp.optimize_hyperparameters(max_tries=10)  # For the mean
            self.gps_interp[n] = gp_interp

            # Finite difference:
            self._dy_dn[n] = d_dn(y_n)
            self._d2y_dn2[n] = d2_dn2(y_n)
            # self._dy_dk[n] = d_dk(y_n)
            # self._d2y_dk2[n] = d2_dk2(y_n)

            # Fractional interpolator polynomials
            self._best_max_orders[n] = self.compute_best_interpolator(
                density,
                y=y_n,
                start_order=self._start_poly_order,
                max_order=10)
            if verbose:
                print(
                    f'For EFT order {n}, the best polynomial has max nu = {self._best_max_orders[n]}'
                )

            # Back to GPs:

            y_interp_all_derivs_n, cov_interp_all_derivs_n = predict_with_derivatives(
                gp=gp_interp, X=Density_interp, n=derivs, return_cov=True)

            y_interp_vecs_n = get_means_map(y_interp_all_derivs_n, N_interp)
            cov_interp_blocks_n = get_blocks_map(cov_interp_all_derivs_n,
                                                 (N_interp, N_interp))
            # for (ii, jj), cov_ij in cov_interp_blocks_n.items():
            #     cov_interp_blocks_n[ii, jj] += 1e-12 * np.eye(cov_ij.shape[0])
            std_interp_vecs_n = get_std_map(cov_interp_blocks_n)

            self._y_interp_all_derivs[n] = y_interp_all_derivs_n
            self._cov_interp_all_derivs[n] = cov_interp_all_derivs_n
            self._y_interp_vecs[n] = y_interp_vecs_n
            self._cov_interp_blocks[n] = cov_interp_blocks_n
            self._std_interp_vecs[n] = std_interp_vecs_n

            # Truncation Processes
            gp_trunc = gptools.GaussianProcess(kern_trunc)
            self.gps_trunc[n] = gp_trunc

            cov_trunc_all_derivs_n = predict_with_derivatives(gp=gp_trunc,
                                                              X=Density_interp,
                                                              n=derivs,
                                                              only_cov=True)
            cov_total_all_derivs_n = cov_interp_all_derivs_n + cov_trunc_all_derivs_n

            cov_total_blocks_n = get_blocks_map(cov_total_all_derivs_n,
                                                (N_interp, N_interp))
            # for (ii, jj), cov_ij in cov_total_blocks_n.items():
            #     cov_total_blocks_n[ii, jj] += 1e-12 * np.eye(cov_ij.shape[0])
            std_total_vecs_n = get_std_map(cov_total_blocks_n)

            self._cov_total_all_derivs[n] = cov_total_all_derivs_n
            self._cov_total_blocks[n] = cov_total_blocks_n
            self._std_total_vecs[n] = std_total_vecs_n
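Note that d_dn and d2_dn2 above pass the density coordinate array itself rather than a scalar spacing; findiff accepts a coordinate array for non-uniform grids. A small sketch of that form:

import numpy as np
from findiff import FinDiff

rng = np.random.default_rng(0)
x = np.cumsum(rng.uniform(0.05, 0.15, 80))   # non-uniform grid
f = np.sin(x)
d_dx = FinDiff(0, x, 1, acc=2)               # pass coordinates, not a spacing
print(np.max(np.abs(d_dx(f) - np.cos(x))))   # small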
Example #22
    def test_accuracy_should_be_passed_down_to_stencil(self):
        # issue 31

        shape = 11, 11
        dx = 1.
        d1x = FinDiff(0, dx, 1, acc=4)
        stencil1 = d1x.stencil(shape)

        expected = {
            ('L', 'L'): {
                (0, 0): -2.083333333333331,
                (1, 0): 3.9999999999999916,
                (2, 0): -2.999999999999989,
                (3, 0): 1.3333333333333268,
                (4, 0): -0.24999999999999858
            },
            ('L', 'C'): {
                (0, 0): -2.083333333333331,
                (1, 0): 3.9999999999999916,
                (2, 0): -2.999999999999989,
                (3, 0): 1.3333333333333268,
                (4, 0): -0.24999999999999858
            },
            ('L', 'H'): {
                (0, 0): -2.083333333333331,
                (1, 0): 3.9999999999999916,
                (2, 0): -2.999999999999989,
                (3, 0): 1.3333333333333268,
                (4, 0): -0.24999999999999858
            },
            ('C', 'L'): {
                (-2, 0): 0.08333333333333333,
                (-1, 0): -0.6666666666666666,
                (1, 0): 0.6666666666666666,
                (2, 0): -0.08333333333333333
            },
            ('C', 'C'): {
                (-2, 0): 0.08333333333333333,
                (-1, 0): -0.6666666666666666,
                (1, 0): 0.6666666666666666,
                (2, 0): -0.08333333333333333
            },
            ('C', 'H'): {
                (-2, 0): 0.08333333333333333,
                (-1, 0): -0.6666666666666666,
                (1, 0): 0.6666666666666666,
                (2, 0): -0.08333333333333333
            },
            ('H', 'L'): {
                (-4, 0): 0.24999999999999958,
                (-3, 0): -1.3333333333333313,
                (-2, 0): 2.9999999999999956,
                (-1, 0): -3.999999999999996,
                (0, 0): 2.0833333333333317
            },
            ('H', 'C'): {
                (-4, 0): 0.24999999999999958,
                (-3, 0): -1.3333333333333313,
                (-2, 0): 2.9999999999999956,
                (-1, 0): -3.999999999999996,
                (0, 0): 2.0833333333333317
            },
            ('H', 'H'): {
                (-4, 0): 0.24999999999999958,
                (-3, 0): -1.3333333333333313,
                (-2, 0): 2.9999999999999956,
                (-1, 0): -3.999999999999996,
                (0, 0): 2.0833333333333317
            },
        }

        for char_pt in stencil1.data:
            stl = stencil1.data[char_pt]
            self.assert_dict_almost_equal(expected[char_pt], stl)

        d1x = FinDiff(0, dx, 1)
        stencil1 = d1x.stencil(shape, acc=4)
        for char_pt in stencil1.data:
            stl = stencil1.data[char_pt]
            self.assert_dict_almost_equal(expected[char_pt], stl)
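The ('L', 'C', 'H') keys label low-boundary, interior ('C'entre) and high-boundary points along each axis; the ('C', 'C') entry is the standard fourth-order centred first-derivative stencil (1/12, -2/3, 2/3, -1/12) divided by dx (dx = 1 in the test above). Applying it by hand, as a sketch:

import numpy as np

coeffs = {-2: 1 / 12, -1: -2 / 3, 1: 2 / 3, 2: -1 / 12}
x = np.linspace(0, 1, 11)
f = x**2
dx = x[1] - x[0]
i = 5  # interior point x = 0.5
dfdx = sum(c * f[i + off] for off, c in coeffs.items()) / dx
print(dfdx)  # ~ 1.0, since (x^2)' = 2x = 1 at x = 0.5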
Example #23
def trend_naive(hist):
    """Trend identification: naive method."""
    hs = hist.Close.loc[hist.Close.shift(-1) != hist.Close]
    x = hs.rolling(window=3, center=True).aggregate(lambda x: x[0] > x[1] and x[2] > x[1])
    minimaIdxs = [hist.index.get_loc(y) for y in x[x == 1].index]
    x = hs.rolling(window=3, center=True).aggregate(lambda x: x[0] < x[1] and x[2] < x[1])
    maximaIdxs = [hist.index.get_loc(y) for y in x[x == 1].index]
    return minimaIdxs, maximaIdxs


def trend_num_diff(df):
    """Trend identification: numerical differentiation."""
    df['fCloseR'] = df['fClose'].round()

    dx = 1  # 1 day interval
    d_dx = FinDiff(0, dx, 1)
    d2_dx2 = FinDiff(0, dx, 2)
    clarr = np.asarray(df['fCloseR'])
    mom = d_dx(clarr)
    momacc = d2_dx2(clarr)

    df['num_mom'] = mom
    df['num_momacc'] = momacc
    h = df['fCloseR'].tolist()

    # minimaNum / maximaNum are assumed to be filled in by the extrema detection step
    cols_to_look = ['fClose', 'num_mom', 'num_momacc', 'minimaNum', 'maximaNum']
    return df[cols_to_look].head(25)
Example #24
    return u


cutoff, lam, v, m_lam, hx, hy, c = 1000, 2, 0, 794, 3.2, 0, 1750000  # hy = 3.5
#cutoff, lam, v, m_lam, hx, hy, c = 1000, 2.0, 0, 794, 3.2, 0.0001, 137**2*93 # hy = 3.5
gam = 1.3  # tune gam such that diquark gap/onset for NJL model is obtained
L = 140**2
L2 = L
Nx = 40
x = np.linspace(0, L, Nx)
Ny = 40
y = np.linspace(0, L2, Ny)
dx = np.abs(x[1] - x[0])
dy = np.abs(y[1] - y[0])
acc = 2
d_dx = FinDiff(1, dx, 1, acc=acc)
d2_dx2 = FinDiff(1, dx, 2, acc=acc)
d_dy = FinDiff(0, dy, 1, acc=acc)
d2_dy2 = FinDiff(0, dy, 2, acc=acc)
d2_dxdy = FinDiff((0, dy), (1, dx), acc=acc)
xgrd, ygrd = np.meshgrid(x, y)
N_k = 50
k_IR = 100
k_stop = cutoff
# the k grid plays the role of the flow time, running from the UV cutoff down to k_IR
k = np.linspace(cutoff, k_IR, N_k)
dk = k[0] - k[1]


def v0(X, Y):
Example #25
 def __init__(self, problem=None, rho=None, v=None, grad_rho=None, data=[], \
              param_step=0.01, t0=1., t=0., ts=None, \
              R_min=1e-4, R_max=10., r_step=0.1, cutoff=1e-8, integrator=integrate.simpson,\
              scaling="l", C_code=False, output="Output", input_dir="Scaled_Potentials", load=False):
     
     # Parameters
     if ts is not None:
         # t's provided as a list
         self.T = ts
         self.reverse_t = False
     else:
         self.T, self.reverse_t = self._setTParameters(t, t0, param_step)
         
     # Spatial mesh
     self.dr = r_step
     self.R = np.arange(R_min, R_max, self.dr)  
     if (R_max - self.R[-1]) > 1e-6:
         self.R = np.append(self.R, R_max)
     self.d_dx = FinDiff(0, self.dr, 1, acc=4)
     
     #saving output directory
     self.output = output
     #saving integration method
     self.integrator = integrator
     #saving scaling preference
     self.scaling = scaling
     
     # Using Problem
     if problem is not None:
         assert ( isinstance(problem, Problem) )
         self.problem = problem
         self.rho_grid = problem.grid
         self.tab_rho = problem.tab_rho  
         self.rho = problem.rho
         
         self.R = self.rho_grid
         self.tab_grad_rho = self.d_dx(self.tab_rho)
         self.grad_rho = interpolate(self.rho_grid, self.tab_grad_rho)
         
         self.method = "IKS python"
         self.load = load
         # density cutoff
         self.cutoff = cutoff
         self.blackList = []
     
     
     # using provided density
     elif (rho is not None or len(data)>0):
         assert( v is not None or C_code is True )
         
         # Input: density function
         if(rho is not None):
             self.rho = rho
             
             # gradient
             if (grad_rho is not None):
                 self.grad_rho = grad_rho
             else:
                 self.tab_rho = self.rho(self.R)
                 self.tab_grad_rho = self.d_dx(self.tab_rho)
                 self.grad_rho = interpolate(self.R, self.tab_grad_rho)
         # Input: density array
         else:
             self.rho, self.grad_rho = interpolate(data[0], data[1], get_der=True)
         self.method = "Rho and v from input"
         
         # Potential 
         
         # Input potential function
         if(v is not None):
             self.v = v
         # Using data from the C++ code
         else:
             #saving method and file source directory
             self.input = input_dir
             self.method = "IKS C++"
         
         # create output directory
         if len(output)>0 and not os.path.exists("Results/" + output):
                 os.makedirs("Results/" + output)
                 
     else:
         assert(False), "Invalid input parameters"
         
     # Scaled densities: functions of r and t
     self.scaled_rho = scaleDensityFun(self.rho, scaling)
     self.drho_dt = self._getDrhoDt( scaling)
     
     # Dictionary of pot. functions
     self.dict_v = dict()
     # Kinetic energies
     self.kins = dict()
Example #26
    def spec_derivative(self, spyfile_spec=None, order=1):
        '''
        Calculates the numeric derivative spectra from spyfile_spec.

        The derivative spectrum is calculated as the slope (rise over run) of
        the input spectrum, and is normalized by the wavelength unit.

        Parameters:
            spyfile_spec: The spectral spyfile object to calculate the
                derivative for.
            order (``int``): The order of the derivative (default: 1).

        Example:
            Load and initialize ``hsio``

            >>> import os
            >>> from hs_process import hsio
            >>> from hs_process import spec_mod
            >>> data_dir = r'F:\\nigo0024\Documents\hs_process_demo'
            >>> fname_hdr_spec = os.path.join(data_dir, 'Wells_rep2_20180628_16h56m_pika_gige_7_plot_611-cube-to-spec-mean.spec.hdr')
            >>> io = hsio()
            >>> io.read_spec(fname_hdr_spec)
            >>> my_spec_mod = spec_mod(io.spyfile_spec)

            Calculate the numeric derivative.

            >>> spec_dydx, metadata_dydx = my_spec_mod.spec_derivative(order=1)

            >>> io.write_spec('spec_derivative_order-1.spec.hdr', spec_dydx, df_std=None, metadata=metadata_dydx)

            Plot the numeric derivative spectra and compare against the original spectra.

            >>> import numpy as np
            >>> import seaborn as sns
            >>> sns.set_style("ticks")
            >>> wl_x = np.array([float(i) for i in metadata_dydx['wavelength']])
            >>> y_ref = io.spyfile_spec.open_memmap()[0,0,:]*100
            >>> ax1 = sns.lineplot(wl_x, y_ref)
            >>> ax2 = ax1.twinx()
            >>> ax2 = sns.lineplot(wl_x, 0, ax=ax2, color='gray')
            >>> ax2 = sns.lineplot(wl_x, spec_dydx[0,0,:]*100, ax=ax2, color=sns.color_palette()[1])
            >>> ax2.set(ylim=(-0.8, 1.5))
            >>> ax1.set_xlabel('Wavelength (nm)', weight='bold')
            >>> ax1.set_ylabel('Reflectance (%)', weight='bold')
            >>> ax2.set_ylabel('Reflectance derivative (%)', weight='bold')
            >>> ax1.set_title(r'API Example: `hstools.spec_derivative`', weight='bold')
        '''
        msg0 = ('A numpy array was passed under the ``spyfile`` parameter, '
                'so therefore metadata must be retrieved from '
                '``spec_mod.spyfile.metadata``. However, ``spec_mod.spyfile`` '
                'is not set. Please set via ``spec_mod.load_spyfile()``.')
        msg1 = ('``spyfile_spec`` was not passed and is not set; please set '
                'via ``spec_mod.load_spyfile()``.')
        msg2 = ('The passed ``Spyfile`` is not a valid "spec" file. A valid '
                '"spec" file must have 3 dimensions with each of the first '
                'two dimensions (x and y) equal to 1 (e.g., shape = '
                '(1, 1, n_bands)). Please set ``spyfile`` to a valid "spec" '
                'or pass a valid "spec" ``spyfile`` to ``spec_derivative()``.')
        if isinstance(spyfile_spec, SpyFile.SpyFile):
            spec = spyfile_spec.open_memmap()
            metadata = spyfile_spec.metadata
        elif isinstance(spyfile_spec, np.ndarray):
            assert self.spyfile is not None, msg0
            spec = spyfile_spec.copy()
            metadata = self.spyfile.metadata
        else:
            assert self.spyfile is not None, msg1
            spec = self.spyfile.open_memmap()
            metadata = self.spyfile.metadata
        assert spec.shape[:2] == (1, 1), msg2  # First two dimensions must be 1
        wl_x = np.array([float(i) for i in metadata['wavelength']])
        dydx = FinDiff(0, wl_x, order)
        # spec_dydx = np.empty_like(spec)
        # spec_dydx = np.empty(spec.shape)
        # spec_dydx[0,0,:] = np.gradient(spec[0,0,:], wl_x)
        spec_dydx = np.empty(spec.shape)
        spec_dydx[0, 0, :] = dydx(spec[0, 0, :])

        # if 'stdev' in metadata:
        #     stdev = np.array([float(i) for i in metadata['stdev']])
        #     stdev_dydx = np.gradient(stdev, wl_x)
        metadata_dydx = self._metadata_derivative(metadata, order)
        return spec_dydx, metadata_dydx
Example #27
def example():
    #import os
    #import sys
    #module_path = os.path.abspath(os.path.join('..'))
    #if module_path not in sys.path:
    #sys.path.append(module_path)

    from pySINDy.sindypde import SINDyPDE
    import scipy.io as sio
    import numpy as np

    # this .mat file can be generated from two of our .m files in the datasets
    # directory; since it is too large, we leave it to the user to generate it.
    data = sio.loadmat('./Algorithms/pySINDy/datasets/reaction_diffusion.mat')
    #data.keys()
    print(data.keys())

    U = np.real(data['u'])
    V = np.real(data['v'])
    t = np.real(data['t'].flatten())
    x = np.real(data['x'].flatten())
    y = np.real(data['y'].flatten())
    dt = t[1] - t[0]
    dx = x[1] - x[0]
    dy = y[1] - y[0]

    model = SINDyPDE(name='SINDyPDE model for Reaction-Diffusion Eqn')

    U1 = U[100:200, 100:200, 200:230]
    V1 = V[100:200, 100:200, 200:230]
    model.fit({
        'u': U1,
        'v': V1
    },
              dt, [dx, dy],
              space_deriv_order=2,
              poly_degree=2,
              sample_rate=0.01,
              cut_off=0.05,
              deriv_acc=5)

    activated1 = [
        model.descriptions[i] for i in np.arange(model.coefficients.shape[0])
        if model.coefficients[i, 0] != 0
    ]
    activated2 = [
        model.descriptions[i] for i in np.arange(model.coefficients.shape[0])
        if model.coefficients[i, 1] != 0
    ]

    #print(activated1)
    #print(activated2)

    x = model.coefficients
    y = model.descriptions

    return x, y

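    # Note: the block below is unreachable because of the return above; it
    # recomputes the same derivatives by hand with FinDiff for comparison.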
    from findiff import FinDiff
    deriv_acc = 5

    U1 = U[100:200, 100:200, 200:230]
    V1 = V[100:200, 100:200, 200:230]

    d1_dt = FinDiff(U1.ndim - 1, dt, 1, acc=deriv_acc)
    d2_xx = FinDiff(0, dx, 2, acc=deriv_acc)
    d2_yy = FinDiff(1, dy, 2, acc=deriv_acc)

    u_t = d1_dt(U1).flatten()
    v_t = d1_dt(V1).flatten()
    x_t = np.vstack([u_t, v_t]).T
    print('finished time derivative computation!')

    u_xx = d2_xx(U1).flatten()
    u_yy = d2_yy(U1).flatten()
    v_xx = d2_xx(V1).flatten()
    v_yy = d2_yy(V1).flatten()
    u = U1.flatten()
    v = V1.flatten()
    uv2 = (U1 * V1 * V1).flatten()
    u2v = (U1 * U1 * V1).flatten()
    u3 = (U1 * U1 * U1).flatten()
    v3 = (V1 * V1 * V1).flatten()

    lib = np.vstack([u_xx, u_yy, v_xx, v_yy, u, v, uv2, u2v, u3, v3]).T

    print(np.linalg.lstsq(lib, x_t, rcond=None)[0])
Example #28
def snr(arr, win):
    if len(arr['Close']) < 4: return float('-inf'), 0
    from findiff import FinDiff  #pip3 install findiff
    dx = 1  #1 day interval
    d_dx = FinDiff(0, dx, 1)
    d2_dx2 = FinDiff(0, dx, 2)
    clarr = np.asarray(arr['Close']).astype(float)
    mom = d_dx(clarr)
    momacc = d2_dx2(clarr)

    def get_extrema(isMin):
        return [
            x for x in range(len(mom))
            if (momacc[x] > 0 if isMin else momacc[x] < 0) and
            (mom[x] == 0 or  #slope is 0
             (
                 x != len(mom) - 1 and  #check next day
                 (mom[x] > 0 and mom[x + 1] < 0
                  and arr['Close'][x] >= arr['Close'][x + 1] or mom[x] < 0 and
                  mom[x + 1] > 0 and arr['Close'][x] <= arr['Close'][x + 1])
                 or x != 0 and  #previous day
                 (mom[x - 1] > 0 and mom[x] < 0
                  and arr['Close'][x - 1] < arr['Close'][x] or mom[x - 1] < 0
                  and mom[x] > 0 and arr['Close'][x - 1] > arr['Close'][x])))
        ]

    minimaIdxs, maximaIdxs = get_extrema(True), get_extrema(False)

    profit = 0
    buy_price = 0
    sell_price = 0
    resist = False
    first = True
    buy = True

    resistance_plot_array = []
    support_plot_array = []
    trade_dec = 0  #1 for buy, -1 for sell, 0 for hold

    for end_index in range(win, len(clarr)):
        price_max = clarr[end_index - win:end_index].max()
        price_min = clarr[end_index - win:end_index].min()
        delta_5 = (price_max - price_min) * 0.05

        max_num = 0
        resistance_centre_recent = -1
        for x in maximaIdxs:
            if x < end_index - win:
                continue
            if x >= end_index:
                break

            num_points = 0
            for y in maximaIdxs:
                if y < end_index - win:
                    continue
                if y >= end_index:
                    break
                if (clarr[x] >= clarr[y]) and (clarr[x] - clarr[y]) <= delta_5:
                    num_points += y * clarr[y]
            if num_points > max_num:
                max_num = num_points
                resistance_centre_recent = x

        min_num = 1
        support_centre_recent = -1
        for x in minimaIdxs:
            if x < end_index - win:
                continue
            if x >= end_index:
                break
            num_points = 0
            for y in minimaIdxs:
                if y < end_index - win:
                    continue
                if y >= end_index:
                    break
                if clarr[y] > clarr[x] and clarr[y] - clarr[x] <= delta_5:
                    num_points -= y * (price_max - clarr[y])
            if num_points < min_num:
                min_num = num_points
                support_centre_recent = x

        resistance_price = clarr[
            resistance_centre_recent] if resistance_centre_recent != -1 else price_max
        support_price = clarr[
            support_centre_recent] if support_centre_recent != -1 else price_min
        resistance_plot_array.append(resistance_price)
        support_plot_array.append(support_price)

        x = clarr[end_index]
        if abs(x - resistance_price) <= abs(x - support_price):
            resist = True
        else:
            resist = False

        if first:
            if (resist and
                (x >= resistance_price or resistance_price - x <= delta_5)):
                trade_dec = -1
                sell_price = x
                buy = False
                first = False
            if not resist and (x <= support_price
                               or x - support_price <= delta_5):
                trade_dec = 1
                buy = True
                buy_price = x
                first = False
            continue

        if buy and resist and (x >= resistance_price
                               or resistance_price - x <= delta_5):
            trade_dec = -1
            profit += x - buy_price
            buy = False
            sell_price = x

        if not buy and not resist and (x <= support_price
                                       or x - support_price <= delta_5):
            trade_dec = 1
            profit += sell_price - x
            buy = True
            buy_price = x

    return profit, trade_dec
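A usage sketch with made-up prices (the Close column is accessed positionally, so a default RangeIndex is assumed):

import pandas as pd

arr = pd.DataFrame({'Close': [10, 11, 12, 11, 10, 11, 12, 13, 12, 11]})
profit, trade_dec = snr(arr, win=5)
print(profit, trade_dec)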
Example #29
    csv_row.append(step)
    #print(step)
    if (step >= start_iter and step < end_iter):

        #   step_vars = fstep.available_variables()
        #   for name, info in step_vars.items():
        #        print("variable_name: " + name)
        #        for key, value in info.items():
        #           print("\t" + key + ": " + value)
        #        print("\n")

        if (store_U > -1):
            data = fstep.read("U")
            #print(data)
            dx = 1
            op = FinDiff(axis, dx, deriv, acc=accuracy)
            result = op(data)
            csv_row.append(result.sum())

            if (store_deriv == 1):
                fstep.write("U_deriv", result)

            op1 = FinDiff(0, dx, 2, acc=accuracy)
            op2 = FinDiff(1, dx, 2, acc=accuracy)
            op3 = FinDiff(2, dx, 2, acc=accuracy)

            result = op1(data) + op2(data) + op3(data)
            csv_row.append(result.sum())

            if (store_lap == 1):
                fstep.write("U_lap", result)
Example #30
    def __init__(self,
                 density,
                 kf,
                 y,
                 orders,
                 density_interp,
                 kf_interp,
                 std,
                 ls,
                 ref,
                 breakdown,
                 err_y=0,
                 derivs=(0, 1, 2),
                 include_3bf=True,
                 verbose=False):

        self.density = density
        self.kf = kf
        self.Kf = Kf = kf[:, None]

        self.density_interp = density_interp
        self.kf_interp = kf_interp
        self.Kf_interp = Kf_interp = kf_interp[:, None]
        self.X_interp = Kf_interp

        self.y = y
        self.N_interp = N_interp = len(kf_interp)
        err_y = np.broadcast_to(err_y,
                                y.shape[0])  # Turn to vector if not already
        self.err_y = err_y
        self.Sigma_y = np.diag(err_y**2)  # Make a diagonal covariance matrix
        self.derivs = derivs

        self.gps_interp = {}
        self.gps_trunc = {}

        self._y_interp_all_derivs = {}
        self._cov_interp_all_derivs = {}
        self._y_interp_vecs = {}
        self._std_interp_vecs = {}
        self._cov_interp_blocks = {}

        self._dy_dn = {}
        self._d2y_dn2 = {}
        self._dy_dk = {}
        self._d2y_dk2 = {}
        self._y_dict = {}

        d_dn = FinDiff(0, density, 1)
        d2_dn2 = FinDiff(0, density, 2, acc=2)
        d_dk = FinDiff(0, kf, 1)
        d2_dk2 = FinDiff(0, kf, 2, acc=2)

        self._cov_total_all_derivs = {}
        self._cov_total_blocks = {}
        self._std_total_vecs = {}

        # The priors on the interpolator parameters
        self.mean0 = 0
        self.cov0 = 0
        self._best_max_orders = {}
        self._start_poly_order = 2

        # from scipy.interpolate import splrep
        from scipy.interpolate import UnivariateSpline
        self.splines = {}

        self.coeff_kernel = gptools.SquaredExponentialKernel(
            initial_params=[std, ls], fixed_params=[True, True])
        for i, n in enumerate(orders):
            first_omitted = n + 1
            if first_omitted == 1:
                first_omitted += 1  # the Q^1 contribution is zero, so bump to Q^2
            _kern_lower = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref,
                                  lowest_order=0,
                                  highest_order=n,
                                  include_3bf=include_3bf))
            kern_interp = _kern_lower * self.coeff_kernel
            _kern_upper = CustomKernel(
                ConvergenceKernel(breakdown=breakdown,
                                  ref=ref,
                                  lowest_order=first_omitted,
                                  include_3bf=include_3bf))
            kern_trunc = _kern_upper * self.coeff_kernel

            # try:
            #     err_y_i = err_y[i]
            # except TypeError:
            #     err_y_i = err_y

            y_n = y[:, i]
            self._y_dict[n] = y_n

            # Interpolating processes
            # mu_n = gptools.ConstantMeanFunction(initial_params=[np.mean(y_n)])
            # mu_n = gptools.ConstantMeanFunction(initial_params=[np.max(y_n)+20])
            mu_n = gptools.ConstantMeanFunction(initial_params=[0])
            gp_interp = gptools.GaussianProcess(kern_interp, mu=mu_n)
            gp_interp.add_data(Kf, y_n, err_y=err_y)
            # gp_interp.optimize_hyperparameters(max_tries=10)  # For the mean
            self.gps_interp[n] = gp_interp

            # Finite difference:
            self._dy_dn[n] = d_dn(y_n)
            self._d2y_dn2[n] = d2_dn2(y_n)
            self._dy_dk[n] = d_dk(y_n)
            self._d2y_dk2[n] = d2_dk2(y_n)

            # Fractional interpolator polynomials
            self._best_max_orders[n] = self.compute_best_interpolator(
                density,
                y=y_n,
                start_order=self._start_poly_order,
                max_order=10)
            self.splines[n] = UnivariateSpline(density, y_n, s=np.max(err_y))
            if verbose:
                print(
                    f'For EFT order {n}, the best polynomial has max nu = {self._best_max_orders[n]}'
                )

            # Back to GPs:

            y_interp_all_derivs_n, cov_interp_all_derivs_n = predict_with_derivatives(
                gp=gp_interp, X=Kf_interp, n=derivs, return_cov=True)

            y_interp_vecs_n = get_means_map(y_interp_all_derivs_n, N_interp)
            cov_interp_blocks_n = get_blocks_map(cov_interp_all_derivs_n,
                                                 (N_interp, N_interp))
            std_interp_vecs_n = get_std_map(cov_interp_blocks_n)

            self._y_interp_all_derivs[n] = y_interp_all_derivs_n
            self._cov_interp_all_derivs[n] = cov_interp_all_derivs_n
            self._y_interp_vecs[n] = y_interp_vecs_n
            self._cov_interp_blocks[n] = cov_interp_blocks_n
            self._std_interp_vecs[n] = std_interp_vecs_n

            # Truncation Processes
            gp_trunc = gptools.GaussianProcess(kern_trunc)
            self.gps_trunc[n] = gp_trunc

            cov_trunc_all_derivs_n = predict_with_derivatives(gp=gp_trunc,
                                                              X=Kf_interp,
                                                              n=derivs,
                                                              only_cov=True)
            cov_total_all_derivs_n = cov_interp_all_derivs_n + cov_trunc_all_derivs_n

            cov_total_blocks_n = get_blocks_map(cov_total_all_derivs_n,
                                                (N_interp, N_interp))
            std_total_vecs_n = get_std_map(cov_total_blocks_n)

            self._cov_total_all_derivs[n] = cov_total_all_derivs_n
            self._cov_total_blocks[n] = cov_total_blocks_n
            self._std_total_vecs[n] = std_total_vecs_n