Example #1
def test_centered_difference_length():
    x = 2 * np.linspace(1, 100, 100)
    centered_difference = FiniteDifference(order=2)
    assert len(centered_difference(x)) == len(x)

    centered_difference_nans = FiniteDifference(order=2, drop_endpoints=True)
    assert len(centered_difference_nans(x)) == len(x)
Example #2
def test_forward_difference_length():
    x = 2 * np.linspace(1, 100, 100)
    forward_difference = FiniteDifference(order=1)
    assert len(forward_difference(x)) == len(x)

    forward_difference_nans = FiniteDifference(order=1, drop_endpoints=True)
    assert len(forward_difference_nans(x)) == len(x)
Example #3
 def __init__(
     self,
     optimizer=None,
     feature_library=None,
     differentiation_method=None,
     feature_names=None,
     t_default=1,
     discrete_time=False,
     n_jobs=1,
 ):
     if optimizer is None:
         optimizer = STLSQ()
     self.optimizer = optimizer
     if feature_library is None:
         feature_library = PolynomialLibrary()
     self.feature_library = feature_library
     if differentiation_method is None:
         differentiation_method = FiniteDifference()
     self.differentiation_method = differentiation_method
     if not isinstance(t_default, float) and not isinstance(t_default, int):
         raise ValueError("t_default must be a positive number")
     elif t_default <= 0:
         raise ValueError("t_default must be a positive number")
     else:
         self.t_default = t_default
     self.feature_names = feature_names
     self.discrete_time = discrete_time
     self.n_jobs = n_jobs
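A quick check of the t_default validation above (a minimal sketch; it assumes this __init__ belongs to pysindy's SINDy class, consistent with the surrounding examples):

from pysindy import SINDy

# t_default must be a positive int or float
model = SINDy(t_default=0.5)   # accepted
try:
    SINDy(t_default=-1)        # hits the t_default <= 0 branch
except ValueError as err:
    print(err)                 # "t_default must be a positive number"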
Example #4
 def __init__(
     self,
     optimizer=STLSQ(),
     feature_library=PolynomialFeatures(),
     differentiation_method=FiniteDifference(),
     feature_names=None,
     discrete_time=False,
     n_jobs=1,
 ):
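     # note: these defaults are instantiated once at import time, so every
     # instance created without arguments shares the same optimizer, library,
     # and differentiation objects; Example #3 avoids this with None defaults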
     self.optimizer = optimizer
     self.feature_library = feature_library
     self.differentiation_method = differentiation_method
     self.feature_names = feature_names
     self.discrete_time = discrete_time
     self.n_jobs = n_jobs
Example #5
def data_linear_oscillator_corrupted():
    t = np.linspace(0, 1, 100)
    x = 3 * np.exp(-2 * t)
    y = 0.5 * np.exp(t)
    np.random.seed(1)
    corrupt_idxs = np.random.choice(np.arange(1, t.size - 1), t.size // 20)
    x[corrupt_idxs] = 0
    X = np.stack((x, y), axis=-1)
    X_dot = FiniteDifference(order=2)(X, t)

    # build an array of the indices of samples that should be trimmed
    trimmed_idxs = np.concatenate(
        (corrupt_idxs - 1, corrupt_idxs, corrupt_idxs + 1))
    trimming_array = np.ones(X.shape[0])
    trimming_array[trimmed_idxs] = 0.0

    return X, X_dot, trimming_array
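The returned trimming_array flags the samples a robust optimizer should discard. A minimal sketch of fitting such corrupted data, assuming pysindy's SR3 optimizer with its trimming_fraction option (the trimming API may vary across pysindy versions):

import numpy as np
from pysindy import SINDy
from pysindy.optimizers import SR3

X, X_dot, trimming_array = data_linear_oscillator_corrupted()
t = np.linspace(0, 1, 100)

# trim roughly the fraction of samples flagged (zeroed) in trimming_array
opt = SR3(trimming_fraction=1 - trimming_array.mean())
model = SINDy(optimizer=opt)
model.fit(X, t=t, x_dot=X_dot)
model.print()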
Example #6
 def __init__(
     self,
     n_forcing_params,
     forcing_functions,
     feature_library=PolynomialFeatures(),
     differentiation_method=FiniteDifference(),
     feature_names=None,
     discrete_time=False,
     n_jobs=1,
     **optimizer_kws,
 ):
     optimizer = SR3Forcing(n_forcing_params, forcing_functions, **optimizer_kws)
     super(SINDyForcing, self).__init__(
         optimizer=optimizer,
         feature_library=feature_library,
         differentiation_method=differentiation_method,
         feature_names=feature_names,
         discrete_time=discrete_time,
         n_jobs=n_jobs,
     )
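A hypothetical instantiation of the subclass above; forcing_functions and the threshold keyword (forwarded to SR3Forcing through **optimizer_kws) are illustrative values, not part of the original code:

# illustrative only: two forcing terms plus an SR3-style threshold
model = SINDyForcing(
    n_forcing_params=2,
    forcing_functions=[lambda t: np.sin(t), lambda t: np.cos(t)],
    threshold=0.1,
)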
Example #7
def test_centered_difference(data):
    x, x_dot = data
    centered_difference = FiniteDifference(order=2)
    np.testing.assert_allclose(centered_difference(x), x_dot)
Example #8
def test_centered_difference_2d(data_derivative_2d):
    x, x_dot = data_derivative_2d
    centered_difference = FiniteDifference(order=2)
    np.testing.assert_allclose(centered_difference(x), x_dot)
Example #9
def test_forward_difference(data):
    x, x_dot = data
    forward_difference = FiniteDifference(order=1)
    np.testing.assert_allclose(forward_difference(x), x_dot)
Example #10
def test_centered_difference_variable_timestep_length():
    t = np.linspace(1, 10, 100) ** 2
    x = 2 * t
    centered_difference = FiniteDifference(order=2)
    assert len(centered_difference(x, t)) == len(x)
Example #11
def test_forward_difference_2d(data_derivative_2d):
    x, x_dot = data_derivative_2d
    forward_difference = FiniteDifference(order=1)
    np.testing.assert_allclose(forward_difference(x), x_dot)
Example #12
def test_forward_difference_variable_timestep_length():
    t = np.linspace(1, 10, 100) ** 2
    x = 2 * t
    forward_difference = FiniteDifference(order=1)
    assert len(forward_difference(x, t)) == len(x)
Example #13
def test_finite_difference(data, order):
    x, x_dot = data
    method = FiniteDifference(order=order)
    np.testing.assert_allclose(method(x), x_dot)
Example #14
def compressible_Framework(inner_prod, time, poly_order, threshold, r, tfac,
                           SR3Enhanced, make_3Dphaseplots):
    """
    Performs the entire vector_POD + SINDy framework for a given polynomial
    order and thresholding for the SINDy method.

    Parameters
    ----------
    inner_prod: 2D numpy array of floats
    (M = number of time samples, M = number of time samples)
        The scaled matrix of inner products X*X

    time: numpy array of floats
    (M = number of time samples)
        Time in microseconds

    poly_order: int
    (1)
        Highest polynomial order to use in the SINDy library

    threshold: float
    (1)
        Threshold in the SINDy algorithm, below which coefficients
        will be zeroed out.

    r: int
    (1)
        Truncation number of the SVD

    tfac: float
    (1)
        Fraction of the data to treat as training data

    SR3Enhanced: SINDy optimizer object
    (1)
        The SR3 optimizer with linear equality constraints

    make_3Dphaseplots: bool
    (1)
        Flag to make 3D phase plots or not

    Returns
    -------
    t_test: numpy array of floats
    (M_test = number of time samples in the test data region)
        Time in microseconds in the test data region

    x_true: 2D numpy array of floats
    (M_test = number of time samples in the test data region,
    r = truncation number of the SVD)
        The true evolution of the temporal BOD modes

    x_sim: 2D numpy array of floats
    (M_test = number of time samples in the test data region,
    r = truncation number of the SVD)
        The model evolution of the temporal BOD modes

    S2: 2D numpy array of floats
    (M = number of time samples, M = number of time samples)
        The singular value matrix

    """
    plt.clf()
    plt.close('all')
    M_train = int(len(time) * tfac)
    t_train = time[:M_train]
    t_test = time[M_train:]
    x, feature_names, S2, Vh = vector_POD(inner_prod, time, r)
    print('Now fitting SINDy model')
    if poly_order == 1:
        library_functions = [lambda x: x]
        library_function_names = [lambda x: x]
    if poly_order == 2:
        library_functions = [lambda x: x, lambda x, y: x * y, lambda x: x**2]
        library_function_names = [
            lambda x: x, lambda x, y: x + y, lambda x: x + x
        ]
    if poly_order == 3:
        library_functions = [
            lambda x: x, lambda x, y: x * y, lambda x: x**2,
            lambda x, y, z: x * y * z, lambda x, y: x**2 * y,
            lambda x, y: x * y**2, lambda x: x**3
        ]
        library_function_names = [
            lambda x: x, lambda x, y: x + y, lambda x: x + x,
            lambda x, y, z: x + y + z, lambda x, y: x + x + y,
            lambda x, y: x + y + y, lambda x: x + x + x
        ]
    if poly_order == 4:
        library_functions = [
            lambda x: x, lambda x, y: x * y, lambda x: x**2,
            lambda x, y, z: x * y * z, lambda x, y: x**2 * y,
            lambda x, y: x * y**2, lambda x: x**3,
            lambda x, y, z, w: x * y * z * w, lambda x, y, z: x * y * z**2,
            lambda x, y: x**2 * y**2, lambda x, y: x**3 * y, lambda x: x**4
        ]
        library_function_names = [
            lambda x: x, lambda x, y: x + y, lambda x: x + x,
            lambda x, y, z: x + y + z, lambda x, y: x + x + y,
            lambda x, y: x + y + y, lambda x: x + x + x,
            lambda x, y, z, w: x + y + z + w, lambda x, y, z: x + y + z + z,
            lambda x, y: x + x + y + y, lambda x, y: x + x + x + y,
            lambda x: x + x + x + x
        ]
    sindy_library = CustomLibrary(library_functions=library_functions, \
        function_names=library_function_names)
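    # The equality constraints below read constraint_matrix @ coefficients = 0.
    # The first r rows zero out the diagonal of the linear coefficient matrix;
    # the remaining rows force each off-diagonal pair (i, j), (j, i) to sum to
    # zero, making the linear part of the model antisymmetric.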
    constraint_zeros = np.zeros(int(r * (r + 1) / 2))
    if poly_order == 1:
        constraint_matrix = np.zeros((int(r * (r + 1) / 2), r**2))
        for i in range(r):
            constraint_matrix[i, i * (r + 1)] = 1.0
        q = r
        for i in range(r):
            counter = 1
            for j in range(i + 1, r):
                constraint_matrix[q, i * r + j] = 1.0
                constraint_matrix[q, i * r + j + counter * (r - 1)] = 1.0
                counter = counter + 1
                q = q + 1
    else:
        if poly_order == 2:
            #constraint_zeros = np.zeros(6+int(r*(r+1)/2))
            #constraint_matrix = np.zeros((6+int(r*(r+1)/2),int(r*(r**2+3*r)/2)))
            constraint_matrix = np.zeros(
                (int(r * (r + 1) / 2), int(r * (r**2 + 3 * r) / 2)))
        if poly_order == 3:
            #constraint_matrix = np.zeros((int(r*(r+1)/2),int(r*(r**2+3*r)/2)+336))
            constraint_matrix = np.zeros(
                (int(r * (r + 1) / 2),
                 int(r * (r**2 + 3 * r) / 2) + 588))  # 30
        if poly_order == 4:
            constraint_matrix = np.zeros(
                (int(r * (r + 1) / 2), int(r * (r**2 + 3 * r) / 2) + 60))
        for i in range(r):
            constraint_matrix[i, i * (r + 1)] = 1.0
        q = r
        for i in range(r):
            counter = 1
            for j in range(i + 1, r):
                constraint_matrix[q, i * r + j] = 1.0
                constraint_matrix[q, i * r + j + counter * (r - 1)] = 1.0
                counter = counter + 1
                q = q + 1
    # linear_r4_mat or linear_r12_mat are initial guesses
    # for the optimization
    linear_r4_mat = np.zeros((r, r))
    linear_r4_mat[0, 1] = 0.091
    linear_r4_mat[1, 0] = -0.091
    linear_r4_mat[2, 3] = 0.182
    linear_r4_mat[3, 2] = -0.182
    linear_r4_mat[5, 4] = -3 * 0.091
    linear_r4_mat[4, 5] = 3 * 0.091
    #linear_r4_mat[8,7] = -4*0.091
    #linear_r4_mat[7,8] = 4*0.091
    #linear_r4_mat[6,7] = 4*0.091
    #linear_r4_mat[7,6] = -4*0.091
    linear_r12_mat = np.zeros((12, 90))
    linear_r12_mat[0, 1] = 0.089
    linear_r12_mat[1, 0] = -0.089
    linear_r12_mat[2, 3] = 0.172
    linear_r12_mat[3, 2] = -0.172
    linear_r12_mat[2, 5] = 0.03
    linear_r12_mat[5, 2] = -0.03
    linear_r12_mat[2, 6] = 0.022
    linear_r12_mat[6, 2] = -0.022
    linear_r12_mat[6, 4] = 0.022
    linear_r12_mat[4, 6] = 0.023
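    # note: linear_r12_mat[7, 5] is assigned twice below; the second
    # assignment (0.123) overwrites the first (-0.023)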
    linear_r12_mat[7, 5] = -0.023
    linear_r12_mat[5, 7] = -0.123
    linear_r12_mat[7, 5] = 0.123
    sindy_opt = SR3Enhanced(threshold=threshold, nu=1, max_iter=20000, \
        constraint_lhs=constraint_matrix,constraint_rhs=constraint_zeros, \
        tol=1e-6,thresholder='l0',initial_guess=linear_r4_mat)
    model = SINDy(optimizer=sindy_opt, \
        feature_library=sindy_library, \
        differentiation_method=FiniteDifference(drop_endpoints=True), \
        feature_names=feature_names)
    x_train = x[:M_train, :]
    x0_train = x[0, :]
    x_true = x[M_train:, :]
    x0_test = x[M_train, :]
    model.fit(x_train, t=t_train, unbias=False)
    t_cycle = np.linspace(time[M_train], time[M_train] * 1.3,
                          int(len(time) / 2.0))
    print(model.coefficients())
    x_sim,output = model.simulate(x0_test,t_test, \
        integrator=odeint,stop_condition=None,full_output=True, \
        rtol=1e-20,h0=1e-5) #h0=1e-20
    x_sim1,output = model.simulate(-0.4*np.ones(r),t_cycle, \
        integrator=odeint,stop_condition=None,full_output=True, \
        rtol=1e-20,h0=1e-5)
    x_sim2,output = model.simulate(0.15*np.ones(r),t_cycle, \
        integrator=odeint,stop_condition=None,full_output=True, \
        rtol=1e-20,h0=1e-5)
    x_dot = model.differentiate(x, t=time)
    x_dot_train = model.predict(x_train)
    x_dot_sim = model.predict(x_true)
    print('Model score: %f' % model.score(x, t=time))
    make_evo_plots(x_dot,x_dot_train, \
        x_dot_sim,x_true,x_sim,time,t_train,t_test)
    make_table(model, feature_names)
    # Makes 3D phase space plots
    if make_3Dphaseplots:
        make_3d_plots(x_true, x_sim, t_test, 'sim', 0, 1, 2)
        make_3d_plots(x_true, x_sim, t_test, 'sim', 0, 1, 3)
        make_3d_plots(x_true, x_sim, t_test, 'sim', 0, 1, 4)
        make_3d_plots(x_true, x_sim, t_test, 'sim', 0, 1, 5)
        make_3d_plots(x_true, x_sim, t_test, 'sim', 0, 1, 6)
        make_3d_plots(x_true, x_sim, t_test, 'sim', 3, 4, 5)
        make_3d_plots(x_true, x_sim, t_test, 'sim', 4, 5, 6)
        make_3d_plots(x_sim1, x_sim2, t_cycle, 'limitcycle')
    for i in range(r):
        x_sim[:, i] = x_sim[:, i] * sum(np.amax(abs(Vh), axis=1)[0:r])
        x_true[:, i] = x_true[:, i] * sum(np.amax(abs(Vh), axis=1)[0:r])
    return t_test, x_true, x_sim, S2
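A minimal sketch of invoking the framework above. inner_prod and time must be assembled from simulation data beforehand, and SR3Enhanced imported from the accompanying code; the parameter values here are illustrative, not the author's:

# illustrative call; see the docstring above for shapes and meanings
t_test, x_true, x_sim, S2 = compressible_Framework(
    inner_prod,              # (M, M) scaled inner-product matrix
    time,                    # (M,) time base in microseconds
    poly_order=2,            # quadratic SINDy library
    threshold=0.05,          # SR3 sparsity threshold
    r=6,                     # SVD truncation rank
    tfac=0.8,                # 80% of samples used for training
    SR3Enhanced=SR3Enhanced,
    make_3Dphaseplots=False,
)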
Example #15
def test_order_error():
    with pytest.raises(NotImplementedError):
        FiniteDifference(order=3)
    with pytest.raises(ValueError):
        FiniteDifference(order=-1)
Example #16
def test_centered_difference_dim():
    x = np.ones((5, 5, 5))
    centered_difference = FiniteDifference(order=2)
    with pytest.raises(ValueError):
        centered_difference(x)
Example #17
def test_forward_difference_dim():
    x = np.ones((5, 5, 5))
    forward_difference = FiniteDifference(order=1)
    with pytest.raises(ValueError):
        forward_difference(x)
Example #18
def test_nan_derivatives(data_lorenz):
    x, t = data_lorenz

    model = SINDy(differentiation_method=FiniteDifference(drop_endpoints=True))
    model.fit(x, t)
    check_is_fitted(model)