    def test_local_linear_gradients(self):
        data = helper.load_test_npz('train_points_200_2.npz')
        X = data['X'].copy()
        f = 2 - 5 * X[:, 0] + 4 * X[:, 1]

        df = gr.local_linear_gradients(X, f)
        M = df.shape[0]
        np.testing.assert_array_almost_equal(df,
                                             np.tile(np.array([-5.0, 4.0]),
                                                     (M, 1)),
                                             decimal=9)

        df = gr.local_linear_gradients(X, f, p=8)
        M = df.shape[0]
        np.testing.assert_array_almost_equal(df,
                                             np.tile(np.array([-5.0, 4.0]),
                                                     (M, 1)),
                                             decimal=9)

        f = 2 - np.sin(X[:, 0]) + np.cos(X[:, 1])
        np.random.seed(1234)
        df = gr.local_linear_gradients(X, f)
        #np.savez('data/test_llm_gradients',df=df)
        data = helper.load_test_npz('test_llm_gradients.npz')
        np.testing.assert_equal(df, data['df'])
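
For context, here is a minimal sketch of what a local linear gradient estimate does, assuming only numpy: for each sample, fit an affine model to its p nearest neighbours by least squares and take the linear coefficients as the gradient. The function name and the default choice of p below are illustrative, not the library's internals, which may also subsample the input points.

import numpy as np

def local_linear_gradients_sketch(X, f, p=None):
    # X is M-by-m, f is length-M; returns an M-by-m array of gradient estimates
    M, m = X.shape
    if p is None:
        p = min(2 * m + 1, M)  # a plausible neighbourhood size (assumption)
    df = np.zeros((M, m))
    for i in range(M):
        # indices of the p nearest neighbours of X[i] (including X[i] itself)
        idx = np.argsort(np.sum((X - X[i])**2, axis=1))[:p]
        A = np.hstack((np.ones((p, 1)), X[idx]))  # [1, x] design matrix
        coef = np.linalg.lstsq(A, f[idx], rcond=None)[0]
        df[i] = coef[1:]  # drop the intercept; the slope is the gradient estimate
    return df

# For the linear test function above, every local fit recovers [-5, 4] up to round-off:
X = np.random.uniform(-1.0, 1.0, size=(200, 2))
f = 2 - 5*X[:, 0] + 4*X[:, 1]
print(local_linear_gradients_sketch(X, f)[:3])
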
Example #2
def opg_subspace(X, f, weights):
    """Estimate active subspace with local linear models.
    
    This approach is related to the sufficient dimension reduction method
    sometimes known as the outer product of gradients (OPG) method. See the 2001
    paper 'Structure adaptive approach for dimension reduction' by Hristache et al.
    
    Parameters
    ----------
    X : ndarray
        M-by-m matrix of input samples, oriented as rows
    f : ndarray
        M-by-1 vector of output samples corresponding to the rows of `X`
    weights : ndarray
        M-by-1 vector of weights corresponding to the numerical quadrature rule
        used to estimate the matrix whose eigenspaces define the active subspace
        
    Returns
    -------
    e : ndarray
        m-by-1 vector of eigenvalues
    W : ndarray
        m-by-m orthogonal matrix of eigenvectors
    """
    X, f, M, m = process_inputs_outputs(X, f)

    # Obtain gradient approximations using local linear regressions
    df = local_linear_gradients(X, f, weights=weights)

    # Use gradient approximations to compute active subspace
    opg_weights = np.ones((df.shape[0], 1)) / df.shape[0]
    e, W = active_subspace(df, opg_weights)

    return e, W
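
The active_subspace call above presumably forms the weighted outer-product-of-gradients matrix from the estimated gradients and eigendecomposes it. Below is a minimal numpy sketch of that step; active_subspace_sketch is an illustrative name, not the library's API.

import numpy as np

def active_subspace_sketch(df, weights):
    # df is M-by-m (gradient samples as rows), weights is M-by-1;
    # C is the m-by-m matrix C = sum_i w_i * grad_i grad_i^T
    C = df.T @ (df * weights)
    e, W = np.linalg.eigh(C)
    order = np.argsort(e)[::-1]  # sort eigenpairs in descending order
    return e[order].reshape(-1, 1), W[:, order]

A call to opg_subspace with uniform Monte Carlo weights would then look like
e, W = opg_subspace(X, f, np.ones((X.shape[0], 1)) / X.shape[0]), with the leading
columns of W spanning the estimated active subspace.
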
Example #3
    def test_local_linear_gradients(self):

        np.random.seed(42)
        X = np.random.uniform(-1.0,1.0,size=(200,2))
        f = 2 - 5*X[:,0] + 4*X[:,1]
        
        df = gr.local_linear_gradients(X, f)
        M = df.shape[0]
        np.testing.assert_array_almost_equal(df, np.tile(np.array([-5.0, 4.0]), (M,1)), decimal=9)

        df = gr.local_linear_gradients(X, f, p=8)
        M = df.shape[0]
        np.testing.assert_array_almost_equal(df, np.tile(np.array([-5.0, 4.0]), (M,1)), decimal=9)
        
        f = 2 - np.sin(X[:,0]) + np.cos(X[:,1])        
        np.random.seed(1234)
        df = gr.local_linear_gradients(X, f)
Example #4
    def test_local_linear_gradients(self):
        data = helper.load_test_npz('train_points_200_2.npz')
        X = data['X'].copy()
        f = 2 - 5*X[:,0] + 4*X[:,1]
        
        df = gr.local_linear_gradients(X, f)
        M = df.shape[0]
        np.testing.assert_array_almost_equal(df, np.tile(np.array([-5.0, 4.0]), (M,1)), decimal=9)

        df = gr.local_linear_gradients(X, f, p=8)
        M = df.shape[0]
        np.testing.assert_array_almost_equal(df, np.tile(np.array([-5.0, 4.0]), (M,1)), decimal=9)
        
        f = 2 - np.sin(X[:,0]) + np.cos(X[:,1])        
        np.random.seed(1234)
        df = gr.local_linear_gradients(X, f)
        
        data = helper.load_test_npz('test_llm_gradients.npz')
        np.testing.assert_equal(df, data['df'])        