def test_local_linear_gradients_02(self):
    """Local linear gradients recover an affine model's gradient exactly.

    For f(x) = 2 - 5*x0 + 4*x1 every local least-squares fit must return
    the constant gradient (-5, 4) at each evaluation point.
    """
    np.random.seed(42)
    X = np.random.uniform(-1.0, 1.0, size=(200, 2))
    y = 2 - 5 * X[:, 0] + 4 * X[:, 1]
    grads = local_linear_gradients(X, y, n_neighbors=8)[0]
    expected = np.tile(np.array([-5.0, 4.0]), (grads.shape[0], 1))
    np.testing.assert_array_almost_equal(grads, expected, decimal=9)
def test_local_linear_gradients_05(self):
    """Requesting more neighbors than samples must raise ValueError."""
    np.random.seed(42)
    X = np.random.uniform(-1.0, 1.0, size=(10, 2))
    y = 2 - 5 * X[:, 0] + 4 * X[:, 1]
    # 15 neighbors cannot be drawn from a data set of only 10 points.
    self.assertRaises(ValueError, local_linear_gradients, X, y, n_neighbors=15)
# Visualize the previously fitted 1-D active subspace (asub1_ defined earlier
# in the script): eigenvector components and sufficient summary plot of f.
asub1_.plot_eigenvectors(figsize=(6, 4), title=title)
asub1_.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)

# Fit a 2-D active subspace from exact gradients (df_exact, computed earlier)
# with 100 bootstrap replicates, and show its sufficient summary plot.
asub2_ = ActiveSubspaces(dim=2, method='exact', n_boot=100)
asub2_.fit(gradients=df_exact)
asub2_.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)

# Error analysis
# Compare local-linear gradient estimates against exact (autograd) gradients
# for sample sizes 2^6 .. 2^11, recording mean absolute and relative errors.
samples_lis = [2**i for i in range(6, 12)]
err_abs, err_rel = [], []
for n_samples in samples_lis:
    # Draw fresh uniform inputs in [lb, ub] and map them to the normalized
    # domain expected by func.  NOTE(review): this rebinds x and f used in
    # the plots above — presumably intentional for a sequential script.
    X = inputs_uniform(n_samples, input_dim, lb, ub)
    nor = Normalizer(lb, ub)
    x = nor.fit_transform(X)
    f = func(x)
    # Estimated gradients and the (possibly reduced) set of points at which
    # they were evaluated; exact gradients via autograd at the same points.
    ll_gradients, new_inputs = local_linear_gradients(inputs=x, outputs=f)
    df = egrad(func)(new_inputs)
    # Mean Euclidean norm of the pointwise gradient error.
    absdiff = (1 / n_samples) * np.sum(
        np.linalg.norm(df - ll_gradients, axis=1))
    err_abs += [absdiff]
    # Relative error: normalize by the mean norm of the exact gradients.
    err_rel += [
        absdiff / ((1 / n_samples) * np.sum(np.linalg.norm(df, axis=1)))
    ]

# Plot absolute (top) and relative (bottom) errors vs. sample count,
# sharing the x-axis between the two panels.
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(9, 7))
fig.suptitle('Error Analysis')
axes[0].scatter(samples_lis, err_abs)
axes[0].plot(samples_lis, err_abs)
axes[0].set_ylabel('Absolute error')
axes[1].scatter(samples_lis, err_rel)
axes[1].plot(samples_lis, err_rel)