Example #1
 def test_plot_eigenvalues_04(self):
     np.random.seed(42)
     gradients = np.random.uniform(-1, 1, 200).reshape(50, 4)
     weights = np.ones((50, 1)) / 50
     ss = ActiveSubspaces(dim=1, n_boot=200)
     ss.fit(gradients=gradients, weights=weights)
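     # only 4 eigenvalues are available (gradients are 50 x 4), so n_evals=5 must raise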
     with self.assertRaises(TypeError):
         ss.plot_eigenvalues(n_evals=5, figsize=(7, 7))
Example #2
 def test_plot_eigenvalues_03(self):
     np.random.seed(42)
     gradients = np.random.uniform(-1, 1, 200).reshape(50, 4)
     weights = np.ones((50, 1)) / 50
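     # note: this example uses the older ATHENA interface, in which the
     # subspace was computed with compute(..., nboot=...) instead of
     # fit(..., n_boot=...) as in the other examples on this page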
     ss = ActiveSubspaces()
     ss.compute(gradients=gradients, weights=weights, nboot=200)
     with assert_plot_figures_added():
         ss.plot_eigenvalues(n_evals=3, figsize=(7, 7), title='Eigenvalues')
Example #3
 def test_plot_eigenvalues_02(self):
     np.random.seed(42)
     gradients = np.random.uniform(-1, 1, 200).reshape(50, 4)
     weights = np.ones((50, 1)) / 50
     ss = ActiveSubspaces(dim=1, n_boot=200)
     ss.fit(gradients=gradients, weights=weights)
     with assert_plot_figures_added():
         ss.plot_eigenvalues(figsize=(7, 7), title='Eigenvalues')
Example #4
 def test_plot_eigenvalues_05(self):
     np.random.seed(42)
     inputs = np.random.uniform(-1, 1, 500).reshape(10, 50)
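     # gradients can also be supplied as a generator, one sample (row) at a time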
     gradients = (inputs[i, :] for i in range(10))
     ss = ActiveSubspaces(dim=4, n_boot=200)
     ss.fit(gradients=gradients)
     with assert_plot_figures_added():
         ss.plot_eigenvalues(figsize=(7, 7), title='Eigenvalues')
Example #5
 def test_plot_eigenvalues_05(self):
     np.random.seed(42)
     grad = np.array(
         [[-0.50183952, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [-1.26638196, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [ 0.43017941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
          [ 0.65008914, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
     gradients = (grad[i, :] for i in range(4))
     ss = ActiveSubspaces(dim=2, method='exact', n_boot=200)
     ss.fit(gradients=gradients)
     with assert_plot_figures_added():
         ss.plot_eigenvalues(figsize=(7, 7), title='Eigenvalues')
Example #6
plt.figure(figsize=(6, 4))
plt.title("Inputs distribution")
plt.scatter(X[:, 0], X[:, 1])
plt.grid(linestyle='dotted')
plt.show()

# Define the output of interest and compute the gradients
func = partial(radial, normalizer=None, generatrix=lambda x: x)
f = func(X)
df = egrad(func)(X)

# Compute the active subspace
asub = ActiveSubspaces(dim=1)
asub.fit(gradients=df)
asub.plot_eigenvalues(figsize=(6, 4))
asub.plot_sufficient_summary(X, f, figsize=(6, 4))

# Use the parallel HMC sampling implementation again to draw the test
# inputs X_test; the optimal profile $g$ is then evaluated at the
# active components of these inputs.
M_test = 60  # 2 chains x 30 samples each
X_test = chains(log_proposal,
                2,
                n_samples=30,
                epsilon=0.2,
                pr_args=(V, ),
                dim=2,
                n_burn=300)
f_test = func(X_test)
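# keep the active components of the test inputs (transform returns the
# active and inactive projections)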
x_forward = asub.transform(X_test)[0]
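
# A possible follow-up (not in the original snippet): fit a 1-D response
# surface g on the active variable with GPy, mirroring the GPy-based
# example further down. This is a hedged sketch; it assumes GPy is
# installed and reuses x_forward and f_test from above.
import GPy

kernel_g = GPy.kern.RBF(input_dim=1, ARD=True)
gp_g = GPy.models.GPRegression(x_forward, f_test.reshape(-1, 1), kernel_g)
gp_g.optimize_restarts(5, verbose=False)
g_mean, g_var = gp_g.predict(x_forward)  # posterior mean/variance of g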
Example #7
 def test_plot_eigenvalues_01(self):
     ss = ActiveSubspaces()
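     # plotting before the subspace has been computed must raise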
     with self.assertRaises(ValueError):
         ss.plot_eigenvalues(figsize=(7, 7), title='Eigenvalues')
Example #8
# nor = Normalizer(lb, ub)
# x = nor.fit_transform(x_raw)
x_raw = inputs_gaussian(n_samples, mean, cov)
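# whiten the inputs: map the Gaussian samples to zero mean and identity covariance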
x = (x_raw - mean).dot(linalg.sqrtm(np.linalg.inv(cov)))

# Define the output of interest and compute the gradients
# func = partial(output, normalizer=nor, r=generatrix)
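# NOTE: sin_2d is defined elsewhere in the original script; the definition
# below is only a plausible stand-in so that this excerpt is self-contained
# (autograd.numpy is needed for egrad to differentiate it)
import autograd.numpy as anp

def sin_2d(x):
    return anp.sin(x[:, 0] + x[:, 1])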
func = sin_2d
f = func(x)
df = egrad(func)(x)

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x[:, 0], x[:, 1], f)
plt.show()

# compute the active subspace
asub = ActiveSubspaces(dim=1, method='exact', n_boot=100)
asub.fit(gradients=df)

title = '2D sine'
asub.plot_eigenvalues(figsize=(6, 4), title=title)
print("Eigenvalues: {}".format(np.squeeze(asub.evals)))

asub.plot_eigenvectors(figsize=(6, 4), title=title)
asub.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)

asub_2d = ActiveSubspaces(dim=2, method='exact', n_boot=100)
asub_2d.fit(gradients=df)
asub_2d.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)
Example #9
# (the definition of the probabilistic model is truncated in this excerpt;
# it ends by conditioning on the observed outputs via obs=f)


# generate inputs, outputs, gradients
dist_inputs = pyro.distributions.MultivariateNormal(torch.zeros([input_dim]),
                                                    torch.eye(input_dim))
x = dist_inputs(sample_shape=torch.Size([n_samples]))
x.requires_grad = True
f = radial(x + torch.ones(input_dim), generatrix=lambda x: x)
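# backward on the vector output with a seed of ones fills x.grad with the
# gradient of each sample's output with respect to its own input row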
f.backward(gradient=torch.ones([n_samples]))
df = x.grad

# search for an active subspace
ss = ActiveSubspaces(dim=1)
ss.fit(gradients=df.cpu().detach().numpy())
ss.plot_eigenvalues(figsize=(6, 4))
ss.plot_sufficient_summary(x.detach().numpy(),
                           f.detach().numpy(),
                           figsize=(6, 4))

kernel = GPy.kern.RBF(input_dim=1, ARD=True)
gp = GPy.models.GPRegression(
    ss.transform(x.detach().numpy())[0],
    f.reshape(-1, 1).detach().numpy(), kernel)
gp.optimize_restarts(5, verbose=False)

# Use the No-U-Turn Sampler (NUTS) variant of Hamiltonian Monte Carlo to
# sample from the posterior of the original model.
# plain NUTS
num_chains = 1
num_samples = 100
kernel = NUTS(model)
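
# A hedged sketch of how the NUTS kernel is typically run with pyro's MCMC
# driver; the arguments of model are not shown in this excerpt, so the
# mcmc.run(...) call is left schematic.
from pyro.infer import MCMC

mcmc = MCMC(kernel,
            num_samples=num_samples,
            warmup_steps=100,
            num_chains=num_chains)
# mcmc.run(...)  # pass the model's inputs/observations here
# posterior_samples = mcmc.get_samples()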
Example #10
# simulation parameters
np.random.seed(42)
n_samples = x_.shape[0]
input_dim = x_.shape[1]
d = fa_.shape[1]
dim = 1

# process data
x, f, df = x_, f_, df_
print("data", x.shape, f.shape, df.shape)

# scalar AS
ss = ActiveSubspaces(dim=1)
ss.fit(inputs=x, outputs=f, gradients=df)
ss.plot_eigenvalues()
ss.plot_eigenvectors()
ss.plot_sufficient_summary(inputs=x, outputs=f)

## Active Subspaces with vectorial outputs
# process data
x, f, df = x_, fa_, dfa_.reshape(n_samples, d, input_dim)
print("data", x.shape, f.shape, df.shape)

# vectorial AS
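# 'metric' is taken from the surrounding context; as an assumption, a
# plausible choice is a d x d weight matrix for the vector-valued output,
# e.g. metric = np.eye(d)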
vss = ActiveSubspaces(dim=5, n_boot=10)
vss.fit(inputs=x, outputs=f, gradients=df, metric=metric)
np.save("data/modes_AS", vss.W1)
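# shrink to a 1-dimensional active subspace and re-partition the
# eigenvectors accordingly (_partition is a private ATHENA helper that
# recomputes the W1/W2 split)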
vss.dim = 1
vss._partition()
vss.plot_eigenvalues()