Example #1
def test_plot_sufficient_summary_02(self):
    np.random.seed(42)
    gradients = np.random.uniform(-1, 1, 200).reshape(50, 4)
    weights = np.ones((50, 1)) / 50
    ss = ActiveSubspaces(dim=3, n_boot=200)
    ss.fit(gradients=gradients, weights=weights)
    with self.assertRaises(ValueError):
        ss.plot_sufficient_summary(10, 10)
Example #2
def test_plot_sufficient_summary_04(self):
    np.random.seed(42)
    gradients = np.random.uniform(-1, 1, 200).reshape(50, 4)
    ss = ActiveSubspaces(dim=1, n_boot=100)
    ss.fit(gradients=gradients)
    with assert_plot_figures_added():
        ss.plot_sufficient_summary(
            np.random.uniform(-1, 1, 100).reshape(25, 4),
            np.random.uniform(-1, 1, 25).reshape(-1, 1))
Example #3
def test_plot_sufficient_summary_03(self):
    np.random.seed(42)
    gradients = np.random.uniform(-1, 1, 200).reshape(50, 4)
    weights = np.ones((50, 1)) / 50
    ss = ActiveSubspaces()
    ss.compute(gradients=gradients, weights=weights, nboot=200)
    ss.partition(2)
    with assert_plot_figures_added():
        ss.plot_sufficient_summary(
            np.random.uniform(-1, 1, 100).reshape(25, 4),
            np.random.uniform(-1, 1, 25).reshape(-1, 1))
Example #4
plt.figure(figsize=(6, 4))
plt.title("Inputs distribution")
plt.scatter(X[:, 0], X[:, 1])
plt.grid(linestyle='dotted')
plt.show()

# Define the output of interest and compute the gradients
func = partial(radial, normalizer=None, generatrix=lambda x: x)
f = func(X)
df = egrad(func)(X)

# Compute the active subspace
asub = ActiveSubspaces(dim=1)
asub.fit(gradients=df)
asub.plot_eigenvalues(figsize=(6, 4))
asub.plot_sufficient_summary(X, f, figsize=(6, 4))

# Use the parallel HMC sampling implementation again to compute the
# outputs of the optimal profile $g$ at the active components of
# the test inputs X_test.
M_test = 60
X_test = chains(log_proposal,
                2,
                n_samples=30,
                epsilon=0.2,
                pr_args=(V, ),
                dim=2,
                n_burn=300)
f_test = func(X_test)
x_forward = asub.transform(X_test)[0]
optimal_profile = np.array(
Example #5
def test_plot_sufficient_summary_01(self):
    ss = ActiveSubspaces()
    # plotting before fitting the subspace is expected to raise a ValueError
    with self.assertRaises(ValueError):
        ss.plot_sufficient_summary(10, 10)
Example #6
# nor = Normalizer(lb, ub)
# x = nor.fit_transform(x_raw)
x_raw = inputs_gaussian(n_samples, mean, cov)
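# whiten the inputs: subtract the mean and multiply by the inverse square root of the covariance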
x = (x_raw-mean).dot(linalg.sqrtm(np.linalg.inv(cov)))

# Define the output of interest and compute the gradients
# func = partial(output, normalizer=nor, r=generatrix)
func = sin_2d
f = func(x)
df = egrad(func)(x)

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x[:,0], x[:,1], f)
plt.show()

# compute the active subspace
asub = ActiveSubspaces(dim=1, method='exact', n_boot=100)
asub.fit(gradients=df)

title = '2D sine'
asub.plot_eigenvalues(figsize=(6, 4), title=title)
print("Eigenvalues: {}".format(np.squeeze(asub.evals)))

asub.plot_eigenvectors(figsize=(6, 4), title=title)
asub.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)

asub_2d = ActiveSubspaces(dim=2, method='exact', n_boot=100)
asub_2d.fit(gradients=df)
asub_2d.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)
Example #7
#generate inputs, outputs, gradients
dist_inputs = pyro.distributions.MultivariateNormal(torch.zeros([input_dim]),
                                                    torch.eye(input_dim))
x = dist_inputs(sample_shape=torch.Size([n_samples]))
x.requires_grad = True
f = radial(x + torch.ones(input_dim), generatrix=lambda x: x)
f.backward(gradient=torch.ones([n_samples]))
df = x.grad

#search for an active subspace
ss = ActiveSubspaces(dim=1)
ss.fit(gradients=df.cpu().detach().numpy())
ss.plot_eigenvalues(figsize=(6, 4))
ss.plot_sufficient_summary(x.detach().numpy(),
                           f.detach().numpy(),
                           figsize=(6, 4))

kernel = GPy.kern.RBF(input_dim=1, ARD=True)
gp = GPy.models.GPRegression(
    ss.transform(x.detach().numpy())[0],
    f.reshape(-1, 1).detach().numpy(), kernel)
gp.optimize_restarts(5, verbose=False)
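
# A rough sketch of how the fitted surrogate could be queried (the fresh samples
# below are illustrative only): project new inputs onto the 1D active subspace
# and evaluate the GP posterior there.
x_new = dist_inputs(sample_shape=torch.Size([10]))
active_new = ss.transform(x_new.detach().numpy())[0]
g_mean, g_var = gp.predict(active_new)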

# Use the No-U-Turn Sampler (NUTS) variant of Hamiltonian Monte Carlo to sample
# from the posterior of the original model.
# plain NUTS
num_chains = 1
num_samples = 100
kernel = NUTS(model)
mcmc = MCMC(kernel,
            num_samples=num_samples,
Example #8
    #output values (f) and gradients (df)
    func = partial(radial, normalizer=nor, generatrix=lambda x: np.cos(x))
    f = func(x)
    df = egrad(func)(x)
    return x, f, df


xx, f, df = sample_in_out(input_dim, n_samples)
y, t, dt = sample_in_out(input_dim, N)

#AS
ss = ActiveSubspaces(1)
ss.fit(gradients=dt, outputs=t, inputs=y)
ss.plot_eigenvalues(figsize=(6, 4))
ss.plot_sufficient_summary(y, t, figsize=(6, 4))

# number of parameters of the spectral distribution associated with the feature map;
# these are the hyperparameters that will be tuned later
n_params = 1

# sample the bias term
b = np.random.uniform(0, 2 * np.pi, n_features)

# define the feature map
fm = FeatureMap(distr='laplace',
                bias=b,
                input_dim=input_dim,
                n_features=n_features,
                params=np.zeros(n_params),
                sigma_f=f.var())
Example #9
np.random.seed(42)
n_samples = x_.shape[0]
input_dim = x_.shape[1]
d = fa_.shape[1]
dim = 1

#process data
x, f, df = x_, f_, df_
print("data", x.shape, f.shape, df.shape)

#AS
ss = ActiveSubspaces(dim=1)
ss.fit(inputs=x, outputs=f, gradients=df)
ss.plot_eigenvalues()
ss.plot_eigenvectors()
ss.plot_sufficient_summary(inputs=x, outputs=f)

## Active Subspaces with vectorial outputs
#process data
x, f, df = x_, fa_, dfa_.reshape(n_samples, d, input_dim)
print("data", x.shape, f.shape, df.shape)

#vectorial AS
vss = ActiveSubspaces(dim=5, n_boot=10)
vss.fit(inputs=x, outputs=f, gradients=df, metric=metric)
np.save("data/modes_AS", vss.W1)
vss.dim = 1
vss._partition()
vss.plot_eigenvalues()
vss.plot_eigenvectors()
Example #10
func = sin_2d
dfunc = egrad(func)

f = func(x)
df_exact = dfunc(x)  # exact gradients
df_gp = eval_gp_grad(
    x, f, n_samples,
    input_dim)  # gradients approximated with Gaussian process regression
title = "sin"

# Compute the active subspace with approximated gradients
asub1 = ActiveSubspaces(dim=1, method='exact', n_boot=100)
asub1.fit(gradients=df_gp)
asub1.plot_eigenvalues(figsize=(6, 4), title=title)
asub1.plot_eigenvectors(figsize=(6, 4), title=title)
asub1.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)

asub2 = ActiveSubspaces(dim=2, method='exact', n_boot=100)
asub2.fit(gradients=df_gp)
asub2.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)

# Compute the active subspace with exact gradients
asub1_ = ActiveSubspaces(dim=1, method='exact', n_boot=100)
asub1_.fit(gradients=df_exact)
asub1_.plot_eigenvalues(figsize=(6, 4), title=title)
asub1_.plot_eigenvectors(figsize=(6, 4), title=title)
asub1_.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)

asub2_ = ActiveSubspaces(dim=2, method='exact', n_boot=100)
asub2_.fit(gradients=df_exact)
asub2_.plot_sufficient_summary(x, f, figsize=(6, 4), title=title)
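
# As a rough consistency check, one could compare the active directions obtained
# from GP-approximated and exact gradients: for a 1D subspace the absolute inner
# product of the two leading eigenvectors should be close to 1.
alignment = np.abs(asub1.W1.T.dot(asub1_.W1)).item()
print("Alignment between approximated and exact active directions: {:.4f}".format(alignment))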
Example #11
np.random.seed(42)

# global parameters
n_train = 300
n_params = 2

x_np = np.random.uniform(size=(n_train, n_params))
f = x_np[:, 0]**3 + x_np[:, 1]**3 + 0.2 * x_np[:, 0] + 0.6 * x_np[:, 1]
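# analytic gradient of f with respect to each of the two parameters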
df_np = np.empty((n_train, n_params))
df_np[:, 0] = 3.0 * x_np[:, 0]**2 + 0.2
df_np[:, 1] = 3.0 * x_np[:, 1]**2 + 0.6

ss = ActiveSubspaces(1)
ss.fit(inputs=x_np, gradients=df_np)
ss.plot_eigenvalues(figsize=(6, 4))
ss.plot_sufficient_summary(x_np, f, figsize=(6, 4))
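
# The fitted 1D subspace can also be inspected directly; a minimal sketch:
# ss.evals holds the eigenvalues and ss.W1 the active eigenvector(s).
print("Eigenvalues:", np.squeeze(ss.evals))
print("Active direction:", np.squeeze(ss.W1))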

nll = NonlinearLevelSet(
    n_layers=10,
    active_dim=1,
    lr=0.008,
    epochs=1000,
    dh=0.25,
    optimizer=torch.optim.SGD,
    scheduler=torch.optim.lr_scheduler.StepLR,
)
x_torch = torch.as_tensor(x_np, dtype=torch.double)
df_torch = torch.as_tensor(df_np, dtype=torch.double)
nll.train(inputs=x_torch,
          gradients=df_torch,
          outputs=f.reshape(-1, 1),