Example #1

import numpy as np
from itertools import permutations

# Dnn, DenseLayer and InputSorter are assumed importable from the project
# under test; they are not defined in this snippet.

def test_symmetry(P, D):
    dnn = Dnn()
    layer = DenseLayer(P * D, 1)
    dnn.add_layer(layer)

    sdnn = InputSorter(dnn)

    # Random system, sorted by distance to origin.
    s = np.random.randn(P, D)
    s = s[np.argsort([row.dot(row) for row in s])]

    # Limit the number of permutations for computational feasibility.
    for _, p in zip(range(100), permutations(s)):
        p = np.asarray(p)

        # Permutation invariant:
        assert sdnn(s) == sdnn(p)

        # Values should be equal to dnn applied to the pre-sorted system.
        assert sdnn(p) == dnn(s)
        assert sdnn.laplacian(p) == dnn.laplacian(s)
        assert all(sdnn.gradient(p) == dnn.gradient(s))

        # The drift force should contain the same values, but permuted along
        # with p, since each of its components is tied to a specific particle's
        # coordinates. Sorting both results particle-wise therefore makes them
        # comparable regardless of ordering.
        sdnn_drift, dnn_drift = (
            sdnn.drift_force(p).reshape(s.shape),
            dnn.drift_force(s).reshape(s.shape),
        )
        sdnn_drift.sort(axis=0)
        dnn_drift.sort(axis=0)
        assert (sdnn_drift == dnn_drift).all()
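
The test relies on the wrapper feeding the network a canonical ordering of the
particles. A minimal sketch of how such a sorting wrapper can work, assuming
only that the inner model is callable (the real InputSorter also forwards
gradient, laplacian and drift_force, as the test shows); the class name and
helper below are hypothetical:

import numpy as np

class SortingWrapper:
    """Hypothetical sketch of an input-sorting wrapper."""

    def __init__(self, model):
        self.model = model

    def _sorted(self, system):
        # Order particles (rows) by squared distance to the origin,
        # mirroring the sort used in the test above.
        order = np.argsort(np.einsum("ij,ij->i", system, system))
        return system[order]

    def __call__(self, system):
        # Every permutation of the rows sorts to the same system, so the
        # wrapped model always sees one canonical ordering.
        return self.model(self._sorted(system))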
Example #2

import numpy as np

# The Hamiltonian, wavefunction, layer and sampler classes used below are
# assumed importable from the project under test.
P, D = 10, 3  # illustrative particle count and dimensionality

system = np.empty((P, D))
H = CoulombHarmonicOscillator()

# Wave functions:
simple_gaussian = SimpleGaussian(alpha=0.5)
jastrow = JastrowPade(alpha=1, beta=1)
simple_and_jastrow = WavefunctionProduct(simple_gaussian, jastrow)

layers = [
    DenseLayer(P * D, 32, activation=tanh, scale_factor=0.001),
    DenseLayer(32, 16, activation=tanh),
    DenseLayer(16, 1, activation=exponential),
]
dnn = Dnn()
for layer in layers:
    dnn.add_layer(layer)
psi = WavefunctionProduct(simple_and_jastrow, dnn)
psi_sampler = ImportanceSampler(system, psi, step_size=0.1)

# Sorted
simple_gaussian2 = SimpleGaussian(alpha=0.5)
jastrow2 = JastrowPade(alpha=1, beta=1)
simple_and_jastrow2 = WavefunctionProduct(simple_gaussian2, jastrow2)

layers2 = [
    DenseLayer(P * D, 32, activation=tanh, scale_factor=0.001),
    DenseLayer(32, 16, activation=tanh),
    DenseLayer(16, 1, activation=exponential),
]
dnn2 = Dnn()
for layer in layers2:
    dnn2.add_layer(layer)
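
The original snippet is cut off at this point. Judging by the parallel,
unsorted construction above and the "# Sorted" heading, the continuation
presumably wraps dnn2 in an InputSorter before forming the product
wavefunction; the names below are hypothetical:

# Assumed continuation, not part of the original snippet:
sdnn = InputSorter(dnn2)
psi_sorted = WavefunctionProduct(simple_and_jastrow2, sdnn)
psi_sorted_sampler = ImportanceSampler(system, psi_sorted, step_size=0.1)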
Example #3

import unittest
import warnings

import autograd.numpy as auto_np
import numpy as np
from autograd import elementwise_grad, hessian

# Dnn, DenseLayer and the activations (sigmoid, relu) are assumed importable
# from the project under test; sigmoid_np and relu_np are plain-NumPy
# reference activations (a sketch follows the test class).

class TestDnn(unittest.TestCase):
    def setUp(self):
        auto_np.random.seed(1234)
        self.nn = Dnn()
        self.input_layer = DenseLayer(2, 3, activation=sigmoid)
        self.middle_layer = DenseLayer(3, 4, activation=relu)
        self.output_layer = DenseLayer(4, 1)
        self.nn.add_layer(self.input_layer)
        self.nn.add_layer(self.middle_layer)
        self.nn.add_layer(self.output_layer)

        self.W1 = self.nn.layers[0].weights
        self.b1 = self.nn.layers[0].biases
        self.W2 = self.nn.layers[1].weights
        self.b2 = self.nn.layers[1].biases
        self.W3 = self.nn.layers[2].weights
        self.b3 = self.nn.layers[2].biases

        self.params = [self.W1, self.b1, self.W2, self.b2, self.W3, self.b3]

        def f(x, w1, b1, w2, b2, w3, b3):
            z1 = x @ w1 + b1
            z2 = sigmoid_np(z1) @ w2 + b2
            z3 = relu_np(z2) @ w3 + b3
            return z3

        self.f_np = f

    def test_evaluate(self):
        for _ in range(10):
            for x in auto_np.random.randn(500, 2):
                np.testing.assert_almost_equal(self.f_np(x, *self.params),
                                               self.nn(x))

    def test_parameter_gradients(self):
        for _ in range(10):
            x = auto_np.random.randn(1, 2)
            output = self.nn(x)
            grads = self.nn.gradient(x)

            # Compute gradients using autograd
            auto_grads = []
            for i in range(len(self.nn.layers)):
                W_grad = elementwise_grad(self.f_np, 1 + 2 * i)(x,
                                                                *self.params)
                b_grad = elementwise_grad(self.f_np,
                                          1 + 2 * i + 1)(x, *self.params)

                auto_grads.extend(W_grad.ravel())
                auto_grads.extend(b_grad.ravel())

                # Test each individual layer.
                np.testing.assert_almost_equal(
                    W_grad, self.nn.layers[i].weights_gradient)
                np.testing.assert_almost_equal(b_grad,
                                               self.nn.layers[i].bias_gradient)

            # Test the full flattened gradient vector. nn.gradient returns
            # (d psi / d theta) / psi, so it is scaled by the output before
            # comparing against the autograd gradients of psi.
            np.testing.assert_almost_equal(auto_grads, grads * output)

    def test_drift_force(self):
        for _ in range(10):
            x = auto_np.random.randn(1, 2)

            # Autograd computes the gradient per row of x; we want the sum over rows.
            auto_gradient = np.sum(elementwise_grad(self.f_np,
                                                    0)(x, *self.params),
                                   axis=0)

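            # drift_force returns 2 * (grad psi) / psi, so multiplying by the
            # output and halving recovers the plain gradient of psi.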
            np.testing.assert_almost_equal(
                auto_gradient,
                self.nn.drift_force(x) * self.nn(x) / 2)

    def test_laplace(self):
        # Autograd emits FutureWarnings from its own internals; ignore them here.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)

            hess = hessian(self.f_np)
            for _ in range(10):
                # Autograd's hessian is slow, so test fewer samples.
                x = auto_np.random.randn(1, 2)
                output = self.nn(x)

                # Need to feed autograd hessian one row at a time and sum results.
                expected = sum(
                    np.trace(hess(x[i], *self.params)[0])
                    for i in range(x.shape[0]))

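                # laplacian returns (laplacian of psi) / psi, so it is scaled
                # by the output before comparing with the Hessian trace.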
                self.assertAlmostEqual(expected, self.nn.laplacian(x) * output)
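
The reference function f above uses sigmoid_np and relu_np, which the snippet
never defines. Minimal plain-NumPy definitions consistent with how the test
uses them (the bodies below are assumptions, not taken from the project; they
use autograd's numpy so that f stays differentiable):

def sigmoid_np(x):
    # Logistic sigmoid.
    return 1 / (1 + auto_np.exp(-x))

def relu_np(x):
    # Rectified linear unit.
    return auto_np.maximum(0, x)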