Example 1
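This test fits the regressor on training data for 150 iterations, checks that the rounded predictions match the expected targets, and then verifies that the model's stored input mean and standard deviation equal the statistics of the training observations.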
    def test_fit_normalized(self, input_shape, output_dim):
        bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                    output_dim=output_dim)

        observations, returns = get_train_data(input_shape, output_dim)

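        # Fit repeatedly so the regressor converges on the training targets.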
        for _ in range(150):
            bmr.fit(observations, returns)

        paths, expected = get_test_data(input_shape, output_dim)

        prediction = np.cast['int'](bmr.predict(paths['observations']))
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)

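        # The regressor should have recorded the mean and std of the
        # training observations for input normalization.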
        x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(bmr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)

        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)
Example 2
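This test overwrites a hidden-layer bias, pickles the regressor, reloads it in a fresh graph and session, and asserts that both the rounded predictions and the modified bias survive the round trip.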
    def test_is_pickleable(self):
        bmr = BernoulliMLPRegressor(input_shape=(1, ), output_dim=2)

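        # Overwrite a hidden-layer bias so the pickled state differs from a
        # fresh initialization.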
        with tf.compat.v1.variable_scope(
                'BernoulliMLPRegressor/NormalizedInputMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable('mlp/hidden_0/bias')
        bias.load(tf.ones_like(bias).eval())
        bias1 = bias.eval()

        result1 = np.cast['int'](bmr.predict(np.ones((1, 1))))
        h = pickle.dumps(bmr)

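            # Unpickle in a brand-new graph and session; predictions and the
            # modified bias should be restored exactly.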
        with tf.compat.v1.Session(graph=tf.Graph()):
            bmr_pickled = pickle.loads(h)
            result2 = np.cast['int'](bmr_pickled.predict(np.ones((1, 1))))
            assert np.array_equal(result1, result2)

            with tf.compat.v1.variable_scope(
                    'BernoulliMLPRegressor/NormalizedInputMLPModel',
                    reuse=True):
                bias2 = tf.compat.v1.get_variable('mlp/hidden_0/bias').eval()

            assert np.array_equal(bias1, bias2)
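
A minimal standalone sketch of the same API outside the test harness, for reference. It assumes only what the tests above exercise (the constructor arguments input_shape and output_dim, fit(xs, ys) with binary targets, and predict(xs)); the TF1-style session setup, the synthetic data, and the number of fit calls are illustrative assumptions, not part of the original test module.

import numpy as np
import tensorflow as tf

from garage.tf.regressors import BernoulliMLPRegressor

# garage's TF regressors run in graph mode; under TF2 this must be disabled
# explicitly (assumption: running outside garage's own runner/test fixtures).
tf.compat.v1.disable_eager_execution()

with tf.compat.v1.Session():
    # Built under the default session, as the tests above rely on via self.sess.
    bmr = BernoulliMLPRegressor(input_shape=(2, ), output_dim=2)

    # Toy binary targets: each output bit flags whether the matching input
    # coordinate exceeds 0.5.
    xs = np.random.random((1000, 2)).astype(np.float32)
    ys = (xs > 0.5).astype(np.float32)

    # A handful of fit calls, mirroring the repeated fitting in Example 1.
    for _ in range(30):
        bmr.fit(xs, ys)

    print(bmr.predict(xs[:5]))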