# Imports and fixtures assumed from garage's TF test-suite layout; adjust
# the paths if this file lives elsewhere.
import pickle
from unittest import mock

import numpy as np
import pytest
import tensorflow as tf

from garage.tf.optimizers import ConjugateGradientOptimizer, LbfgsOptimizer
from garage.tf.regressors import BernoulliMLPRegressorWithModel
from tests.fixtures import TfGraphTestCase


class TestBernoulliMLPRegressorWithModel(TfGraphTestCase):
    # Example parametrization (assumed); any (output_dim, (batch, feature))
    # pair works.
    @pytest.mark.parametrize('output_dim, input_shape', [(1, (1, 1)),
                                                         (2, (5, 2))])
    def test_log_likelihood_sym(self, output_dim, input_shape):
        bmr = BernoulliMLPRegressorWithModel(
            input_shape=(input_shape[1], ), output_dim=output_dim)

        new_xs_var = tf.placeholder(tf.float32, input_shape)
        new_ys_var = tf.placeholder(
            dtype=tf.float32, name='ys', shape=(None, output_dim))

        data = np.full(input_shape, 0.5)
        one_hot_label = np.zeros((input_shape[0], output_dim))
        one_hot_label[np.arange(input_shape[0]), 0] = 1

        # Numeric log-likelihood from the regressor's own distribution.
        p = bmr._f_prob(np.asarray(data))
        ll = bmr._dist.log_likelihood(np.asarray(one_hot_label), dict(p=p))

        # The symbolic log-likelihood should match the numeric one.
        outputs = bmr.log_likelihood_sym(new_xs_var, new_ys_var, name='ll_sym')
        ll_from_sym = self.sess.run(
            outputs, feed_dict={
                new_xs_var: data,
                new_ys_var: one_hot_label
            })

        assert np.allclose(ll, ll_from_sym, rtol=0, atol=1e-5)
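    # For reference, a minimal sketch (an assumption, not garage's API) of
    # the quantity the test above checks: the independent-Bernoulli
    # log-likelihood sum_i [y_i * log(p_i) + (1 - y_i) * log(1 - p_i)],
    # reduced over the output dimension.
    @staticmethod
    def _reference_bernoulli_log_likelihood(y, p):
        return np.sum(y * np.log(p) + (1. - y) * np.log(1. - p), axis=-1)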
    # Example parametrization (assumed).
    @pytest.mark.parametrize('input_shape, output_dim', [((1, ), 1),
                                                         ((2, ), 2)])
    def test_fit_normalized(self, input_shape, output_dim):
        bmr = BernoulliMLPRegressorWithModel(
            input_shape=input_shape, output_dim=output_dim)

        # get_train_data / get_test_data are data fixtures; a sketch of the
        # shapes they produce follows the class.
        observations, returns = get_train_data(input_shape, output_dim)
        for _ in range(150):
            bmr.fit(observations, returns)

        paths, expected = get_test_data(input_shape, output_dim)
        prediction = np.cast['int'](bmr.predict(paths['observations']))
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)

        # With input normalization enabled, the model's running statistics
        # should match the empirical moments of the training data.
        x_mean = self.sess.run(bmr.model.networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(bmr.model.networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)

        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)
    def test_is_pickleable2(self):
        bmr = BernoulliMLPRegressorWithModel(input_shape=(1, ), output_dim=2)

        with tf.variable_scope(
                'BernoulliMLPRegressorWithModel/NormalizedInputMLPModel',
                reuse=True):
            x_mean = tf.get_variable('normalized_vars/x_mean')
        x_mean.load(tf.ones_like(x_mean).eval())
        x1 = bmr.model.networks['default'].x_mean.eval()

        h = pickle.dumps(bmr)
        with tf.Session(graph=tf.Graph()):
            bmr_pickled = pickle.loads(h)
            x2 = bmr_pickled.model.networks['default'].x_mean.eval()

        assert np.array_equal(x1, x2)
    def test_is_pickleable(self):
        bmr = BernoulliMLPRegressorWithModel(input_shape=(1, ), output_dim=2)

        # Perturb a weight so the test cannot pass with freshly initialized
        # values.
        with tf.variable_scope(
                'BernoulliMLPRegressorWithModel/NormalizedInputMLPModel',
                reuse=True):
            bias = tf.get_variable('mlp/hidden_0/bias')
        bias.load(tf.ones_like(bias).eval())
        bias1 = bias.eval()

        result1 = np.cast['int'](bmr.predict(np.ones((1, 1))))
        h = pickle.dumps(bmr)

        with tf.Session(graph=tf.Graph()):
            bmr_pickled = pickle.loads(h)
            result2 = np.cast['int'](bmr_pickled.predict(np.ones((1, 1))))
            assert np.array_equal(result1, result2)

            with tf.variable_scope(
                    'BernoulliMLPRegressorWithModel/NormalizedInputMLPModel',
                    reuse=True):
                bias2 = tf.get_variable('mlp/hidden_0/bias').eval()

            assert np.array_equal(bias1, bias2)
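    # Handy when adapting the pickling tests above: list the graph's
    # variables to confirm the scope names they hard-code. A debugging
    # sketch using the TF1-style API; not part of the original suite.
    @staticmethod
    def _dump_variable_names():
        for v in tf.global_variables():
            # e.g. 'BernoulliMLPRegressorWithModel/NormalizedInputMLPModel/
            # mlp/hidden_0/bias:0'
            print(v.name)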
    # The mock arguments assume @mock.patch decorators replacing this
    # module's optimizer names; decorators apply bottom-up, so mock_cg is
    # the first argument.
    @mock.patch(__name__ + '.LbfgsOptimizer')
    @mock.patch(__name__ + '.ConjugateGradientOptimizer')
    def test_optimizer_args(self, mock_cg, mock_lbfgs):
        lbfgs_args = dict(max_opt_itr=25)
        cg_args = dict(cg_iters=15)
        bmr = BernoulliMLPRegressorWithModel(
            input_shape=(1, ),
            output_dim=2,
            optimizer=LbfgsOptimizer,
            optimizer_args=lbfgs_args,
            tr_optimizer=ConjugateGradientOptimizer,
            tr_optimizer_args=cg_args,
            use_trust_region=True)

        # The regressor should instantiate exactly the optimizer classes it
        # was given, forwarding the corresponding args.
        assert mock_lbfgs.return_value is bmr._optimizer
        assert mock_cg.return_value is bmr._tr_optimizer

        mock_lbfgs.assert_called_with(max_opt_itr=25)
        mock_cg.assert_called_with(cg_iters=15)
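# A minimal sketch of the data fixtures test_fit_normalized assumes; the
# real helpers live elsewhere in the test suite, and the names, shapes, and
# labeling rule here are assumptions. Labels are a deterministic function of
# the inputs so the regressor can actually fit them.
def get_train_data(input_shape, output_dim, size=100):
    observations = np.random.random((size, ) + input_shape)
    # One-hot labels keyed on whether the mean input exceeds 0.5.
    classes = (np.mean(observations, axis=-1) > 0.5).astype(int) % output_dim
    returns = np.eye(output_dim)[classes]
    return observations, returns


def get_test_data(input_shape, output_dim, size=10):
    observations, expected = get_train_data(input_shape, output_dim, size)
    return {'observations': observations}, expected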