    def test(self, model, cases):
        sess = tf.get_default_session()
        guarantee_initialized_variables(sess)
        embeds = model.compute(model.embeds, cases)
        primitive_embeddings = RLongPrimitiveEmbeddings(6)

        # compute object embedding after applying projection
        object_projection_layer = model._object_projection_layer
        W, b = object_projection_layer.get_weights()  # shapes [10, 6] and [6]
        object_embed = np.ones(10).dot(W) + b

        assert_array_almost_equal(
            embeds[0],
            np.concatenate((np.zeros(6), primitive_embeddings['r'],
                            primitive_embeddings[-1])))

        assert_array_almost_equal(
            embeds[1],
            np.concatenate(
                (np.zeros(6), np.zeros(6), primitive_embeddings['X1/1'])))

        assert_array_almost_equal(
            embeds[2],
            np.concatenate(
                (primitive_embeddings['b'], object_embed, object_embed)))
Example #2
    def test_outputs(self, model, inputs, output_tensors, outputs):
        """Test for correct output."""
        sess = tf.get_default_session()
        guarantee_initialized_variables(sess)
        args, kwargs = inputs
        test_outputs = model.compute(output_tensors, *args, **kwargs)
        assert_array_collections_equal(outputs, test_outputs, decimal=4)
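
assert_array_collections_equal is not defined in these excerpts. A minimal sketch of what such a helper might look like, assuming it recursively compares dicts/lists/tuples of arrays to the given number of decimal places:

from numpy.testing import assert_array_almost_equal


def assert_array_collections_equal(expected, actual, decimal=7):
    # Hypothetical sketch: walk matching dicts/lists/tuples and compare
    # the arrays at the leaves to `decimal` places.
    if isinstance(expected, dict):
        assert set(expected.keys()) == set(actual.keys())
        for key in expected:
            assert_array_collections_equal(expected[key], actual[key],
                                           decimal=decimal)
    elif isinstance(expected, (list, tuple)):
        assert len(expected) == len(actual)
        for exp, act in zip(expected, actual):
            assert_array_collections_equal(exp, act, decimal=decimal)
    else:
        assert_array_almost_equal(expected, actual, decimal=decimal)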
Example #3
    def __init__(self, config, save_dir):
        super(Experiment, self).__init__(config, save_dir)
        self.workspace.add_file('train_visualize', 'train_visualizer.txt')
        self.workspace.add_file('valid_visualize', 'valid_visualizer.txt')
        self.workspace.add_file('full_eval', 'full_eval_at_{step}.txt')
        self.workspace.add_file('codalab', 'codalab.json')
        self._domain = get_domain(config)

        self._train_parse_model = self._build_train_parse_model()
        self._decoder = self._build_decoder(self.train_parse_model)

        self._train_visualizer = Visualizer(self.decoder,
                                            self.workspace.train_visualize,
                                            'train',
                                            train=True)

        self._valid_visualizer = Visualizer(self.decoder,
                                            self.workspace.valid_visualize,
                                            'valid',
                                            train=False)

        # Reload weights if they exist. Otherwise, initialize weights.
        try:
            self.saver.restore()
            print('Successfully reloaded the weights')
        except IOError:
            # NOTE: use this instead of tf.initialize_all_variables()!
            # That op will overwrite Keras initializations.
            sess = tf.get_default_session()
            guarantee_initialized_variables(sess)
            print('Weights initialized')
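
guarantee_initialized_variables is also not shown here. A minimal TF 1.x sketch matching the NOTE above, assuming it initializes only the variables the session reports as uninitialized so Keras-initialized weights are left untouched:

import tensorflow as tf


def guarantee_initialized_variables(sess):
    # Hypothetical sketch: find variables without a value and initialize
    # only those, leaving already-initialized (e.g. Keras) variables alone.
    global_vars = tf.global_variables()
    init_flags = sess.run([tf.is_variable_initialized(v) for v in global_vars])
    uninitialized = [v for v, initialized in zip(global_vars, init_flags)
                     if not initialized]
    if uninitialized:
        sess.run(tf.variables_initializer(uninitialized))
    return uninitialized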
Example #4
    def test_restore(self, tmpdir, v):
        save_100_path = str(tmpdir.join('weights-100'))
        save_10_path = str(tmpdir.join('weights-10'))

        saver = Saver(str(tmpdir))
        assign_op = tf.assign(v, 12)

        sess = tf.get_default_session()
        guarantee_initialized_variables(sess)

        assert v.eval() == 5
        saver.save(100)  # save as step 100

        sess.run(assign_op)
        assert v.eval() == 12
        saver.save(10)  # save as step 10

        saver.restore()  # restores from the larger step number by default (100)
        assert v.eval() == 5  # restored

        saver.restore(10)  # force restore number 10
        assert v.eval() == 12

        saver.restore(save_100_path)
        assert v.eval() == 5

        # latest should be the largest step number, not necessarily last saved
        assert saver.latest_checkpoint == save_100_path
        assert os.path.exists(save_100_path)

        assert saver.checkpoint_paths == {
            10: save_10_path,
            100: save_100_path,
        }
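
The Saver class under test is not included in these excerpts. A rough sketch of the step bookkeeping the assertions rely on, with the class name StepSaverSketch and the 'weights-<step>' file pattern assumed for illustration:

import os
import re


class StepSaverSketch(object):
    # Hypothetical sketch of the lookup logic only; saving and restoring of
    # TensorFlow variables is omitted.
    def __init__(self, save_dir):
        self._save_dir = save_dir

    @property
    def checkpoint_paths(self):
        # Parse step numbers out of file names like 'weights-100'.
        paths = {}
        for name in os.listdir(self._save_dir):
            match = re.match(r'weights-(\d+)$', name)
            if match:
                paths[int(match.group(1))] = os.path.join(self._save_dir, name)
        return paths

    @property
    def latest_checkpoint(self):
        # The largest step number wins, regardless of save order.
        paths = self.checkpoint_paths
        return paths[max(paths)] if paths else None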
Example #5
def test_value_function(value_function, weights, dummy_cases, rewards):
    sess = tf.InteractiveSession()
    guarantee_initialized_variables(sess)

    fetch = {
        "loss": value_function._loss
    }

    feed_dict = value_function.inputs_to_feed_dict(dummy_cases, rewards)

    # Test that the loss decreases after taking a train step
    loss = sess.run(fetch, feed_dict=feed_dict)["loss"]
    values = value_function.values(dummy_cases)
    for _ in range(10):
        vf_examples = [ValueFunctionExample(c, r) for c, r in zip(dummy_cases, rewards)]
        value_function.train_step(vf_examples)
    new_loss = sess.run(fetch, feed_dict=feed_dict)["loss"]
    new_values = value_function.values(dummy_cases)
    assert new_loss < loss

    # Test that the weights didn't propagate to the ParseModel
    fetch = {
        "weights": value_function._parse_model._weights
    }

    model_weights = sess.run(fetch, feed_dict=feed_dict)["weights"]
    assert np.array_equal(model_weights, weights)
Example #6
    def test_restore(self, tmpdir, v):
        save_100_path = str(tmpdir.join('weights-100'))
        save_10_path = str(tmpdir.join('weights-10'))

        saver = Saver(str(tmpdir))
        assign_op = tf.assign(v, 12)

        sess = tf.get_default_session()
        guarantee_initialized_variables(sess)

        assert v.eval() == 5
        saver.save(100)  # save as step 100

        sess.run(assign_op)
        assert v.eval() == 12
        saver.save(10)  # save as step 10

        saver.restore()  # restores from the larger step number by default (100)
        assert v.eval() == 5  # restored

        saver.restore(10)  # force restore number 10
        assert v.eval() == 12

        saver.restore(save_100_path)
        assert v.eval() == 5

        # latest should be the largest step number, not necessarily last saved
        assert saver.latest_checkpoint == save_100_path
        assert os.path.exists(save_100_path)

        assert saver.checkpoint_paths == {
            10: save_10_path,
            100: save_100_path,
        }
Example #7
    def test(self, model, cases):
        sess = tf.get_default_session()
        guarantee_initialized_variables(sess)
        embeds = model.compute(model.embeds, cases)
        primitive_embeddings = RLongPrimitiveEmbeddings(6)

        # compute object embedding after applying projection
        object_projection_layer = model._object_projection_layer
        W, b = object_projection_layer.get_weights()  # shapes [10, 6] and [6]
        object_embed = np.ones(10).dot(W) + b

        assert_array_almost_equal(embeds[0],
                                  np.concatenate((np.zeros(6), primitive_embeddings['r'], primitive_embeddings[-1]))
                                  )

        assert_array_almost_equal(embeds[1],
                                  np.concatenate((np.zeros(6), np.zeros(6), primitive_embeddings['X1/1']))
                                  )

        assert_array_almost_equal(embeds[2],
                                  np.concatenate((primitive_embeddings['b'], object_embed, object_embed))
                                  )