def test_update_precedence_vector(self):
    """update_precedence_vector should blend the old precedence with the
    new write weighting: p' = (1 - sum_i w_i) * p + w."""
    graph = tf.Graph()
    with graph.as_default(), tf.Session(graph=graph) as session:
        mem = Memory(4, 5, 2, 2)

        weighting = random_softmax((2, 4), axis=1)
        old_precedence = random_softmax((2, 4), axis=1)

        # NumPy reference implementation of the update rule.
        retention = 1 - weighting.sum(axis=1, keepdims=True)
        expected = retention * old_precedence + weighting

        mem.precedence_vector = tf.convert_to_tensor(old_precedence)
        result = session.run(mem.update_precedence_vector(weighting))

        self.assertEqual(result.shape, (2, 4))
        self.assertTrue(np.allclose(result, expected))
def test_update_link_matrix(self):
    """update_link_matrix should match the element-wise link update from
    the DNC paper, computed here with an explicit double loop:
    L'[i,j] = (1 - w[i] - w[j]) * L[i,j] + w[i] * p[j],  L'[i,i] = 0."""
    graph = tf.Graph()
    with graph.as_default(), tf.Session(graph=graph) as session:
        mem = Memory(4, 5, 2, 2)

        weighting = random_softmax((2, 4), axis=1)
        precedence = random_softmax((2, 4), axis=1)

        # Random initial link matrices with zeroed diagonals (per batch).
        old_link = np.random.uniform(0, 1, (2, 4, 4)).astype(np.float32)
        np.fill_diagonal(old_link[0, :], 0)
        np.fill_diagonal(old_link[1, :], 0)

        # Iterative reference implementation (as written in the paper),
        # used to validate the vectorized version in Memory.
        expected = np.zeros((2, 4, 4), dtype=np.float32)
        for i in range(4):
            for j in range(4):
                if i == j:
                    continue
                reset = 1 - weighting[:, i] - weighting[:, j]
                expected[:, i, j] = (reset * old_link[:, i, j]
                                     + weighting[:, i] * precedence[:, j])

        mem.link_matrix = tf.convert_to_tensor(old_link)
        mem.precedence_vector = tf.convert_to_tensor(precedence)
        updated = session.run(
            mem.update_link_matrix(tf.constant(weighting)))

        self.assertTrue(np.allclose(updated, expected))