import numpy as np
import tensorflow as tf

# The functions under test (hermitian_tuple_scorer, loss_func_softmax,
# sparse_relational_hermitian_scoring, multilinear_tuple_scorer, tf_eval,
# learn, ...) are assumed to be imported from the module being tested.


def test_hermitian_tuple_scorer():
    """Check that symmetry_coef blends the symmetric and anti-symmetric parts
    of the Hermitian pair score."""
    emb = [[1., 1, 0, 3], [0, 1, 0, 1], [-1, 1, 1, 5]]
    tuples_var = tf.Variable([[0, 1], [1, 0], [0, 2], [2, 0], [1, 2], [2, 1]])
    g, params = hermitian_tuple_scorer(tuples_var, emb0=emb, symmetry_coef=(1.0, 0.0))
    print(tf_eval(g))  # fully symmetric
    g, params = hermitian_tuple_scorer(tuples_var, emb0=emb, symmetry_coef=(0.0, 1.0))
    print(tf_eval(g))  # fully anti-symmetric
    g, params = hermitian_tuple_scorer(tuples_var, emb0=emb, symmetry_coef=(0.5, 0.5))
    print(tf_eval(g))
    g, params = hermitian_tuple_scorer(tuples_var, emb0=emb, symmetry_coef=(0.9, 0.1))
    print(tf_eval(g))  # close to symmetric
    g, params = hermitian_tuple_scorer(tuples_var, emb0=emb, symmetry_coef=(0.1, 0.9))
    print(tf_eval(g))  # close to anti-symmetric
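
# A NumPy reference sketch of what hermitian_tuple_scorer is assumed to compute
# for entity pairs: embeddings of even dimension 2r are read as complex vectors
# of dimension r; the real part of the Hermitian product <e_s, conj(e_o)> is
# symmetric in (s, o), the imaginary part anti-symmetric, and
# symmetry_coef = (a, b) blends the two. Hypothetical helper, for illustration.
def _hermitian_pair_score_sketch(emb, pairs, symmetry_coef):
    a, b = symmetry_coef
    emb = np.asarray(emb, dtype=np.float64)
    r = emb.shape[1] // 2
    re, im = emb[:, :r], emb[:, r:]
    s, o = np.asarray(pairs).T
    sym = np.sum(re[s] * re[o] + im[s] * im[o], axis=1)   # Re(<e_s, conj(e_o)>)
    anti = np.sum(re[s] * im[o] - im[s] * re[o], axis=1)  # Im(<e_s, conj(e_o)>)
    return a * sym + b * anti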
def test_loss_func_softmax():
    """Test the softmax cross-entropy loss computation.

    Returns:
        Nothing

    >>> test_loss_func_softmax()
    [ 2.00384593  2.19245744  0.31507221  1.76036525]
    [ 0.20384566  0.59245753  1.31507206  0.26036522]
    """
    pred = tf.Variable([[1.1, 2.1, 3.9], [-1.0, -0.5, -2.1],
                        [1.1, 2.1, -3.9], [-1.0, 0.5, -2.1]])
    gold = tf.Variable([1, 2, 1, 0])
    print(tf_eval(loss_func_softmax(pred, gold)))
    gold = tf.Variable([2, 1, 0, 1])
    print(tf_eval(loss_func_softmax(pred, gold)))
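
# NumPy reference for the sparse softmax cross-entropy the doctest above
# checks: loss_i = logsumexp(pred_i) - pred_i[gold_i]. Plugging in the first
# gold vector reproduces [2.0038, 2.1925, 0.3151, 1.7604] up to print
# formatting. A sketch of the assumed definition, not the implementation of
# loss_func_softmax itself.
def _softmax_xent_sketch(pred, gold):
    pred = np.asarray(pred, dtype=np.float64)
    gold = np.asarray(gold)
    lse = np.log(np.sum(np.exp(pred), axis=1))  # logsumexp over the class axis
    return lse - pred[np.arange(len(gold)), gold]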
def test_sparse_relational_hermitian_scoring():
    """Evaluate the Hermitian scorer on (subject, relation, object) triples;
    rows 3 and 4 of the embedding matrix act as relation embeddings."""
    emb = tf.Variable([[1., 1, 0, 3], [0, 1, 0, 1], [-1, 1, 1, 5],
                       [-3, 1, 0, 2], [-1, 2, -1, -5]])
    tuples_var = tf.Variable([[0, 3, 1], [1, 3, 0], [0, 3, 2],
                              [2, 4, 0], [1, 4, 2], [2, 4, 1]])
    g = sparse_relational_hermitian_scoring(emb, tuples_var)
    print(tf_eval(g))
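
# Reference sketch of the assumed trilinear Hermitian (ComplEx-style) score
# for (subject, relation, object) triples: Re(<e_s, e_r, conj(e_o)>), with the
# same complex reading of the rows of emb as above. Hypothetical helper taking
# plain NumPy arrays, for illustration only.
def _relational_hermitian_score_sketch(emb, triples):
    emb = np.asarray(emb, dtype=np.float64)
    r = emb.shape[1] // 2
    re, im = emb[:, :r], emb[:, r:]
    s, p, o = np.asarray(triples).T
    return np.sum(re[s] * re[p] * re[o] + im[s] * re[p] * im[o]
                  + re[s] * im[p] * im[o] - im[s] * im[p] * re[o], axis=1)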
def test_learning_factorization(verbose=False):
    """Learn a rank-2 factorization of a noisy toy matrix and compare it to
    the exact rank-2 SVD solution."""
    n = 9
    m = 8
    y_mat = toy_factorization_problem(n=n, m=m, rk=3, noise=1)
    rank = 2
    batch_size = n * m
    tuples = mat2tuples(y_mat)
    tuple_iterable = data_to_batches(tuples, minibatch_size=batch_size)
    # tuple_iterable = positive_and_negative_tuple_sampler(mat2tuples(y_mat), minibatch_size=batch_size)
    sampler, (x, y) = feed_dict_sampler(tuple_iterable, types=[np.int64, np.float32])
    loss_op = tf.reduce_mean(loss_func(
        multilinear_tuple_scorer(x, rank=rank, n_emb=n + m)[0], y, 'quadratic'))
    initial_losses = [tf_eval(loss_op, f) for _, f in sampler]
    if verbose:
        print(initial_losses)
    # hooks = [lambda s, e, it, l: it and ((it % 100) == 0) and print("%d) loss=%f" % (it, l))]
    hooks = [lambda it, b, l: it and ((it % 1) == 0)  # log every epoch
             and print("{0}) train loss={1}".format(it, l[0]))]
    emb, = learn(loss_op, sampler, tf.train.AdamOptimizer(learning_rate=0.1),
                 hooks, max_epochs=200)
    # emb, = learn(loss_op, sampler, tf.train.GradientDescentOptimizer(learning_rate=0.5), hooks, max_epochs=500)
    # emb, = learn(loss_op, sampler, tf.train.AdagradOptimizer(0.01, initial_accumulator_value=0.01), hooks, max_epochs=500)
    mat0 = svd_factorize_matrix(y_mat, rank=2)  # exact rank-2 SVD solution
    mat = emb[:n, :].dot(emb[n:, :].T)  # recompose the learned low-rank matrix
    if verbose:
        print(np.linalg.norm(mat - mat0))
    assert np.linalg.norm(mat - mat0) < 1e-3
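
# The baseline above relies on the Eckart-Young theorem: the truncated SVD is
# the best rank-k approximation in Frobenius norm, which is what
# svd_factorize_matrix is assumed to return. A minimal sketch:
def _svd_rank_k_sketch(y_mat, rank):
    u, s, vt = np.linalg.svd(y_mat, full_matrices=False)
    return (u[:, :rank] * s[:rank]).dot(vt[:rank, :])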