def diff_argsort(matrices, x, sigma=0.1, softmax=softmax):
    """Return the smoothed, differentiable ranking of each element of x.

    Parameters:
        matrices: bitonic sorting-network matrices (as built elsewhere in
            this module) sized for len(x).
        x: 1-D torch tensor of values to rank.
        sigma: smoothing of the ranking; smaller = sharper ranks.
        softmax: relaxed max function used by the underlying diff_sort.

    Returns:
        A tensor of smoothed ranks, one per element of x.
    """
    sortd = diff_sort(matrices, x, softmax)
    # BUG FIX: x.get_device() returns an int (-1 on CPU), which makes
    # `.to(...)` fail for CPU tensors; x.device works for CPU and GPU alike.
    return order_matrix(x, sortd, sigma=sigma) @ (
        torch.arange(len(x), dtype=torch.float).to(x.device)
    )
def test_network():
    """With a hard max (np.maximum) the network must sort exactly.

    Checks several network sizes and value ranges against np.sort.
    """
    for size in [2, 4, 8, 16, 32, 64, 128]:
        mats = bitonic_matrices(size)
        for _ in range(5):
            for scale in [0.01, 1, 10, 100, 1000]:
                vec = np.random.uniform(-scale, scale, size)
                # verify exact sort is working
                expected = np.sort(vec)
                actual = diff_sort(mats, vec, np.maximum)
                assert np.allclose(expected, actual)
def test_forms():
    """All three network representations must agree on already-sorted input.

    Feeding a sorted vector through a hard-max sort must return it unchanged,
    for the matrix, woven-matrix and index-based forms alike.
    """
    for size in [2, 4, 8, 16, 32, 64, 128, 256, 512]:
        woven = bitonic_woven_matrices(size)
        indices = bitonic_indices(size)
        mats = bitonic_matrices(size)
        for _ in range(20):
            sample = np.random.randint(-200, 200, size)
            sorted_sample = np.sort(sample)
            # a sorted vector is a fixed point of every exact-sort form
            assert np.all(diff_sort(mats, sorted_sample, np.maximum) == sorted_sample)
            assert np.all(diff_sort_weave(woven, sorted_sample, np.maximum) == sorted_sample)
            assert np.all(diff_sort_indexed(indices, sorted_sample, np.maximum) == sorted_sample)
def test_sort():
    """Relaxed sorts must stay within a loose error bound of the true sort.

    Runs every smoothed max function in `maxes` over several sizes, dtypes
    and value ranges, and checks the elementwise error against np.sort.
    """
    for size in [2, 4, 8, 16, 32, 64, 128]:
        mats = bitonic_matrices(size)
        for _ in range(5):
            for dtype in (np.float32, np.float64):
                for scale in (10, 200, 250):
                    vec = np.random.uniform(-scale, scale, size).astype(dtype)
                    truth = np.sort(vec)
                    for max_fn in maxes:
                        relaxed = diff_sort(mats, vec, max_fn)
                        # check error is (roughly) bounded in this range
                        worst = np.max(np.abs(truth - relaxed))
                        assert worst < scale / 2
def test_sorting():
    """Smoke-test the TF1 graph: sort, ranks, argsort and gradients evaluate.

    Builds the differentiable-sort graph for each relaxed max function and
    runs one forward/gradient evaluation inside a Session.
    """
    # convert to TF tensors
    dtype = tf.float64
    tf_matrices = bitonic_matrices(8)
    for max_fn in [softmax, smoothmax, softmax_smooth]:
        test = to_tf(np.random.randint(-200, 200, 8), dtype=dtype)
        # BUG FIX: max_fn was never passed, so the loop exercised the same
        # default max three times; pass it through as the sibling tests do.
        tf_output = tf.reshape(diff_sort(tf_matrices, test, max_fn), (-1,))
        tf_ranks = diff_argsort(tf_matrices, test)
        tf_argsort = diff_argsort(tf_matrices, test, transpose=True)
        tf_grads = tf.squeeze(jacobian(tf_output, test))
        # compute output and gradient; Session.run evaluates the whole graph
        with tf.Session() as s:
            s.run((tf_output, tf_grads, tf_ranks, tf_argsort))