def diff_sort_weave(matrices, x, softmax=softmax):
    """Differentiably sort a length-n sequence ``x``.

    Uses the (weave, unweave) permutation-matrix pairs produced by
    ``bitonic_woven_matrices(n)``: each round interleaves the sequence,
    takes a soft max/min of the two halves, and undoes the interleave.

    Args:
        matrices: iterable of (weave, unweave) matrix pairs.
        x: sequence of length n to sort.
        softmax: smooth elementwise maximum of two arrays.

    Returns:
        The (approximately) sorted sequence.
    """
    half = len(x) // 2
    for weave, unweave in matrices:
        interleaved = weave @ x
        top = interleaved[:half]
        bottom = interleaved[half:]
        larger = softmax(top, bottom)
        # The soft minimum falls out of the identity min(a,b) = a + b - max(a,b).
        smaller = top + bottom - larger
        x = unweave @ np.concatenate([smaller, larger])
    return x
def diff_sort_indexed(indices, x, softmax=softmax):
    """Differentiably sort a length-n sequence ``x``.

    Index-array variant of ``diff_sort_weave``: the (weave, unweave) pairs
    from ``bitonic_indices(n)`` are fancy-index permutations rather than
    matrices, so each round is a gather instead of a matrix multiply.

    Args:
        indices: iterable of (weave, unweave) index-array pairs.
        x: sequence of length n to sort.
        softmax: smooth elementwise maximum of two arrays.

    Returns:
        The (approximately) sorted sequence.
    """
    half = len(x) // 2
    for weave, unweave in indices:
        gathered = x[weave]
        top = gathered[:half]
        bottom = gathered[half:]
        larger = softmax(top, bottom)
        # Soft minimum via min(a,b) = a + b - max(a,b).
        smaller = top + bottom - larger
        x = np.concatenate([smaller, larger])[unweave]
    return x
def diff_sort_weave(fused, x, softmax=softmax):
    """Differentiably sort a length-n sequence ``x``.

    Fused variant: ``fused`` comes from ``bitonic_woven_matrices(n)`` with
    adjacent unweave/weave pairs pre-multiplied, so only one matrix is
    applied per round (plus an initial weave).

    Args:
        fused: sequence of fused permutation matrices; ``fused[0]`` is the
            initial weave, each later entry combines an unweave with the
            next weave (the last restores the final order).
        x: sequence of length n to sort.
        softmax: smooth elementwise maximum of two arrays.

    Returns:
        The (approximately) sorted sequence.
    """
    half = len(x) // 2
    x = fused[0] @ x
    for permutation in fused[1:]:
        top = x[:half]
        bottom = x[half:]
        larger = softmax(top, bottom)
        # Soft minimum via min(a,b) = a + b - max(a,b).
        smaller = top + bottom - larger
        x = permutation @ np.concatenate([smaller, larger])
    return x
def diff_sort_weave(fused, x, softmax=softmax, beta=0.0):
    """Differentiably sort a length-n sequence ``x``.

    Like the fused variant, but every permutation matrix is blended with
    the identity. ``beta`` interpolates between applying the true
    permutations (``beta=0.0``) and leaving the values where they are
    (``beta=1.0``).

    Args:
        fused: sequence of fused permutation matrices from
            ``bitonic_woven_matrices(n)``.
        x: sequence of length n to sort.
        softmax: smooth elementwise maximum of two arrays.
        beta (float): identity-blend factor in [0, 1]; 0 sorts, 1 is a no-op.

    Returns:
        The (approximately) sorted sequence.
    """
    identity = np.eye(len(x))
    half = len(x) // 2

    def _blend(mat):
        # Convex combination of the identity and the true permutation.
        return beta * identity + (1 - beta) * mat

    x = _blend(fused[0]) @ x
    for mat in fused[1:]:
        top = x[:half]
        bottom = x[half:]
        larger = softmax(top, bottom)
        # Soft minimum via min(a,b) = a + b - max(a,b).
        smaller = top + bottom - larger
        x = _blend(mat) @ np.concatenate([smaller, larger])
    return x
def concatenate(values, axis=0):
    """Concatenate a sequence of tensors along the specified axis.

    .. warning::

        Tensors that are incompatible (such as Torch and TensorFlow tensors)
        cannot both be present.

    Args:
        values (Sequence[tensor_like]): Sequence of tensor-like objects to
            concatenate. The objects must have the same shape, except in the
            dimension corresponding to axis (the first, by default).
        axis (int): The axis along which the input tensors are concatenated.
            If axis is None, tensors are flattened before use. Default is 0.

    Returns:
        tensor_like: The concatenated tensor.

    **Example**

    >>> x = tf.constant([0.6, 0.1, 0.6])
    >>> y = tf.Variable([0.1, 0.2, 0.3])
    >>> z = np.array([5., 8., 101.])
    >>> concatenate([x, y, z])
    <tf.Tensor: shape=(9,), dtype=float32, numpy=
    array([6.00e-01, 1.00e-01, 6.00e-01, 1.00e-01, 2.00e-01, 3.00e-01,
           5.00e+00, 8.00e+00, 1.01e+02], dtype=float32)>
    """
    interface = _multi_dispatch(values)

    if interface == "torch":
        import torch

        if axis is None:
            # NumPy flattens each input before concatenating when axis is
            # None; reproduce that behaviour for torch tensors.
            values = [np.flatten(torch.as_tensor(t)) for t in values]
            axis = 0
        else:
            values = [torch.as_tensor(t) for t in values]
    elif interface == "tensorflow" and axis is None:
        # Same flatten-then-concatenate behaviour for tensorflow tensors.
        values = [np.flatten(np.array(t)) for t in values]
        axis = 0

    # `np` here is the dispatching wrapper: `like=interface` routes the call
    # to the backend detected above.
    return np.concatenate(values, axis=axis, like=interface)