def bitonic_woven_matrices(n):
    """
    Combine the l, r and l_inv, r_inv matrices into single n x n multiplies,
    for use with bisort_weave/diff_bisort_weave, fusing together consecutive
    stages. This reduces the number of multiplies to k(k+1)/2 + 1, where
    k = log2(n): one fused matrix per sorting stage, plus the final unweave.
    """
    layers = int(np.log2(n))
    matrices = []
    last_unweave = np.eye(n)
    for n, m, layer in bitonic_layer_loop(n):
        weave, unweave = np.zeros((n, n)), np.zeros((n, n))
        for a, b, out, swap in bitonic_swap_loop(n, m, layer):
            weave[out, a] = 1
            weave[out + n // 2, b] = 1
            # flip comparison order as needed
            if swap:
                a, b = b, a
            unweave[a, out] = 1
            unweave[b, out + n // 2] = 1
        # fuse the unweave and weave steps
        matrices.append(weave @ last_unweave)
        last_unweave = unweave
    # make sure the last unweave is preserved
    matrices.append(last_unweave)
    return matrices
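
# A minimal sketch of how the fused matrices above might be applied with exact
# (hard) min/max comparisons; the helper name is illustrative, not part of the
# original API. The first matrix weaves the input; each subsequent matrix both
# unweaves the previous stage and weaves the next one.
def apply_woven_hard(matrices, x):
    split = len(x) // 2
    x = matrices[0] @ x
    for mat in matrices[1:]:
        # compare the two interleaved halves, then unweave/weave in one multiply
        a, b = x[:split], x[split:]
        x = mat @ np.concatenate([np.minimum(a, b), np.maximum(a, b)])
    return x

# e.g. apply_woven_hard(bitonic_woven_matrices(8), np.random.randn(8))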
def bitonic_indices(n):
    """Compute a set of bitonic sort indices to sort a sequence of
    length n. n *must* be a power of 2. As opposed to the matrix
    operations, this requires only two index vectors of length n for
    each layer of the network.
    """
    # number of outer layers
    layers = int(np.log2(n))
    indices = []
    for layer in range(1, layers + 1):
        # we have 1..layer sub layers
        for sub in reversed(range(1, layer + 1)):
            weave = np.zeros(n, dtype='i4')
            unweave = np.zeros(n, dtype='i4')
            out = 0
            for i in range(0, n, 2**sub):
                for j in range(2**(sub - 1)):
                    ix = i + j
                    a, b = ix, ix + (2**(sub - 1))
                    weave[out] = a
                    weave[out + n // 2] = b
                    if (ix >> layer) & 1:
                        a, b = b, a
                    unweave[a] = out
                    unweave[b] = out + n // 2
                    out += 1
            indices.append((weave, unweave))
    return indices
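
# A minimal sketch (illustrative helper name, not from the original source) of
# how the (weave, unweave) index pairs might be used to sort a vector with
# exact min/max: gather with `weave`, compare the two halves, then scatter the
# results back with `unweave`.
def apply_indices_hard(indices, x):
    split = len(x) // 2
    for weave, unweave in indices:
        woven = x[weave]
        a, b = woven[:split], woven[split:]
        x = np.concatenate([np.minimum(a, b), np.maximum(a, b)])[unweave]
    return x

# e.g. apply_indices_hard(bitonic_indices(16), np.random.randn(16)) should
# agree with np.sort on the same input.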
def bitonic_woven_matrices_alt(n):
    """
    Alternative direct implementation of bitonic_woven_matrices.
    """
    layers = int(np.log2(n))
    matrices = []
    n2 = n // 2
    last_unweave = np.eye(n)
    for layer in range(layers):
        for s in range(layer + 1):
            m = 1 << (layer - s)
            weave, unweave = np.zeros((n, n)), np.zeros((n, n))
            out = 0
            for i in range(0, n, m << 1):
                for j in range(m):
                    ix = i + j
                    a, b = ix, ix + m
                    weave[out, a] = 1
                    weave[out + n2, b] = 1
                    if (ix >> (layer + 1)) & 1:
                        a, b = b, a
                    unweave[a, out] = 1
                    unweave[b, out + n2] = 1
                    out += 1
            matrices.append(weave @ last_unweave)
            last_unweave = unweave
    matrices.append(last_unweave)
    return matrices
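
# A quick, hedged consistency check (illustrative, not part of the original
# source): since the docstring describes this as a direct re-derivation of
# bitonic_woven_matrices, the two constructions should produce identical
# matrix lists for any power-of-two n.
def check_woven_matrices_agree(n):
    reference = bitonic_woven_matrices(n)
    alternative = bitonic_woven_matrices_alt(n)
    return len(reference) == len(alternative) and all(
        np.allclose(a, b) for a, b in zip(reference, alternative)
    )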
def bitonic_matrices(n):
    """Compute a set of bitonic sort matrices to sort a sequence of
    length n. n *must* be a power of 2.

    See: https://en.wikipedia.org/wiki/Bitonic_sorter

    Set k=log2(n). There will be k "layers", i=1, 2, ... k

    Each ith layer will have i sub-steps, so there are (k*(k+1)) / 2
    sorting steps total.

    For each step, we compute 4 matrices. l and r are binary matrices
    of size (n/2, n) and map_l and map_r are matrices of size (n, n/2).

    l and r "interleave" the inputs into two n/2 sized vectors. map_l and
    map_r "uninterleave" these two n/2 vectors back into two n sized
    vectors that can be summed to get the correct output.

    The result is such that to apply any layer's sorting, we can perform:

        l, r, map_l, map_r = layer[j]
        a, b = l @ y, r @ y
        permuted = map_l @ np.minimum(a, b) + map_r @ np.maximum(a, b)

    Applying this operation for each layer in sequence sorts the input
    vector.
    """
    # number of outer layers
    layers = int(np.log2(n))
    matrices = []
    for layer in range(1, layers + 1):
        # we have 1..layer sub layers
        for sub in reversed(range(1, layer + 1)):
            l, r = np.zeros((n // 2, n)), np.zeros((n // 2, n))
            map_l, map_r = np.zeros((n, n // 2)), np.zeros((n, n // 2))
            out = 0
            for i in range(0, n, 2**sub):
                for j in range(2**(sub - 1)):
                    ix = i + j
                    a, b = ix, ix + (2**(sub - 1))
                    l[out, a] = 1
                    r[out, b] = 1
                    if (ix >> layer) & 1:
                        a, b = b, a
                    map_l[a, out] = 1
                    map_r[b, out] = 1
                    out += 1
            matrices.append((l, r, map_l, map_r))
    return matrices
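
# A minimal sketch of applying the 4-tuples above, following the recipe in the
# docstring with hard min/max (the helper name is illustrative): each step
# interleaves the input through l/r, compares, and recombines through
# map_l/map_r.
def apply_bitonic_matrices(matrices, y):
    for l, r, map_l, map_r in matrices:
        a, b = l @ y, r @ y
        y = map_l @ np.minimum(a, b) + map_r @ np.maximum(a, b)
    return y

# Differentiable variants typically keep this structure but replace
# np.minimum/np.maximum with smooth approximations.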
def bitonic_indices(n):
    """Compute a set of bitonic sort indices to sort a sequence of
    length n. n *must* be a power of 2. As opposed to the matrix
    operations, this requires only two index vectors of length n for
    each layer of the network.
    """
    # number of outer layers
    layers = int(np.log2(n))
    indices = []
    for n, m, layer in bitonic_layer_loop(n):
        weave = np.zeros(n, dtype="i4")
        unweave = np.zeros(n, dtype="i4")
        for a, b, out, swap in bitonic_swap_loop(n, m, layer):
            weave[out] = a
            weave[out + n // 2] = b
            if swap:
                a, b = b, a
            unweave[a] = out
            unweave[b] = out + n // 2
        indices.append((weave, unweave))
    return indices
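
# A small usage check (illustrative, not from the original source): sorting a
# random vector through the index network, using the apply_indices_hard sketch
# defined earlier, should agree with np.sort.
def check_bitonic_indices(n=16, seed=0):
    x = np.random.default_rng(seed).standard_normal(n)
    return np.allclose(apply_indices_hard(bitonic_indices(n), x), np.sort(x))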
def marginal_prob(prob, axis):
    """Compute the marginal probability given a joint probability distribution
    expressed as a tensor. Each random variable corresponds to a dimension.

    If the distribution arises from a quantum circuit measured in computational
    basis, each dimension corresponds to a wire. For example, for a 2-qubit
    quantum circuit `prob[0, 1]` is the probability of measuring the first
    qubit in state 0 and the second in state 1.

    Args:
        prob (tensor_like): 1D tensor of probabilities. This tensor should be
            of size ``(2**N,)`` for some integer value ``N``.
        axis (list[int]): the axis for which to calculate the marginal
            probability distribution

    Returns:
        tensor_like: the marginal probabilities, of size ``(2**len(axis),)``

    **Example**

    >>> x = tf.Variable([1, 0, 0, 1.], dtype=tf.float64) / np.sqrt(2)
    >>> marginal_prob(x, axis=[0, 1])
    <tf.Tensor: shape=(4,), dtype=float64, numpy=array([0.70710678, 0.        , 0.        , 0.70710678])>
    >>> marginal_prob(x, axis=[0])
    <tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.70710678, 0.70710678])>
    """
    # note: ``np`` here is assumed to be a framework-dispatching interface
    # (the example accepts TensorFlow tensors, and plain NumPy has no
    # module-level ``flatten``)
    prob = np.flatten(prob)
    num_wires = int(np.log2(len(prob)))

    # if all wires are requested, the joint distribution is already the marginal
    if num_wires == len(axis):
        return prob

    # reshape to one axis per wire and sum over the inactive wires
    inactive_wires = tuple(set(range(num_wires)) - set(axis))
    prob = np.reshape(prob, [2] * num_wires)
    prob = np.sum(prob, axis=inactive_wires)
    return np.flatten(prob)
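
# A plain-NumPy illustration of the same reshape-and-sum computation on an
# ordinary array (the function name and values below are for demonstration
# only). For a 2-qubit joint distribution, marginalizing out wire 1 keeps
# wire 0.
import numpy

def marginal_prob_numpy_demo():
    joint = numpy.array([0.5, 0.0, 0.0, 0.5])     # P(00), P(01), P(10), P(11)
    # keep wire 0, sum over wire 1 (the inactive axis)
    return joint.reshape(2, 2).sum(axis=1)        # array([0.5, 0.5])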