def concatenate(values, axis=0):
    """Concatenate a sequence of tensors along the specified axis.

    .. warning::

        Tensors that are incompatible (such as Torch and TensorFlow tensors)
        cannot both be present.

    Args:
        values (Sequence[tensor_like]): Sequence of tensor-like objects to
            concatenate. The objects must have the same shape, except in the
            dimension corresponding to axis (the first, by default).
        axis (int): The axis along which the input tensors are concatenated.
            If axis is None, tensors are flattened before use. Default is 0.

    Returns:
        tensor_like: The concatenated tensor.

    **Example**

    >>> x = tf.constant([0.6, 0.1, 0.6])
    >>> y = tf.Variable([0.1, 0.2, 0.3])
    >>> z = np.array([5., 8., 101.])
    >>> concatenate([x, y, z])
    <tf.Tensor: shape=(9,), dtype=float32, numpy=
    array([6.00e-01, 1.00e-01, 6.00e-01, 1.00e-01, 2.00e-01, 3.00e-01,
           5.00e+00, 8.00e+00, 1.01e+02], dtype=float32)>
    """
    interface = _multi_dispatch(values)

    if interface == "torch":
        import torch

        if axis is None:
            # flatten and then concatenate zero'th dimension
            # to reproduce numpy's behaviour
            values = [np.flatten(torch.as_tensor(t)) for t in values]
            axis = 0
        else:
            values = [torch.as_tensor(t) for t in values]

    if interface == "tensorflow" and axis is None:
        # flatten and then concatenate zero'th dimension
        # to reproduce numpy's behaviour
        values = [np.flatten(np.array(t)) for t in values]
        axis = 0

    return np.concatenate(values, axis=axis, like=interface)
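
# Hedged usage sketch (illustrative, not part of the original module). It
# assumes ``_multi_dispatch`` resolves plain NumPy inputs to the "numpy"
# interface and that this module's ``np`` is the autoray-backed namespace
# implied by ``like=interface`` above. ``onp`` is a local alias so the sketch
# does not shadow ``np``; the helper name is hypothetical.
def _concatenate_numpy_example():
    """Reproduce the docstring example with NumPy-only inputs."""
    import numpy as onp

    x = onp.array([0.6, 0.1, 0.6])
    y = onp.array([0.1, 0.2, 0.3])
    z = onp.array([5.0, 8.0, 101.0])

    # three shape-(3,) vectors joined along the default axis 0
    # give a single shape-(9,) vector
    out = concatenate([x, y, z])
    assert out.shape == (9,)
    return out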
def marginal_prob(prob, axis):
    """Compute the marginal probability given a joint probability distribution
    expressed as a tensor. Each random variable corresponds to a dimension.

    If the distribution arises from a quantum circuit measured in the
    computational basis, each dimension corresponds to a wire. For example,
    for a 2-qubit quantum circuit ``prob[0, 1]`` is the probability of
    measuring the first qubit in state 0 and the second in state 1.

    Args:
        prob (tensor_like): 1D tensor of probabilities. This tensor should be
            of size ``(2**N,)`` for some integer value ``N``.
        axis (list[int]): the axis for which to calculate the marginal
            probability distribution

    Returns:
        tensor_like: the marginal probabilities, of size ``(2**len(axis),)``

    **Example**

    >>> x = tf.Variable([1, 0, 0, 1.], dtype=tf.float64) / np.sqrt(2)
    >>> marginal_prob(x, axis=[0, 1])
    <tf.Tensor: shape=(4,), dtype=float64, numpy=array([0.70710678, 0.        , 0.        , 0.70710678])>
    >>> marginal_prob(x, axis=[0])
    <tf.Tensor: shape=(2,), dtype=float64, numpy=array([0.70710678, 0.70710678])>
    """
    prob = np.flatten(prob)
    num_wires = int(np.log2(len(prob)))

    if num_wires == len(axis):
        return prob

    # reshape to one length-2 dimension per wire, then sum out the wires
    # that were not requested
    inactive_wires = tuple(set(range(num_wires)) - set(axis))
    prob = np.reshape(prob, [2] * num_wires)
    prob = np.sum(prob, axis=inactive_wires)
    return np.flatten(prob)
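
# Hedged reference sketch (illustrative, not part of the original module):
# the same marginalisation written against plain NumPy, handy for sanity
# checking the reshape/sum step above. ``onp`` is a local alias so the sketch
# does not shadow this module's ``np``; the helper name is hypothetical.
def _marginal_prob_numpy_reference(prob, axis):
    """Pure-NumPy equivalent of ``marginal_prob``."""
    import numpy as onp

    prob = onp.ravel(onp.asarray(prob, dtype=float))
    num_wires = int(onp.log2(prob.size))

    if num_wires == len(axis):
        return prob

    # one length-2 dimension per wire, then sum out the wires not requested;
    # e.g. prob = [0.5, 0.0, 0.0, 0.5] with axis=[0] gives [0.5, 0.5]
    inactive_wires = tuple(set(range(num_wires)) - set(axis))
    prob = prob.reshape([2] * num_wires)
    return prob.sum(axis=inactive_wires).ravel()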