def test_expand_dims():
    """Test that dimension expansion works"""
    x = np.array([1, 2, 3])
    xT = qml.proc.TensorBox(x)

    res = xT.expand_dims(axis=1)
    expected = np.expand_dims(x, axis=1)

    assert isinstance(res, AutogradBox)
    assert np.all(res == expected)
import pennylane as qml
from pennylane import numpy as np
from scipy.stats import multinomial


def weighted_random_sampling(qnodes, coeffs, shots, argnums, *args, **kwargs):
    """Returns an array of length ``shots`` containing single-shot estimates
    of the Hamiltonian gradient. The shots are distributed randomly over
    the terms in the Hamiltonian, as per a multinomial distribution.

    Args:
        qnodes (Sequence[.QNode]): Sequence of QNodes, each one when evaluated
            returning the corresponding expectation value of a term in the Hamiltonian.
        coeffs (Sequence[float]): Sequence of coefficients corresponding to
            each term in the Hamiltonian. Must be the same length as ``qnodes``.
        shots (int): The number of shots used to estimate the Hamiltonian expectation
            value. These shots are distributed over the terms in the Hamiltonian,
            as per a multinomial distribution.
        argnums (Sequence[int]): the QNode argument indices which are trainable
        *args: Arguments to the QNodes
        **kwargs: Keyword arguments to the QNodes

    Returns:
        array[float]: the single-shot gradients of the Hamiltonian expectation value
    """
    # determine the shot probability per term
    prob_shots = np.abs(coeffs) / np.sum(np.abs(coeffs))

    # construct the multinomial distribution, and sample
    # from it to determine how many shots to apply per term
    si = multinomial(n=shots, p=prob_shots)
    shots_per_term = si.rvs()[0]

    grads = []

    for h, c, p, s in zip(qnodes, coeffs, prob_shots, shots_per_term):
        # if the number of shots is 0, do nothing
        if s == 0:
            continue

        # set the QNode device shots
        h.device.shots = [(1, s)]

        jacs = []
        for i in argnums:
            j = qml.jacobian(h, argnum=i)(*args, **kwargs)

            if s == 1:
                j = np.expand_dims(j, 0)

            # Divide each term by the probability per shot. This is
            # because we are sampling one at a time.
            jacs.append(c * j / p)

        grads.append(jacs)

    return [np.concatenate(i) for i in zip(*grads)]
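# --- Usage sketch (illustrative, not part of the original source) ---
# A minimal example of calling ``weighted_random_sampling`` above, assuming a
# hypothetical two-term Hamiltonian H = 0.7 * Z(0) + 0.3 * X(0), with one QNode
# per term. The device, circuits, weights, and shot count are assumptions
# chosen for illustration; any device supporting shot batches should work.
dev = qml.device("default.qubit", wires=1, shots=100)

@qml.qnode(dev)
def qnode_z(weights):
    qml.RX(weights[0], wires=0)
    return qml.expval(qml.PauliZ(0))

@qml.qnode(dev)
def qnode_x(weights):
    qml.RX(weights[0], wires=0)
    return qml.expval(qml.PauliX(0))

weights = np.array([0.4], requires_grad=True)

# 100 single-shot gradient estimates, distributed over the two terms
# in proportion to |0.7| and |0.3|
single_shot_grads = weighted_random_sampling(
    [qnode_z, qnode_x], [0.7, 0.3], 100, [0], weights
)

# averaging the single-shot estimates recovers the Hamiltonian gradient
grad = np.mean(single_shot_grads[0], axis=0)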
def test_linear(self):
    """Tests gradients of a multivariate multidimensional quadratic function
    with a linear gradient."""
    x_vec = np.random.uniform(-5, 5, size=(2))
    x_vec_multidim = np.expand_dims(x_vec, axis=1)

    gradf = lambda x: np.array([[2 * x_[0]] for x_ in x])
    f = lambda x: np.sum([x_[0] ** 2 for x_ in x])

    g = qml.grad(f, 0)
    auto_grad = g(x_vec_multidim)
    correct_grad = gradf(x_vec_multidim)
    assert np.allclose(auto_grad, correct_grad)
def test_sin(self, tol):
    """Tests gradients with multivariate multidimensional sin and cos."""
    x_vec = np.random.uniform(-5, 5, size=(2))
    x_vec_multidim = np.expand_dims(x_vec, axis=1)

    gradf = lambda x: np.array(
        [[np.cos(x[0, 0])], [-np.sin(x[1, 0])]], dtype=np.float64
    )
    f = lambda x: np.sin(x[0, 0]) + np.cos(x[1, 0])

    g = qml.grad(f, 0)
    auto_grad = g(x_vec_multidim)
    correct_grad = gradf(x_vec_multidim)
    assert np.allclose(auto_grad, correct_grad, atol=tol, rtol=0)
def test_exp(self):
    """Tests gradients with multivariate multidimensional exp and tanh."""
    x_vec = np.random.uniform(-5, 5, size=(2))
    x_vec_multidim = np.expand_dims(x_vec, axis=1)

    gradf = lambda x: np.array(
        [
            [np.exp(x[0, 0] / 3) / 3 * np.tanh(x[1, 0])],
            [np.exp(x[0, 0] / 3) * (1 - np.tanh(x[1, 0]) ** 2)],
        ]
    )
    f = lambda x: np.exp(x[0, 0] / 3) * np.tanh(x[1, 0])

    g = qml.grad(f, 0)
    auto_grad = g(x_vec_multidim)
    correct_grad = gradf(x_vec_multidim)
    assert np.allclose(auto_grad, correct_grad)
def test_gradient_multivar_multidim(self):
    """Tests gradients of multivariate multidimensional functions."""
    self.logTestName()

    for gradf, f, name in zip(
        self.grad_mvar_mdim_funcs, self.mvar_mdim_funcs, self.fnames
    ):
        with self.subTest(i=name):
            for jdx in range(len(x_vals[:-1])):
                x_vec = x_vals[jdx : jdx + 2]
                x_vec_multidim = np.expand_dims(x_vec, axis=1)

                g = qml.grad(f, 0)
                auto_grad = g(x_vec_multidim)
                correct_grad = gradf(x_vec_multidim)
                self.assertAllAlmostEqual(auto_grad, correct_grad, delta=self.tol)
class AutogradBox(qml.math.TensorBox):
    """Implements the :class:`~.TensorBox` API for ``pennylane.numpy`` tensors.

    For more details, please refer to the :class:`~.TensorBox` documentation.
    """

    abs = wrap_output(lambda self: np.abs(self.data))
    angle = wrap_output(lambda self: np.angle(self.data))
    arcsin = wrap_output(lambda self: np.arcsin(self.data))
    cast = wrap_output(lambda self, dtype: np.tensor(self.data, dtype=dtype))
    diag = staticmethod(wrap_output(lambda values, k=0: np.diag(values, k=k)))
    expand_dims = wrap_output(lambda self, axis: np.expand_dims(self.data, axis=axis))
    ones_like = wrap_output(lambda self: np.ones_like(self.data))
    reshape = wrap_output(lambda self, shape: np.reshape(self.data, shape))
    sqrt = wrap_output(lambda self: np.sqrt(self.data))
    sum = wrap_output(
        lambda self, axis=None, keepdims=False: np.sum(
            self.data, axis=axis, keepdims=keepdims
        )
    )
    T = wrap_output(lambda self: self.data.T)
    squeeze = wrap_output(lambda self: self.data.squeeze())

    @staticmethod
    def astensor(tensor):
        return np.tensor(tensor)

    @staticmethod
    @wrap_output
    def concatenate(values, axis=0):
        return np.concatenate(AutogradBox.unbox_list(values), axis=axis)

    @staticmethod
    @wrap_output
    def dot(x, y):
        x, y = AutogradBox.unbox_list([x, y])

        if x.ndim == 0 and y.ndim == 0:
            return x * y

        if x.ndim == 2 and y.ndim == 2:
            return x @ y

        return np.dot(x, y)

    @property
    def interface(self):
        return "autograd"

    def numpy(self):
        if hasattr(self.data, "_value"):
            # Catches the edge case where the data is an Autograd arraybox,
            # which only occurs during backpropagation.
            return self.data._value
        return self.data.numpy()

    @property
    def requires_grad(self):
        return self.data.requires_grad

    @wrap_output
    def scatter_element_add(self, index, value):
        size = self.data.size
        flat_index = np.ravel_multi_index(index, self.shape)
        t = [0] * size
        t[flat_index] = value
        self.data = self.data + np.array(t).reshape(self.shape)
        return self.data

    @property
    def shape(self):
        return self.data.shape

    @staticmethod
    @wrap_output
    def stack(values, axis=0):
        return np.stack(AutogradBox.unbox_list(values), axis=axis)

    @wrap_output
    def take(self, indices, axis=None):
        indices = self.astensor(indices)

        if axis is None:
            return self.data.flatten()[indices]

        fancy_indices = [slice(None)] * axis + [indices]
        return self.data[tuple(fancy_indices)]

    @staticmethod
    @wrap_output
    def where(condition, x, y):
        return np.where(condition, *AutogradBox.unbox_list([x, y]))
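# --- Usage sketch (illustrative, not part of the original source) ---
# Wrapping a ``pennylane.numpy`` tensor in the AutogradBox above and calling a
# few of the TensorBox API methods. Direct construction via AutogradBox(...) is
# an assumption here; in practice the box is usually created through the
# dispatching TensorBox constructor (e.g. ``qml.proc.TensorBox(x)``).
from pennylane import numpy as np

x = np.tensor([1.0, 2.0, 3.0], requires_grad=True)
xb = AutogradBox(x)

print(xb.interface)             # "autograd"
print(xb.shape)                 # (3,)
print(xb.expand_dims(axis=1))   # boxed tensor of shape (3, 1)
print(AutogradBox.dot(xb, xb))  # inner product, returned as a boxed scalar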
def expand_dims(self, axis):
    return AutogradBox(np.expand_dims(self.data, axis=axis))