def __call__(self, *args, **kwargs):
    """Build the quantum tape from the wrapped function, run it on the
    device, and return the result (squeezed for single-measurement outputs).
    """
    if self.interface == "autograd":
        # HOTFIX: to maintain compatibility with core, every input that does
        # not explicitly carry a ``requires_grad`` attribute is treated as
        # trainable. This should eventually be removed, forcing users to
        # specify ``requires_grad=True`` for trainable parameters.
        args = [
            a if hasattr(a, "requires_grad") else anp.array(a, requires_grad=True)
            for a in args
        ]

    # Build a fresh tape for this call, then run it.
    self.construct(args, kwargs)
    result = self.qtape.execute(device=self.device)

    # Multiple measurements: hand the results back untouched.
    if isinstance(self.qfunc_output, Sequence):
        return result

    # HOTFIX: a single measurement function was returned. To maintain
    # compatibility with core, squeeze the output.

    # The top-level package the result type lives in decides which
    # ``squeeze`` implementation applies.
    namespace = result.__class__.__module__.split(".")[0]

    if namespace in ("pennylane", "autograd"):
        # PennyLane and autograd need special handling: neither exposes
        # ``squeeze`` at the top level of its namespace.
        return anp.squeeze(result)

    if namespace == "jax":
        # Same situation for JAX — squeeze lives under ``jax.numpy``.
        return __import__(namespace).numpy.squeeze(result)

    return __import__(namespace).squeeze(result)
def __call__(self, *args, **kwargs):
    """Build and execute the quantum tape on the device, keeping the
    user's original device execution count in sync, and return the
    (squeezed, for single-measurement outputs) result.
    """
    # Build a fresh tape for this call, then run it.
    self.construct(args, kwargs)
    result = self.qtape.execute(device=self.device)

    # FIX: if the QNode swapped the device, increase the num_execution value
    # on the original device. In the long run, we should make sure that the
    # user's device is the one actually run so she has full control. This
    # could be done by changing the class of the user's device before and
    # after executing the tape.
    if self.device is not self._original_device:
        self._original_device._num_executions += 1  # pylint: disable=protected-access

    # Multiple measurements: hand the results back untouched.
    if isinstance(self.qfunc_output, Sequence):
        return result

    # HOTFIX: a single measurement function was returned. To maintain
    # compatibility with core, squeeze the output.

    # The top-level package the result type lives in decides which
    # ``squeeze`` implementation applies.
    namespace = result.__class__.__module__.split(".")[0]

    if namespace in ("pennylane", "autograd"):
        # PennyLane and autograd need special handling: neither exposes
        # ``squeeze`` at the top level of its namespace.
        return anp.squeeze(result)

    if namespace == "jax":
        # Same situation for JAX — squeeze lives under ``jax.numpy``.
        return __import__(namespace).numpy.squeeze(result)

    return __import__(namespace).squeeze(result)
def __call__(self, *args, **kwargs):
    """Construct and execute the quantum tape, returning the (squeezed)
    result and persisting the execution cache across calls when caching
    is enabled.
    """
    if self.interface == "autograd":
        # HOTFIX: to maintain compatibility with core, here we treat
        # all inputs that do not explicitly specify `requires_grad=False`
        # as trainable. This should be removed at some point, forcing users
        # to specify `requires_grad=True` for trainable parameters.
        args = [
            anp.array(a, requires_grad=True) if not hasattr(a, "requires_grad") else a
            for a in args
        ]

    # construct the tape
    self.construct(args, kwargs)

    if self._caching:
        # Every time the QNode is called, it creates a new tape. We want the
        # tape cache to persist over multiple tapes, so hence keep track of it
        # as a QNode attribute and load it into the new tape
        self.qtape._cache_execute = self._cache_execute

    # execute the tape
    res = self.qtape.execute(device=self.device)

    if self._caching:
        # FIX: persist the cache back onto the QNode immediately after
        # execution, before *any* return path. Previously this write-back
        # only happened on the fall-through return, so the early
        # ``anp.squeeze`` return below skipped it — harmless only while the
        # tape mutated the shared dict in place rather than rebinding it.
        self._cache_execute = self.qtape._cache_execute

    # HOTFIX: to maintain compatibility with core, we squeeze
    # all outputs.

    # Get the namespace associated with the return type
    res_type_namespace = res.__class__.__module__.split(".")[0]

    if res_type_namespace in ("pennylane", "autograd"):
        # For PennyLane and autograd we must branch, since
        # 'squeeze' does not exist in the top-level of the namespace
        return anp.squeeze(res)

    return __import__(res_type_namespace).squeeze(res)