Ejemplo n.º 1
0
    def test_integration_cv_grad(self, gaussian_device_2_wires, template, inpts, argnm, intrfc, to_var):
        """Checks that gradient calculations of cv templates execute without error.

        Builds a qnode around ``template`` on a 2-wire gaussian device and
        verifies that a gradient can be computed through the interface named
        by ``intrfc`` ('numpy', 'torch' or 'tf') with respect to the argument
        indices in ``argnm``. ``to_var`` converts plain inputs into the
        interface's variable type.
        """
        inpts = [to_var(i) for i in inpts]

        @qml.qnode(gaussian_device_2_wires, interface=intrfc)
        def circuit(*inp_):
            template(*inp_, wires=range(2))
            return qml.expval(qml.Identity(0))

        # Check gradients in numpy interface
        if intrfc == 'numpy':
            grd = qml.grad(circuit, argnum=argnm)
            assert grd(*inpts) is not None

        # Check gradients in torch interface
        if intrfc == 'torch':
            # Wrap the differentiable arguments so torch tracks their gradients
            for a in argnm:
                inpts[a] = TorchVariable(inpts[a], requires_grad=True)
            res = circuit(*inpts)
            res.backward()
            for a in argnm:
                assert inpts[a].grad.numpy() is not None

        # Check gradients in tf interface
        if intrfc == 'tf':
            grad_inpts = [inpts[a] for a in argnm]
            with tf.GradientTape() as tape:
                loss = circuit(*inpts)
            # Compute the gradient after leaving the tape context: calling
            # ``tape.gradient`` inside the ``with`` block records the gradient
            # computation itself onto the tape, which is wasteful.
            assert tape.gradient(loss, grad_inpts) is not None
Ejemplo n.º 2
0
    def test_integration_qubit_grad(self, template, inpts, argnm, intrfc, to_var):
        """Checks that gradient calculations of qubit templates execute without error.

        Builds a qnode around ``template`` on a 2-wire default.qubit device
        and verifies that a gradient can be computed through the interface
        named by ``intrfc`` ('numpy', 'torch' or 'tf') with respect to the
        argument indices in ``argnm``. ``to_var`` converts plain inputs into
        the interface's variable type.
        """
        inpts = [to_var(i) for i in inpts]
        dev = qml.device('default.qubit', wires=2)

        @qml.qnode(dev, interface=intrfc)
        def circuit(*inp_):
            template(*inp_, wires=range(2))
            return qml.expval(qml.Identity(0))

        # Check gradients in numpy interface
        if intrfc == 'numpy':
            grd = qml.grad(circuit, argnum=argnm)
            grd(*inpts)

        # Check gradients in torch interface
        if intrfc == 'torch':
            # Wrap the differentiable arguments so torch tracks their gradients
            for a in argnm:
                inpts[a] = TorchVariable(inpts[a], requires_grad=True)
            res = circuit(*inpts)
            res.backward()
            for a in argnm:
                inpts[a].grad.numpy()

        # Check gradients in tf interface
        if intrfc == 'tf':
            grad_inpts = [inpts[a] for a in argnm]
            with tf.GradientTape() as tape:
                loss = circuit(*inpts)
            # Compute the gradient after leaving the tape context: calling
            # ``tape.gradient`` inside the ``with`` block records the gradient
            # computation itself onto the tape, which is wasteful.
            tape.gradient(loss, grad_inpts)
Ejemplo n.º 3
0
    def test_integration_cv_grad(self, template, diffable, nondiffable,
                                 interface, to_var, gaussian_dummy):
        """Tests that gradient calculations of cv templates execute without error.

        ``diffable`` is a dict of differentiable keyword arguments for
        ``template``; ``nondiffable`` holds the remaining keyword arguments
        (including 'wires'). A gradient is computed with respect to each
        differentiable argument in turn, using the given ``interface``.
        """
        # Extract keys and values, preserving their pairing
        keys_diffable = list(diffable)
        diffable = list(diffable.values())

        # Turn into the interface-specific variable type
        diffable = [to_var(i) for i in diffable]

        # Make qnode
        n_wires = len(nondiffable['wires'])
        dev = gaussian_dummy(n_wires)

        @qml.qnode(dev, interface=interface)
        def circuit(*diffable):
            # Rebuild the keyword arguments and merge with the
            # non-differentiable ones (avoid shadowing the builtin ``dict``)
            kwargs = {key: item for key, item in zip(keys_diffable, diffable)}
            kwargs.update(nondiffable)

            # Circuit
            template(**kwargs)
            return qml.expval(qml.Identity(0))

        # Do gradient check for every differentiable argument
        for argnum in range(len(diffable)):

            # Check gradients in numpy interface
            if interface == 'numpy':
                grd = qml.grad(circuit, argnum=[argnum])
                grd(*diffable)

            # Check gradients in torch interface
            if interface == 'torch':
                # Wrap the current argument so torch tracks its gradient
                diffable[argnum] = TorchVariable(diffable[argnum],
                                                 requires_grad=True)
                res = circuit(*diffable)
                res.backward()
                diffable[argnum].grad.numpy()

            # Check gradients in tf interface
            if interface == 'tf':
                with tf.GradientTape() as tape:
                    loss = circuit(*diffable)
                # Compute the gradient after leaving the tape context:
                # calling ``tape.gradient`` inside the ``with`` block records
                # the gradient computation itself onto the tape.
                tape.gradient(loss, diffable[argnum])
Ejemplo n.º 4
0
    def test_integration_qubit_grad(self, template, diffable, nondiffable,
                                    argnum, interface, to_var):
        """Tests that gradient calculations of qubit templates execute without error.

        ``diffable`` is a dict of differentiable keyword arguments for
        ``template``; ``nondiffable`` holds the remaining keyword arguments
        (including 'wires'). Gradients are computed with respect to the
        argument indices in ``argnum``, using the given ``interface``.
        """
        # Extract keys and values, preserving their pairing
        keys_diffable = list(diffable)
        diffable = list(diffable.values())

        # Turn into the interface-specific variable type
        diffable = [to_var(i) for i in diffable]

        # Make qnode
        n_wires = len(nondiffable['wires'])
        dev = qml.device('default.qubit', wires=n_wires)

        @qml.qnode(dev, interface=interface)
        def circuit(*diffable):
            # Rebuild the keyword arguments and merge with the
            # non-differentiable ones (avoid shadowing the builtin ``dict``)
            kwargs = {key: item for key, item in zip(keys_diffable, diffable)}
            kwargs.update(nondiffable)

            # Circuit
            template(**kwargs)
            return qml.expval(qml.Identity(0))

        # Check gradients in numpy interface
        if interface == 'numpy':
            grd = qml.grad(circuit, argnum=argnum)
            grd(*diffable)

        # Check gradients in torch interface
        if interface == 'torch':
            # Wrap the differentiable arguments so torch tracks their gradients
            for a in argnum:
                diffable[a] = TorchVariable(diffable[a], requires_grad=True)
            res = circuit(*diffable)
            res.backward()
            for a in argnum:
                diffable[a].grad.numpy()

        # Check gradients in tf interface
        if interface == 'tf':
            grad_inpts = [diffable[a] for a in argnum]
            with tf.GradientTape() as tape:
                loss = circuit(*diffable)
            # Compute the gradient after leaving the tape context: calling
            # ``tape.gradient`` inside the ``with`` block records the gradient
            # computation itself onto the tape, which is wasteful.
            tape.gradient(loss, grad_inpts)
Ejemplo n.º 5
0
def Variable(tensor, **kwargs):
    """Wrap *tensor* in a ``TorchVariable``, moving it to the GPU first
    when GPU use is enabled and the tensor is not already on the GPU."""
    needs_transfer = _use_gpu and not tensor.is_cuda
    target = tensor.cuda() if needs_transfer else tensor
    return TorchVariable(target, **kwargs)
Ejemplo n.º 6
0
 def __new__(cls, *args, use_cuda=True, device=None, **kwargs):
     """Create a ``TorchVariable``; when ``use_cuda`` is true, move it to
     the CUDA device given by ``device`` (default device when ``None``)."""
     variable = TorchVariable(*args, **kwargs)
     if not use_cuda:
         return variable
     return variable.cuda(device=device)