Example #1
def ohe_cost_fcn(params, circuit, ang_array, actual):
    '''
    use MSE to start
    '''
    predictions = (np.stack([circuit(params, x)
                             for x in ang_array]) + 1) * 0.5
    return mse(actual, predictions)
Example #2
    def test_stack_array_jax(self):
        """Test that stack, called without the axis arguments, stacks vertically"""
        t1 = onp.array([0.6, 0.1, 0.6])
        t2 = jnp.array([0.1, 0.2, 0.3])
        t3 = jnp.array([5.0, 8.0, 101.0])

        res = fn.stack([t1, t2, t3])
        assert np.all(res == np.stack([t1, t2, t3]))
Example #3
    def test_stack_torch(self):
        """Test that stack, called without the axis arguments, stacks vertically"""
        t1 = onp.array([5.0, 8.0, 101.0], dtype=np.float64)
        t2 = torch.tensor([0.6, 0.1, 0.6], dtype=torch.float64)
        t3 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64)

        res = fn.stack([t1, t2, t3])
        assert isinstance(res, torch.Tensor)
        assert np.all(res.numpy() == np.stack([t1, t2.numpy(), t3.numpy()]))
Example #4
    def test_stack_tensorflow(self):
        """Test that stack, called without the axis arguments, stacks vertically"""
        t1 = tf.constant([0.6, 0.1, 0.6])
        t2 = tf.Variable([0.1, 0.2, 0.3])
        t3 = onp.array([5.0, 8.0, 101.0])

        res = fn.stack([t1, t2, t3])
        assert isinstance(res, tf.Tensor)
        assert np.all(res.numpy() == np.stack([t1.numpy(), t2.numpy(), t3]))
Example #5
def test_stack():
    """Test that arrays are correctly stacked together"""
    x = np.array([[1, 2], [3, 4]])
    y = np.array([[1, 0], [0, 1]])

    xT = qml.proc.TensorBox(x)
    res = xT.stack([y, xT, x])

    assert np.all(res == np.stack([y, x, x]))
Example #6
def jacobian(func, argnum=None):
    """Returns the Jacobian as a callable function of vector-valued
    (functions of) QNodes.

    This is a wrapper around the :mod:`autograd.jacobian` function.

    Args:
        func (function): A vector-valued Python function or QNode that contains
            a combination of quantum and classical nodes. The output of the computation
            must consist of a single NumPy array (if classical) or a tuple of
            expectation values (if a quantum node)
        argnum (int or Sequence[int]): Which argument to take the gradient
            with respect to. If a sequence is given, the Jacobian matrix
            corresponding to all input elements and all output elements is returned.

    Returns:
        function: the function that returns the Jacobian of the input
        function with respect to the arguments in argnum
    """
    # pylint: disable=no-value-for-parameter

    if argnum is not None:
        # for backwards compatibility with existing code
        # that manually specifies argnum
        if isinstance(argnum, int):
            return _jacobian(func, argnum)

        return lambda *args, **kwargs: np.stack(
            [_jacobian(func, arg)(*args, **kwargs) for arg in argnum]).T

    def _jacobian_function(*args, **kwargs):
        """Inspect the arguments for differentiability, and
        compute the autograd gradient function with required argnums
        dynamically.

        This wrapper function is returned to the user instead of autograd.jacobian,
        so that we can take into account cases where the user computes the
        jacobian function once, but then calls it with arguments that change
        in differentiability.
        """
        argnum = []

        for idx, arg in enumerate(args):
            if getattr(arg, "requires_grad", True):
                argnum.append(idx)

        if not argnum:
            return tuple()

        if len(argnum) == 1:
            return _jacobian(func, argnum[0])(*args, **kwargs)

        return np.stack(
            [_jacobian(func, arg)(*args, **kwargs) for arg in argnum]).T

    return _jacobian_function
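
The wrapper above appears to be the implementation behind PennyLane's qml.jacobian. A minimal usage sketch (assuming PennyLane is installed; the device, the QNode and the argument values below are illustrative placeholders, not taken from the source):

import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev)
def circuit(a, b):
    qml.RX(a, wires=0)
    qml.RY(b, wires=0)
    return qml.probs(wires=0)

# Request the Jacobian with respect to both arguments; as in the code above,
# the per-argument Jacobians are computed with autograd and stacked.
jac_fn = qml.jacobian(circuit, argnum=[0, 1])
print(jac_fn(np.array(0.1), np.array(0.2)))
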
Example #7
    def ohe_cost_fcn(params, circuit, ang_array, actual):
        """use MAE to start

        Args:
          params:
          circuit:
          ang_array:
          actual:

        Returns:

        """
        predictions = (np.stack([circuit(params, x)
                                 for x in ang_array]) + 1) * 0.5
        return mse(actual, predictions)
Example #8
    def _jacobian_function(*args, **kwargs):
        """Inspect the arguments for differentiability, and
        compute the autograd gradient function with required argnums
        dynamically.

        This wrapper function is returned to the user instead of autograd.jacobian,
        so that we can take into account cases where the user computes the
        jacobian function once, but then calls it with arguments that change
        in differentiability.
        """
        argnum = []

        for idx, arg in enumerate(args):

            trainable = getattr(arg, "requires_grad", None)
            array_box = isinstance(arg, ArrayBox)

            if trainable is None and not array_box:

                warnings.warn(
                    "Starting with PennyLane v0.20.0, when using Autograd, inputs "
                    "have to explicitly specify requires_grad=True (or the "
                    "argnum argument must be passed) in order for trainable parameters to be "
                    "identified.",
                    UserWarning,
                )

            if trainable is None:
                trainable = True

            if trainable:
                argnum.append(idx)

        if not argnum:
            return tuple()

        if len(argnum) == 1:
            return _jacobian(func, argnum[0])(*args, **kwargs)

        jacobians = [_jacobian(func, arg)(*args, **kwargs) for arg in argnum]

        try:
            return np.stack(jacobians).T
        except ValueError:
            # The Jacobian of each argument is a different shape and cannot
            # be stacked; simply return the tuple of argument Jacobians.
            return tuple(jacobians)
Example #9
def make_predictions(circuit, pre_trained_vals, X, Y, **kwargs):
    """

    Args:
      circuit:
      pre_trained_vals:
      X:
      Y:
      **kwargs:

    Returns:

    """

    if kwargs['readout_layer'] == 'one_hot':
        var = pre_trained_vals

    elif kwargs['readout_layer'] == "weighted_neuron":
        var = pre_trained_vals

    # make final predictions
    if kwargs['readout_layer'] == 'one_hot':
        final_predictions = np.stack([circuit(var, x) for x in X])
        acc = ohe_accuracy(Y, final_predictions)

    elif kwargs['readout_layer'] == 'weighted_neuron':
        from autograd.numpy import exp
        n = kwargs.get('nqubits')
        w = var[:, -1]
        theta = var[:, :-1].numpy()
        final_predictions = [
            int(
                np.round(
                    2. *
                    (1.0 /
                     (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) - 1.,
                    1)) for x in X
        ]
        acc = wn_accuracy(Y, final_predictions)

    return final_predictions, acc
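
A hypothetical call sketch for make_predictions as defined above (the names trained_circuit, weights, X, Y and the keyword values are placeholders, not from the source):

final_predictions, acc = make_predictions(trained_circuit, weights, X, Y,
                                          readout_layer='one_hot', nqubits=4)
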
Example #10
    def _jacobian_function(*args, **kwargs):
        """Inspect the arguments for differentiability, and
        compute the autograd gradient function with required argnums
        dynamically.

        This wrapper function is returned to the user instead of autograd.jacobian,
        so that we can take into account cases where the user computes the
        jacobian function once, but then calls it with arguments that change
        in differentiability.
        """
        argnum = []

        for idx, arg in enumerate(args):
            if getattr(arg, "requires_grad", True):
                argnum.append(idx)

        if not argnum:
            return tuple()

        if len(argnum) == 1:
            return _jacobian(func, argnum[0])(*args, **kwargs)

        return np.stack(
            [_jacobian(func, arg)(*args, **kwargs) for arg in argnum]).T
Example #11
def stack(values, axis=0):
    return np.stack(AutogradBox.unbox_list(values), axis=axis)
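
The tests in Examples #2-#4 exercise the same interface-agnostic stacking through fn.stack, which appears to be PennyLane's dispatching qml.math.stack; the stack method above plays the same role for autograd tensors via AutogradBox. A minimal sketch of mixed-framework stacking (assuming PennyLane and PyTorch are installed; the input values are illustrative):

import pennylane as qml
import numpy as np
import torch

# stack dispatches on the input types; mixing a NumPy array with a torch tensor
# returns a torch.Tensor, matching the behaviour asserted in the tests above.
res = qml.math.stack([np.array([1.0, 2.0]), torch.tensor([3.0, 4.0])])
print(type(res), res)
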
Example #12
def jacobian(func, argnum=None):
    """Returns the Jacobian as a callable function of vector-valued
    (functions of) QNodes.

    This is a wrapper around the :mod:`autograd.jacobian` function.

    Args:
        func (function): A vector-valued Python function or QNode that contains
            a combination of quantum and classical nodes. The output of the computation
            must consist of a single NumPy array (if classical) or a tuple of
            expectation values (if a quantum node)
        argnum (int or Sequence[int]): Which argument to take the gradient
            with respect to. If a sequence is given, the Jacobian matrix
            corresponding to all input elements and all output elements is returned.

    Returns:
        function: the function that returns the Jacobian of the input
        function with respect to the arguments in argnum
    """
    # pylint: disable=no-value-for-parameter

    if argnum is not None:
        # for backwards compatibility with existing code
        # that manually specifies argnum
        if isinstance(argnum, int):
            return _jacobian(func, argnum)

        return lambda *args, **kwargs: np.stack(
            [_jacobian(func, arg)(*args, **kwargs) for arg in argnum]).T

    def _jacobian_function(*args, **kwargs):
        """Inspect the arguments for differentiability, and
        compute the autograd gradient function with required argnums
        dynamically.

        This wrapper function is returned to the user instead of autograd.jacobian,
        so that we can take into account cases where the user computes the
        jacobian function once, but then calls it with arguments that change
        in differentiability.
        """
        argnum = []

        for idx, arg in enumerate(args):

            trainable = getattr(arg, "requires_grad", None)
            array_box = isinstance(arg, ArrayBox)

            if trainable is None and not array_box:

                warnings.warn(
                    "Starting with PennyLane v0.20.0, when using Autograd, inputs "
                    "have to explicitly specify requires_grad=True (or the "
                    "argnum argument must be passed) in order for trainable parameters to be "
                    "identified.",
                    UserWarning,
                )

            if trainable is None:
                trainable = True

            if trainable:
                argnum.append(idx)

        if not argnum:
            return tuple()

        if len(argnum) == 1:
            return _jacobian(func, argnum[0])(*args, **kwargs)

        jacobians = [_jacobian(func, arg)(*args, **kwargs) for arg in argnum]

        try:
            return np.stack(jacobians).T
        except ValueError:
            # The Jacobian of each argument is a different shape and cannot
            # be stacked; simply return the tuple of argument Jacobians.
            return tuple(jacobians)

    return _jacobian_function
Example #13
def train_circuit(circuit, parameter_shape, X_train, Y_train, batch_size,
                  learning_rate, **kwargs):
    """
    train a circuit classifier
    Args:
        circuit (qml.QNode): A circuit that you want to train
        parameter_shape: A tuple describing the shape of the parameters. The first entry is the number of qubits,
        the second one is the number of layers in the circuit architecture.
        X_train (np.ndarray): An array of floats of size (M, n) to be used as training data.
        Y_train (np.ndarray): An array of size (M,) which are the categorical labels
            associated to the training data.

        batch_size (int): Batch size for the circuit training.

        learning_rate (float): The learning rate/step size of the optimizer.

        kwargs: Hyperparameters for the training (passed as keyword arguments). There are the following hyperparameters:

            nsteps (int) : Number of training steps.

            optim (pennylane.optimize instance): Optimizer used during the training of the circuit.
                Pass as qml.OptimizerName.

            Tmax (list): Maximum point T as defined in https://arxiv.org/abs/2010.08512. (Definition 8)
                    The first element is the maximum number of parameters among all architectures,
                    the second is the maximum inference time among all architectures in terms of computing time,
                    the third one is the maximum inference time among all architectures in terms of the number of CNOTs
                    in the circuit

            rate_type (string): Determines the type of error rate in the W-coefficient.
                    If rate_type == 'accuracy', the inference time of the circuit
                    is equal to the time it takes to evaluate the accuracy of the trained circuit with
                    respect to a validation batch three times the size of the training batch size and
                    the error rate is equal to 1-accuracy (w.r.t. a validation batch).

                    If rate_type == 'batch_cost', the inference time of the circuit is equal to the time
                    it takes to train the circuit (for nsteps training steps) and compute the cost at
                    each step and the error rate is equal to the cost after nsteps training steps.

    Returns:
        (W_,weights): W-coefficient, trained weights
    """

    #print('batch_size',batch_size)
    # fix the seed while debugging
    #np.random.seed(1337)
    def ohe_cost_fcn(params, circuit, ang_array, actual):
        '''
        use MSE to start
        '''
        predictions = (np.stack([circuit(params, x)
                                 for x in ang_array]) + 1) * 0.5
        return mse(actual, predictions)

    def wn_cost_fcn(params, circuit, ang_array, actual):
        '''
        use MSE to start
        '''
        w = params[:, -1]

        theta = params[:, :-1]
        #print(w.shape,w,theta.shape,theta)
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) -
            1. for x in ang_array
        ])
        return mse(actual, predictions)

    if kwargs['readout_layer'] == 'one_hot':
        var = np.zeros(parameter_shape)
    elif kwargs['readout_layer'] == "weighted_neuron":
        var = np.hstack(
            (np.zeros(parameter_shape), np.random.random(
                (kwargs['nqubits'], 1)) - 0.5))
    rate_type = kwargs['rate_type']
    inf_time = kwargs['inf_time']
    optim = kwargs['optim']
    numcnots = kwargs['numcnots']

    # Tmax[0] is the maximum parameter size, Tmax[1] the maximum inference time (timeit),
    # Tmax[2] the maximum number of entangling gates
    Tmax = kwargs['Tmax']
    num_train = len(Y_train)
    validation_size = int(0.1 * num_train)
    # All optimizers in the autograd module take the argument `stepsize`, so this works for all of them.
    opt = optim(stepsize=learning_rate)
    start = time.time()
    for _ in range(kwargs['nsteps']):
        batch_index = np.random.randint(0, num_train, (batch_size, ))
        X_train_batch = np.asarray(X_train[batch_index])
        Y_train_batch = np.asarray(Y_train[batch_index])
        if kwargs['readout_layer'] == 'one_hot':
            var, cost = opt.step_and_cost(
                lambda v: ohe_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                       ), var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            var, cost = opt.step_and_cost(
                lambda v: wn_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                      ), var)
    end = time.time()
    cost_time = (end - start)

    if kwargs['rate_type'] == 'accuracy':
        validation_batch = np.random.randint(0, num_train, (validation_size, ))
        X_validation_batch = np.asarray(X_train[validation_batch])
        Y_validation_batch = np.asarray(Y_train[validation_batch])
        start = time.time()  # add in timeit function from Wbranch
        if kwargs['readout_layer'] == 'one_hot':
            predictions = np.stack(
                [circuit(var, x) for x in X_validation_batch])
        elif kwargs['readout_layer'] == 'weighted_neuron':
            n = kwargs.get('nqubits')
            w = var[:, -1]
            theta = var[:, :-1]
            predictions = [
                int(
                    np.round(
                        2. *
                        (1.0 /
                         (1.0 + exp(np.dot(-w, circuit(theta, features=x))))) -
                        1., 1)) for x in X_validation_batch
            ]
        end = time.time()
        inftime = (end - start) / len(X_validation_batch)
        if kwargs['readout_layer'] == 'one_hot':
            err_rate = (
                1.0 - ohe_accuracy(Y_validation_batch, predictions)
            ) + 10**-7  #add small epsilon to prevent divide by 0 errors
            #print('error rate:',err_rate)
            #print('weights: ',var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            err_rate = (
                1.0 - wn_accuracy(Y_validation_batch, predictions)
            ) + 10**-7  #add small epsilon to prevent divide by 0 errors
            #print('error rate:',err_rate)
            #print('weights: ',var)
    elif kwargs['rate_type'] == 'batch_cost':
        err_rate = (
            cost) + 10**-7  #add small epsilon to prevent divide by 0 errors
        #print('error rate:',err_rate)
        #print('weights: ',var)
        inftime = cost_time
    # QHACK #

    if kwargs['inf_time'] == 'timeit':

        W_ = np.abs((Tmax[0] - len(var)) / (Tmax[0])) * np.abs(
            (Tmax[1] - inftime) / (Tmax[1])) * (1. / err_rate)

    elif kwargs['inf_time'] == 'numcnots':
        nc_ = numcnots
        W_ = np.abs((Tmax[0] - len(var)) / (Tmax[0])) * np.abs(
            (Tmax[2] - nc_) / (Tmax[2])) * (1. / err_rate)

    return W_, var
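
A hypothetical call sketch for train_circuit as defined above (my_qnode, X, Y and every keyword value are placeholders chosen to match the kwargs the function reads, not values from the source):

W_, weights = train_circuit(
    my_qnode, (4, 2), X, Y,
    batch_size=16, learning_rate=0.01,
    readout_layer='one_hot', rate_type='accuracy', inf_time='timeit',
    optim=qml.AdamOptimizer, nsteps=50,
    Tmax=[100, 1.0, 50], numcnots=10, nqubits=4,
)
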
Example #14
def train_best(circuit, pre_trained_vals, X_train, Y_train, batch_size,
               learning_rate, **kwargs):
    """train a circuit classifier

    Args:
      circuit(qml.QNode): A circuit that you want to train
      parameter_shape: A tuple describing the shape of the parameters. The first entry is the number of qubits,
      parameter_shape: A tuple describing the shape of the parameters. The first entry is the number of qubits,
    the second one is the number of layers in the circuit architecture.
      X_train(np.ndarray): An array of floats of size (M, n) to be used as training data.
      Y_train(np.ndarray): An array of size (M,) which are the categorical labels
    associated to the training data.
      batch_size(int): Batch size for the circuit training.
      learning_rate(float): The learning rate/step size of the optimizer.
      kwargs: Hyperparameters for the training (passed as keyword arguments). There are the following hyperparameters:
    nsteps (int) : Number of training steps.
    optim (pennylane.optimize instance): Optimizer used during the training of the circuit.
    Pass as qml.OptimizerName.
    Tmax (list): Maximum point T as defined in https://arxiv.org/abs/2010.08512. (Definition 8)
    The first element is the maximum number of parameters among all architectures,
    the second is the maximum inference time among all architectures in terms of computing time,
    the third one is the maximum inference time among all architectures in terms of the number of CNOTS
    in the circuit
    rate_type (string): Determines the type of error rate in the W-coefficient.
    If rate_type == 'accuracy', the inference time of the circuit
    is equal to the time it takes to evaluate the accuracy of the trained circuit with
    respect to a validation batch three times the size of the training batch size and
    the error rate is equal to 1-accuracy (w.r.t. to a validation batch).
    If rate_type == 'accuracy', the inference time of the circuit is equal to the time
    it takes to train the circuit (for nsteps training steps) and compute the cost at
    each step and the error rate is equal to the cost after nsteps training steps.
      pre_trained_vals:
      **kwargs:

    Returns:
      Yprime: final predictions, final accuracy

    """
    from autograd.numpy import exp

    def ohe_cost_fcn(params, circuit, ang_array, actual):
        """use MAE to start

        Args:
          params:
          circuit:
          ang_array:
          actual:

        Returns:

        """
        predictions = (np.stack([circuit(params, x)
                                 for x in ang_array]) + 1) * 0.5
        return mse(actual, predictions)

    def wn_cost_fcn(params, circuit, ang_array, actual):
        """use MAE to start

        Args:
          params:
          circuit:
          ang_array:
          actual:

        Returns:

        """
        w = params[:, -1]

        theta = params[:, :-1]
        print(w.shape, w, theta.shape, theta)
        predictions = np.asarray([
            2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, x))))) - 1.
            for x in ang_array
        ])
        return mse(actual, predictions)

    if kwargs['readout_layer'] == 'one_hot':
        var = pre_trained_vals
    elif kwargs['readout_layer'] == "weighted_neuron":
        var = pre_trained_vals
    rate_type = kwargs['rate_type']
    optim = kwargs['optim']
    num_train = len(Y_train)
    validation_size = int(0.1 * num_train)
    # All optimizers in the autograd module take the argument `stepsize`, so this works for all of them.
    opt = optim(stepsize=learning_rate)

    for _ in range(kwargs['nsteps']):
        batch_index = np.random.randint(0, num_train, (batch_size, ))
        X_train_batch = np.asarray(X_train[batch_index])
        Y_train_batch = np.asarray(Y_train[batch_index])

        if kwargs['readout_layer'] == 'one_hot':
            var, cost = opt.step_and_cost(
                lambda v: ohe_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                       ), var)
        elif kwargs['readout_layer'] == 'weighted_neuron':
            print(var)
            var, cost = opt.step_and_cost(
                lambda v: wn_cost_fcn(v, circuit, X_train_batch, Y_train_batch
                                      ), var)
        print(_, cost)
        # check for early stopping
        if _ % 5 == 0:
            validation_batch = np.random.randint(0, num_train,
                                                 (validation_size, ))
            X_validation_batch = np.asarray(X_train[validation_batch])
            Y_validation_batch = np.asarray(Y_train[validation_batch])
            if kwargs['rate_type'] == 'accuracy':
                if kwargs['readout_layer'] == 'one_hot':
                    predictions = np.stack(
                        [circuit(var, x) for x in X_validation_batch])
                    acc = ohe_accuracy(Y_validation_batch, predictions)
                elif kwargs['readout_layer'] == 'weighted_neuron':
                    n = kwargs.get('nqubits')
                    w = var[:, -1]
                    theta = var[:, :-1].numpy()
                    predictions = [
                        int(
                            np.round(
                                2. *
                                (1.0 /
                                 (1.0 + exp(np.dot(-w, circuit(theta, x))))) -
                                1., 1)) for x in X_validation_batch
                    ]
                    acc = wn_accuracy(Y_validation_batch, predictions)
                if acc > 0.95:
                    break

            elif kwargs['rate_type'] == 'batch_cost':
                if cost < 0.001:
                    break
    # make final predictions
    if kwargs['readout_layer'] == 'one_hot':
        final_predictions = np.stack([circuit(var, x) for x in X_train])
    elif kwargs['readout_layer'] == 'weighted_neuron':
        n = kwargs.get('nqubits')
        w = var[:, -1]
        theta = var[:, :-1]
        final_predictions = [
            int(
                np.round(
                    2. * (1.0 / (1.0 + exp(np.dot(-w, circuit(theta, x))))) -
                    1., 1)) for x in X_train
        ]
    return var, final_predictions