Example #1
    def hamiltonian(*args):
        r"""Compute the qubit hamiltonian.

        Args:
            args (array[array[float]]): initial values of the differentiable parameters

        Returns:
            Hamiltonian: the qubit Hamiltonian
        """
        h_ferm = generate_fermionic_hamiltonian(mol, cutoff, core,
                                                active)(*args)

        coeffs = np.array([])  # guard against a NameError if no identity term appears first
        ops = []

        for n, t in enumerate(h_ferm[1]):

            if len(t) == 0:
                coeffs = np.concatenate([coeffs, np.array([h_ferm[0][n]])])
                ops = ops + [qml.Identity(0)]

            elif len(t) in (2, 4):
                # one-body (len 2) and two-body (len 4) fermionic terms are
                # mapped to qubit operators in exactly the same way
                op = _generate_qubit_operator(t)
                if op != 0:
                    for i, o in enumerate(op[1]):
                        if len(o) == 0:
                            op[1][i] = qml.Identity(0)
                        if len(o) == 1:
                            op[1][i] = _return_pauli(o[0][1])(o[0][0])
                        if len(o) > 1:
                            k = qml.Identity(0)
                            for o_ in o:
                                k = k @ _return_pauli(o_[1])(o_[0])
                            op[1][i] = k
                    coeffs = np.concatenate(
                        [coeffs, np.array(op[0]) * h_ferm[0][n]])
                    ops = ops + op[1]

        h = qml.Hamiltonian(coeffs, ops, simplify=True)

        return h
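The loop above only builds two parallel lists, coefficients and operators, which are then handed to qml.Hamiltonian. A minimal sketch of that final step, with made-up coefficients and Pauli terms standing in for the loop's output:

import pennylane as qml
from pennylane import numpy as np

# Made-up stand-ins for the coeffs/ops lists the loop accumulates.
coeffs = np.array([0.5, -0.24, 0.1])
ops = [qml.Identity(0), qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliZ(1)]

# simplify=True merges duplicate operator terms, as in the function above.
h = qml.Hamiltonian(coeffs, ops, simplify=True)
print(h)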
Example #2
import numpy as np

def decimalToBinaryFixLength(_length, _decimal):
    """Convert a non-negative decimal number to a zero-padded binary array of length _length."""
    binNum = bin(int(_decimal))[2:]
    outputNum = [int(item) for item in binNum]
    if len(outputNum) < _length:
        outputNum = np.concatenate((np.zeros((_length - len(outputNum),)), np.array(outputNum)))
    else:
        outputNum = np.array(outputNum)
    return outputNum
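A quick check of the padding behavior; the outputs below follow directly from the code (note the result is not truncated when the binary string is longer than _length):

>>> decimalToBinaryFixLength(5, 6)   # bin(6) == '110', left-padded with zeros
array([0., 0., 1., 1., 0.])
>>> decimalToBinaryFixLength(2, 6)   # longer than _length: returned as-is
array([1, 1, 0])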
Example #3
    def test_stack_torch(self):
        """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""
        t1 = onp.array([5.0, 8.0, 101.0], dtype=np.float64)
        t2 = torch.tensor([0.6, 0.1, 0.6], dtype=torch.float64)
        t3 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64)

        res = fn.concatenate([t1, t2, t3])
        assert isinstance(res, torch.Tensor)
        assert np.all(res.numpy() == np.concatenate([t1, t2.numpy(), t3.numpy()]))
Example #4
    def test_stack_tensorflow(self):
        """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""
        t1 = tf.constant([0.6, 0.1, 0.6])
        t2 = tf.Variable([0.1, 0.2, 0.3])
        t3 = onp.array([5.0, 8.0, 101.0])

        res = fn.concatenate([t1, t2, t3])
        assert isinstance(res, tf.Tensor)
        assert np.all(res.numpy() == np.concatenate([t1.numpy(), t2.numpy(), t3]))
Example #5
    def test_concatenate_array(self):
        """Test that concatenate, called without the axis arguments, concatenates across the 0th dimension"""
        t1 = [0.6, 0.1, 0.6]
        t2 = np.array([0.1, 0.2, 0.3])
        t3 = onp.array([5.0, 8.0, 101.0])

        res = fn.concatenate([t1, t2, t3])
        assert isinstance(res, np.ndarray)
        assert np.all(res == np.concatenate([t1, t2, t3]))
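The three tests above exercise the same dispatch rule: fn.concatenate inspects the input sequence and returns a tensor of the highest-priority framework present (Torch or TensorFlow win over plain NumPy). A minimal sketch, assuming fn is pennylane.math as in PennyLane's test suite:

import numpy as onp
import pennylane.math as fn

# A plain list mixed with a NumPy array dispatches to NumPy; swapping in a
# torch.Tensor or tf.Variable would promote the result to that framework.
res = fn.concatenate([[0.6, 0.1], onp.array([0.2, 0.3])])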
Example #6
    def weighted_random_sampling(qnodes, coeffs, shots, argnums, *args,
                                 **kwargs):
        """Returns an array of length ``shots`` containing single-shot estimates
        of the Hamiltonian gradient. The shots are distributed randomly over
        the terms in the Hamiltonian, as per a multinomial distribution.

        Args:
            qnodes (Sequence[.QNode]): Sequence of QNodes, each one when evaluated
                returning the corresponding expectation value of a term in the Hamiltonian.
            coeffs (Sequence[float]): Sequence of coefficients corresponding to
                each term in the Hamiltonian. Must be the same length as ``qnodes``.
            shots (int): The number of shots used to estimate the Hamiltonian expectation
                value. These shots are distributed over the terms in the Hamiltonian,
                as per a multinomial distribution.
            argnums (Sequence[int]): the QNode argument indices which are trainable
            *args: Arguments to the QNodes
            **kwargs: Keyword arguments to the QNodes

        Returns:
            array[float]: the single-shot gradients of the Hamiltonian expectation value
        """

        # determine the shot probability per term
        prob_shots = np.abs(coeffs) / np.sum(np.abs(coeffs))

        # construct the multinomial distribution (scipy.stats.multinomial), and
        # sample from it to determine how many shots to apply per term
        si = multinomial(n=shots, p=prob_shots)
        shots_per_term = si.rvs()[0]

        grads = []

        for h, c, p, s in zip(qnodes, coeffs, prob_shots, shots_per_term):

            # if the number of shots is 0, do nothing
            if s == 0:
                continue

            # set the QNode device shots
            h.device.shots = [(1, s)]

            jacs = []
            for i in argnums:
                j = qml.jacobian(h, argnum=i)(*args, **kwargs)

                if s == 1:
                    j = np.expand_dims(j, 0)

                # Divide each term by the probability per shot. This is
                # because we are sampling one at a time.
                jacs.append(c * j / p)

            grads.append(jacs)

        return [np.concatenate(i) for i in zip(*grads)]
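The heart of the routine is the multinomial shot allocation; the rest is bookkeeping around qml.jacobian. The allocation step in isolation, with illustrative coefficients (multinomial here is scipy.stats.multinomial, which the surrounding module is assumed to import):

import numpy as np
from scipy.stats import multinomial

coeffs = np.array([0.2, -0.5, 0.3])
prob_shots = np.abs(coeffs) / np.sum(np.abs(coeffs))   # [0.2, 0.5, 0.3]
shots_per_term = multinomial(n=100, p=prob_shots).rvs()[0]
print(shots_per_term)   # e.g. [21 48 31]; always sums to 100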
Example #7
def iris(train_size=100, test_size=50, shuffle=True) -> DataSet:
    train_size = min(train_size, MAX_SAMPLES)
    if train_size + test_size > MAX_SAMPLES:
        test_size = MAX_SAMPLES - train_size
    data, target = datasets.load_iris(return_X_y=True)
    # reshape labels to a column; (-1, 1) also works when train_size + test_size < MAX_SAMPLES
    target = target.reshape((-1, 1))
    dataset = np.concatenate((data, target), axis=1)

    if shuffle:
        np.random.shuffle(dataset)

    train, test = (
        dataset[:train_size, :],
        dataset[train_size:train_size + test_size, :],
    )

    x_train, y_train = np.split(train, [4], axis=1)
    x_test, y_test = np.split(test, [4], axis=1)

    y_train = one_hot(y_train, 3)
    y_test = one_hot(y_test, 3)

    return DataSet(x_train, y_train, None, None, x_test, y_test)
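one_hot, DataSet, and MAX_SAMPLES come from the surrounding module and are not shown. A plausible stand-in for the one_hot helper, assuming a column vector of integer class labels:

import numpy as np

def one_hot(labels, num_classes):
    # Hypothetical stand-in: maps an (n, 1) column of integer labels
    # to an (n, num_classes) one-hot matrix.
    return np.eye(num_classes)[labels.astype(int).flatten()]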
Example #8
def init_parameters(
    layers: int,
    current_layers: int,
    wires: int,
    default_value: Optional[float],
    dynamic_parameters: bool = True,
    mask_type: Type[Mask] = DropoutMask,
) -> MaskedCircuit:
    params_uniform = np.random.uniform(low=-np.pi,
                                       high=np.pi,
                                       size=(current_layers, wires))
    params_zero = np.zeros((layers - current_layers, wires))
    params_combined = np.concatenate((params_uniform, params_zero))
    mc = MaskedCircuit.full_circuit(
        parameters=params_combined,
        layers=layers,
        wires=wires,
        default_value=default_value,
        entangling_mask=DropoutMask(shape=(layers, wires - 1)),
        dynamic_parameters=dynamic_parameters,
        mask_type=mask_type,
    )
    mc.mask(Axis.LAYERS, mask_type=mask_type)[current_layers:] = True
    return mc
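Stripped of the MaskedCircuit machinery, the parameter initialization is simply "random values for the active layers, zeros for the rest". A self-contained sketch of that padding pattern:

import numpy as np

layers, current_layers, wires = 4, 2, 3
params_uniform = np.random.uniform(low=-np.pi, high=np.pi, size=(current_layers, wires))
params_zero = np.zeros((layers - current_layers, wires))
params = np.concatenate((params_uniform, params_zero))
print(params.shape)   # (4, 3); the last two layers start out all-zero and masked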
Example #9
    def step(self, objective_fn, *args, **kwargs):
        """Update trainable arguments with one step of the optimizer.

        Args:
            objective_fn (function): the objective function for optimization
            *args: variable length argument list for objective function
            **kwargs: variable length of keyword arguments for the objective function

        Returns:
            list[array]: The new variable values :math:`x^{(t+1)}`.
            If a single argument is provided, an array is returned instead of a list.
        """

        self.trainable_args = set()

        for index, arg in enumerate(args):
            if getattr(arg, "requires_grad", True):
                self.trainable_args |= {index}

        if self.s is None:
            # Number of shots per parameter
            self.s = [
                np.zeros_like(a, dtype=np.int64) + self.min_shots
                for i, a in enumerate(args)
                if i in self.trainable_args
            ]

        # keep track of the number of shots run
        s = np.concatenate([i.flatten() for i in self.s])
        self.max_shots = max(s)
        self.shots_used = int(2 * np.sum(s))
        self.total_shots_used += self.shots_used

        # compute the gradient, as well as the variance in the gradient,
        # using the number of shots determined by the array s.
        grads, grad_variances = self.compute_grad(objective_fn, args, kwargs)
        new_args = self.apply_grad(grads, args)

        if self.xi is None:
            self.chi = [np.zeros_like(g, dtype=np.float64) for g in grads]
            self.xi = [np.zeros_like(g, dtype=np.float64) for g in grads]

        # running average of the gradient
        self.chi = [self.mu * c + (1 - self.mu) * g for c, g in zip(self.chi, grads)]

        # running average of the gradient variance
        self.xi = [self.mu * x + (1 - self.mu) * v for x, v in zip(self.xi, grad_variances)]

        for idx, (c, x) in enumerate(zip(self.chi, self.xi)):
            xi = x / (1 - self.mu ** (self.k + 1))
            chi = c / (1 - self.mu ** (self.k + 1))

            # determine the new optimum shots distribution for the next
            # iteration of the optimizer
            s = np.ceil(
                (2 * self.lipschitz * self.stepsize * xi)
                / ((2 - self.lipschitz * self.stepsize) * (chi ** 2 + self.b * (self.mu ** self.k)))
            )

            # apply an upper and lower bound on the new shot distributions,
            # to prevent the number of shots from dropping below min(2, min_shots)
            # or from growing too significantly
            gamma = (
                (self.stepsize - self.lipschitz * self.stepsize ** 2 / 2) * chi ** 2
                - xi * self.lipschitz * self.stepsize ** 2 / (2 * s)
            ) / s

            argmax_gamma = np.unravel_index(np.argmax(gamma), gamma.shape)
            smax = max(s[argmax_gamma], 2)
            self.s[idx] = np.squeeze(np.int64(np.clip(s, min(2, self.min_shots), smax)))

        self.k += 1

        # unwrap from the list if only a single argument was provided, for a cleaner return
        if len(new_args) == 1:
            return new_args[0]

        return new_args
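The per-parameter shot update inside the loop can be tried standalone. A numeric sketch with illustrative values (none of these numbers come from the source):

import numpy as np

lipschitz, stepsize, mu, b, k = 1.0, 0.1, 0.99, 1e-6, 10
xi = np.array([0.5, 0.2])     # bias-corrected running variance of the gradient
chi = np.array([0.1, 0.05])   # bias-corrected running mean of the gradient

s = np.ceil(
    (2 * lipschitz * stepsize * xi)
    / ((2 - lipschitz * stepsize) * (chi ** 2 + b * mu ** k))
)
print(s)   # higher variance or smaller gradient -> more shots for that parameter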