Example #1
0
    def _probability_gradients(self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]
                               ) -> Tuple[Union[np.ndarray, SparseArray],
                                          Union[np.ndarray, SparseArray]]:
        """Evaluate the gradients of the output probabilities with respect to the
        inputs and the weights, sample by sample.

        Args:
            input_data: 2D array of input samples; ``input_data.shape[0]`` is the
                number of samples and column ``j`` feeds ``input_params[j]``.
            weights: 1D array of weight values; entry ``j`` feeds ``weight_params[j]``.

        Returns:
            A tuple ``(input_grad, weights_grad)`` of shapes
            ``(rows, *output_shape, num_inputs)`` and
            ``(rows, *output_shape, num_weights)`` — dense ndarrays or sparse COO
            arrays depending on the sparse setting — or ``(None, None)`` if no
            gradient circuit is available.
        """
        # check whether gradient circuit could be constructed
        if self._grad_circuit is None:
            return None, None

        rows = input_data.shape[0]

        # initialize empty gradients
        # NOTE(review): allocation checks self._sparse while the return below checks
        # self.sparse — presumably the property mirrors the attribute; confirm.
        if self._sparse:
            input_grad = DOK((rows, *self.output_shape, self.num_inputs))
            weights_grad = DOK((rows, *self.output_shape, self.num_weights))
        else:
            input_grad = np.zeros((rows, *self.output_shape, self.num_inputs))
            weights_grad = np.zeros((rows, *self.output_shape, self.num_weights))

        for row in range(rows):
            # bind this row's inputs and the shared weights to the gradient circuit
            param_values = {input_param: input_data[row, j]
                            for j, input_param in enumerate(self.input_params)}
            param_values.update({weight_param: weights[j]
                                 for j, weight_param in enumerate(self.weight_params)})

            # TODO: additional "bind_parameters" should not be necessary,
            #  seems like a bug to be fixed
            grad = self._sampler.convert(self._grad_circuit, param_values
                                         ).bind_parameters(param_values).eval()

            # construct gradients: grad[i] is the gradient w.r.t. the i-th parameter,
            # inputs first, then weights
            for i in range(self.num_inputs + self.num_weights):
                coo_grad = coo_matrix(grad[i])  # this works for sparse and dense case

                # get index for input or weights gradients
                j = i if i < self.num_inputs else i - self.num_inputs

                for _, k, val in zip(coo_grad.row, coo_grad.col, coo_grad.data):

                    # interpret integer and construct key
                    key = self._interpret(k)
                    if isinstance(key, Integral):
                        key = (row, int(key), j)
                    else:
                        # if key is an array-type, cast to hashable tuple
                        key = tuple(cast(Iterable[int], key))
                        key = (row, *key, j)  # type: ignore

                    # store value for inputs or weights gradients; only the real part
                    # is accumulated
                    if i < self.num_inputs:
                        input_grad[key] += np.real(val)
                    else:
                        weights_grad[key] += np.real(val)

        if self.sparse:
            return input_grad.to_coo(), weights_grad.to_coo()
        else:
            return input_grad, weights_grad
Example #2
0
    def _probabilities(self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]
                       ) -> Union[np.ndarray, SparseArray]:
        """Compute the output probabilities for every sample in ``input_data``.

        Each row of ``input_data`` is bound (together with the shared weights)
        into its own circuit; the circuits are executed in one batch and the
        measured counts are normalized into probabilities, mapped through the
        interpret function into the output shape.
        """
        num_rows = input_data.shape[0]

        # bind one circuit per sample: inputs vary per row, weights are shared
        bound_circuits = []
        for sample in range(num_rows):
            binding = {p: input_data[sample, j] for j, p in enumerate(self.input_params)}
            binding.update({p: weights[j] for j, p in enumerate(self.weight_params)})
            bound_circuits.append(self._circuit.bind_parameters(binding))

        result = self.quantum_instance.execute(bound_circuits)

        # allocate the probability container (sparse DOK or dense ndarray)
        if self.sparse:
            prob = DOK((num_rows, *self.output_shape))
        else:
            prob = np.zeros((num_rows, *self.output_shape))

        for sample, bound in enumerate(bound_circuits):
            counts = result.get_counts(bound)
            total_shots = sum(counts.values())

            # normalize counts into probabilities and scatter them via interpret
            for bitstring, occurrences in counts.items():
                index = self._interpret(int(bitstring, 2))
                if isinstance(index, Integral):
                    index = (cast(int, index),)
                prob[(sample, *index)] += occurrences / total_shots  # type: ignore

        return prob.to_coo() if self.sparse else prob
Example #3
0
    def _probabilities(
        self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]
    ) -> Union[np.ndarray, SparseArray]:
        """Compute the output probabilities for every sample in ``input_data``.

        Binds one circuit per sample (inputs vary per row, weights are shared),
        executes them as a single batch, and normalizes the measured counts into
        probabilities mapped through the interpret function.
        """
        self._check_quantum_instance("probabilities")

        num_rows = input_data.shape[0]

        # bind one circuit per sample
        bound_circuits = []
        for sample in range(num_rows):
            binding = {p: input_data[sample, j] for j, p in enumerate(self._input_params)}
            binding.update({p: weights[j] for j, p in enumerate(self._weight_params)})
            bound_circuits.append(self._circuit.bind_parameters(binding))

        # run the bound pass manager, if one is configured
        if self._quantum_instance.bound_pass_manager is not None:
            bound_circuits = self._quantum_instance.transpile(
                bound_circuits, pass_manager=self._quantum_instance.bound_pass_manager
            )

        result = self._quantum_instance.execute(
            bound_circuits, had_transpiled=self._circuit_transpiled
        )

        # allocate the probability container (sparse DOK or dense ndarray)
        if self._sparse:
            # pylint: disable=import-error
            from sparse import DOK

            prob = DOK((num_rows, *self._output_shape))
        else:
            prob = np.zeros((num_rows, *self._output_shape))

        for sample, bound in enumerate(bound_circuits):
            counts = result.get_counts(bound)
            total_shots = sum(counts.values())

            # normalize counts into probabilities and scatter them via interpret
            for bitstring, occurrences in counts.items():
                index = self._interpret(int(bitstring, 2))
                if isinstance(index, Integral):
                    index = (cast(int, index),)
                prob[(sample, *index)] += occurrences / total_shots  # type: ignore

        return prob.to_coo() if self._sparse else prob
Example #4
0
    def _probability_gradients(
        self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]
    ) -> Tuple[Union[np.ndarray, SparseArray], Union[np.ndarray, SparseArray]]:
        """Evaluate gradients of the output probabilities w.r.t. inputs and weights.

        Args:
            input_data: 2D array of input samples; ``input_data.shape[0]`` is the
                number of samples and column ``j`` feeds ``_input_params[j]``.
            weights: 1D array of weight values; entry ``j`` feeds ``_weight_params[j]``.

        Returns:
            A tuple ``(input_grad, weights_grad)``. ``input_grad`` is ``None``
            unless input gradients were requested; both are ``None`` when no
            gradient circuit could be constructed. Shapes are
            ``(num_samples, *output_shape, num_inputs)`` and
            ``(num_samples, *output_shape, num_weights)`` — dense ndarrays or
            sparse COO arrays depending on the sparse setting.
        """
        self._check_quantum_instance("probability gradients")

        # check whether gradient circuit could be constructed
        if self._gradient_circuit is None:
            return None, None

        num_samples = input_data.shape[0]

        # initialize empty gradients
        input_grad = None  # by default we don't have data gradients
        if self._sparse:
            # pylint: disable=import-error
            from sparse import DOK

            if self._input_gradients:
                input_grad = DOK((num_samples, *self._output_shape, self._num_inputs))
            weights_grad = DOK((num_samples, *self._output_shape, self._num_weights))
        else:
            if self._input_gradients:
                input_grad = np.zeros((num_samples, *self._output_shape, self._num_inputs))
            weights_grad = np.zeros((num_samples, *self._output_shape, self._num_weights))

        # vectorized parameter values: one column of input_data per input parameter,
        # each weight broadcast across all samples
        param_values = {
            input_param: input_data[:, j] for j, input_param in enumerate(self._input_params)
        }
        param_values.update(
            {
                weight_param: np.full(num_samples, weights[j])
                for j, weight_param in enumerate(self._weight_params)
            }
        )

        converted_op = self._sampler.convert(self._gradient_circuit, param_values)
        # if statement is a workaround for https://github.com/Qiskit/qiskit-terra/issues/7608
        if len(converted_op.parameters) > 0:
            # create a list of parameter bindings, each element corresponds to a sample
            # in the dataset (renamed the comprehension variable so it no longer
            # shadows the outer ``param_values`` mapping)
            param_bindings = [
                {param: values[i] for param, values in param_values.items()}
                for i in range(num_samples)
            ]

            grad = []
            # iterate over gradient vectors and bind the correct leftover parameters
            for g_i, param_i in zip(converted_op, param_bindings):
                # bind or re-bind remaining values and evaluate the gradient
                grad.append(g_i.bind_parameters(param_i).eval())
        else:
            grad = converted_op.eval()

        # number of gradient variables per sample: inputs precede weights when
        # input gradients are requested
        if self._input_gradients:
            num_grad_vars = self._num_inputs + self._num_weights
        else:
            num_grad_vars = self._num_weights

        # construct gradients
        for sample in range(num_samples):
            for i in range(num_grad_vars):
                coo_grad = coo_matrix(grad[sample][i])  # this works for sparse and dense case

                # get index for input or weights gradients
                if self._input_gradients:
                    grad_index = i if i < self._num_inputs else i - self._num_inputs
                else:
                    grad_index = i

                for _, k, val in zip(coo_grad.row, coo_grad.col, coo_grad.data):
                    # interpret integer and construct key
                    key = self._interpret(k)
                    if isinstance(key, Integral):
                        key = (sample, int(key), grad_index)
                    else:
                        # if key is an array-type, cast to hashable tuple
                        key = tuple(cast(Iterable[int], key))
                        key = (sample, *key, grad_index)

                    # store value for inputs or weights gradients; only the real part
                    # is accumulated
                    if self._input_gradients:
                        # we compute input gradients first
                        if i < self._num_inputs:
                            input_grad[key] += np.real(val)
                        else:
                            weights_grad[key] += np.real(val)
                    else:
                        weights_grad[key] += np.real(val)
        # end of for each sample

        if self._sparse:
            if self._input_gradients:
                input_grad = input_grad.to_coo()
            weights_grad = weights_grad.to_coo()

        return input_grad, weights_grad