Example #1
0
    def _probabilities(self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]
                       ) -> Union[np.ndarray, SparseArray]:
        """Bind each input sample (plus the shared weights) into the circuit,
        execute all circuits, and convert measurement counts into per-sample
        outcome probabilities of shape ``(num_rows, *output_shape)``."""
        num_rows = input_data.shape[0]

        # one bound circuit per input sample; weights are identical for all samples
        bound_circuits = []
        for row in range(num_rows):
            binding = dict(zip(self.input_params, input_data[row]))
            binding.update(zip(self.weight_params, weights))
            bound_circuits.append(self._circuit.bind_parameters(binding))

        result = self.quantum_instance.execute(bound_circuits)

        # accumulator: sparse DOK or dense ndarray, leading axis indexes the sample
        shape = (num_rows, *self.output_shape)
        prob = DOK(shape) if self.sparse else np.zeros(shape)

        for row, circuit in enumerate(bound_circuits):
            counts = result.get_counts(circuit)
            shots = sum(counts.values())

            # each bitstring contributes its relative frequency at the interpreted index
            for bitstring, count in counts.items():
                key = self._interpret(int(bitstring, 2))
                if isinstance(key, Integral):
                    key = (cast(int, key),)
                prob[(row, *key)] += count / shots  # type: ignore

        return prob.to_coo() if self.sparse else prob
Example #2
0
    def _probabilities(
        self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]
    ) -> Union[np.ndarray, SparseArray]:
        """Evaluate outcome probabilities for every sample in ``input_data``.

        Binds each row of ``input_data`` together with ``weights`` into the
        circuit, executes the batch on the quantum instance, and converts the
        measured counts into a probability array (dense ndarray or sparse COO)
        of shape ``(num_samples, *output_shape)``.
        """
        self._check_quantum_instance("probabilities")

        # evaluate operator: build one bound circuit per input sample
        # (assumes input_data is 2D: samples x features -- TODO confirm at caller)
        circuits = []
        num_samples = input_data.shape[0]
        for i in range(num_samples):
            param_values = {
                input_param: input_data[i, j] for j, input_param in enumerate(self._input_params)
            }
            param_values.update(
                {weight_param: weights[j] for j, weight_param in enumerate(self._weight_params)}
            )
            circuits.append(self._circuit.bind_parameters(param_values))

        # re-transpile the now fully-bound circuits if a bound pass manager is configured
        if self._quantum_instance.bound_pass_manager is not None:
            circuits = self._quantum_instance.transpile(
                circuits, pass_manager=self._quantum_instance.bound_pass_manager
            )

        result = self._quantum_instance.execute(circuits, had_transpiled=self._circuit_transpiled)
        # initialize probabilities: sparse DOK supports item-wise accumulation
        if self._sparse:
            # pylint: disable=import-error
            from sparse import DOK

            prob = DOK((num_samples, *self._output_shape))
        else:
            prob = np.zeros((num_samples, *self._output_shape))

        for i, circuit in enumerate(circuits):
            counts = result.get_counts(circuit)
            shots = sum(counts.values())

            # evaluate probabilities: relative frequency of each bitstring,
            # placed at the index produced by the interpret function
            for b, v in counts.items():
                key = self._interpret(int(b, 2))
                if isinstance(key, Integral):
                    key = (cast(int, key),)
                key = (i, *key)  # type: ignore
                prob[key] += v / shots

        # DOK is a build-only format; convert to COO before returning
        if self._sparse:
            return prob.to_coo()
        else:
            return prob
Example #3
0
def test_set_zero():
    """Writing a zero over a stored entry must remove it from sparse storage."""
    arr = DOK((1,), dtype=np.uint8)
    arr[0] = 1
    arr[0] = 0

    assert arr[0] == 0
    assert arr.nnz == 0
    def _probabilities(self, input_data: np.ndarray, weights: np.ndarray
                       ) -> Union[np.ndarray, SparseArray]:
        """Evaluate outcome probabilities for a single sample.

        Binds ``input_data`` and ``weights`` into the circuit, executes it on
        the quantum instance, and converts the measured counts into a
        probability array of shape ``(1, *output_shape)`` (sparse DOK when
        ``self.sparse`` is set, dense ndarray otherwise).
        """
        # combine parameter dictionary
        param_values = {p: input_data[i] for i, p in enumerate(self.input_params)}
        param_values.update({p: weights[i] for i, p in enumerate(self.weight_params)})

        # evaluate operator
        result = self.quantum_instance.execute(
            self.circuit.bind_parameters(param_values))
        counts = result.get_counts()
        shots = sum(counts.values())

        # initialize probabilities; bare annotation instead of the previous
        # dead ``= None`` assignment, which contradicted the declared Union
        prob: Union[np.ndarray, SparseArray]
        if self.sparse:
            prob = DOK((1, *self.output_shape))
        else:
            prob = np.zeros((1, *self.output_shape))

        # evaluate probabilities: each bitstring's relative frequency is added
        # at the index produced by the interpret function
        for b, v in counts.items():
            key = self._interpret(int(b, 2))
            if isinstance(key, Integral):
                key = (cast(int, key),)
            key = (0, *key)  # type: ignore
            prob[key] += v / shots

        return prob
Example #5
0
def test_convert_from_scipy_sparse():
    """A DOK built from a scipy sparse matrix must round-trip its values."""
    import scipy.sparse

    mat = scipy.sparse.rand(6, 3, density=0.2)
    converted = DOK(mat)

    assert_eq(mat, converted)
Example #6
0
def test_construct(shape, data):
    """A DOK built from (shape, data) must equal the dense array with the
    same coordinate -> value entries."""
    s = DOK(shape, data)
    x = np.zeros(shape, dtype=s.dtype)

    # dict.items() replaces the Python-2 ``six.iteritems`` shim, matching the
    # other construction test in this file
    for coords, value in data.items():
        x[coords] = value

    assert_eq(x, s)
Example #7
0
def test_float_dtype():
    """Mixed integer/float entries must promote the DOK dtype to float32."""
    entries = {1: np.uint8(1), 2: np.float32(2)}

    s = DOK((5,), entries)

    assert s.dtype == np.float32
Example #8
0
def test_construct(sd):
    """A DOK built from a (shape, data) fixture must equal the equivalent
    dense array."""
    shape, data = sd
    sparse_arr = DOK(shape, data)
    dense = np.zeros(shape, dtype=sparse_arr.dtype)

    for coords, value in data.items():
        dense[coords] = value

    assert_eq(dense, sparse_arr)
Example #9
0
def test_int_dtype():
    """Two unsigned integer dtypes must promote to the wider one (uint16)."""
    entries = {1: np.uint8(1), 2: np.uint16(2)}

    s = DOK((5,), entries)

    assert s.dtype == np.uint16
Example #10
0
def normalize_vecs(mat):
    """Normalize along the last axis so each probability vector sums to one."""
    last_axis = len(mat.shape) - 1
    # COO supports the axis-wise reduction; DOK supports item assignment
    coo = COO(mat)
    totals = coo.sum(axis=last_axis)
    dok = DOK(coo)
    for coords in dok.data:
        # divide every stored entry by its vector's total
        dok[coords] = dok[coords] / totals[coords[:-1]]
    return COO(dok)
Example #11
0
    def _probability_gradients(self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]
                               ) -> Tuple[Union[np.ndarray, SparseArray],
                                          Union[np.ndarray, SparseArray]]:
        """Evaluate gradients of the outcome probabilities w.r.t. inputs and weights.

        Returns ``(input_grad, weights_grad)`` with shapes
        ``(rows, *output_shape, num_inputs)`` and
        ``(rows, *output_shape, num_weights)``, or ``(None, None)`` when no
        gradient circuit is available.
        """

        # check whether gradient circuit could be constructed
        if self._grad_circuit is None:
            return None, None

        # assumes input_data is 2D: rows x features -- TODO confirm at caller
        rows = input_data.shape[0]

        # initialize empty gradients
        # NOTE(review): allocation checks ``self._sparse`` but the return below
        # checks ``self.sparse`` -- confirm both resolve to the same flag
        if self._sparse:
            input_grad = DOK((rows, *self.output_shape, self.num_inputs))
            weights_grad = DOK((rows, *self.output_shape, self.num_weights))
        else:
            input_grad = np.zeros((rows, *self.output_shape, self.num_inputs))
            weights_grad = np.zeros((rows, *self.output_shape, self.num_weights))

        for row in range(rows):
            # bind this sample's inputs plus the shared weights
            param_values = {input_param: input_data[row, j]
                            for j, input_param in enumerate(self.input_params)}
            param_values.update({weight_param: weights[j]
                                 for j, weight_param in enumerate(self.weight_params)})

            # TODO: additional "bind_parameters" should not be necessary,
            #  seems like a bug to be fixed
            grad = self._sampler.convert(self._grad_circuit, param_values
                                         ).bind_parameters(param_values).eval()

            # construct gradients: first num_inputs entries are input gradients,
            # the rest are weight gradients
            for i in range(self.num_inputs + self.num_weights):
                coo_grad = coo_matrix(grad[i])  # this works for sparse and dense case

                # get index for input or weights gradients
                j = i if i < self.num_inputs else i - self.num_inputs

                for _, k, val in zip(coo_grad.row, coo_grad.col, coo_grad.data):

                    # interpret integer and construct key
                    key = self._interpret(k)
                    if isinstance(key, Integral):
                        key = (row, int(key), j)
                    else:
                        # if key is an array-type, cast to hashable tuple
                        key = tuple(cast(Iterable[int], key))
                        key = (row, *key, j)  # type: ignore

                    # store value for inputs or weights gradients
                    # (real part only; gradients may carry spurious imaginary parts)
                    if i < self.num_inputs:
                        input_grad[key] += np.real(val)
                    else:
                        weights_grad[key] += np.real(val)

        # DOK is build-only; convert to COO for the caller
        if self.sparse:
            return input_grad.to_coo(), weights_grad.to_coo()
        else:
            return input_grad, weights_grad
Example #12
0
def markovize_bytes(byte_string, order=1, class_dict=byte_classes):
    """Build an order-``order`` transition-count tensor over byte classes.

    Walks ``byte_string`` and, once ``order`` previous states have been seen,
    increments the count at (previous states..., current state).
    """
    n_classes = len(class_dict)
    counts = DOK(shape=tuple(n_classes for _ in range(order + 1)))
    history = [-1] * order
    for true_byte in byte_string:
        # map the raw byte to its class index, falling back to 'other'
        try:
            idx = class_dict[str(true_byte)]
        except KeyError:
            print('key error.')
            idx = class_dict['other']
        # only count once the history window is fully populated
        if not any(np.array(history) < 0):
            counts[tuple(history + [idx])] += 1
        # slide the history window left and append the current class
        history = history[1:] + [idx]
    return counts
Example #13
0
def test_convert_from_numpy():
    """A DOK built from a dense ndarray must round-trip its values."""
    dense = np.random.rand(2, 3, 4)
    converted = DOK(dense)

    assert_eq(dense, converted)
Example #14
0
def test_convert_from_coo():
    """A DOK built from a random COO array must compare equal to it."""
    original = sparse.random((2, 3, 4), 0.5, format='coo')
    as_dok = DOK(original)

    assert_eq(original, as_dok)
    def _probability_gradients(self, input_data: np.ndarray, weights: np.ndarray
                               ) -> Tuple[Union[np.ndarray, SparseArray],
                                          Union[np.ndarray, SparseArray]]:
        """Evaluate gradients of the outcome probabilities for one sample.

        Returns ``(input_grad, weights_grad)`` with shapes
        ``(1, *output_shape, num_inputs)`` and ``(1, *output_shape, num_weights)``
        (sparse DOK when ``self._sparse`` is set and the dimension is non-empty,
        dense ndarray otherwise).
        """
        # combine parameter dictionary
        param_values = {p: input_data[i] for i, p in enumerate(self.input_params)}
        param_values.update({p: weights[i] for i, p in enumerate(self.weight_params)})

        # TODO: additional "bind_parameters" should not be necessary, seems like a bug to be fixed
        grad = self._sampler.convert(self._grad_circuit, param_values
                                     ).bind_parameters(param_values).eval()

        # TODO: map to dictionary to pretend sparse logic --> needs to be fixed in opflow!
        input_grad_dicts: List[Dict] = []
        if self.num_inputs > 0:
            input_grad_dicts = [{} for _ in range(self.num_inputs)]
            for i in range(self.num_inputs):
                for k in range(2 ** self.circuit.num_qubits):
                    key = self._interpret(k)
                    if not isinstance(key, Integral):
                        # if key is an array-type, cast to hashable tuple
                        key = tuple(cast(Iterable[int], key))
                    input_grad_dicts[i][key] = (input_grad_dicts[i].get(key, 0.0) +
                                                np.real(grad[i][k]))

        weights_grad_dicts: List[Dict] = []
        if self.num_weights > 0:
            weights_grad_dicts = [{} for _ in range(self.num_weights)]
            for i in range(self.num_weights):
                for k in range(2 ** self.circuit.num_qubits):
                    key = self._interpret(k)
                    if not isinstance(key, Integral):
                        # if key is an array-type, cast to hashable tuple
                        key = tuple(cast(Iterable[int], key))
                    weights_grad_dicts[i][key] = (weights_grad_dicts[i].get(key, 0.0) +
                                                  np.real(grad[i + self.num_inputs][k]))

        # allocate gradient containers; bare annotations instead of the previous
        # dead ``= None`` assignments, which contradicted the declared Unions
        input_grad: Union[np.ndarray, SparseArray]
        weights_grad: Union[np.ndarray, SparseArray]
        if self._sparse:
            if self.num_inputs > 0:
                input_grad = DOK((1, *self.output_shape, self.num_inputs))
            else:
                input_grad = np.zeros((1, *self.output_shape, self.num_inputs))
            if self.num_weights > 0:
                weights_grad = DOK((1, *self.output_shape, self.num_weights))
            else:
                weights_grad = np.zeros((1, *self.output_shape, self.num_weights))
        else:
            input_grad = np.zeros((1, *self.output_shape, self.num_inputs))
            weights_grad = np.zeros((1, *self.output_shape, self.num_weights))

        for i in range(self.num_inputs):
            # loop variable renamed from ``grad`` to avoid shadowing the
            # gradient result computed above
            for k, grad_value in input_grad_dicts[i].items():
                if isinstance(k, Integral):
                    key = (0, k, i)
                else:
                    key = (0, *k, i)  # type: ignore
                input_grad[key] = grad_value

        for i in range(self.num_weights):
            for k, grad_value in weights_grad_dicts[i].items():
                # BUG FIX: this branch previously tested ``isinstance(key, Integral)``
                # on a stale ``key = -1`` placeholder (always true), so tuple-valued
                # dictionary keys were never unpacked here; test ``k`` as in the
                # input-gradient loop above.
                if isinstance(k, Integral):
                    key = (0, k, i)
                else:
                    key = (0, *k, i)  # type: ignore
                weights_grad[key] = grad_value

        return input_grad, weights_grad
Example #16
0
def test_default_dtype():
    """With no data supplied, a DOK defaults to float64 storage."""
    s = DOK((5,))

    assert s.dtype == np.float64
Example #17
0
    def _probability_gradients(
        self, input_data: Optional[np.ndarray], weights: Optional[np.ndarray]
    ) -> Tuple[Union[np.ndarray, SparseArray], Union[np.ndarray, SparseArray]]:
        """Evaluate gradients of the outcome probabilities for a batch of samples.

        Returns ``(input_grad, weights_grad)``; ``input_grad`` is ``None``
        unless input gradients were requested. Shapes are
        ``(num_samples, *output_shape, num_inputs)`` and
        ``(num_samples, *output_shape, num_weights)``.
        """
        self._check_quantum_instance("probability gradients")

        # check whether gradient circuit could be constructed
        if self._gradient_circuit is None:
            return None, None

        # assumes input_data is 2D: samples x features -- TODO confirm at caller
        num_samples = input_data.shape[0]

        # initialize empty gradients
        input_grad = None  # by default we don't have data gradients
        if self._sparse:
            # pylint: disable=import-error
            from sparse import DOK

            if self._input_gradients:
                input_grad = DOK((num_samples, *self._output_shape, self._num_inputs))
            weights_grad = DOK((num_samples, *self._output_shape, self._num_weights))
        else:
            if self._input_gradients:
                input_grad = np.zeros((num_samples, *self._output_shape, self._num_inputs))
            weights_grad = np.zeros((num_samples, *self._output_shape, self._num_weights))

        # bind whole columns at once: each parameter maps to a vector of
        # per-sample values (weights are broadcast across all samples)
        param_values = {
            input_param: input_data[:, j] for j, input_param in enumerate(self._input_params)
        }
        param_values.update(
            {
                weight_param: np.full(num_samples, weights[j])
                for j, weight_param in enumerate(self._weight_params)
            }
        )

        converted_op = self._sampler.convert(self._gradient_circuit, param_values)
        # if statement is a workaround for https://github.com/Qiskit/qiskit-terra/issues/7608
        if len(converted_op.parameters) > 0:
            # create a list of parameter bindings, each element corresponds to a sample in the dataset
            # NOTE(review): the comprehension variable ``param_values`` shadows the
            # outer dict of the same name; the outer dict is read before the
            # shadowing starts, so this works, but it is fragile -- consider renaming
            param_bindings = [
                {param: param_values[i] for param, param_values in param_values.items()}
                for i in range(num_samples)
            ]

            grad = []
            # iterate over gradient vectors and bind the correct leftover parameters
            for g_i, param_i in zip(converted_op, param_bindings):
                # bind or re-bind remaining values and evaluate the gradient
                grad.append(g_i.bind_parameters(param_i).eval())
        else:
            grad = converted_op.eval()

        # when input gradients are requested, grad holds input entries first,
        # then weight entries; otherwise only weight entries
        if self._input_gradients:
            num_grad_vars = self._num_inputs + self._num_weights
        else:
            num_grad_vars = self._num_weights

        # construct gradients
        for sample in range(num_samples):
            for i in range(num_grad_vars):
                coo_grad = coo_matrix(grad[sample][i])  # this works for sparse and dense case

                # get index for input or weights gradients
                if self._input_gradients:
                    grad_index = i if i < self._num_inputs else i - self._num_inputs
                else:
                    grad_index = i

                for _, k, val in zip(coo_grad.row, coo_grad.col, coo_grad.data):
                    # interpret integer and construct key
                    key = self._interpret(k)
                    if isinstance(key, Integral):
                        key = (sample, int(key), grad_index)
                    else:
                        # if key is an array-type, cast to hashable tuple
                        key = tuple(cast(Iterable[int], key))
                        key = (sample, *key, grad_index)

                    # store value for inputs or weights gradients
                    # (real part only; gradients may carry spurious imaginary parts)
                    if self._input_gradients:
                        # we compute input gradients first
                        if i < self._num_inputs:
                            input_grad[key] += np.real(val)
                        else:
                            weights_grad[key] += np.real(val)
                    else:
                        weights_grad[key] += np.real(val)
        # end of for each sample

        # DOK is build-only; convert to COO for the caller
        if self._sparse:
            if self._input_gradients:
                input_grad = input_grad.to_coo()
            weights_grad = weights_grad.to_coo()

        return input_grad, weights_grad
Example #18
0
def test_convert_from_coo(s1):
    """A DOK built from the COO fixture must compare equal to it."""
    converted = DOK(s1)

    assert_eq(s1, converted)