Example #1
    def setUp(self):
        experiment_result_data = ExperimentResultData.from_dict(
            {'counts': {'0x0': 42}})
        experiment_result_data_2 = ExperimentResultData.from_dict(
            {'counts': {'0x1': 42}})
        header_1 = Obj.from_dict({'name': 'Test1'})
        header_2 = Obj.from_dict({'name': 'Test2'})
        self.experiment_result_dictionary_1 = {
            'name': 'Test1',
            'shots': 42,
            'data': experiment_result_data,
            'status': 'DONE',
            'success': True,
            'time_taken': 0.42,
            'header': header_1
        }
        self.experiment_result_dictionary_2 = {
            'name': 'Test2',
            'shots': 23,
            'data': experiment_result_data_2,
            'status': 'DONE',
            'success': True,
            'time_taken': 0.12,
            'header': header_2
        }
        self.experiment_result_1 = ExperimentResult(
            **self.experiment_result_dictionary_1)
        self.experiment_result_2 = ExperimentResult(
            **self.experiment_result_dictionary_2)

    def test_patternproperties_to_dict(self):
        """Test that a field using the PatternProperties validator produces the correct value."""
        counts_dict = {'0x00': 50, '0x11': 50}
        counts = Obj(**counts_dict)
        histogram = Histogram(counts=counts)
        histogram_dict = histogram.to_dict()
        self.assertEqual(histogram_dict, {'counts': counts_dict})
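
For context, a minimal sketch of how ExperimentResult fixtures like the ones built in setUp could be wrapped into a full Result and queried; this assumes the validation-based qiskit.result.Result API of the same Qiskit generation, and the backend metadata values are placeholders.

from qiskit.result import Result

def _wrap_in_result(experiment_result_1, experiment_result_2):
    # Backend metadata below is made up; only `results` matters for this sketch.
    result = Result(backend_name='fake_backend', backend_version='0.0.1',
                    qobj_id='qobj-0', job_id='job-0', success=True,
                    results=[experiment_result_1, experiment_result_2])
    # With a header carrying only 'name', the hex key '0x0' is rendered as '0',
    # so this is expected to return {'0': 42}.
    return result.get_counts('Test1')
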
Example #3
def marginal_counts(result, indices=None):
    """Marginalize counts from an experiment over some indices of interest.

    Args:
        result (dict or Result): result to be marginalized
            (a Result object or a dict of counts).
        indices (list(int) or None): The bit positions of interest
            to marginalize over. If None, do not marginalize at all.

    Returns:
        Result or dict[str:int]: a dictionary with the observed counts,
            marginalized to only account for frequency of observations
            of bits of interest.

    Raises:
        QiskitError: in case of invalid indices to marginalize over.
    """
    from qiskit.result.result import Result
    from qiskit.result.postprocess import _bin_to_hex
    if isinstance(result, Result):
        for i, experiment_result in enumerate(result.results):
            counts = result.get_counts(i)
            new_counts = _marginalize(counts, indices)
            new_counts_hex = {}
            for k, v in new_counts.items():
                new_counts_hex[_bin_to_hex(k)] = v
            experiment_result.data.counts = Obj(**new_counts_hex)
            experiment_result.header.memory_slots = len(indices)
    else:
        counts = result
        new_counts = _marginalize(counts, indices)
        result = new_counts

    return result
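
A minimal usage sketch of marginal_counts on a plain counts dictionary; the explicit indices and the expected output assume Qiskit's little-endian bit indexing, where index 0 is the rightmost bit.

# Marginalize a 2-bit counts dictionary onto bit 0 (the rightmost bit).
counts = {'00': 4, '01': 6, '10': 3, '11': 7}
marginal = marginal_counts(counts, indices=[0])
# Expected: {'0': 7, '1': 13} -- '00' and '10' collapse onto '0', '01' and '11' onto '1'.
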
    @staticmethod
    def __convert_histogram(result: Dict[str, Any],
                            measurements: Dict[str, Any]) -> Obj:
        """ The quantum inspire backend always uses full state projection. The SDK user
            can measure not all qubits and change the combined classical bits. This function
            converts the result to a histogram output that represents the probabilities
            measured with the classical bits.

        Args:
            result: The result output from the quantum inspire backend with full-
                    state projection histogram output.
            measurements: Dictionary containing the measured qubits/classical bits map (list) and the
                          number of classical bits (int).

        Returns:
            The resulting full state histogram with probabilities.
        """
        output_histogram_probabilities: Dict[str,
                                             float] = defaultdict(lambda: 0)
        number_of_qubits = result['number_of_qubits']
        state_probability: Dict[str, float] = result['histogram']
        for qubit_register, probability in state_probability.items():
            classical_state_hex = QuantumInspireBackend.__qubit_to_classical_hex(
                qubit_register, measurements, number_of_qubits)
            output_histogram_probabilities[classical_state_hex] += probability

        sorted_histogram_probabilities: List[Tuple[str, float]] = sorted(
            output_histogram_probabilities.items(),
            key=lambda kv: int(kv[0], 16))
        full_state_histogram_obj = OrderedDict(sorted_histogram_probabilities)
        return Obj.from_dict(full_state_histogram_obj)
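
The aggregation-and-sort idiom above, shown standalone with a hypothetical state map standing in for __qubit_to_classical_hex (not the real SDK call):

from collections import OrderedDict, defaultdict

# Hypothetical full-state probabilities and a made-up mapping to classical states.
state_probability = {'0x0': 0.5, '0x1': 0.25, '0x3': 0.25}
mapped_state = {'0x0': '0x0', '0x1': '0x1', '0x3': '0x1'}  # stand-in for __qubit_to_classical_hex

output = defaultdict(float)
for state, probability in state_probability.items():
    output[mapped_state[state]] += probability

# Sort by the integer value of the hexadecimal key, as the method above does.
histogram = OrderedDict(sorted(output.items(), key=lambda kv: int(kv[0], 16)))
# histogram == OrderedDict([('0x0', 0.5), ('0x1', 0.5)])
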
Example #5
    def get_experiment_results(self, qi_job: QIJob) -> List[ExperimentResult]:
        """ Get results from experiments from the Quantum-inspire platform.

        Args:
            qi_job: A job that has already been submitted and whose execution has completed.

        Raises:
            QisKitBackendError: If an error occurred during execution by the backend.

        Returns:
            A list of experiment results, containing the data, execution time, status, etc.
        """
        jobs = self.__api.get_jobs_from_project(int(qi_job.job_id()))
        results = [self.__api.get_result_from_job(job['id']) for job in jobs]
        experiment_results = []
        for result, job in zip(results, jobs):
            if not result.get('histogram', {}):
                raise QisKitBackendError(
                    'Result from backend contains no histogram data!\n{}'.format(result.get('raw_text')))

            user_data = json.loads(str(job.get('user_data')))
            measurements = user_data.pop('measurements')
            histogram_obj, memory_data = self.__convert_result_data(result, measurements)
            full_state_histogram_obj = self.__convert_histogram(result, measurements)
            experiment_result_data = ExperimentResultData(counts=histogram_obj,
                                                          probabilities=full_state_histogram_obj,
                                                          memory=memory_data)
            header = Obj.from_dict(user_data)
            experiment_result_dictionary = {'name': job.get('name'), 'seed': 42, 'shots': job.get('number_of_shots'),
                                            'data': experiment_result_data, 'status': 'DONE', 'success': True,
                                            'time_taken': result.get('execution_time_in_seconds'), 'header': header}
            experiment_results.append(ExperimentResult(**experiment_result_dictionary))
        return experiment_results
    def __convert_result_data(
            self, result: Dict[str, Any],
            measurements: Dict[str, Any]) -> Tuple[Obj, List[str]]:
        """ The quantum inspire backend returns the single shot values as raw data. This function
            converts this list of single shot values to hexadecimal memory data according the Qiskit spec.
            From this memory data the counts histogram is constructed by counting the single shot values.

        Note:
            When shots = 1, the backend returns an empty list as raw_data. In this special case the resulting
            memory data consists of a single value and the count histogram consists of a single instance of this
            value. To determine this value a random float is generated in the range [0, 1). The value taken from
            the probability histogram is the first one whose cumulative probability exceeds this random number.
            Example: the probability histogram is {0x0: 0.2, 0x3: 0.4, 0x5: 0.1, 0x6: 0.3}.
            When random is in the range [0, 0.2) the first value of the probability histogram is taken (0x0).
            When random is in the range [0.2, 0.6) the second value of the probability histogram is taken (0x3).
            When random is in the range [0.6, 0.7) the third value of the probability histogram is taken (0x5).
            When random is in the range [0.7, 1) the last value of the probability histogram is taken (0x6).

        Args:
            result: The result output from the quantum inspire backend with full-
                    state projection histogram output.
            measurements: Dictionary containing the measured qubits/classical bits map (list) and the
                          number of classical bits (int).

        Returns:
            A tuple with the result in two formats: the first element is the histogram with count data,
            the second element is a list of converted hexadecimal memory values, one for each shot.
        """
        memory_data = []
        histogram_data: Dict[str, int] = defaultdict(lambda: 0)
        number_of_qubits: int = result['number_of_qubits']
        raw_data = self.__api.get_raw_data_from_result(result['id'])
        if raw_data:
            for raw_qubit_register in raw_data:
                classical_state_hex = QuantumInspireBackend.__qubit_to_classical_hex(
                    str(raw_qubit_register), measurements, number_of_qubits)
                memory_data.append(classical_state_hex)
            histogram_data = {
                elem: count
                for elem, count in Counter(memory_data).items()
            }
        else:
            state_probabilities = result['histogram']
            random_probability = np.random.rand()
            sum_probability = 0.0
            for qubit_register, probability in state_probabilities.items():
                sum_probability += probability
                if random_probability < sum_probability:
                    classical_state_hex = QuantumInspireBackend.__qubit_to_classical_hex(
                        qubit_register, measurements, number_of_qubits)
                    memory_data.append(classical_state_hex)
                    histogram_data[classical_state_hex] = 1
                    break

        sorted_histogram_data: List[Tuple[str, int]] = sorted(
            histogram_data.items(), key=lambda kv: int(kv[0], 16))
        histogram_obj = OrderedDict(sorted_histogram_data)
        return Obj.from_dict(histogram_obj), memory_data
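
The shots == 1 branch described in the Note boils down to inverse-transform sampling over the probability histogram; a standalone sketch:

import numpy as np

def sample_single_shot(probability_histogram):
    """Return the first state whose cumulative probability exceeds a uniform draw."""
    random_probability = np.random.rand()
    sum_probability = 0.0
    for state, probability in probability_histogram.items():
        sum_probability += probability
        if random_probability < sum_probability:
            return state
    return state  # guard against floating-point round-off

# e.g. sample_single_shot({'0x0': 0.2, '0x3': 0.4, '0x5': 0.1, '0x6': 0.3})
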
    def test_patternproperties_valid(self):
        """Test the PatternProperties validator allowing fine control on keys and values."""
        counts_dict = {'0x00': 50, '0x11': 50}
        counts = Obj(**counts_dict)
        histogram = Histogram(counts=counts)
        self.assertEqual(histogram.counts, counts)

        # From dict
        histogram = Histogram.from_dict({'counts': counts_dict})
        self.assertEqual(histogram.counts, counts)
Example #8
def _generate_experiment_result(qlm_result, head):
    """
    Generates a Qiskit experiment result.

    Args:
        qlm_result: qat.core.wrappers.Result object whose data is aggregated
        head: Header of the experiment

    Returns:
        An ExperimentResult structure.
    """
    samples = [hex(s.state.state) for s in qlm_result.raw_data]
    counts = dict(Counter(samples))
    data = ExperimentResultData.from_dict({"counts": counts})
    return ExperimentResult(
        shots=len(qlm_result.raw_data),
        success=True,
        data=data,
        header=Obj.from_dict(head),
    )
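
A hedged usage sketch with a stand-in object that only mimics the attributes the function actually touches (raw_data[i].state.state); this is not the real qat API.

from types import SimpleNamespace

def _fake_sample(value):
    # Mimics only the .state.state attribute accessed above.
    return SimpleNamespace(state=SimpleNamespace(state=value))

fake_qlm_result = SimpleNamespace(raw_data=[_fake_sample(0), _fake_sample(3), _fake_sample(3)])
exp_result = _generate_experiment_result(fake_qlm_result, {'name': 'bell'})
# exp_result.shots == 3 and exp_result.data.counts should hold {'0x0': 1, '0x3': 2}.
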
Example #9
    def apply(self, raw_data: Result) -> Result:
        """
        Create a new result from the raw_data by converting level 1 data to
        level 2 data.

        Args:
            raw_data: a qiskit.Result containing level 1 measurement data.

        Returns:
            A qiskit.Result in which the level 1 data has been converted to level 2 (counts) data.
        """
        new_results = deepcopy(raw_data)

        to_be_discriminated = []

        # Extract all the meas level 1 data from the Result.
        shots_per_experiment_result = []
        for result in new_results.results:
            if result.meas_level == 1:
                shots_per_experiment_result.append(result.shots)
                to_be_discriminated.append(result)

        new_results.results = to_be_discriminated

        x_data = self.discriminator.get_xdata(new_results, 2)
        y_data = self.discriminator.discriminate(x_data)

        start = 0
        for idx, n_shots in enumerate(shots_per_experiment_result):
            memory = y_data[start:(start + n_shots)]
            counts = Obj.from_dict(self.count(memory))
            new_results.results[idx].data = ExperimentResultData(counts=counts,
                                                                 memory=memory)
            start += n_shots

        for result in new_results.results:
            result.meas_level = 2

        return new_results
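
The per-experiment slicing of y_data, shown in isolation with hypothetical labels; Counter stands in for self.count:

from collections import Counter

y_data = ['0x1', '0x0', '0x1', '0x1', '0x0', '0x0']   # one discriminated label per shot
shots_per_experiment_result = [4, 2]

start = 0
for n_shots in shots_per_experiment_result:
    memory = y_data[start:start + n_shots]
    print(dict(Counter(memory)))   # {'0x1': 3, '0x0': 1}, then {'0x0': 2}
    start += n_shots
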
Example #10
    def apply(self, raw_data, method='least_squares'):
        """
        Apply the calibration matrices to results

        Args:
            raw_data: The data to be corrected. Can be in a number of forms:
                a counts dictionary from results.get_counts,
                or a qiskit Result

            method (str): fitting method. If None, then least_squares is used.
                'pseudo_inverse': direct inversion of the cal matrices
                'least_squares': constrained to have physical probabilities

        Returns:
            The corrected data in the same form as raw_data
        """

        all_states = count_keys(self.nqubits)
        num_of_states = 2**self.nqubits

        # check forms of raw_data
        if isinstance(raw_data, dict):
            # counts dictionary
            # convert to list
            raw_data2 = [np.zeros(num_of_states, dtype=float)]
            for state, count in raw_data.items():
                stateidx = int(state, 2)
                raw_data2[0][stateidx] = count

        elif isinstance(raw_data, qiskit.result.result.Result):

            # extract out all the counts, re-call the function with the
            # counts and push back into the new result
            new_result = deepcopy(raw_data)

            for resultidx, _ in enumerate(raw_data.results):
                new_counts = self.apply(raw_data.get_counts(resultidx),
                                        method=method)
                new_result.results[resultidx].data.counts = \
                    Obj(**new_counts)

            return new_result

        else:
            raise QiskitError("Unrecognized type for raw_data.")

        if method == 'pseudo_inverse':
            pinv_cal_matrices = []
            for cal_mat in self._cal_matrices:
                pinv_cal_matrices.append(la.pinv(cal_mat))

        # Apply the correction
        for data_idx, _ in enumerate(raw_data2):

            if method == 'pseudo_inverse':
                inv_mat_dot_raw = np.zeros([num_of_states], dtype=float)
                for state1_idx, state1 in enumerate(all_states):
                    for state2_idx, state2 in enumerate(all_states):
                        if raw_data2[data_idx][state2_idx] == 0:
                            continue

                        product = 1.
                        end_index = self.nqubits
                        for p_ind, pinv_mat in enumerate(pinv_cal_matrices):

                            start_index = end_index - \
                                self._qubit_list_sizes[p_ind]

                            state1_as_int = \
                                self._indices_list[p_ind][
                                    state1[start_index:end_index]]

                            state2_as_int = \
                                self._indices_list[p_ind][
                                    state2[start_index:end_index]]

                            end_index = start_index
                            product *= \
                                pinv_mat[state1_as_int][state2_as_int]
                            if product == 0:
                                break
                        inv_mat_dot_raw[state1_idx] += \
                            (product * raw_data2[data_idx][state2_idx])
                raw_data2[data_idx] = inv_mat_dot_raw

            elif method == 'least_squares':

                def fun(x):
                    mat_dot_x = np.zeros([num_of_states], dtype=float)
                    for state1_idx, state1 in enumerate(all_states):
                        mat_dot_x[state1_idx] = 0.
                        for state2_idx, state2 in enumerate(all_states):
                            if x[state2_idx] != 0:
                                product = 1.
                                end_index = self.nqubits
                                for c_ind, cal_mat in \
                                        enumerate(self._cal_matrices):

                                    start_index = end_index - \
                                        self._qubit_list_sizes[c_ind]

                                    state1_as_int = \
                                        self._indices_list[c_ind][
                                            state1[start_index:end_index]]

                                    state2_as_int = \
                                        self._indices_list[c_ind][
                                            state2[start_index:end_index]]

                                    end_index = start_index
                                    product *= \
                                        cal_mat[state1_as_int][state2_as_int]
                                    if product == 0:
                                        break
                                mat_dot_x[state1_idx] += \
                                    (product * x[state2_idx])
                    return sum((raw_data2[data_idx] - mat_dot_x)**2)

                x0 = np.random.rand(num_of_states)
                x0 = x0 / sum(x0)
                nshots = sum(raw_data2[data_idx])
                cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
                bnds = tuple((0, nshots) for x in x0)
                res = minimize(fun,
                               x0,
                               method='SLSQP',
                               constraints=cons,
                               bounds=bnds,
                               tol=1e-6)
                raw_data2[data_idx] = res.x

            else:
                raise QiskitError("Unrecognized method.")

        # convert back into a counts dictionary
        new_count_dict = {}
        for state_idx, state in enumerate(all_states):
            if raw_data2[0][state_idx] != 0:
                new_count_dict[state] = raw_data2[0][state_idx]

        return new_count_dict
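
The 'pseudo_inverse' branch reduces, for a single one-qubit calibration matrix, to multiplying the raw count vector by the matrix pseudo-inverse; a standalone sketch with illustrative numbers:

import numpy as np

# Illustrative calibration matrix: A[i][j] = P(measure state i | prepared state j).
cal_matrix = np.array([[0.95, 0.10],
                       [0.05, 0.90]])
raw_counts = np.array([480.0, 520.0])          # counts for states '0' and '1'

mitigated_counts = np.linalg.pinv(cal_matrix) @ raw_counts
print(dict(zip(['0', '1'], mitigated_counts)))
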
Example #11
    authentication = get_authentication()
    QI.set_authentication(authentication)
    qi_backend = QI.get_backend('QX single-node simulator')

    q = QuantumRegister(2)
    b = ClassicalRegister(2)
    circuit = QuantumCircuit(q, b)

    circuit.h(q[0])
    circuit.cx(q[0], q[1])
    circuit.measure(q, b)

    qi_job = execute(circuit, backend=qi_backend, shots=256)
    qi_result = qi_job.result()
    histogram = qi_result.get_counts(circuit)
    print('\nState\tCounts')
    for state, counts in histogram.items():
        print('{0}\t{1}'.format(state, counts))
    # Print the full state probabilities histogram
    probabilities_histogram = Obj.to_dict(
        qi_result.data(circuit)['probabilities'])
    print('\nState\tProbabilities')
    # Format the hexadecimal key to a zero-padded binary string with length of the number of classical bits
    for key, val in probabilities_histogram.items():
        print('{0}\t{1}'.format(format(int(key, 16), '0{}b'.format(b.size)), val))
Example #12
    def apply(self, raw_data, method='least_squares'):
        """
        Apply the calibration matrix to results

        Args:
            raw_data: The data to be corrected. Can be in a number of forms.
                Form1: a counts dictionary from results.get_counts
                Form2: a list of counts of length==len(state_labels)
                Form3: a list of counts of length==M*len(state_labels) where M
                    is an integer (e.g. for use with the tomography data)
                Form4: a qiskit Result

            method (str): fitting method. If None, then least_squares is used.
                'pseudo_inverse': direct inversion of the A matrix
                'least_squares': constrained to have physical probabilities

        Returns:
            The corrected data in the same form as raw_data

        Additional Information:

            e.g.
            calcircuits, state_labels = complete_measurement_calibration(
                qiskit.QuantumRegister(5))
            job = qiskit.execute(calcircuits)
            meas_fitter = CompleteMeasFitter(job.result(),
                                            state_labels)
            meas_filter = MeasurementFilter(meas_fitter.cal_matrix)

            job2 = qiskit.execute(my_circuits)
            result2 = job2.result()

            error_mitigated_counts = meas_filter.apply(
                result2.get_counts('circ1'))

        """

        # check forms of raw_data
        if isinstance(raw_data, dict):
            # counts dictionary
            data_format = 0
            # convert to form2
            raw_data2 = [np.zeros(len(self._state_labels), dtype=float)]
            for stateidx, state in enumerate(self._state_labels):
                raw_data2[0][stateidx] = raw_data.get(state, 0)

        elif isinstance(raw_data, list):
            size_ratio = len(raw_data) / len(self._state_labels)
            if len(raw_data) == len(self._state_labels):
                data_format = 1
                raw_data2 = [raw_data]
            elif int(size_ratio) == size_ratio:
                data_format = 2
                size_ratio = int(size_ratio)
                # make the list into chunks the size of state_labels for easier
                # processing
                raw_data2 = np.zeros([size_ratio, len(self._state_labels)])
                for i in range(size_ratio):
                    raw_data2[i][:] = raw_data[i *
                                               len(self._state_labels):(i +
                                                                        1) *
                                               len(self._state_labels)]
            else:
                raise QiskitError("Data list is not an integer multiple "
                                  "of the number of calibrated states")

        elif isinstance(raw_data, qiskit.result.result.Result):

            # extract out all the counts, re-call the function with the
            # counts and push back into the new result
            new_result = deepcopy(raw_data)

            new_counts_list = parallel_map(
                self._apply_correction,
                [resultidx for resultidx, _ in enumerate(raw_data.results)],
                task_args=(raw_data, method))

            for resultidx, new_counts in new_counts_list:
                new_result.results[resultidx].data.counts = \
                    Obj(**new_counts)

            return new_result

        else:
            raise QiskitError("Unrecognized type for raw_data.")

        if method == 'pseudo_inverse':
            pinv_cal_mat = la.pinv(self._cal_matrix)

        # Apply the correction
        for data_idx, _ in enumerate(raw_data2):

            if method == 'pseudo_inverse':
                raw_data2[data_idx] = np.dot(pinv_cal_mat, raw_data2[data_idx])

            elif method == 'least_squares':
                nshots = sum(raw_data2[data_idx])

                def fun(x):
                    return sum(
                        (raw_data2[data_idx] - np.dot(self._cal_matrix, x))**2)

                x0 = np.random.rand(len(self._state_labels))
                x0 = x0 / sum(x0)
                cons = ({'type': 'eq', 'fun': lambda x: nshots - sum(x)})
                bnds = tuple((0, nshots) for x in x0)
                res = minimize(fun,
                               x0,
                               method='SLSQP',
                               constraints=cons,
                               bounds=bnds,
                               tol=1e-6)
                raw_data2[data_idx] = res.x

            else:
                raise QiskitError("Unrecognized method.")

        if data_format == 2:
            # flatten back out the list
            raw_data2 = raw_data2.flatten()

        elif data_format == 0:
            # convert back into a counts dictionary
            new_count_dict = {}
            for stateidx, state in enumerate(self._state_labels):
                if raw_data2[0][stateidx] != 0:
                    new_count_dict[state] = raw_data2[0][stateidx]

            raw_data2 = new_count_dict
        else:
            # TODO: should probably change to:
            # raw_data2 = raw_data2[0].tolist()
            raw_data2 = raw_data2[0]
        return raw_data2
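
The 'least_squares' branch above, restated for a single calibration matrix: find non-negative counts x summing to nshots that minimize ||raw - A·x||^2. A standalone sketch with illustrative numbers:

import numpy as np
from scipy.optimize import minimize

cal_matrix = np.array([[0.95, 0.10],
                       [0.05, 0.90]])            # illustrative values
raw = np.array([480.0, 520.0])
nshots = raw.sum()

def objective(x):
    return np.sum((raw - cal_matrix @ x) ** 2)

x0 = np.random.rand(len(raw))
x0 /= x0.sum()
constraints = {'type': 'eq', 'fun': lambda x: nshots - sum(x)}
bounds = tuple((0, nshots) for _ in x0)
solution = minimize(objective, x0, method='SLSQP',
                    constraints=constraints, bounds=bounds, tol=1e-6)
print(solution.x)   # mitigated counts for states '0' and '1'
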
    def run_experiment(self, experiment):
        """Run an experiment (circuit) and return a single experiment result.

        Args:
            experiment (QobjExperiment): experiment from qobj experiments list

        Returns:
            ExperimentResult: The result of running the experiment.

        Raises:
            QCGPUSimulatorError: If the number of qubits is too large, or another
                error occurs during execution.
        """
        self._number_of_qubits = experiment.header.n_qubits
        self._statevector = 0

        start = time.time()

        try:
            sim = qcgpu.State(self._number_of_qubits)
        except OverflowError:
            raise QCGPUSimulatorError('too many qubits')

        for operation in experiment.instructions:
            name = operation.name
            qubits = operation.qubits
            params = [float(param)
                      for param in getattr(operation, 'params', [])]

            if name == 'id':
                logger.info('Identity gates are ignored.')
            elif name == 'barrier':
                logger.info('Barrier gates are ignored.')
            elif name == 'u3':
                sim.u(qubits[0], *params)
            elif name == 'u2':
                sim.u2(qubits[0], *params)
            elif name == 'u1':
                sim.u1(qubits[0], *params)
            elif name == 'cx':
                sim.cx(*qubits)
            elif name == 'h':
                sim.h(qubits[0])
            elif name == 'x':
                sim.x(qubits[0])
            elif name == 'y':
                sim.y(qubits[0])
            elif name == 'z':
                sim.z(qubits[0])
            elif name == 's':
                sim.s(qubits[0])
            elif name == 't':
                sim.t(qubits[0])

        amps = [complex(z)
                for z in sim.amplitudes().round(self._chop_threshold)]

        end = time.time()

        # amps = np.stack((amps.real, amps.imag), axis=-1)

        return ExperimentResult(
            name=experiment.header.name,
            shots=1,
            success=True,
            data=ExperimentResultData(statevector=amps),
            time_taken=(end - start),
            header=Obj(name=experiment.header.name))
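
The ExperimentResult above carries raw statevector amplitudes rather than counts; a hypothetical follow-up showing how such amplitudes translate into measurement probabilities (not part of the snippet above):

import numpy as np

amplitudes = np.array([1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)], dtype=complex)
probabilities = np.abs(amplitudes) ** 2     # [0.5, 0.0, 0.0, 0.5]
counts_1024_shots = {format(i, '02b'): int(round(p * 1024))
                     for i, p in enumerate(probabilities) if p > 0}
# {'00': 512, '11': 512}
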