Code example #1
    def _cluster_to_paths_and_defect(cls, code, cluster):
        """Splits cluster into paths of X and Z plaquette indices and a non-fusible Y-defect (if present).

        Note:

        * By design, X and Z path lengths have the same parity.
        * If X and Z path lengths are odd then a non-fusible Y-defect is present.
        * If present, the Y-defect is selected to consist of the final X and Z path indices.

        :param code: Rotated planar code.
        :type code: RotatedPlanarCode
        :param cluster: Cluster (directed path of indices) as [(t1, x1, y1), (t2, x2, y2), ..., (tn, xn, yn)].
        :type cluster: list of (int, int, int)
        :return: Path of X-indices, Path of Z-indices, Y-defect as (x_index, z_index) or None.
        :rtype: list of (int, int, int), list of (int, int, int), ((int, int, int), (int, int, int))
        """
        # split into x and z indices
        x_indices = [(t, x, y) for t, x, y in cluster if code.is_x_plaquette((x, y))]
        z_indices = [(t, x, y) for t, x, y in cluster if code.is_z_plaquette((x, y))]
        # sanity: check X and Z path lengths have same parity
        if len(x_indices) % 2 != len(z_indices) % 2:
            raise QecsimError('Cluster has non-fused non-Y defect.')
        # if path lengths odd, choose Y-defect to be final indices
        if len(x_indices) % 2:
            return x_indices[:-1], z_indices[:-1], (x_indices[-1], z_indices[-1])
        else:
            return x_indices, z_indices, None
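A quick usage sketch (assuming this classmethod lives on qecsim.models.rotatedplanar.RotatedPlanarSMWPMDecoder, as the RotatedPlanarCode parameter suggests; in practice the cluster comes from _clusters rather than being hand-built):

from qecsim.models.rotatedplanar import RotatedPlanarCode, RotatedPlanarSMWPMDecoder

code = RotatedPlanarCode(5, 5)
# toy cluster of plaquette indices as (t, x, y); any 4-index cluster has
# matching X/Z path parities, so the sanity check passes
cluster = [(0, 0, 0), (0, 1, 0), (0, 1, 1), (0, 0, 1)]
x_path, z_path, y_defect = RotatedPlanarSMWPMDecoder._cluster_to_paths_and_defect(code, cluster)
assert len(x_path) % 2 == len(z_path) % 2  # parities always match by design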
Code example #2
    def __init__(self,
                 success=None,
                 logical_commutations=None,
                 recovery=None,
                 custom_values=None):
        """
        Initialise new decode result.

        :param success: If the decoding was successful (default=None).
        :type success: bool
        :param logical_commutations: Logical commutations as binary vector or None (default=None).
        :type logical_commutations: numpy.array (1d)
        :param recovery: Recovery operation as binary symplectic vector (default=None).
        :type recovery: numpy.array (1d)
        :param custom_values: Custom values as numeric vector or None (default=None).
        :type custom_values: numpy.array (1d)
        :raises QecsimError: If both success and recovery are unspecified (i.e. None).
        """
        if success is None and recovery is None:
            raise QecsimError(
                'At least one of success or recovery must be specified.')
        self.success = success
        self.logical_commutations = logical_commutations
        self.recovery = recovery
        self.custom_values = custom_values
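A minimal sketch of the constructor contract (assuming DecodeResult and QecsimError are importable from qecsim.model and qecsim.error respectively):

import numpy as np
from qecsim.error import QecsimError
from qecsim.model import DecodeResult

result = DecodeResult(success=True)  # explicit success flag, no recovery
result = DecodeResult(recovery=np.array([0, 1, 1, 0]))  # success left to the app to evaluate
try:
    DecodeResult()  # neither success nor recovery is invalid
except QecsimError:
    pass  # 'At least one of success or recovery must be specified.'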
Code example #3
    def validate(self):
        r"""
        Perform various sanity checks.

        Sanity checks:

        * :math:`stabilizers \odot stabilizers^T = 0`
        * :math:`stabilizers \odot logicals^T = 0`
        * :math:`logicals \odot logicals^T = \Lambda`

        See :func:`qecsim.paulitools.bsp` for definition of :math:`\odot` and :math:`\Lambda`.

        :raises QecsimError: if the stabilizers or logicals fail the sanity checks.
        """
        if not np.all(pt.bsp(self.stabilizers, self.stabilizers.T) == 0):
            raise QecsimError('Stabilizers do not mutually commute.')
        if not np.all(pt.bsp(self.stabilizers, self.logicals.T) == 0):
            raise QecsimError('Stabilizers do not commute with logicals.')
        # twisted identity with shape (len(logicals), len(logicals))
        i1, i2 = np.hsplit(np.identity(len(self.logicals), dtype=int), 2)
        expected = np.hstack((i2, i1))
        if not np.array_equal(pt.bsp(self.logicals, self.logicals.T),
                              expected):
            raise QecsimError('Logicals do not commute as expected.')
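For a single qubit, the twisted identity simply says that logical X and logical Z anticommute with each other and commute with themselves. A sketch using qecsim.paulitools.bsp, as referenced in the docstring:

import numpy as np
from qecsim import paulitools as pt

logical_x = np.array([1, 0])  # X on one qubit in binary symplectic form (X-part | Z-part)
logical_z = np.array([0, 1])  # Z on one qubit
logicals = np.stack((logical_x, logical_z))
expected = np.array([[0, 1],
                     [1, 0]])  # twisted identity for one X/Z pair
assert np.array_equal(pt.bsp(logicals, logicals.T), expected)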
Code example #4
File: _sliceerrormodel.py Project: silky/qecsim
 def _neg_lim(cls, lim):
     """params: lim:np.array(3d). return:lim:np.array(3d)."""
     pX, pY, pZ, pO, pC = map(np.array, ((1, 0, 0), (0, 1, 0), (0, 0, 1),
                                         (0, 0, 0), (1 / 3, 1 / 3, 1 / 3)))
     pL = lim
     # find opposing limit through center point
     for p1, p2, p3 in ((pX, pY, pZ), (pY, pZ, pX), (pZ, pX, pY)):
         if not np.dot(pL, p1):  # lim lies in 23-plane
             # if lim closer to p2 than p3
             if np.linalg.norm(p2 - pL) <= np.linalg.norm(p3 - pL):
                 pN = p2  # opposing limit at intersect with 31-plane
             else:
                 pN = p3  # opposing limit at intersect with 12-plane
             # return normalized line-plane intersection as the opposing limit
             return cls._normalize(
                 cls._line_plane_intersect(pN, pO, pC - pL, pL))
     raise QecsimError('Failed to find negative-limit.')
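The helpers _normalize and _line_plane_intersect are not shown here. A plausible standalone sketch of the intersection step, with the argument order (line point 1, line point 2, plane normal, plane point) inferred from the call site above rather than taken from qecsim:

import numpy as np

def line_plane_intersect(line_p1, line_p2, plane_normal, plane_point):
    """Intersect the line through line_p1 and line_p2 with the given plane."""
    direction = line_p2 - line_p1
    denom = np.dot(plane_normal, direction)
    if np.isclose(denom, 0):
        raise ValueError('Line is parallel to the plane.')
    t = np.dot(plane_normal, plane_point - line_p1) / denom
    return line_p1 + t * direction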
Code example #5
    def _clusters(cls, matches):
        """List of clusters from the given matches where each cluster is a directed path of plaquette indices.

        Notes:

        * Matches between nodes with the same index are removed.
        * For consistency in testing, the clusters are ordered by their SW corner index, and the directed path of each
          cluster starts in the SW corner of the cluster and traverses clockwise (the final index does not repeat the
          first index).

        Algorithm: see class doc.

        :param matches: Matches between index nodes as ((t, x, y), is_row).
        :type matches: set of (((int, int, int), bool), ((int, int, int), bool))
        :return: List of clusters (directed paths of indices) as [[(t1, x1, y1), (t2, x2, y2), ..., (tn, xn, yn)], ...]
        :rtype: list of list of (int, int, int)
        :raises ValueError: If matches are between rows and columns (except virtual nodes at same index).
        """
        # build row and col mates maps
        row_mates, col_mates = {}, {}
        for (a_index, a_is_row), (b_index, b_is_row) in matches:
            # skip if nodes have same index
            if a_index == b_index:
                continue
            # we should not match between rows and columns
            if a_is_row != b_is_row:
                raise ValueError('Matching unsupported between rows and columns (except virtual nodes at same index).')
            # add match and reverse match to appropriate mates map
            mates = row_mates if a_is_row else col_mates
            mates[a_index] = b_index  # add match
            mates[b_index] = a_index  # add reverse match
        # for consistency in testing, sort column mates by index so that each cluster begins in the SW corner of the
        # SW-most cluster and traverses clockwise.
        col_mates = OrderedDict(sorted(col_mates.items()))
        # build list of clusters
        clusters = []
        # loop until all column mates processed
        while col_mates:
            # single cluster as (t1, x1, y1) -> (t2, x2, y2) -> ... -> (tn, xn, yn)
            cluster = []
            # pop start_index and next_index (column)
            start_index, next_index = col_mates.popitem(last=False)
            cluster.append(start_index)  # add start_index (column)
            # loop until cluster processed
            while True:
                try:
                    cluster.append(next_index)  # add next_index (column)
                    del col_mates[next_index]  # delete reverse column match
                    next_index = row_mates.pop(next_index)  # find next_index (row)
                    cluster.append(next_index)  # add next_index (row)
                    del row_mates[next_index]  # delete reverse row match
                    next_index = col_mates.pop(next_index)  # find next_index (column)
                except KeyError:
                    break  # break when cluster processed
            # sanity: cluster should be closed loop
            if cluster[0] != cluster[-1]:
                raise QecsimError('Cluster is not a closed loop.')
            # remove redundant final index of closed loop
            cluster.pop()
            # sanity: cluster length should be even
            if len(cluster) % 2:
                raise QecsimError('Cluster length is not even.')
            # add cluster to list of clusters
            clusters.append(cluster)
        # sanity: all row_mates should be processed when all col_mates have been processed
        if row_mates:
            raise QecsimError('Some row matches unclustered after all column matches clustered.')
        return clusters
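A toy trace (assuming _clusters is a classmethod on qecsim.models.rotatedtoric.RotatedToricSMWPMDecoder, as the decode_ftp example below suggests): four nodes, matched into a square by alternating column and row matches, close into a single cluster.

from qecsim.models.rotatedtoric import RotatedToricSMWPMDecoder

a, b, c, d = (0, 0, 0), (0, 0, 2), (0, 2, 2), (0, 2, 0)
matches = {
    ((a, False), (b, False)),  # column match a-b
    ((c, False), (d, False)),  # column match c-d
    ((b, True), (c, True)),  # row match b-c
    ((d, True), (a, True)),  # row match d-a
}
print(RotatedToricSMWPMDecoder._clusters(matches))
# [[(0, 0, 0), (0, 0, 2), (0, 2, 2), (0, 2, 0)]]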
Code example #6
File: app_defp.py Project: dua-arpit/qecsim
def _run_defp(mode,
              code,
              time_steps,
              error_model,
              decoder,
              error_probability,
              perm_rates,
              code_name,
              layout,
              measurement_error_probability,
              max_runs=None,
              max_failures=None,
              random_seed=None):
    """Implements run and run_ftp functions"""

    # assumptions
    assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp'

    # derived defaults
    if max_runs is None and max_failures is None:
        max_runs = 1

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(
            'run: code={}, time_steps={}, error_model={}, decoder={}, error_probability={}, '
            'measurement_error_probability={}, max_runs={}, max_failures={}, random_seed={}.'
            .format(code, time_steps, error_model, decoder, error_probability,
                    measurement_error_probability, max_runs, max_failures,
                    random_seed))

    wall_time_start = time.perf_counter()

    runs_data = {
        'code': code.label,
        'n_k_d': code.n_k_d,
        'time_steps': time_steps,
        'error_model': error_model.label,
        'decoder': decoder.label,
        'error_probability': error_probability,
        'measurement_error_probability': measurement_error_probability,
        'n_run': 0,
        'n_success': 0,
        'n_fail': 0,
        'n_logical_commutations': None,
        'custom_totals': None,
        'error_weight_total': 0,
        'error_weight_pvar': 0.0,
        'logical_failure_rate_samples': 0.0,
        'logical_failure_rate_samples_errorbar': 0.0,
        'logical_failure_rate': 0.0,
        'logical_failure_rate_errorbar': 0.0,
        'physical_error_rate': 0.0,
        'wall_time': 0.0,
    }

    # if random_seed is None, unpredictable entropy is pulled from the OS, which we log for reproducibility
    seed_sequence = np.random.SeedSequence(random_seed)
    logger.info('run: np.random.SeedSequence.entropy={}'.format(
        seed_sequence.entropy))
    rng = np.random.default_rng(seed_sequence)

    array_sum_keys = (
        'n_logical_commutations',
        'custom_totals',
    )  # list of array sum keys
    array_val_keys = (
        'logical_commutations',
        'custom_values',
    )  # list of array value keys
    error_weights = []  # list of error_weight from current run
    # the fixed-size sample arrays below require a known number of runs
    if max_runs is None:
        raise QecsimError('max_runs must be specified (max_failures alone is not supported).')
    success_list = np.zeros(max_runs)
    max_coset_p_list = np.zeros(max_runs)

    perm_mat, perm_vec = deform_matsvecs(code, decoder, error_model,
                                         perm_rates, code_name, layout)

    while ((max_runs is None or runs_data['n_run'] < max_runs)
           and (max_failures is None or runs_data['n_fail'] < max_failures)):
        # run simulation
        data = _run_once_defp(mode, code, time_steps, error_model, decoder,
                              error_probability, perm_rates, perm_mat,
                              perm_vec, code_name, layout,
                              measurement_error_probability, rng)
        # increment run counts
        success_list[runs_data['n_run']] = data['success']
        max_coset_p_list[runs_data['n_run']] = data['max_coset_p']
        runs_data['n_run'] += 1
        if data['success']:
            runs_data['n_success'] += 1
        else:
            runs_data['n_fail'] += 1
        # sum arrays
        for array_sum_key, array_val_key in zip(array_sum_keys,
                                                array_val_keys):
            array_sum = runs_data[array_sum_key]  # extract sum
            array_val = data[array_val_key]  # extract val
            if runs_data['n_run'] == 1 and array_val is not None:
                # first run, so initialize sum if val is not None
                array_sum = np.zeros_like(array_val)
            if array_sum is None and array_val is None:  # both None
                array_sum = None
            elif (array_sum is None or array_val is None
                  or array_sum.shape != array_val.shape):  # mismatch
                raise QecsimError(
                    'Mismatch between {} values to sum: {}, {}'.format(
                        array_val_key, array_sum, array_val))
            else:  # match, so sum
                array_sum = array_sum + array_val
            runs_data[array_sum_key] = array_sum  # update runs_data
        # append error weight
        error_weights.append(data['error_weight'])

    # error bar in logical failure rate
    runs_data['logical_failure_rate_samples_errorbar'] = (
        success_list.std() / np.sqrt(max_runs))
    runs_data['logical_failure_rate_errorbar'] = (
        max_coset_p_list.std() / np.sqrt(max_runs))
    runs_data['logical_failure_rate'] = 1 - max_coset_p_list.mean()
    # error weight statistics
    runs_data['error_weight_total'] = sum(error_weights)
    runs_data['error_weight_pvar'] = statistics.pvariance(error_weights)

    # rate statistics
    _add_rate_statistics(runs_data)

    # convert sum arrays to tuples if not None
    for array_sum_key in array_sum_keys:
        if runs_data[array_sum_key] is not None:
            runs_data[array_sum_key] = tuple(runs_data[array_sum_key].tolist())

    # record wall_time
    runs_data['wall_time'] = time.perf_counter() - wall_time_start

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: aggregated_data={}'.format(runs_data))

    return [
        runs_data['logical_failure_rate'],
        runs_data['logical_failure_rate_errorbar']
    ]
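The error bars above are standard errors of the mean: sample standard deviation divided by the square root of the number of runs. A standalone numpy illustration:

import numpy as np

samples = np.array([1, 0, 1, 1, 0, 1, 1, 0], dtype=float)  # e.g. per-run success flags
mean = samples.mean()
errorbar = samples.std() / np.sqrt(len(samples))
print('{:.3f} +/- {:.3f}'.format(mean, errorbar))  # 0.625 +/- 0.171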
Code example #7
    def decode_ftp(self, code, time_steps, syndrome,
                   error_model=BitPhaseFlipErrorModel(),  # noqa: B008
                   error_probability=0.1,
                   measurement_error_probability=0.1,
                   step_measurement_errors=None, **kwargs):
        """
        See :meth:`qecsim.model.DecoderFTP.decode_ftp`

        Note:

        * The optional keyword parameters ``error_model`` and ``error_probability`` are used to determine the prior
          probability distribution for use in the decoding algorithm. Any provided error model must implement
          :meth:`~qecsim.model.ErrorModel.probability_distribution`.
        * This method always returns a ``DecodeResult`` with the following parameters::

            DecodeResult(
                success=None,  # None indicates to be evaluated by app
                               # False indicates time-like logical failure (overrides evaluation by app)
                logical_commutations=None,  # None indicates to be evaluated by app
                recovery=np.array(...),  # recovery operation (used by app to evaluate success and logical_commutations)
                custom_values=np.array([0, 0]),  # [0, 0] no time-like logical failure
                                                 # [1, 0] time-like logical failure through X plaquettes
                                                 # [0, 1] time-like logical failure through Z plaquettes
                                                 # [1, 1] time-like logical failure through both X and Z plaquettes
            )

        :param code: Rotated toric code.
        :type code: RotatedToricCode
        :param time_steps: Number of time steps.
        :type time_steps: int
        :param syndrome: Syndrome as binary array.
        :type syndrome: numpy.array (2d)
        :param error_model: Error model. (default=BitPhaseFlipErrorModel())
        :type error_model: ErrorModel
        :param error_probability: Overall probability of an error on a single qubit. (default=0.1)
        :type error_probability: float
        :param measurement_error_probability: Overall probability of an error on a single measurement. (default=0.1)
        :type measurement_error_probability: float
        :param step_measurement_errors: List of measurement error bits applied to step-syndromes, indexed by time-step.
        :type step_measurement_errors: list of numpy.array (1d)
        :return: Decode result.
        :rtype: DecodeResult
        """
        # deduce bias (potentially overridden by eta)
        bias = self._bias(error_model)

        # IDENTITY RECOVERY AND T-PARITIES
        recovery = code.new_pauli().to_bsf()
        recovery_x_tp = 0
        recovery_z_tp = 0

        # SYMMETRY MATCHING
        # prepare graphs
        graphs = self._graphs(code, time_steps, syndrome, error_probability, measurement_error_probability, bias)
        # minimum weight matching
        matches = self._matching(graphs)
        del graphs  # release heavy object
        # cluster matches
        clusters = self._clusters(matches)
        del matches  # release heavy object
        # resolve symmetry recovery from fusing within clusters
        symmetry_recovery, symmetry_recovery_x_tp, symmetry_recovery_z_tp = self._recovery_tparities(
            code, time_steps, clusters)
        # add symmetry recovery and t-parities
        recovery ^= symmetry_recovery
        recovery_x_tp ^= symmetry_recovery_x_tp
        recovery_z_tp ^= symmetry_recovery_z_tp

        # RESIDUAL CLUSTER SYNDROME
        cluster_syndrome = np.bitwise_xor.reduce(syndrome) ^ pt.bsp(recovery, code.stabilizers.T)
        # warn if infinite bias and non-null cluster syndrome
        if bias is None and np.any(cluster_syndrome):
            logger.warning('UNEXPECTED CLUSTER SYNDROME WITH INFINITE BIAS')

        # CLUSTER RECOVERY
        # prepare cluster graph
        cluster_graph = self._cluster_graph(code, time_steps, clusters)
        del clusters  # release heavy object
        # minimum weight matching
        cluster_matches = self._matching([cluster_graph])
        del cluster_graph  # release heavy object
        # resolve cluster recovery from fusing between clusters
        cluster_recovery, cluster_recovery_x_tp, cluster_recovery_z_tp = self._cluster_recovery_tparities(
            code, time_steps, cluster_matches)
        del cluster_matches  # release heavy object
        # add cluster recovery and t-parities
        recovery ^= cluster_recovery
        recovery_x_tp ^= cluster_recovery_x_tp
        recovery_z_tp ^= cluster_recovery_z_tp

        # TEST T-PARITY
        if self._itp or time_steps == 1:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('decode: ignoring t-parity. itp={}, time_steps={}'.format(self._itp, time_steps))
        else:
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('decode: testing t-parity. itp={}, time_steps={}'.format(self._itp, time_steps))
            if not step_measurement_errors:
                raise QecsimError('Failed to test t-parity. step_measurement_errors not provided.')
            # extract t-parity for measurement errors
            measurement_error_tps = self._measurement_error_tparities(code, step_measurement_errors[-1])
            # total t-parity
            total_tps = np.array((recovery_x_tp, recovery_z_tp)) ^ measurement_error_tps
            # return false decode-result if t-parity fails, with time-parity as custom_values
            if np.any(total_tps != 0):
                return DecodeResult(success=False, recovery=recovery, custom_values=total_tps)

        # return recovery with zeros time parity custom values
        return DecodeResult(recovery=recovery, custom_values=np.array((0, 0)))
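A sketch of how a caller might interpret this contract, using only the DecodeResult attributes documented above:

import numpy as np

def interpret(result):
    if result.success is False:
        x_fail, z_fail = result.custom_values
        return 'time-like logical failure (X: {}, Z: {})'.format(bool(x_fail), bool(z_fail))
    # success is None: the app evaluates success and logical_commutations from result.recovery
    return 'evaluate recovery (bsf weight {})'.format(np.count_nonzero(result.recovery))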
Code example #8
File: _planarydecoder.py Project: silky/qecsim
    def _snake(cls, code, start_index, se=True, full=True, skip_first=False):
        """
        Return operator after applying snake of Y from start index and bouncing in SE (NW) direction(s).

        NOTE: If start_index is out of bounds, then identity is returned.

        For example::

            ----Y--------
              |   |   |
            --------Y----
              |   |   |
            ------------Y

            Y---Y----
              |   Y
            Y-------Y
              Y   |
            ----Y---Y

        :param code: Planar code
        :type code: PlanarCode
        :param start_index: Start index in format (row, column).
        :type start_index: 2-tuple of int
        :param se: Should snake in SE direction initially (default=True, falsy=NW direction).
        :type se: bool
        :param full: Should snake in opposite direction if not looped in initial direction (default=True).
        :type full: bool
        :param skip_first: Should not apply Y to start_index (default=False).
        :type skip_first: bool
        :return: Operator in binary symplectic form.
        :rtype: numpy.array (1d)
        """
        # start with identity
        pauli = code.new_pauli()
        # if start_index out of bounds, return identity
        if not code.is_in_bounds(start_index):
            return pauli.to_bsf()
        # expand indices
        start_r, start_c = start_index
        min_r, min_c = 0, 0
        max_r, max_c = code.bounds
        # index iterators
        if se:  # snake in SE direction
            index_it = zip(
                itertools.cycle(  # cycle row indices
                    itertools.chain(
                        range(start_r, max_r + 2),  # from start_r to just beyond lower boundary
                        range(max_r, min_r - 2, -1),  # from lower boundary to just beyond upper boundary
                        range(min_r, start_r),  # from upper boundary to just before start_r
                    )),
                itertools.cycle(  # cycle column indices
                    itertools.chain(
                        range(start_c, max_c + 2),  # from start_c to just beyond right boundary
                        range(max_c, min_c - 2, -1),  # from right boundary to just beyond left boundary
                        range(min_c, start_c),  # from left boundary to just before start_c
                    )))
        else:  # snake in NW direction
            index_it = zip(
                itertools.cycle(  # cycle row indices
                    itertools.chain(
                        range(start_r, min_r - 2, -1),  # from start_r to just beyond upper boundary
                        range(min_r, max_r + 2),  # from upper boundary to just beyond lower boundary
                        range(max_r, start_r, -1),  # from lower boundary to just before start_r
                    )),
                itertools.cycle(  # cycle column indices
                    itertools.chain(
                        range(start_c, min_c - 2, -1),  # from start_c to just beyond left boundary
                        range(min_c, max_c + 2),  # from left boundary to just beyond right boundary
                        range(max_c, start_c, -1),  # from right boundary to just before start_c
                    )))
        # initialise return value
        looped = False
        # infinite loop protection
        max_count = code.n_k_d[0] * 100
        count = 0
        # initialise indices
        previous_index, current_index = None, None
        # snake
        for next_index in index_it:
            # stop if next_index is start_index reached from the given direction (i.e. we have looped)
            if next_index == start_index and current_index == tuple(
                    np.subtract(next_index, (1, 1) if se else (-1, -1))):
                looped = True
                break
            # stop if next_index is previous_index (i.e. we've bounced off a corner)
            if next_index == previous_index:
                break
            # bump indices
            previous_index, current_index = current_index, next_index
            # apply Y (unless skip_first and null previous_index)
            if not (skip_first and previous_index is None):
                pauli.site('Y', current_index)
            # infinite loop protection
            count += 1
            if count > max_count:
                break
        # report infinite loop protection
        if count > max_count:
            raise QecsimError(
                'Infinite loop applying Y to {} starting at {} in se={} direction.'
                .format(code, start_index, se))
        # convert to bsf
        operator = pauli.to_bsf()
        # if full requested and we have not looped, then apply snake in opposite direction
        if full and not looped:
            operator ^= cls._snake(code, start_index, full=False, se=not se, skip_first=True)
        # return as bsf
        return operator
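The index iterators rely on a bounce-and-cycle pattern: chain a forward range past one boundary, a reverse range past the other, and a tail back to the start, then cycle. A 1-D illustration:

import itertools

start, lo, hi = 2, 0, 4
bounce = itertools.cycle(itertools.chain(
    range(start, hi + 2),  # from start to just beyond the upper bound
    range(hi, lo - 2, -1),  # from the upper bound to just beyond the lower bound
    range(lo, start),  # from the lower bound to just before start
))
print([next(bounce) for _ in range(14)])
# [2, 3, 4, 5, 4, 3, 2, 1, 0, -1, 0, 1, 2, 3]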