Code example #1 (score: 0)
File: _planarydecoder.py — project: silky/qecsim
    def _residual_recovery(cls, code, syndrome):
        """
        Return residual recovery consistent with (lower boundary) syndrome (if possible).

        :param code: Planar code
        :type code: PlanarCode
        :param syndrome: Lower boundary syndrome as binary vector.
        :type syndrome: numpy.array (1d)
        :return: Residual recovery operation in binary symplectic form.
        :rtype: numpy.array (1d)
        """
        syndrome_key = pt.pack(syndrome)
        recovery_map = cls._residual_syndrome_to_recovery_map(code)
        try:
            packed_recovery = recovery_map[syndrome_key]
        except KeyError:
            # N.B. this should not happen if a pure Y-noise model is used
            log_data = {
                # parameters
                'code': repr(code),
                'syndrome': syndrome_key,
            }
            logger.warning('RESIDUAL RECOVERY NOT FOUND: {}'.format(
                json.dumps(log_data, sort_keys=True)))
            # fall back to the identity operation
            return code.new_pauli().to_bsf()
        return pt.unpack(packed_recovery)
Code example #2 (score: 0)
    def decode(
            self,
            code,
            perm_mat,
            syndrome,
            error_model=DepolarizingErrorModel(),  # noqa: B008
            error_probability=0.1,
            **kwargs):
        """
        See :meth:`qecsim.model.Decoder.decode`

        Note: The optional keyword parameters ``error_model`` and ``error_probability`` are used to determine the prior
        probability distribution for use in the decoding algorithm. Any provided error model must implement
        :meth:`~qecsim.model.ErrorModel.probability_distribution`.

        :param code: Rotated planar code.
        :type code: RotatedPlanarCode
        :param perm_mat: Permutation matrix, forwarded to :meth:`_coset_probabilities`.
        :param syndrome: Syndrome as binary vector.
        :type syndrome: numpy.array (1d)
        :param error_model: Error model. (default=DepolarizingErrorModel())
        :type error_model: ErrorModel
        :param error_probability: Overall probability of an error on a single qubit. (default=0.1)
        :type error_probability: float
        :return: (Recovery operation as binary symplectic vector, coset probabilities, maximum coset probability).
        :rtype: 3-tuple of (numpy.array (1d), 4-tuple of mp.mpf, mp.mpf)
        """
        # a recovery consistent with the syndrome (not necessarily the most likely one)
        any_recovery = self.sample_recovery(code, syndrome)
        # prior single-qubit distribution (P(I), P(X), P(Y), P(Z))
        prob_dist = error_model.probability_distribution(error_probability)
        # coset probabilities with matching recovery operations (order I, X, Y, Z)
        coset_ps, recoveries = self._coset_probabilities(
            prob_dist, perm_mat, any_recovery)
        # choose the recovery from the most probable coset
        max_coset_p, max_recovery = max(
            zip(coset_ps, recoveries), key=lambda pair: pair[0])
        # warn if the best coset probability is not a positive finite number
        if not (mp.isfinite(max_coset_p) and max_coset_p > 0):
            log_data = {
                # instance
                'decoder': repr(self),
                # method parameters
                'code': repr(code),
                'syndrome': pt.pack(syndrome),
                'error_model': repr(error_model),
                'error_probability': error_probability,
                # variables
                'prob_dist': prob_dist,
                'coset_ps': [repr(p) for p in coset_ps],  # convert to string because mp.mpf
                # context
                'error': pt.pack(kwargs['error']) if 'error' in kwargs else None,
            }
            logger.warning('NON-POSITIVE-FINITE MAX COSET PROBABILITY: {}'.format(
                json.dumps(log_data, sort_keys=True)))
        # return most likely recovery as bsf, together with coset data for the caller
        return max_recovery.to_bsf(), coset_ps, max_coset_p
Code example #3 (score: 0)
def test_file_error_model_generate_skip_to_start():
    """Generating errors after skipping to position 4 yields the expected packed errors."""
    print()
    error_file = os.path.join(FILES_DIR, 'fivequbitcode-errors-p0.4-bias10.jsonl')
    model = FileErrorModel(error_file, 4)
    code = FiveQubitCode()
    # three consecutive draws, packed for comparison
    generated = tuple(pt.pack(model.generate(code, 0.4)) for _ in range(3))
    assert generated == (("8400", 10), ("5280", 10), ("1080", 10))
Code example #4 (score: 0)
def test_pack_unpack_random():
    """pack/unpack round-trips random binary arrays of every length from 0 to 4999."""
    rng = np.random.default_rng()
    for length in range(5000):
        original = rng.choice(2, length)
        round_tripped = pt.unpack(pt.pack(original))
        assert np.array_equal(original, round_tripped), (
            'Unpacked binary array {} does not equal expected {}.'.format(
                round_tripped, original))
Code example #5 (score: 0)
def test_pack_unpack_65bit():
    """pack/unpack round-trips a 65-bit array (crosses a 64-bit word boundary)."""
    bits = [0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0,
            0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1,
            1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1]
    binary_array = np.array(bits)
    print(len(binary_array))
    round_tripped = pt.unpack(pt.pack(binary_array))
    assert np.array_equal(binary_array, round_tripped), (
        'Unpacked binary array {} does not equal expected {}.'.format(
            round_tripped, binary_array))
Code example #6 (score: 0)
File: _planarydecoder.py — project: silky/qecsim
        def _add(residual_recoveries, skip_trivial):
            """
            Add residual recoveries to residual_map keyed by syndrome as tuple.

            NOTE: This method processes recoveries in chunks so it is memory efficient to pass an iterator.

            :param residual_recoveries: Residual recoveries in bsf
            :type residual_recoveries: iterator of numpy.array (1d)
            :param skip_trivial: Do not add recoveries with trivial syndromes
            :type skip_trivial: bool
            """
            for chunk in util.chunker(residual_recoveries,
                                      PlanarYDecoder.CHUNK_LEN):
                recoveries = np.array(tuple(chunk))
                # syndromes triggered by each recovery in this chunk
                syndromes = pt.bsp(recoveries, code.stabilizers.T)
                for syndrome, recovery in zip(syndromes, recoveries):
                    if skip_trivial and not np.any(syndrome):
                        continue  # trivial syndrome and caller asked to skip it
                    # first recovery seen for a syndrome wins (setdefault)
                    residual_map.setdefault(pt.pack(syndrome),
                                            pt.pack(recovery))
Code example #7 (score: 0)
    def _coset_probabilities(self, prob_dist, perm_mat, sample_pauli):
        r"""
        Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer group
        :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the cosets
        :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

        :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
        :type prob_dist: 4-tuple of float
        :param perm_mat: Permutation matrix, forwarded to the tensor-network creator.
        :param sample_pauli: Sample planar Pauli.
        :type sample_pauli: PlanarPauli
        :return: Coset probabilities (renormalized to sum to 1 when the total is non-zero),
            Sample Paulis (both in order I, X, Y, Z)
            E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
        :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
        """
        # NOTE: all lists/tuples in this method are ordered (i, x, y, z)
        # empty log warnings
        log_warnings = []
        # sample paulis: coset representatives f, fX, fY, fZ
        sample_paulis = (sample_pauli, sample_pauli.copy().logical_x(),
                         sample_pauli.copy().logical_x().logical_z(),
                         sample_pauli.copy().logical_z())
        # tensor networks: tns are common to both contraction by column and by row (after transposition)
        tns = [
            self._tnc.create_tn(prob_dist, sp, perm_mat)
            for sp in sample_paulis
        ]

        def _contract_all(networks, direction):
            """Contract each network; log failures; map nan to inf so it doesn't get lost."""
            ps = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            for i, tn in enumerate(networks):
                try:
                    ps[i] = tt.mps2d.contract(tn, chi=self._chi, tol=self._tol)
                except (ValueError, np.linalg.LinAlgError) as ex:
                    log_warnings.append(
                        'CONTRACTION BY {} FOR {} COSET FAILED: {!r}'.format(
                            direction, 'IXYZ'[i], ex))
            return [mp.inf if mp.isnan(p) else p for p in ps]

        # probabilities (as a list so they can be renormalized below; the original
        # used a tuple here, which raised TypeError on item assignment for an
        # unexpected self._mode)
        coset_ps = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
        # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
        if self._mode in ('c', 'a'):
            # evaluate coset probabilities by column
            coset_ps_col = _contract_all(tns, 'COL')
        if self._mode in ('r', 'a'):
            # evaluate coset probabilities by row (transpose tensor networks first)
            tns = [tt.mps2d.transpose(tn) for tn in tns]
            coset_ps_row = _contract_all(tns, 'ROW')
        if self._mode == 'c':
            coset_ps = coset_ps_col
        elif self._mode == 'r':
            coset_ps = coset_ps_row
        elif self._mode == 'a':
            # average coset probabilities over both contraction directions
            coset_ps = [
                sum(pair) / len(pair)
                for pair in zip(coset_ps_col, coset_ps_row)
            ]

        # renormalize probabilities; guard against a zero total (e.g. all contractions
        # failed) so the warning below is still logged instead of raising
        # ZeroDivisionError (original divided unconditionally)
        total = coset_ps[0] + coset_ps[1] + coset_ps[2] + coset_ps[3]
        if total:
            coset_ps = [coset_p / total for coset_p in coset_ps]

        # logging
        if log_warnings:
            log_data = {
                # instance
                'decoder': repr(self),
                # method parameters
                'prob_dist': prob_dist,
                'sample_pauli': pt.pack(sample_pauli.to_bsf()),
                # variables (convert to string because mp.mpf)
                'coset_ps_col':
                [repr(p) for p in coset_ps_col] if coset_ps_col else None,
                'coset_ps_row':
                [repr(p) for p in coset_ps_row] if coset_ps_row else None,
                'coset_ps': [repr(p) for p in coset_ps],
            }
            logger.warning('{}: {}'.format(
                ' | '.join(log_warnings), json.dumps(log_data,
                                                     sort_keys=True)))
        # results
        return tuple(coset_ps), sample_paulis
Code example #8 (score: 0)
    def _coset_probabilities(self, prob_dist, hadamard_vec, hadamard_mat, sample_pauli):
        r"""
        Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer group
        :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the cosets
        :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

        :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
        :type prob_dist: 4-tuple of float
        :param hadamard_vec: Hadamard deformation vector. NOTE(review): unused in this method;
            kept for interface symmetry with :meth:`decode` — confirm against callers.
        :param hadamard_mat: Matrix whose truthy entries mark Hadamard-deformed qubit sites.
        :param sample_pauli: Sample planar Pauli.
        :type sample_pauli: PlanarPauli
        :return: Coset probabilities, Sample Paulis (both in order I, X, Y, Z)
            E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
        :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
        """
        # NOTE: all list/tuples in this method are ordered (i, x, y, z)
        # empty log warnings
        log_warnings = []
        # sample paulis: coset representatives f, fX, fY, fZ
        sample_paulis = (
            sample_pauli,
            sample_pauli.copy().logical_x(),
            sample_pauli.copy().logical_x().logical_z(),
            sample_pauli.copy().logical_z()
        )
        # tensor networks: tns are common to both contraction by column and by row (after transposition)
        tns = [self._tnc.create_tn(prob_dist, sp) for sp in sample_paulis]
        # Hadamard conjugation swaps X and Z, so deformed sites use (P(I), P(Z), P(Y), P(X))
        pi, px, py, pz = prob_dist
        had_prob_dist = (pi, pz, py, px)
        # rebuild q-nodes on Hadamard-deformed sites. NOTE(review): the original line was a
        # truncated comprehension referencing an undefined had_prob_dist; reconstructed here so
        # each network is modified with its own sample pauli — confirm intent.
        tns = [self._tnc.modify_tn(tn, had_prob_dist, hadamard_mat, sp)
               for tn, sp in zip(tns, sample_paulis)]
        # probabilities
        coset_ps = (0.0, 0.0, 0.0, 0.0)  # default coset probabilities
        coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
        # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
        if self._mode in ('c', 'a'):
            # evaluate coset probabilities by column
            coset_ps_col = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            for i in range(len(tns)):
                try:
                    coset_ps_col[i] = tt.mps2d.contract(tns[i], chi=self._chi, tol=self._tol)
                except (ValueError, np.linalg.LinAlgError) as ex:
                    log_warnings.append('CONTRACTION BY COL FOR {} COSET FAILED: {!r}'.format('IXYZ'[i], ex))
            # treat nan as inf so it doesn't get lost
            coset_ps_col = [mp.inf if mp.isnan(coset_p) else coset_p for coset_p in coset_ps_col]
        if self._mode in ('r', 'a'):
            # evaluate coset probabilities by row
            coset_ps_row = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            # transpose tensor networks
            tns = [tt.mps2d.transpose(tn) for tn in tns]
            for i in range(len(tns)):
                try:
                    coset_ps_row[i] = tt.mps2d.contract(tns[i], chi=self._chi, tol=self._tol)
                except (ValueError, np.linalg.LinAlgError) as ex:
                    log_warnings.append('CONTRACTION BY ROW FOR {} COSET FAILED: {!r}'.format('IXYZ'[i], ex))
            # treat nan as inf so it doesn't get lost
            coset_ps_row = [mp.inf if mp.isnan(coset_p) else coset_p for coset_p in coset_ps_row]
        if self._mode == 'c':
            coset_ps = coset_ps_col
        elif self._mode == 'r':
            coset_ps = coset_ps_row
        elif self._mode == 'a':
            # average coset probabilities over both contraction directions
            coset_ps = [sum(coset_p) / len(coset_p) for coset_p in zip(coset_ps_col, coset_ps_row)]
        # logging
        if log_warnings:
            log_data = {
                # instance
                'decoder': repr(self),
                # method parameters
                'prob_dist': prob_dist,
                'sample_pauli': pt.pack(sample_pauli.to_bsf()),
                # variables (convert to string because mp.mpf)
                'coset_ps_col': [repr(p) for p in coset_ps_col] if coset_ps_col else None,
                'coset_ps_row': [repr(p) for p in coset_ps_row] if coset_ps_row else None,
                'coset_ps': [repr(p) for p in coset_ps],
            }
            logger.warning('{}: {}'.format(' | '.join(log_warnings), json.dumps(log_data, sort_keys=True)))
        # results
        return tuple(coset_ps), sample_paulis

    def decode(self, code, hadamard_vec,hadamard_mat, syndrome,
               error_model=DepolarizingErrorModel(),  # noqa: B008
               error_probability=0.1, **kwargs):
        """
        See :meth:`qecsim.model.Decoder.decode`

        Note: The optional keyword parameters ``error_model`` and ``error_probability`` are used to determine the prior
        probability distribution for use in the decoding algorithm. Any provided error model must implement
        :meth:`~qecsim.model.ErrorModel.probability_distribution`.

        :param code: Rotated planar code.
        :type code: RotatedPlanarCode
        :param hadamard_vec: Hadamard deformation vector, forwarded to :meth:`_coset_probabilities`.
        :param hadamard_mat: Hadamard deformation matrix, forwarded to :meth:`_coset_probabilities`.
        :param syndrome: Syndrome as binary vector.
        :type syndrome: numpy.array (1d)
        :param error_model: Error model. (default=DepolarizingErrorModel())
        :type error_model: ErrorModel
        :param error_probability: Overall probability of an error on a single qubit. (default=0.1)
        :type error_probability: float
        :return: Recovery operation as binary symplectic vector.
        :rtype: numpy.array (1d)
        """
        # a recovery consistent with the syndrome (not necessarily the most likely one)
        any_recovery = self.sample_recovery(code, syndrome)
        # prior single-qubit distribution (P(I), P(X), P(Y), P(Z))
        prob_dist = error_model.probability_distribution(error_probability)
        # coset probabilities with matching recovery operations (order I, X, Y, Z)
        coset_ps, recoveries = self._coset_probabilities(prob_dist, hadamard_vec, hadamard_mat, any_recovery)
        # choose the recovery from the most probable coset
        max_coset_p, max_recovery = max(zip(coset_ps, recoveries), key=lambda pair: pair[0])
        # warn if the best coset probability is not a positive finite number
        if not (mp.isfinite(max_coset_p) and max_coset_p > 0):
            log_data = {
                # instance
                'decoder': repr(self),
                # method parameters
                'code': repr(code),
                'syndrome': pt.pack(syndrome),
                'error_model': repr(error_model),
                'error_probability': error_probability,
                # variables
                'prob_dist': prob_dist,
                'coset_ps': [repr(p) for p in coset_ps],  # convert to string because mp.mpf
                # context
                'error': pt.pack(kwargs['error']) if 'error' in kwargs else None,
            }
            logger.warning('NON-POSITIVE-FINITE MAX COSET PROBABILITY: {}'.format(json.dumps(log_data, sort_keys=True)))
        # return most likely recovery operation as bsf
        return max_recovery.to_bsf()

    @property
    def label(self):
        """See :meth:`qecsim.model.Decoder.label`"""
        settings = (('chi', self._chi), ('mode', self._mode), ('tol', self._tol))
        # include only settings with truthy values
        parts = ['{}={}'.format(key, value) for key, value in settings if value]
        return 'Rotated planar MPS ({})'.format(', '.join(parts))

    def __repr__(self):
        """Return canonical-style representation built from the decoder settings."""
        cls_name = type(self).__name__
        return '{}({!r}, {!r}, {!r})'.format(cls_name, self._chi, self._mode, self._tol)





    class TNC:
        """Tensor network creator for the rotated planar code with Hadamard-deformed sites."""

        # NOTE: lru_cache on instance methods keeps the instance alive for the cache lifetime;
        # acceptable here since a decoder holds a single long-lived TNC.
        @functools.lru_cache()
        def h_node_value(self, prob_dist, f, n, e, s, w):
            """Return horizontal edge tensor element value.

            :param prob_dist: Probability distribution in the format (P(I), P(X), P(Y), P(Z)).
            :param f: Sample Pauli on this qubit as a letter in 'IXYZ'.
            """
            paulis = ('I', 'X', 'Y', 'Z')
            op_to_pr = dict(zip(paulis, prob_dist))
            f = pt.pauli_to_bsf(f)
            I, X, Y, Z = pt.pauli_to_bsf(paulis)
            # n, e, s, w are in {0, 1} so multiply op to turn on or off
            op = (f + (n * Z) + (e * X) + (s * Z) + (w * X)) % 2
            return op_to_pr[pt.bsf_to_pauli(op)]

        @functools.lru_cache()
        def v_node_value(self, prob_dist, f, n, e, s, w):
            """Return vertical edge tensor element value."""
            # N.B. for v_node order of nesw is rotated relative to h_node
            return self.h_node_value(prob_dist, f, e, s, w, n)

        @functools.lru_cache()
        def create_h_node(self, prob_dist, f, compass_direction=None):
            """Return horizontal qubit tensor, i.e. has X plaquettes to left/right and Z plaquettes above/below."""

            def _shape(compass_direction=None):
                """Return shape of tensor including dummy indices."""
                return {  # (ne, se, sw, nw)
                    'n': (2, 2, 2, 1),
                    'ne': (1, 2, 2, 1),
                    'e': (1, 2, 2, 2),
                    'se': (1, 1, 2, 2),
                    's': (2, 1, 2, 2),
                    'sw': (2, 1, 1, 2),
                    'w': (2, 2, 1, 2),
                    'nw': (2, 2, 1, 1),
                }.get(compass_direction, (2, 2, 2, 2))

            # create bare h_node
            node = np.empty(_shape(compass_direction), dtype=np.float64)
            # fill values
            for n, e, s, w in np.ndindex(node.shape):
                node[(n, e, s, w)] = self.h_node_value(prob_dist, f, n, e, s, w)
            return node

        @functools.lru_cache()
        def create_v_node(self, prob_dist, f, compass_direction=None):
            """Return vertical qubit tensor, i.e. has Z plaquettes to left/right and X plaquettes above/below."""

            def _shape(compass_direction=None):
                """Return shape of tensor including dummy indices."""
                return {  # (ne, se, sw, nw)
                    'n': (1, 2, 2, 2),
                    'ne': (1, 1, 2, 2),
                    'e': (2, 1, 2, 2),
                    'se': (2, 1, 1, 2),
                    's': (2, 2, 1, 2),
                    # 'sw': (2, 2, 1, 1),  # cannot happen
                    'w': (2, 2, 2, 1),
                    'nw': (1, 2, 2, 1),
                }.get(compass_direction, (2, 2, 2, 2))

            # create bare v_node
            node = np.empty(_shape(compass_direction), dtype=np.float64)
            # fill values
            for n, e, s, w in np.ndindex(node.shape):
                node[(n, e, s, w)] = self.v_node_value(prob_dist, f, n, e, s, w)
            return node

        @functools.lru_cache()
        def create_s_node(self, compass_direction=None):
            """Return stabilizer tensor."""

            def _shape(compass_direction=None):
                """Return shape of tensor including dummy indices."""
                return {  # (ne, se, sw, nw)
                    'n': (1, 2, 2, 1),
                    'e': (1, 1, 2, 2),
                    's': (2, 1, 1, 2),
                    'w': (2, 2, 1, 1),
                }.get(compass_direction, (2, 2, 2, 2))

            node = tt.tsr.delta(_shape(compass_direction))
            return node

        def create_tn(self, prob_dist, sample_pauli):
            """Return a network (numpy.array 2d) of tensors (numpy.array 4d).
            Note: The network contracts to the coset probability of the given sample_pauli.
            """

            def _rotate_q_index(index, code):
                """Convert code site index in format (x, y) to tensor network q-node index in format (r, c)"""
                site_x, site_y = index  # qubit index in (x, y)
                site_r, site_c = code.site_bounds[1] - site_y, site_x  # qubit index in (r, c)
                return code.site_bounds[0] - site_c + site_r, site_r + site_c  # q-node index in (r, c)

            def _rotate_p_index(index, code):
                """Convert code plaquette index in format (x, y) to tensor network s-node index in format (r, c)"""
                q_node_r, q_node_c = _rotate_q_index(index, code)  # q-node index in (r, c)
                return q_node_r - 1, q_node_c  # s-node index in (r, c)

            def _compass_q_direction(index, code):
                """if the code site index lies on border of lattice then give that direction, else empty string."""
                direction = {code.site_bounds[1]: 'n', 0: 's'}.get(index[1], '')
                direction += {0: 'w', code.site_bounds[0]: 'e'}.get(index[0], '')
                return direction

            def _compass_p_direction(index, code):
                """if the code plaquette index lies on border of lattice then give that direction, else empty string."""
                direction = {code.site_bounds[1]: 'n', -1: 's'}.get(index[1], '')
                direction += {-1: 'w', code.site_bounds[0]: 'e'}.get(index[0], '')
                return direction

            # extract code
            code = sample_pauli.code
            # initialise empty tn
            tn_max_r, _ = _rotate_q_index((0, 0), code)
            _, tn_max_c = _rotate_q_index((code.site_bounds[0], 0), code)
            tn = np.empty((tn_max_r + 1, tn_max_c + 1), dtype=object)
            # iterate sites and plaquettes (plaquette indices extend one step beyond site bounds)
            max_site_x, max_site_y = code.site_bounds
            for code_index in itertools.product(range(-1, max_site_x + 1), range(-1, max_site_y + 1)):
                is_z_plaquette = code.is_z_plaquette(code_index)
                if code.is_in_site_bounds(code_index):
                    q_node_index = _rotate_q_index(code_index, code)
                    q_pauli = sample_pauli.operator(code_index)
                    if is_z_plaquette:
                        q_node = self.create_h_node(prob_dist, q_pauli, _compass_q_direction(code_index, code))
                    else:
                        q_node = self.create_v_node(prob_dist, q_pauli, _compass_q_direction(code_index, code))
                    tn[q_node_index] = q_node
                if code.is_in_plaquette_bounds(code_index):
                    s_node_index = _rotate_p_index(code_index, code)
                    s_node = self.create_s_node(_compass_p_direction(code_index, code))
                    tn[s_node_index] = s_node
            return tn

        def modify_tn(self, tn, had_prob_dist, hadamard_mat, sample_pauli):
            """Return the network with q-nodes on Hadamard-deformed sites rebuilt from had_prob_dist.

            Fixes w.r.t. the original: the 'self' parameter was missing (the method is called as
            ``self._tnc.modify_tn(tn, ...)``, so ``tn`` was bound to ``self`` and ``self`` was
            undefined), and non-deformed sites overwrote their node with a stale or unbound q_node.

            :param tn: Tensor network as returned by :meth:`create_tn` (modified in place and returned).
            :param had_prob_dist: Probability distribution with X and Z swapped, i.e. (P(I), P(Z), P(Y), P(X)).
            :param hadamard_mat: Truthy entries, indexed by code site (x, y), mark Hadamard-deformed
                sites.  Assumed indexable by a 2-tuple (e.g. numpy 2d array) — TODO confirm orientation.
            :param sample_pauli: Sample rotated planar Pauli.
            """

            def _rotate_q_index(index, code):
                """Convert code site index in format (x, y) to tensor network q-node index in format (r, c)"""
                site_x, site_y = index  # qubit index in (x, y)
                site_r, site_c = code.site_bounds[1] - site_y, site_x  # qubit index in (r, c)
                return code.site_bounds[0] - site_c + site_r, site_r + site_c  # q-node index in (r, c)

            def _compass_q_direction(index, code):
                """if the code site index lies on border of lattice then give that direction, else empty string."""
                direction = {code.site_bounds[1]: 'n', 0: 's'}.get(index[1], '')
                direction += {0: 'w', code.site_bounds[0]: 'e'}.get(index[0], '')
                return direction

            code = sample_pauli.code
            max_site_x, max_site_y = code.site_bounds
            for code_index in itertools.product(range(-1, max_site_x + 1), range(-1, max_site_y + 1)):
                if not code.is_in_site_bounds(code_index):
                    continue
                if not hadamard_mat[code_index]:
                    continue  # site not deformed: keep the node built by create_tn
                q_node_index = _rotate_q_index(code_index, code)
                q_pauli = sample_pauli.operator(code_index)
                direction = _compass_q_direction(code_index, code)
                if code.is_z_plaquette(code_index):
                    q_node = self.create_h_node(had_prob_dist, q_pauli, direction)
                else:
                    q_node = self.create_v_node(had_prob_dist, q_pauli, direction)
                tn[q_node_index] = q_node
            return tn
Code example #9 (score: 0)
File: _color666mpsdecoder.py — project: silky/qecsim
    def _coset_probabilities(self, prob_dist, sample_pauli):
        r"""
        Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer group
        :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the cosets
        :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

        :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
        :type prob_dist: 4-tuple of float
        :param sample_pauli: Sample color 666 Pauli.
        :type sample_pauli: Color666Pauli
        :return: Coset probabilities, Sample Paulis (both in order I, X, Y, Z)
        E.g. (0.20, 0.10, 0.05, 0.10), (Color666Pauli(...), Color666Pauli(...), Color666Pauli(...), Color666Pauli(...))
        :rtype: 4-tuple of mp.mpf, 4-tuple of Color666Pauli
        """
        # NOTE: all list/tuples in this method are ordered (i, x, y, z)
        # empty log warnings
        log_warnings = []
        # sample_paulis: coset representatives f, fX, fY, fZ
        sample_paulis = [
            sample_pauli,
            sample_pauli.copy().logical_x(),
            sample_pauli.copy().logical_x().logical_z(),
            sample_pauli.copy().logical_z()
        ]
        # tensor networks: one per coset representative
        tns = [
            self._tnc.create_tn(prob_dist, pauli) for pauli in sample_paulis
        ]
        # probabilities
        coset_ps = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
        try:
            # note: cosets differ only in the first column
            # Contract columns right-to-left down to (but excluding) column 0 ONCE, giving ket_i
            # and a scale factor mult; each coset probability is then the inner product of that
            # shared ket with the coset's own first column.
            ket_i, mult = tt.mps2d.contract(tns[0],
                                            chi=self._chi,
                                            tol=self._tol,
                                            start=-1,
                                            stop=0,
                                            step=-1)  # tns.i
            coset_ps[0] = tt.mps.inner_product(tns[0][:, 0],
                                               ket_i) * mult  # coset_ps.i
            coset_ps[1] = tt.mps.inner_product(tns[1][:, 0],
                                               ket_i) * mult  # coset_ps.x
            coset_ps[2] = tt.mps.inner_product(tns[2][:, 0],
                                               ket_i) * mult  # coset_ps.y
            coset_ps[3] = tt.mps.inner_product(tns[3][:, 0],
                                               ket_i) * mult  # coset_ps.z
        except (ValueError, np.linalg.LinAlgError) as ex:
            # contraction failed: leave coset_ps at the 0.0 defaults and log below
            log_warnings.append(
                'CONTRACTION FOR I COSET FAILED: {!r}'.format(ex))

        # treat nan as inf so it doesn't get lost
        coset_ps = [
            mp.inf if mp.isnan(coset_p) else coset_p for coset_p in coset_ps
        ]

        # logging
        if log_warnings:
            log_data = {
                # instance
                'decoder': repr(self),
                # method parameters
                'prob_dist': prob_dist,
                'sample_pauli': pt.pack(sample_pauli.to_bsf()),
                # variables (convert to string because mp.mpf)
                'coset_ps': [repr(p) for p in coset_ps],
            }
            logger.warning('{}: {}'.format(
                ' | '.join(log_warnings), json.dumps(log_data,
                                                     sort_keys=True)))
        # results
        return tuple(coset_ps), tuple(sample_paulis)
Code example #10 (score: 0)
File: app_defp.py — project: dua-arpit/qecsim
def _run_once_defp(mode, code, time_steps, error_model, decoder,
                   error_probability, perm_rates, perm_mat, perm_vec,
                   code_name, layout, measurement_error_probability, rng):
    """
    Implements run_once and run_once_ftp functions for deformed codes.

    Samples an error per time step, builds the (possibly noisy) syndrome
    history, decodes it and resolves success / logical commutations.

    :param mode: 'ideal' (single perfect-measurement round) or 'ftp'.
    :param code: Stabilizer code exposing n_k_d, stabilizers and logicals.
    :param time_steps: Number of time steps (must be 1 in 'ideal' mode).
    :param error_model: Error model providing probability_distribution().
    :param decoder: Decoder implementing decode (ideal) / decode_ftp (ftp).
    :param error_probability: Physical error probability.
    :param perm_rates: Permutation rates used to build random deformations.
    :param perm_mat: Permutation matrix passed through to the decoder.
    :param perm_vec: Permutation vector applied to sampled Pauli errors.
    :param code_name: Code name; a 'random' prefix triggers a fresh deformation.
    :param layout: Code layout used to build random deformations.
    :param measurement_error_probability: Per-round syndrome-bit flip probability.
    :param rng: numpy random Generator, or None for a fresh default_rng.
    :return: Run data (error_weight, success, max_coset_p,
        logical_commutations, custom_values).
    :rtype: dict
    """
    # assumptions
    assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp'
    if code_name[:6] == 'random':
        perm_mat, perm_vec = deform_matsvecs(code, decoder, error_model,
                                             perm_rates, code_name, layout)

    n_qubits = code.n_k_d[0]
    # resolve rng once (hoisted out of the time-step loop)
    rng = np.random.default_rng() if rng is None else rng

    # generate step_error, step_syndrome and step_measurement_error for each time step
    # BUGFIX: these lists were previously re-initialized inside the loop, so
    # only the final time step survived and the syndrome construction below
    # (indices t-1 and t) was broken for time_steps > 1.
    step_errors, step_syndromes, step_measurement_errors = [], [], []
    for _ in range(time_steps):
        # step_error: random Pauli error, permuted by the deformation vector
        error_Pauli = rng.choice(
            ('I', 'X', 'Y', 'Z'),
            size=n_qubits,
            p=error_model.probability_distribution(error_probability))
        step_error = permute_error_Pauli(error_Pauli, perm_vec)
        step_errors.append(step_error)
        # step_syndrome: stabilizers that do not commute with the error
        step_syndrome = pt.bsp(step_error, code.stabilizers.T)
        step_syndromes.append(step_syndrome)
        # step_measurement_error: random syndrome bit flips based on measurement_error_probability
        if measurement_error_probability:
            step_measurement_error = rng.choice(
                (0, 1),
                size=step_syndrome.shape,
                p=(1 - measurement_error_probability,
                   measurement_error_probability))
        else:
            step_measurement_error = np.zeros(step_syndrome.shape, dtype=int)
        step_measurement_errors.append(step_measurement_error)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: step_errors={}'.format(step_errors))
        logger.debug('run: step_syndromes={}'.format(step_syndromes))
        logger.debug(
            'run: step_measurement_errors={}'.format(step_measurement_errors))

    # error: sum of errors at each time step
    error = np.bitwise_xor.reduce(step_errors)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: error={}'.format(error))

    # syndrome: apply measurement_error at times t-1 and t to syndrome at time t
    # (index t-1 wraps to the last step, i.e. a periodic time boundary)
    syndrome = []
    for t in range(time_steps):
        syndrome.append(step_measurement_errors[t - 1] ^ step_syndromes[t]
                        ^ step_measurement_errors[t])
    # convert syndrome to 2d numpy array
    syndrome = np.array(syndrome)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: syndrome={}'.format(syndrome))

    # decoding: boolean or best match recovery operation based on decoder
    ctx = {
        'error_model': error_model,
        'error_probability': error_probability,
        'error': error,
        'step_errors': step_errors,
        'measurement_error_probability': measurement_error_probability,
        'step_measurement_errors': step_measurement_errors
    }
    # convert syndrome to 1d if mode is 'ideal'
    if mode == 'ideal':  # convert syndrome to 1d and call decode
        decoding = decoder.decode(code, perm_mat, syndrome[0], **ctx)
    if mode == 'ftp':  # call decode_ftp
        decoding = decoder.decode_ftp(code, time_steps, syndrome, **ctx)

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: decoding={}'.format(decoding))

    # if decoding is not DecodeResult, convert to DecodeResult
    if not isinstance(decoding, DecodeResult):
        # decoding is recovery, so wrap in DecodeResult
        decoding = DecodeResult(
            recovery=decoding)  # raises error if recovery is None
    # extract outcomes from decoding
    success = decoding.success
    logical_commutations = decoding.logical_commutations
    custom_values = decoding.custom_values
    # BUGFIX: default max_coset_p so the data dict below does not raise
    # NameError when the decoder supplies no recovery
    max_coset_p = None
    # if recovery specified, resolve success and logical_commutations
    if decoding.recovery is not None:
        # recovery here is a (max_coset_p, recovery_bsf) pair
        recovered = decoding.recovery[1] ^ error
        max_coset_p = decoding.recovery[0]
        # success checks
        commutes_with_stabilizers = np.all(
            pt.bsp(recovered, code.stabilizers.T) == 0)
        if not commutes_with_stabilizers:
            log_data = {  # enough data to recreate issue
                # models
                'code': repr(code), 'error_model': repr(error_model), 'decoder': repr(decoder),
                # variables
                # BUGFIX: pack the recovery bsf, not the (p, bsf) pair
                'error': pt.pack(error), 'recovery': pt.pack(decoding.recovery[1]),
                # step variables
                'step_errors': [pt.pack(v) for v in step_errors],
                'step_measurement_errors': [pt.pack(v) for v in step_measurement_errors],
            }
            logger.warning('RECOVERY DOES NOT RETURN TO CODESPACE: {}'.format(
                json.dumps(log_data, sort_keys=True)))
        resolved_logical_commutations = pt.bsp(recovered, code.logicals.T)
        commutes_with_logicals = np.all(resolved_logical_commutations == 0)
        resolved_success = commutes_with_stabilizers and commutes_with_logicals
        # fill in unspecified outcomes
        success = resolved_success if success is None else success
        logical_commutations = resolved_logical_commutations if logical_commutations is None else logical_commutations

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: success={}'.format(success))
        logger.debug(
            'run: logical_commutations={!r}'.format(logical_commutations))
        logger.debug('run: custom_values={!r}'.format(custom_values))

    data = {
        'error_weight': pt.bsf_wt(np.array(step_errors)),
        'success': bool(success),
        'max_coset_p': max_coset_p,
        'logical_commutations': logical_commutations,
        'custom_values': custom_values,
    }

    return data
コード例 #11
0
    def _coset_probabilities(self, prob_dist, sample_pauli, perm_mat):
        r"""
        Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer group
        :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the cosets
        :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

        :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
        :type prob_dist: 4-tuple of float
        :param sample_pauli: Sample planar Pauli.
        :type sample_pauli: PlanarPauli
        :param perm_mat: Permutation matrix forwarded to the tensor-network creator
            (presumably encodes the code deformation -- confirm against ``create_tn``).
        :return: Coset probabilities, Sample Paulis (both in order I, X, Y, Z)
            E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
        :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
        """
        # NOTE: all list/tuples in this method are ordered (i, x, y, z)
        # empty log warnings
        log_warnings = []
        # sample_paulis: coset representatives f, fX, fY (= fXZ up to phase), fZ
        sample_paulis = [
            sample_pauli,
            sample_pauli.copy().logical_x(),
            sample_pauli.copy().logical_x().logical_z(),
            sample_pauli.copy().logical_z()
        ]
        # tensor networks: one 2d network of tensors per coset representative
        tns = [
            self._tnc.create_tn(prob_dist, pauli, perm_mat)
            for pauli in sample_paulis
        ]
        mask = self._tnc.create_mask(self._stp,
                                     tns[0].shape)  # same mask for all tns
        # probabilities
        coset_ps = (0.0, 0.0, 0.0, 0.0)  # default coset probabilities
        coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
        # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
        if self._mode in ('c', 'a'):
            # evaluate coset probabilities by column
            coset_ps_col = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            # note: I,X and Z,Y cosets differ only in the last column (logical X)
            # so a single partial contraction (all columns but the last) serves
            # two cosets each; inner product with each last column finishes it.
            try:
                bra_i, mult = tt.mps2d.contract(tns[0],
                                                chi=self._chi,
                                                tol=self._tol,
                                                stop=-1,
                                                mask=mask)  # tns.i
                coset_ps_col[0] = tt.mps.inner_product(
                    bra_i, tns[0][:, -1]) * mult  # coset_ps_col.i
                coset_ps_col[1] = tt.mps.inner_product(
                    bra_i, tns[1][:, -1]) * mult  # coset_ps_col.x
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append(
                    'CONTRACTION BY COL FOR I COSET FAILED: {!r}'.format(ex))
            try:
                bra_z, mult = tt.mps2d.contract(tns[3],
                                                chi=self._chi,
                                                tol=self._tol,
                                                stop=-1,
                                                mask=mask)  # tns.z
                coset_ps_col[2] = tt.mps.inner_product(
                    bra_z, tns[2][:, -1]) * mult  # coset_ps_col.y
                coset_ps_col[3] = tt.mps.inner_product(
                    bra_z, tns[3][:, -1]) * mult  # coset_ps_col.z
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append(
                    'CONTRACTION BY COL FOR Z COSET FAILED: {!r}'.format(ex))
            # treat nan as inf so it doesn't get lost
            coset_ps_col = [
                mp.inf if mp.isnan(coset_p) else coset_p
                for coset_p in coset_ps_col
            ]
        if self._mode in ('r', 'a'):
            # evaluate coset probabilities by row
            coset_ps_row = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            # transpose tensor networks
            tns = [tt.mps2d.transpose(tn) for tn in tns]
            mask = None if mask is None else mask.transpose()
            # note: I,Z and X,Y cosets differ only in the last row (logical Z)
            # so, after transposition, the analogous two-for-one optimization
            # applies with the I and X networks as the shared partial bras.
            try:
                bra_i, mult = tt.mps2d.contract(tns[0],
                                                chi=self._chi,
                                                tol=self._tol,
                                                stop=-1,
                                                mask=mask)  # tns.i
                coset_ps_row[0] = tt.mps.inner_product(
                    bra_i, tns[0][:, -1]) * mult  # coset_ps_row.i
                coset_ps_row[3] = tt.mps.inner_product(
                    bra_i, tns[3][:, -1]) * mult  # coset_ps_row.z
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append(
                    'CONTRACTION BY ROW FOR I COSET FAILED: {!r}'.format(ex))
            try:
                bra_x, mult = tt.mps2d.contract(tns[1],
                                                chi=self._chi,
                                                tol=self._tol,
                                                stop=-1,
                                                mask=mask)  # tns.x
                coset_ps_row[1] = tt.mps.inner_product(
                    bra_x, tns[1][:, -1]) * mult  # coset_ps_row.x
                coset_ps_row[2] = tt.mps.inner_product(
                    bra_x, tns[2][:, -1]) * mult  # coset_ps_row.y
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append(
                    'CONTRACTION BY ROW FOR X COSET FAILED: {!r}'.format(ex))
            # treat nan as inf so it doesn't get lost
            coset_ps_row = [
                mp.inf if mp.isnan(coset_p) else coset_p
                for coset_p in coset_ps_row
            ]
        if self._mode == 'c':
            coset_ps = coset_ps_col
        elif self._mode == 'r':
            coset_ps = coset_ps_row
        elif self._mode == 'a':
            # average coset probabilities
            coset_ps = [
                sum(coset_p) / len(coset_p)
                for coset_p in zip(coset_ps_col, coset_ps_row)
            ]

        # logging
        if log_warnings:
            log_data = {
                # instance
                'decoder':
                repr(self),
                # method parameters
                'prob_dist':
                prob_dist,
                # variables (convert to string because mp.mpf)
                'sample_pauli':
                pt.pack(sample_pauli.to_bsf()),
                'coset_ps_col':
                [repr(p) for p in coset_ps_col] if coset_ps_col else None,
                'coset_ps_row':
                [repr(p) for p in coset_ps_row] if coset_ps_row else None,
                'coset_ps': [repr(p) for p in coset_ps],
            }
            logger.warning('{}: {}'.format(
                ' | '.join(log_warnings), json.dumps(log_data,
                                                     sort_keys=True)))
        # results
        return tuple(coset_ps), tuple(sample_paulis)
コード例 #12
0
ファイル: app_defN.py プロジェクト: dua-arpit/qecsim
def _run_once_defN(mode, code, hadamard_mat, time_steps, error_model, decoder, n_errors_code, measurement_error_probability, rng):
    """
    Implements run_once and run_once_ftp functions for fixed-weight errors.

    Builds a fixed-weight Pauli error string with X/Y/Z counts proportional to
    the error model's distribution, applies a Hadamard deformation (X<->Z swap
    on flagged qubits), decodes the resulting syndrome and resolves success.

    :param mode: 'ideal' (single perfect-measurement round) or 'ftp'.
    :param code: Stabilizer code exposing n_k_d, site_bounds, stabilizers, logicals.
    :param hadamard_mat: Binary matrix flagging sites whose X/Z error components are swapped.
    :param time_steps: Number of time steps (must be 1 in 'ideal' mode).
    :param error_model: Error model providing probability_distribution().
    :param decoder: Decoder implementing decode (ideal) / decode_ftp (ftp).
    :param n_errors_code: Target number of errors in the sampled string.
    :param measurement_error_probability: Per-round syndrome-bit flip probability.
    :param rng: numpy random Generator used for measurement errors.
    :return: Run data (error_weight, success, logical_commutations, custom_values).
    :rtype: dict
    """
    # assumptions
    assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp'

    # BUGFIX: n_qubits must be defined before it is used to pad error_Pauli
    # with identities (previously referenced before assignment -> NameError).
    n_qubits = code.n_k_d[0]

    # build fixed-weight Pauli list with X/Y/Z counts scaled from the error
    # model's distribution at a reference error probability
    error_probability_sample = 0.1
    (pI, pX, pY, pZ) = error_model.probability_distribution(error_probability_sample)
    error_Pauli = []
    error_Pauli.extend('X' * round(n_errors_code * pX / error_probability_sample))
    error_Pauli.extend('Y' * round(n_errors_code * pY / error_probability_sample))
    error_Pauli.extend('Z' * round(n_errors_code * pZ / error_probability_sample))
    error_Pauli.extend('I' * (n_qubits - len(error_Pauli)))

    # hadamard_vec: flatten hadamard_mat into qubit order (row i maps to
    # offset (Ny-1-i)*Nx, i.e. bottom row first)
    hadamard_vec = np.zeros(n_qubits)
    Nx = code.site_bounds[0] + 1
    Ny = code.site_bounds[1] + 1
    for i, j in np.ndindex(hadamard_mat.shape):
        if hadamard_mat[i, j] == 1:
            hadamard_vec[j + (Ny - 1 - i) * Nx] = 1

    # generate step_error, step_syndrome and step_measurement_error for each time step
    # BUGFIX: these lists were previously re-initialized inside the loop, so
    # only the final time step survived and the syndrome construction below
    # (indices t-1 and t) was broken for time_steps > 1.
    step_errors, step_syndromes, step_measurement_errors = [], [], []
    for _ in range(time_steps):
        # step_error: random error based on shuffled fixed-weight Pauli string
        shuffle(error_Pauli)
        step_error = pt.pauli_to_bsf(''.join(error_Pauli))
        # apply Hadamard deformation: swap X and Z bsf components on flagged qubits
        for i in range(n_qubits):
            if hadamard_vec[i] == 1:
                step_error[i], step_error[n_qubits + i] = (
                    step_error[n_qubits + i], step_error[i])
        step_errors.append(step_error)
        # step_syndrome: stabilizers that do not commute with the error
        step_syndrome = pt.bsp(step_error, code.stabilizers.T)
        step_syndromes.append(step_syndrome)
        # step_measurement_error: random syndrome bit flips based on measurement_error_probability
        if measurement_error_probability:
            step_measurement_error = rng.choice(
                (0, 1),
                size=step_syndrome.shape,
                p=(1 - measurement_error_probability, measurement_error_probability)
            )
        else:
            step_measurement_error = np.zeros(step_syndrome.shape, dtype=int)
        step_measurement_errors.append(step_measurement_error)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: step_errors={}'.format(step_errors))
        logger.debug('run: step_syndromes={}'.format(step_syndromes))
        logger.debug('run: step_measurement_errors={}'.format(step_measurement_errors))

    # error: sum of errors at each time step
    error = np.bitwise_xor.reduce(step_errors)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: error={}'.format(error))

    # syndrome: apply measurement_error at times t-1 and t to syndrome at time t
    # (index t-1 wraps to the last step, i.e. a periodic time boundary)
    syndrome = []
    for t in range(time_steps):
        syndrome.append(step_measurement_errors[t - 1] ^ step_syndromes[t] ^ step_measurement_errors[t])
    # convert syndrome to 2d numpy array
    syndrome = np.array(syndrome)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: syndrome={}'.format(syndrome))

    # decoding: boolean or best match recovery operation based on decoder
    ctx = {'error_model': error_model, 'n_errors_code': n_errors_code, 'error': error,
           'step_errors': step_errors, 'measurement_error_probability': measurement_error_probability,
           'step_measurement_errors': step_measurement_errors}
    # convert syndrome to 1d if mode is 'ideal'
    if mode == 'ideal':  # convert syndrome to 1d and call decode
        decoding = decoder.decode(code, hadamard_mat, syndrome[0], **ctx)
    if mode == 'ftp':  # call decode_ftp
        decoding = decoder.decode_ftp(code, time_steps, syndrome, **ctx)

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: decoding={}'.format(decoding))

    # if decoding is not DecodeResult, convert to DecodeResult
    if not isinstance(decoding, DecodeResult):
        # decoding is recovery, so wrap in DecodeResult
        decoding = DecodeResult(recovery=decoding)  # raises error if recovery is None
    # extract outcomes from decoding
    success = decoding.success
    logical_commutations = decoding.logical_commutations
    custom_values = decoding.custom_values
    # if recovery specified, resolve success and logical_commutations
    if decoding.recovery is not None:
        # recovered code
        recovered = decoding.recovery ^ error
        # success checks
        commutes_with_stabilizers = np.all(pt.bsp(recovered, code.stabilizers.T) == 0)
        if not commutes_with_stabilizers:
            log_data = {  # enough data to recreate issue
                # models
                'code': repr(code), 'error_model': repr(error_model), 'decoder': repr(decoder),
                # variables
                'error': pt.pack(error), 'recovery': pt.pack(decoding.recovery),
                # step variables
                'step_errors': [pt.pack(v) for v in step_errors],
                'step_measurement_errors': [pt.pack(v) for v in step_measurement_errors],
            }
            logger.warning('RECOVERY DOES NOT RETURN TO CODESPACE: {}'.format(json.dumps(log_data, sort_keys=True)))
        resolved_logical_commutations = pt.bsp(recovered, code.logicals.T)
        commutes_with_logicals = np.all(resolved_logical_commutations == 0)
        resolved_success = commutes_with_stabilizers and commutes_with_logicals
        # fill in unspecified outcomes
        success = resolved_success if success is None else success
        logical_commutations = resolved_logical_commutations if logical_commutations is None else logical_commutations

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: success={}'.format(success))
        logger.debug('run: logical_commutations={!r}'.format(logical_commutations))
        logger.debug('run: custom_values={!r}'.format(custom_values))

    data = {
        'error_weight': pt.bsf_wt(np.array(step_errors)),
        'success': bool(success),
        'logical_commutations': logical_commutations,
        'custom_values': custom_values,
    }

    return data
コード例 #13
0
ファイル: _planarrmpsdecoder.py プロジェクト: silky/qecsim
    def _coset_probabilities(self, prob_dist, sample_pauli):
        r"""
        Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer group
        :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the cosets
        :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

        :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
        :type prob_dist: 4-tuple of float
        :param sample_pauli: Sample planar Pauli.
        :type sample_pauli: PlanarPauli
        :return: Coset probabilities, Sample Paulis (both in order I, X, Y, Z)
            E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
        :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
        """

        # NOTE: OPTIMIZED CONTRACTION BY COLUMN
        #
        # * Partial contraction of coset-I tensor network by _column_
        #
        # pauli          create         contract       contract
        #                rotated TN     0-1 as bra_i   5-4 as ket_i
        #                               (left_stop=2)  (right_stop=3)
        #
        #  0123456        012345         012345         012345
        # 0. . . .       0  .           0  .           0  .
        # 1 . . .        1 ...          1 *..          1 *..
        # 2. . . .  -->  2.....   -->   2 *...   -->   2 *..*
        # 3 . . .        3 .....        3 *....        3 *..*
        # 4. . . .       4  ...         4  ...         4  ..*
        #                5   .          5   .          5   .
        #
        # * Optimized contraction of coset-Z tensor network (for example) by _column_
        #   using partial contraction of coset-I tensor network
        #
        # pauli with     create         combine and contract
        # logical        rotated TN     (bra_i + 2-3 + ket_i)
        #
        #  0123456        012345         012345
        # 0z . . .       0  z           0  z
        # 1 z . .        1 .z.          1 *z.
        # 2. z . .  -->  2..z..   -->   2 *z.*   -->   \pi
        # 3 . z .        3 .z...        3 *z.*
        # 4. . z z       4  z..         4  z.*
        #                5   z          5   z
        #

        # NOTE: OPTIMIZED CONTRACTION BY ROW
        #
        # * Partial contraction of coset-I tensor network by _row_
        #
        # pauli          create         transpose      contract       contract
        #                rotated TN                    0-1 as bra_i   5-4 as ket_i
        #                                              (left_stop=2)  (right_stop=3)
        #
        #  0123456        012345         012345         012345         012345
        # 0. . . .       0  .           0  .           0  .           0  .
        # 1 . . .        1 ...          1 ...          1 *..          1 *..
        # 2. . . .  -->  2.....   -->   2.....   -->   2 *...   -->   2 *..*
        # 3 . . .        3 .....        3 .....        3 *....        3 *..*
        # 4. . . .       4  ...         4  ...         4  ...         4  ..*
        #                5   .          5   .          5   .          5   .
        #
        # * Optimized contraction of coset-Z tensor network (for example) by _row_
        #   using partial contraction of coset-I tensor network
        #
        # pauli with     create         transpose      combine and contract
        # logical        rotated TN                    (bra_i + 2-3 + ket_i)
        #
        #  0123456        012345         012345         012345
        # 0. . z z       0  .           0  z           0  z
        # 1 . z .        1 ...          1 .z.          1 *z.
        # 2. z . .  -->  2zzzzz   -->   2..z..   -->   2 *z.*   -->   \pi
        # 3 z . .        3 ....z        3 .z...        3 *z.*
        # 4z . . .       4  ...         4  z..         4  z.*
        #                5   .          5   z          5   z
        #

        # NOTE: logicals along major diagonal for various shape codes
        #
        # logical I     logical X     logical Y     logical Z
        #
        #  01234         01234         01234         01234
        # 0. . .        0x . .        0y . .        0z . .
        # 1 . .         1 x .         1 y .         1 z .
        # 2. . .        2. x .        2. y .        2. z .
        # 3 . .         3 . x         3 . y         3 . z
        # 4. . .        4. . x        4. . y        4. . z
        #
        #  0123456       0123456       0123456       0123456
        # 0. . . .      0x . . .      0y . . .      0z . . .
        # 1 . . .       1 x . .       1 y . .       1 z . .
        # 2. . . .      2. x . .      2. y . .      2. z . .
        # 3 . . .       3 . x .       3 . y .       3 . z .
        # 4. . . .      4. . x .      4. . y z      4. . z z
        #
        #  01234         01234         01234         01234
        # 0. . .        0x . .        0y . .        0z . .
        # 1 . .         1 x .         1 y .         1 z .
        # 2. . .        2. x .        2. y .        2. z .
        # 3 . .         3 . x         3 . y         3 . z
        # 4. . .        4. . x        4. . y        4. . z
        # 5 . .         5 . .         5 . .         5 . .
        # 6. . .        6. . x        6. . x        4. . .
        #

        def _logical_x(pauli, major=True):
            """return pauli after applying X along the major/minor diagonal"""
            max_row, max_col = pauli.code.bounds
            # define site indices
            site_indices = itertools.chain(
                zip(range(max_row + 1),
                    range(max_col + 1)),  # along major diagonal
                ((r, max_col) for r in range(max_col + 2, max_row +
                                             1, 2)),  # down rightmost column
            )
            # if not major, switch to minor diagonal
            if not major:
                site_indices = ((max_row - r, c) for r, c in site_indices)
            # apply X on sites
            return pauli.site('X', *site_indices)

        def _logical_z(pauli, major=True):
            """return pauli after applying Z along the major/minor diagonal"""
            max_row, max_col = pauli.code.bounds
            # define site indices
            site_indices = itertools.chain(
                zip(range(max_row + 1),
                    range(max_col + 1)),  # along major diagonal
                ((max_row, c) for c in range(max_row + 2, max_col +
                                             1, 2)),  # across bottom row
            )
            # if not major, switch to minor diagonal
            if not major:
                site_indices = ((max_row - r, c) for r, c in site_indices)
            # apply Z on sites
            return pauli.site('Z', *site_indices)

        def _tn_contract_optimized(code, coset_ps, tns, mask):
            """update coset_ps with optimized contraction of tns"""
            # left_stop
            left_stop = min(code.size) - 1
            # note: for optimization we contract tn_i from left to left_stop as bra common to all cosets
            bra_i, bra_i_mult = tt.mps2d.contract(tns[0],
                                                  chi=self._chi,
                                                  tol=self._tol,
                                                  mask=mask,
                                                  stop=left_stop)
            # right_stop
            right_stop = tns[0].shape[1] - min(code.size)
            # note: for optimization we contract tn_i from right to right_stop as ket common to all cosets
            ket_i, ket_i_mult = tt.mps2d.contract(tns[0],
                                                  chi=self._chi,
                                                  tol=self._tol,
                                                  mask=mask,
                                                  start=-1,
                                                  stop=right_stop,
                                                  step=-1)
            # for each tn, combine and contract to coset probability
            for j in range(len(tns)):
                # combine bra_i, tn_j[:, left_stop:right_stop + 1], ket_i as partially contracted tn
                partial_tn = np.column_stack(
                    (bra_i, tns[j][:, left_stop:right_stop + 1], ket_i))
                # slice mask to match partially contracted tn
                # (width = 1 bra column + middle columns + 1 ket column)
                partial_mask = None if mask is None else mask[:, left_stop -
                                                              1:right_stop + 2]
                # contract
                result = tt.mps2d.contract(partial_tn,
                                           chi=self._chi,
                                           tol=self._tol,
                                           mask=partial_mask,
                                           step=-1)
                # multiply by multipliers
                coset_ps[j] = result * bra_i_mult * ket_i_mult

        # NOTE: all list/tuples in this method are ordered (i, x, y, z)
        # empty log warnings
        log_warnings = []
        # tensor networks: tn_i is common to both contraction by column and by row (after transposition)
        tn_i = self._tnc.create_tn(prob_dist, sample_pauli)
        mask = self._tnc.create_mask(self._stp,
                                     tn_i.shape)  # same mask for all tns
        # probabilities
        coset_ps = (0.0, 0.0, 0.0, 0.0)  # default coset probabilities
        coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
        # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
        if self._mode in ('c', 'a'):
            # note: for optimization we choose cosets to differ only on the major diagonal
            sample_x = _logical_x(sample_pauli.copy())
            tns = [
                tn_i,
                self._tnc.create_tn(prob_dist, sample_x),
                self._tnc.create_tn(prob_dist, _logical_z(sample_x.copy())),
                self._tnc.create_tn(prob_dist, _logical_z(sample_pauli.copy()))
            ]
            # evaluate coset probabilities by column
            coset_ps_col = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            try:
                _tn_contract_optimized(sample_pauli.code, coset_ps_col, tns,
                                       mask)
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append(
                    'CONTRACTION BY COL FAILED: {!r}'.format(ex))
            # treat nan as inf so it doesn't get lost
            coset_ps_col = [
                mp.inf if mp.isnan(coset_p) else coset_p
                for coset_p in coset_ps_col
            ]
        if self._mode in ('r', 'a'):
            # note: for optimization we choose cosets to differ only on the minor diagonal
            sample_x = _logical_x(sample_pauli.copy(), major=False)
            tns = [
                tn_i,
                self._tnc.create_tn(prob_dist, sample_x),
                self._tnc.create_tn(prob_dist,
                                    _logical_z(sample_x.copy(), major=False)),
                self._tnc.create_tn(
                    prob_dist, _logical_z(sample_pauli.copy(), major=False))
            ]
            # evaluate coset probabilities by row
            coset_ps_row = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            # transpose tensor networks
            tns = [tt.mps2d.transpose(tn) for tn in tns]
            mask = None if mask is None else mask.transpose()
            try:
                _tn_contract_optimized(sample_pauli.code, coset_ps_row, tns,
                                       mask)
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append(
                    'CONTRACTION BY ROW FAILED: {!r}'.format(ex))
            # treat nan as inf so it doesn't get lost
            coset_ps_row = [
                mp.inf if mp.isnan(coset_p) else coset_p
                for coset_p in coset_ps_row
            ]
        if self._mode == 'c':
            coset_ps = coset_ps_col
        elif self._mode == 'r':
            coset_ps = coset_ps_row
        elif self._mode == 'a':
            # average coset probabilities
            coset_ps = [
                sum(coset_p) / len(coset_p)
                for coset_p in zip(coset_ps_col, coset_ps_row)
            ]
        # logging
        if log_warnings:
            log_data = {
                # instance
                'decoder':
                repr(self),
                # method parameters
                'prob_dist':
                prob_dist,
                'sample_pauli':
                pt.pack(sample_pauli.to_bsf()),
                # variables (convert to string because mp.mpf)
                'coset_ps_col':
                [repr(p) for p in coset_ps_col] if coset_ps_col else None,
                'coset_ps_row':
                [repr(p) for p in coset_ps_row] if coset_ps_row else None,
                'coset_ps': [repr(p) for p in coset_ps],
            }
            logger.warning('{}: {}'.format(
                ' | '.join(log_warnings), json.dumps(log_data,
                                                     sort_keys=True)))
        # results
        # sample_paulis use the standard logical operators (not the diagonal
        # variants above), which represent the same cosets
        sample_paulis = (sample_pauli, sample_pauli.copy().logical_x(),
                         sample_pauli.copy().logical_x().logical_z(),
                         sample_pauli.copy().logical_z())
        return tuple(coset_ps), sample_paulis