    def decode(self, code, perm_mat, syndrome,
               error_model=DepolarizingErrorModel(),  # noqa: B008
               error_probability=0.1, **kwargs):
        """
        See :meth:`qecsim.model.Decoder.decode`

        Note: The optional keyword parameters ``error_model`` and ``error_probability`` are used to determine the
        prior probability distribution for use in the decoding algorithm. Any provided error model must implement
        :meth:`~qecsim.model.ErrorModel.probability_distribution`.

        :param code: Rotated planar code.
        :type code: RotatedPlanarCode
        :param perm_mat: Permutation matrix, passed through to the coset-probability calculation.
        :type perm_mat: numpy.array (2d)
        :param syndrome: Syndrome as binary vector.
        :type syndrome: numpy.array (1d)
        :param error_model: Error model. (default=DepolarizingErrorModel())
        :type error_model: ErrorModel
        :param error_probability: Overall probability of an error on a single qubit. (default=0.1)
        :type error_probability: float
        :return: Recovery operation as binary symplectic vector, coset probabilities, maximum coset probability.
        :rtype: numpy.array (1d), 4-tuple of mp.mpf, mp.mpf
        """
        # any recovery
        any_recovery = self.sample_recovery(code, syndrome)
        # probability distribution
        prob_dist = error_model.probability_distribution(error_probability)
        # coset probabilities, recovery operations
        coset_ps, recoveries = self._coset_probabilities(prob_dist, perm_mat, any_recovery)
        # most likely recovery operation
        max_coset_p, max_recovery = max(zip(coset_ps, recoveries), key=lambda coset_p_recovery: coset_p_recovery[0])
        # logging
        if not (mp.isfinite(max_coset_p) and max_coset_p > 0):
            log_data = {
                # instance
                'decoder': repr(self),
                # method parameters
                'code': repr(code),
                'syndrome': pt.pack(syndrome),
                'error_model': repr(error_model),
                'error_probability': error_probability,
                # variables
                'prob_dist': prob_dist,
                'coset_ps': [repr(p) for p in coset_ps],  # convert to string because mp.mpf
                # context
                'error': pt.pack(kwargs['error']) if 'error' in kwargs else None,
            }
            logger.warning('NON-POSITIVE-FINITE MAX COSET PROBABILITY: {}'.format(json.dumps(log_data, sort_keys=True)))
        # return most likely recovery operation as bsf, together with coset probabilities and maximum coset probability
        return max_recovery.to_bsf(), coset_ps, max_coset_p
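# A minimal usage sketch of the modified `decode` above, which returns a 3-tuple rather than
# just the recovery operation. It assumes a decoder instance `decoder` of this class, a
# rotated planar code `code`, a permutation matrix `perm_mat` and an error `error` in binary
# symplectic form already exist; these names are illustrative only.
#
#     import numpy as np
#     from qecsim import paulitools as pt
#     from qecsim.models.generic import DepolarizingErrorModel
#
#     syndrome = pt.bsp(error, code.stabilizers.T)
#     recovery, coset_ps, max_coset_p = decoder.decode(
#         code, perm_mat, syndrome,
#         error_model=DepolarizingErrorModel(), error_probability=0.1)
#     # recovery succeeds (up to stabilizers) if recovery ^ error commutes with all logical operators
#     success = np.all(pt.bsp(recovery ^ error, code.logicals.T) == 0)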
def test_planar_mps_decoder_positive_max_coset_probability(mode):
    # parameters
    code = PlanarCode(9, 9)
    decoder = PlanarMPSDecoder(chi=48, mode=mode)
    error_model = BiasedDepolarizingErrorModel(bias=100)
    error_probability = 0.41
    # logged run values
    error = pt.unpack(["c96aa012210dc2254031f15d9ce80c871fb864b510c91086e112a018f8aece7406638fdc00", 290])
    syndrome = pt.unpack(["8f59cd273bd1c027b3b925085af85f2aaf22", 144])
    assert np.array_equal(syndrome, pt.bsp(error, code.stabilizers.T))
    # debug
    # print(code.ascii_art(syndrome, code.new_pauli(error)))
    # decode
    prob_dist = error_model.probability_distribution(error_probability)
    any_recovery = decoder.sample_recovery(code, syndrome)
    # coset probabilities
    coset_ps, recoveries = decoder._coset_probabilities(prob_dist, any_recovery)
    print('mode={}, coset_ps={}'.format(mode, coset_ps))
    max_coset_p, max_recovery = max(zip(coset_ps, recoveries), key=lambda coset_p_recovery: coset_p_recovery[0])
    success = np.all(pt.bsp(max_recovery.to_bsf() ^ error, code.logicals.T) == 0)
    print('### success=', success)
    assert mp.isfinite(max_coset_p) and max_coset_p > 0, 'Max coset probability not as expected'
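# The "logged run values" above are qecsim packed binary symplectic vectors: pt.pack
# compresses a 0/1 numpy array into a compact (hex string, length) pair and pt.unpack
# restores it. A minimal round-trip sketch (the array below is illustrative, not a logged value):
#
#     import numpy as np
#     from qecsim import paulitools as pt
#
#     bsf = np.array([1, 0, 1, 1, 0, 0, 1, 0], dtype=int)
#     packed = pt.pack(bsf)          # compact representation, e.g. a (hex string, length) pair
#     restored = pt.unpack(packed)   # back to the original binary vector
#     assert np.array_equal(bsf, restored)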
def test_planar_mps_decoder_small_code_negative_coset_probability(chi, mode):
    # parameters
    code = PlanarCode(3, 3)
    decoder = PlanarMPSDecoder(chi=chi, mode=mode)
    error_model = DepolarizingErrorModel()
    error_probability = 0.1
    # logged run values
    error = pt.unpack(["e0048000", 26])
    syndrome = pt.bsp(error, code.stabilizers.T)
    # debug
    print()
    print(code.ascii_art(syndrome, code.new_pauli(error)))
    # decode
    prob_dist = error_model.probability_distribution(error_probability)
    any_recovery = decoder.sample_recovery(code, syndrome)
    # coset probabilities
    coset_ps, recoveries = decoder._coset_probabilities(prob_dist, any_recovery)
    print('chi={}, mode={}, coset_ps={}'.format(chi, mode, coset_ps))
    max_coset_p, max_recovery = max(zip(coset_ps, recoveries), key=lambda coset_p_recovery: coset_p_recovery[0])
    success = np.all(pt.bsp(max_recovery.to_bsf() ^ error, code.logicals.T) == 0)
    print('### success=', success)
    assert mp.isfinite(max_coset_p) and max_coset_p > 0, 'Max coset probability not as expected'
    assert np.all(np.array(coset_ps) >= 0), 'At least one coset probability is negative'
def test_planar_mps_decoder_zero_max_coset_probability(code, chi):
    decoder = PlanarMPSDecoder(chi=chi, mode='c')
    error_model = BiasedDepolarizingErrorModel(bias=1000)
    random_seed = 69
    # probabilities
    probability = 0.4
    prob_dist = error_model.probability_distribution(probability)
    # error
    error = error_model.generate(code, probability, np.random.default_rng(random_seed))
    # syndrome
    syndrome = pt.bsp(error, code.stabilizers.T)
    # any_recovery
    any_recovery = decoder.sample_recovery(code, syndrome)
    # coset probabilities
    coset_ps, _ = decoder._coset_probabilities(prob_dist, any_recovery)
    print(coset_ps)
    max_coset_p = max(coset_ps)
    assert mp.isfinite(max_coset_p) and max_coset_p > 0, 'Max coset probability out of bounds {}'.format(coset_ps)
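# The coset probabilities checked above are arbitrary-precision mpmath values (mp.mpf), so the
# "positive and finite" assertions use mpmath rather than numpy/float arithmetic: probabilities
# far below the float64 range stay positive as mp.mpf but would underflow to 0.0 as floats.
# A small self-contained illustration:
#
#     from mpmath import mp
#
#     tiny = mp.mpf('1e-5000')                   # representable as mp.mpf
#     print(mp.isfinite(tiny) and tiny > 0)      # True
#     print(float(tiny))                         # 0.0 -- underflows as a float64
#     print(mp.isfinite(mp.inf) and mp.inf > 0)  # False -- inf is rejected by the check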
    def _coset_probabilities(self, prob_dist, hadamard_vec, hadamard_mat, sample_pauli):
        r"""
        Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer group
        :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the cosets
        :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

        :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
        :type prob_dist: 4-tuple of float
        :param hadamard_vec: Vector indicating Hadamard-transformed qubit sites (not used directly by this method).
        :type hadamard_vec: numpy.array (1d)
        :param hadamard_mat: Matrix indicating Hadamard-transformed qubit sites, indexed by code site.
        :type hadamard_mat: numpy.array (2d)
        :param sample_pauli: Sample planar Pauli.
        :type sample_pauli: PlanarPauli
        :return: Coset probabilities, Sample Paulis (both in order I, X, Y, Z)
            E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
        :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
        """
        # NOTE: all list/tuples in this method are ordered (i, x, y, z)
        # empty log warnings
        log_warnings = []
        # sample paulis
        sample_paulis = (
            sample_pauli,
            sample_pauli.copy().logical_x(),
            sample_pauli.copy().logical_x().logical_z(),
            sample_pauli.copy().logical_z()
        )
        # tensor networks: tns are common to both contraction by column and by row (after transposition)
        tns = [self._tnc.create_tn(prob_dist, sp) for sp in sample_paulis]
        # Hadamard-swapped probability distribution: P(X) and P(Z) exchange under Hadamard conjugation
        pi, px, py, pz = prob_dist
        had_prob_dist = (pi, pz, py, px)
        # rebuild the nodes at Hadamard-transformed sites using the swapped distribution
        tns = [self._tnc.modify_tn(tn, had_prob_dist, hadamard_mat, sp) for tn, sp in zip(tns, sample_paulis)]
        # probabilities
        coset_ps = (0.0, 0.0, 0.0, 0.0)  # default coset probabilities
        coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
        # N.B. coset_ps will be of type mp.mpf so don't process with numpy!
        if self._mode in ('c', 'a'):
            # evaluate coset probabilities by column
            coset_ps_col = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            for i in range(len(tns)):
                try:
                    coset_ps_col[i] = tt.mps2d.contract(tns[i], chi=self._chi, tol=self._tol)
                except (ValueError, np.linalg.LinAlgError) as ex:
                    log_warnings.append('CONTRACTION BY COL FOR {} COSET FAILED: {!r}'.format('IXYZ'[i], ex))
            # treat nan as inf so it doesn't get lost
            coset_ps_col = [mp.inf if mp.isnan(coset_p) else coset_p for coset_p in coset_ps_col]
        if self._mode in ('r', 'a'):
            # evaluate coset probabilities by row
            coset_ps_row = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
            # transpose tensor networks
            tns = [tt.mps2d.transpose(tn) for tn in tns]
            for i in range(len(tns)):
                try:
                    coset_ps_row[i] = tt.mps2d.contract(tns[i], chi=self._chi, tol=self._tol)
                except (ValueError, np.linalg.LinAlgError) as ex:
                    log_warnings.append('CONTRACTION BY ROW FOR {} COSET FAILED: {!r}'.format('IXYZ'[i], ex))
            # treat nan as inf so it doesn't get lost
            coset_ps_row = [mp.inf if mp.isnan(coset_p) else coset_p for coset_p in coset_ps_row]
        if self._mode == 'c':
            coset_ps = coset_ps_col
        elif self._mode == 'r':
            coset_ps = coset_ps_row
        elif self._mode == 'a':
            # average coset probabilities
            coset_ps = [sum(coset_p) / len(coset_p) for coset_p in zip(coset_ps_col, coset_ps_row)]
        # logging
        if log_warnings:
            log_data = {
                # instance
                'decoder': repr(self),
                # method parameters
                'prob_dist': prob_dist,
                'sample_pauli': pt.pack(sample_pauli.to_bsf()),
                # variables (convert to string because mp.mpf)
                'coset_ps_col': [repr(p) for p in coset_ps_col] if coset_ps_col else None,
                'coset_ps_row': [repr(p) for p in coset_ps_row] if coset_ps_row else None,
                'coset_ps': [repr(p) for p in coset_ps],
            }
            logger.warning('{}: {}'.format(' | '.join(log_warnings), json.dumps(log_data, sort_keys=True)))
        # results
        return tuple(coset_ps), sample_paulis

    def decode(self, code, hadamard_vec, hadamard_mat, syndrome,
               error_model=DepolarizingErrorModel(),  # noqa: B008
               error_probability=0.1, **kwargs):
        """
        See :meth:`qecsim.model.Decoder.decode`

        Note: The optional keyword parameters ``error_model`` and ``error_probability`` are used to determine the
        prior probability distribution for use in the decoding algorithm. Any provided error model must implement
        :meth:`~qecsim.model.ErrorModel.probability_distribution`.

        :param code: Rotated planar code.
        :type code: RotatedPlanarCode
        :param hadamard_vec: Vector indicating Hadamard-transformed qubit sites.
        :type hadamard_vec: numpy.array (1d)
        :param hadamard_mat: Matrix indicating Hadamard-transformed qubit sites, indexed by code site.
        :type hadamard_mat: numpy.array (2d)
        :param syndrome: Syndrome as binary vector.
        :type syndrome: numpy.array (1d)
        :param error_model: Error model. (default=DepolarizingErrorModel())
        :type error_model: ErrorModel
        :param error_probability: Overall probability of an error on a single qubit. (default=0.1)
        :type error_probability: float
        :return: Recovery operation as binary symplectic vector.
        :rtype: numpy.array (1d)
        """
        # any recovery
        any_recovery = self.sample_recovery(code, syndrome)
        # probability distribution
        prob_dist = error_model.probability_distribution(error_probability)
        # coset probabilities, recovery operations
        coset_ps, recoveries = self._coset_probabilities(prob_dist, hadamard_vec, hadamard_mat, any_recovery)
        # most likely recovery operation
        max_coset_p, max_recovery = max(zip(coset_ps, recoveries), key=lambda coset_p_recovery: coset_p_recovery[0])
        # logging
        if not (mp.isfinite(max_coset_p) and max_coset_p > 0):
            log_data = {
                # instance
                'decoder': repr(self),
                # method parameters
                'code': repr(code),
                'syndrome': pt.pack(syndrome),
                'error_model': repr(error_model),
                'error_probability': error_probability,
                # variables
                'prob_dist': prob_dist,
                'coset_ps': [repr(p) for p in coset_ps],  # convert to string because mp.mpf
                # context
                'error': pt.pack(kwargs['error']) if 'error' in kwargs else None,
            }
            logger.warning('NON-POSITIVE-FINITE MAX COSET PROBABILITY: {}'.format(json.dumps(log_data, sort_keys=True)))
        # return most likely recovery operation as bsf
        return max_recovery.to_bsf()

    @property
    def label(self):
        """See :meth:`qecsim.model.Decoder.label`"""
        params = [('chi', self._chi), ('mode', self._mode), ('tol', self._tol)]
        return 'Rotated planar MPS ({})'.format(', '.join('{}={}'.format(k, v) for k, v in params if v))

    def __repr__(self):
        return '{}({!r}, {!r}, {!r})'.format(type(self).__name__, self._chi, self._mode, self._tol)

    class TNC:
        """Tensor network creator"""

        @functools.lru_cache()
        def h_node_value(self, prob_dist, f, n, e, s, w):
            """Return horizontal edge tensor element value."""
            paulis = ('I', 'X', 'Y', 'Z')
            op_to_pr = dict(zip(paulis, prob_dist))
            f = pt.pauli_to_bsf(f)
            I, X, Y, Z = pt.pauli_to_bsf(paulis)
            # n, e, s, w are in {0, 1} so multiply op to turn on or off
            op = (f + (n * Z) + (e * X) + (s * Z) + (w * X)) % 2
            return op_to_pr[pt.bsf_to_pauli(op)]

        @functools.lru_cache()
        def v_node_value(self, prob_dist, f, n, e, s, w):
            """Return vertical edge tensor element value."""
            # N.B. for v_node order of nesw is rotated relative to h_node
            return self.h_node_value(prob_dist, f, e, s, w, n)

        @functools.lru_cache()
        def create_h_node(self, prob_dist, f, compass_direction=None):
            """Return horizontal qubit tensor, i.e.
            has X plaquettes to left/right and Z plaquettes above/below."""

            def _shape(compass_direction=None):
                """Return shape of tensor including dummy indices."""
                return {  # (ne, se, sw, nw)
                    'n': (2, 2, 2, 1),
                    'ne': (1, 2, 2, 1),
                    'e': (1, 2, 2, 2),
                    'se': (1, 1, 2, 2),
                    's': (2, 1, 2, 2),
                    'sw': (2, 1, 1, 2),
                    'w': (2, 2, 1, 2),
                    'nw': (2, 2, 1, 1),
                }.get(compass_direction, (2, 2, 2, 2))

            # create bare h_node
            node = np.empty(_shape(compass_direction), dtype=np.float64)
            # fill values
            for n, e, s, w in np.ndindex(node.shape):
                node[(n, e, s, w)] = self.h_node_value(prob_dist, f, n, e, s, w)
            return node

        @functools.lru_cache()
        def create_v_node(self, prob_dist, f, compass_direction=None):
            """Return vertical qubit tensor, i.e. has Z plaquettes to left/right and X plaquettes above/below."""

            def _shape(compass_direction=None):
                """Return shape of tensor including dummy indices."""
                return {  # (ne, se, sw, nw)
                    'n': (1, 2, 2, 2),
                    'ne': (1, 1, 2, 2),
                    'e': (2, 1, 2, 2),
                    'se': (2, 1, 1, 2),
                    's': (2, 2, 1, 2),
                    # 'sw': (2, 2, 1, 1),  # cannot happen
                    'w': (2, 2, 2, 1),
                    'nw': (1, 2, 2, 1),
                }.get(compass_direction, (2, 2, 2, 2))

            # create bare v_node
            node = np.empty(_shape(compass_direction), dtype=np.float64)
            # fill values
            for n, e, s, w in np.ndindex(node.shape):
                node[(n, e, s, w)] = self.v_node_value(prob_dist, f, n, e, s, w)
            return node

        @functools.lru_cache()
        def create_s_node(self, compass_direction=None):
            """Return stabilizer tensor."""

            def _shape(compass_direction=None):
                """Return shape of tensor including dummy indices."""
                return {  # (ne, se, sw, nw)
                    'n': (1, 2, 2, 1),
                    'e': (1, 1, 2, 2),
                    's': (2, 1, 1, 2),
                    'w': (2, 2, 1, 1),
                }.get(compass_direction, (2, 2, 2, 2))

            node = tt.tsr.delta(_shape(compass_direction))
            return node

        def create_tn(self, prob_dist, sample_pauli):
            """Return a network (numpy.array 2d) of tensors (numpy.array 4d).

            Note: The network contracts to the coset probability of the given sample_pauli.
""" def _rotate_q_index(index, code): """Convert code site index in format (x, y) to tensor network q-node index in format (r, c)""" site_x, site_y = index # qubit index in (x, y) site_r, site_c = code.site_bounds[1] - site_y, site_x # qubit index in (r, c) return code.site_bounds[0] - site_c + site_r, site_r + site_c # q-node index in (r, c) def _rotate_p_index(index, code): """Convert code plaquette index in format (x, y) to tensor network s-node index in format (r, c)""" q_node_r, q_node_c = _rotate_q_index(index, code) # q-node index in (r, c) return q_node_r - 1, q_node_c # s-node index in (r, c) def _compass_q_direction(index, code): """if the code site index lies on border of lattice then give that direction, else empty string.""" direction = {code.site_bounds[1]: 'n', 0: 's'}.get(index[1], '') direction += {0: 'w', code.site_bounds[0]: 'e'}.get(index[0], '') return direction def _compass_p_direction(index, code): """if the code plaquette index lies on border of lattice then give that direction, else empty string.""" direction = {code.site_bounds[1]: 'n', -1: 's'}.get(index[1], '') direction += {-1: 'w', code.site_bounds[0]: 'e'}.get(index[0], '') return direction pi,px,py,pz=prob_dist had_prob_dist= pi,pz,py,px # extract code code = sample_pauli.code # initialise empty tn tn_max_r, _ = _rotate_q_index((0, 0), code) _, tn_max_c = _rotate_q_index((code.site_bounds[0], 0), code) tn = np.empty((tn_max_r + 1, tn_max_c + 1), dtype=object) # iterate over max_site_x, max_site_y = code.site_bounds for code_index in itertools.product(range(-1, max_site_x + 1), range(-1, max_site_y + 1)): is_z_plaquette = code.is_z_plaquette(code_index) if code.is_in_site_bounds(code_index): q_node_index = _rotate_q_index(code_index, code) q_pauli = sample_pauli.operator(code_index) if is_z_plaquette: #print(code_index) q_node = self.create_h_node(prob_dist, q_pauli, _compass_q_direction(code_index, code)) else: q_node = self.create_v_node(prob_dist, q_pauli, _compass_q_direction(code_index, code)) tn[q_node_index] = q_node if code.is_in_plaquette_bounds(code_index): s_node_index = _rotate_p_index(code_index, code) s_node = self.create_s_node(_compass_p_direction(code_index, code)) tn[s_node_index] = s_node return tn def modify_tn(tn, had_prob_dist, hadamard_mat,sample_pauli): """Return a network (numpy.array 2d) of tensors (numpy.array 4d). Note: The network contracts to the coset probability of the given sample_pauli. 
""" def _rotate_q_index(index, code): """Convert code site index in format (x, y) to tensor network q-node index in format (r, c)""" site_x, site_y = index # qubit index in (x, y) site_r, site_c = code.site_bounds[1] - site_y, site_x # qubit index in (r, c) return code.site_bounds[0] - site_c + site_r, site_r + site_c # q-node index in (r, c) def _compass_q_direction(index, code): """if the code site index lies on border of lattice then give that direction, else empty string.""" direction = {code.site_bounds[1]: 'n', 0: 's'}.get(index[1], '') direction += {0: 'w', code.site_bounds[0]: 'e'}.get(index[0], '') return direction code = sample_pauli.code max_site_x, max_site_y = code.site_bounds for code_index in itertools.product(range(-1, max_site_x + 1), range(-1, max_site_y + 1)): is_z_plaquette = code.is_z_plaquette(code_index) if code.is_in_site_bounds(code_index): q_node_index = _rotate_q_index(code_index, code) q_pauli = sample_pauli.operator(code_index) if is_z_plaquette: #print(code_index) if hadamard_mat[code_index]: q_node = self.create_h_node(had_prob_dist, q_pauli, _compass_q_direction(code_index, code)) else: if hadamard_mat[code_index]: q_node = self.create_v_node(had_prob_dist, q_pauli, _compass_q_direction(code_index, code)) tn[q_node_index] = q_node return tn