def xlogy(x, y):
    """
    Compute x*log(y).

    Returns nan when either argument is nan; otherwise returns 0 when x is 0
    (by convention, even where log(y) would diverge).

    *See also:* `mpsci.fun.xlog1py`
    """
    # nan in either argument propagates unconditionally
    if mp.isnan(x) or mp.isnan(y):
        return mp.nan
    # 0 * log(y) is defined as 0 here, mirroring scipy.special.xlogy
    return mp.zero if x == 0 else x * mp.log(y)
def xlog1py(x, y):
    """
    Compute x*log(1+y).

    Returns nan when either argument is nan; otherwise returns 0 when x is 0.

    Mathematically equivalent to `mpsci.fun.xlogy(x, 1 + y)`, but avoids the
    loss of precision that can result when y is very small.

    *See also:* `mpsci.fun.xlogy`
    """
    # nan in either argument propagates unconditionally
    if mp.isnan(x) or mp.isnan(y):
        return mp.nan
    # 0 * log1p(y) is defined as 0 here; log1p keeps precision for tiny y
    return mp.zero if x == 0 else x * mp.log1p(y)
def _coset_probabilities(self, prob_dist, perm_mat, sample_pauli):
    r"""
    Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer
    group :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the
    cosets :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

    :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
    :type prob_dist: 4-tuple of float
    :param perm_mat: Permutation matrix forwarded to the tensor-network creator.
    :param sample_pauli: Sample planar Pauli.
    :type sample_pauli: PlanarPauli
    :return: Coset probabilities, sample Paulis (both in order I, X, Y, Z).
        E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
    :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
    """
    # NOTE: all list/tuples in this method are ordered (i, x, y, z)
    # empty log warnings: collected so all contraction failures emit one log record
    log_warnings = []
    # sample paulis: one representative per coset (I, X, Y, Z logicals applied to the sample)
    sample_paulis = (sample_pauli,
                     sample_pauli.copy().logical_x(),
                     sample_pauli.copy().logical_x().logical_z(),
                     sample_pauli.copy().logical_z())
    # tensor networks: tns are common to both contraction by column and by row (after transposition)
    tns = [
        self._tnc.create_tn(prob_dist, sp, perm_mat)
        for sp in sample_paulis
    ]
    # probabilities
    coset_ps = (0.0, 0.0, 0.0, 0.0)  # default coset probabilities
    coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
    # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
    if self._mode in ('c', 'a'):
        # evaluate coset probabilities by column
        coset_ps_col = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        for i in range(len(tns)):
            try:
                coset_ps_col[i] = tt.mps2d.contract(tns[i], chi=self._chi, tol=self._tol)
            except (ValueError, np.linalg.LinAlgError) as ex:
                # failed contraction leaves the 0.0 default in place; warn once at the end
                log_warnings.append(
                    'CONTRACTION BY COL FOR {} COSET FAILED: {!r}'.format(
                        'IXYZ'[i], ex))
        # treat nan as inf so it doesn't get lost (e.g. in the max over cosets taken by the caller)
        coset_ps_col = [
            mp.inf if mp.isnan(coset_p) else coset_p
            for coset_p in coset_ps_col
        ]
    if self._mode in ('r', 'a'):
        # evaluate coset probabilities by row
        coset_ps_row = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        # transpose tensor networks so row contraction reuses the column contraction routine
        tns = [tt.mps2d.transpose(tn) for tn in tns]
        for i in range(len(tns)):
            try:
                coset_ps_row[i] = tt.mps2d.contract(tns[i], chi=self._chi, tol=self._tol)
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append(
                    'CONTRACTION BY ROW FOR {} COSET FAILED: {!r}'.format(
                        'IXYZ'[i], ex))
        # treat nan as inf so it doesn't get lost
        coset_ps_row = [
            mp.inf if mp.isnan(coset_p) else coset_p
            for coset_p in coset_ps_row
        ]
    if self._mode == 'c':
        coset_ps = coset_ps_col
    elif self._mode == 'r':
        coset_ps = coset_ps_row
    elif self._mode == 'a':
        # average coset probabilities elementwise over column and row estimates
        coset_ps = [
            sum(coset_p) / len(coset_p)
            for coset_p in zip(coset_ps_col, coset_ps_row)
        ]
    # renormalize probabilities so the four coset probabilities sum to 1
    # NOTE(review): if all contractions fail (or cancel) C is 0 and the divisions below
    # would raise — confirm callers can never reach this with C == 0
    C = coset_ps[0] + coset_ps[1] + coset_ps[2] + coset_ps[3]
    coset_ps[0] = coset_ps[0] / C
    coset_ps[1] = coset_ps[1] / C
    coset_ps[2] = coset_ps[2] / C
    coset_ps[3] = coset_ps[3] / C
    # logging
    if log_warnings:
        log_data = {
            # instance
            'decoder': repr(self),
            # method parameters
            'prob_dist': prob_dist,
            'sample_pauli': pt.pack(sample_pauli.to_bsf()),
            # variables (convert to string because mp.mpf)
            'coset_ps_col': [repr(p) for p in coset_ps_col] if coset_ps_col else None,
            'coset_ps_row': [repr(p) for p in coset_ps_row] if coset_ps_row else None,
            'coset_ps': [repr(p) for p in coset_ps],
        }
        logger.warning('{}: {}'.format(
            ' | '.join(log_warnings), json.dumps(log_data, sort_keys=True)))
    # results
    return tuple(coset_ps), sample_paulis
def _coset_probabilities(self, prob_dist, hadamard_vec, hadamard_mat, sample_pauli):
    r"""
    Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer
    group :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the
    cosets :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

    :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
    :type prob_dist: 4-tuple of float
    :param hadamard_vec: Hadamard layout as vector (currently unused here; kept for interface parity).
    :param hadamard_mat: Matrix flagging, per site index, whether the qubit is Hadamard-rotated.
    :param sample_pauli: Sample planar Pauli.
    :type sample_pauli: PlanarPauli
    :return: Coset probabilities, sample Paulis (both in order I, X, Y, Z).
        E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
    :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
    """
    # NOTE: all list/tuples in this method are ordered (i, x, y, z)
    # empty log warnings: collected so all contraction failures emit one log record
    log_warnings = []
    # sample paulis: one representative per coset
    sample_paulis = (
        sample_pauli,
        sample_pauli.copy().logical_x(),
        sample_pauli.copy().logical_x().logical_z(),
        sample_pauli.copy().logical_z()
    )
    # tensor networks: tns are common to both contraction by column and by row (after transposition)
    tns = [self._tnc.create_tn(prob_dist, sp) for sp in sample_paulis]
    # Hadamard-rotated sites see the distribution with P(X) and P(Z) swapped
    pi, px, py, pz = prob_dist
    had_prob_dist = (pi, pz, py, px)
    # FIX: original line was a syntactically incomplete comprehension referencing an
    # undefined had_prob_dist; rebuild each network's qubit tensors on Hadamard-rotated
    # sites, pairing each network with its own coset representative.
    tns = [self._tnc.modify_tn(tn, had_prob_dist, hadamard_mat, sp)
           for tn, sp in zip(tns, sample_paulis)]
    # probabilities
    coset_ps = (0.0, 0.0, 0.0, 0.0)  # default coset probabilities
    coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
    # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
    if self._mode in ('c', 'a'):
        # evaluate coset probabilities by column
        coset_ps_col = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        for i in range(len(tns)):
            try:
                coset_ps_col[i] = tt.mps2d.contract(tns[i], chi=self._chi, tol=self._tol)
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append('CONTRACTION BY COL FOR {} COSET FAILED: {!r}'.format('IXYZ'[i], ex))
        # treat nan as inf so it doesn't get lost
        coset_ps_col = [mp.inf if mp.isnan(coset_p) else coset_p for coset_p in coset_ps_col]
    if self._mode in ('r', 'a'):
        # evaluate coset probabilities by row
        coset_ps_row = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        # transpose tensor networks so row contraction reuses the column routine
        tns = [tt.mps2d.transpose(tn) for tn in tns]
        for i in range(len(tns)):
            try:
                coset_ps_row[i] = tt.mps2d.contract(tns[i], chi=self._chi, tol=self._tol)
            except (ValueError, np.linalg.LinAlgError) as ex:
                log_warnings.append('CONTRACTION BY ROW FOR {} COSET FAILED: {!r}'.format('IXYZ'[i], ex))
        # treat nan as inf so it doesn't get lost
        coset_ps_row = [mp.inf if mp.isnan(coset_p) else coset_p for coset_p in coset_ps_row]
    if self._mode == 'c':
        coset_ps = coset_ps_col
    elif self._mode == 'r':
        coset_ps = coset_ps_row
    elif self._mode == 'a':
        # average coset probabilities elementwise over column and row estimates
        coset_ps = [sum(coset_p) / len(coset_p) for coset_p in zip(coset_ps_col, coset_ps_row)]
    # logging
    if log_warnings:
        log_data = {
            # instance
            'decoder': repr(self),
            # method parameters
            'prob_dist': prob_dist,
            'sample_pauli': pt.pack(sample_pauli.to_bsf()),
            # variables (convert to string because mp.mpf)
            'coset_ps_col': [repr(p) for p in coset_ps_col] if coset_ps_col else None,
            'coset_ps_row': [repr(p) for p in coset_ps_row] if coset_ps_row else None,
            'coset_ps': [repr(p) for p in coset_ps],
        }
        logger.warning('{}: {}'.format(' | '.join(log_warnings), json.dumps(log_data, sort_keys=True)))
    # results
    return tuple(coset_ps), sample_paulis

def decode(self, code, hadamard_vec, hadamard_mat, syndrome,
           error_model=DepolarizingErrorModel(),  # noqa: B008
           error_probability=0.1, **kwargs):
    """
    See :meth:`qecsim.model.Decoder.decode`

    Note: The optional keyword parameters ``error_model`` and ``error_probability`` are used to determine the
    prior probability distribution for use in the decoding algorithm. Any provided error model must implement
    :meth:`~qecsim.model.ErrorModel.probability_distribution`.

    :param code: Rotated planar code.
    :type code: RotatedPlanarCode
    :param hadamard_vec: Hadamard layout as vector (forwarded to :meth:`_coset_probabilities`).
    :param hadamard_mat: Hadamard layout as matrix (forwarded to :meth:`_coset_probabilities`).
    :param syndrome: Syndrome as binary vector.
    :type syndrome: numpy.array (1d)
    :param error_model: Error model. (default=DepolarizingErrorModel())
    :type error_model: ErrorModel
    :param error_probability: Overall probability of an error on a single qubit. (default=0.1)
    :type error_probability: float
    :return: Recovery operation as binary symplectic vector.
    :rtype: numpy.array (1d)
    """
    # any recovery consistent with the syndrome
    any_recovery = self.sample_recovery(code, syndrome)
    # prior probability distribution (P(I), P(X), P(Y), P(Z))
    prob_dist = error_model.probability_distribution(error_probability)
    # coset probabilities with matching recovery operations
    coset_ps, recoveries = self._coset_probabilities(prob_dist, hadamard_vec, hadamard_mat, any_recovery)
    # most likely recovery operation
    max_coset_p, max_recovery = max(zip(coset_ps, recoveries),
                                    key=lambda coset_p_recovery: coset_p_recovery[0])
    # logging: a non-positive or non-finite maximum means decoding is unreliable
    if not (mp.isfinite(max_coset_p) and max_coset_p > 0):
        log_data = {
            # instance
            'decoder': repr(self),
            # method parameters
            'code': repr(code),
            'syndrome': pt.pack(syndrome),
            'error_model': repr(error_model),
            'error_probability': error_probability,
            # variables
            'prob_dist': prob_dist,
            'coset_ps': [repr(p) for p in coset_ps],  # convert to string because mp.mpf
            # context
            'error': pt.pack(kwargs['error']) if 'error' in kwargs else None,
        }
        logger.warning('NON-POSITIVE-FINITE MAX COSET PROBABILITY: {}'.format(
            json.dumps(log_data, sort_keys=True)))
    # return most likely recovery operation as bsf
    return max_recovery.to_bsf()

@property
def label(self):
    """See :meth:`qecsim.model.Decoder.label`"""
    # only truthy parameters are included in the label
    params = [('chi', self._chi), ('mode', self._mode), ('tol', self._tol), ]
    return 'Rotated planar MPS ({})'.format(', '.join('{}={}'.format(k, v) for k, v in params if v))

def __repr__(self):
    return '{}({!r}, {!r}, {!r})'.format(type(self).__name__, self._chi, self._mode, self._tol)

class TNC:
    """Tensor network creator"""

    @functools.lru_cache()
    def h_node_value(self, prob_dist, f, n, e, s, w):
        """Return horizontal edge tensor element value.

        f is the sample Pauli on the qubit; n, e, s, w in {0, 1} flag which
        adjacent stabilizer legs are "on". The element is the probability of
        the resulting single-qubit Pauli under prob_dist.
        """
        paulis = ('I', 'X', 'Y', 'Z')
        op_to_pr = dict(zip(paulis, prob_dist))
        f = pt.pauli_to_bsf(f)
        I, X, Y, Z = pt.pauli_to_bsf(paulis)
        # n, e, s, w are in {0, 1} so multiply op to turn on or off
        op = (f + (n * Z) + (e * X) + (s * Z) + (w * X)) % 2
        return op_to_pr[pt.bsf_to_pauli(op)]

    @functools.lru_cache()
    def v_node_value(self, prob_dist, f, n, e, s, w):
        """Return vertical edge tensor element value."""
        # N.B. for v_node order of nesw is rotated relative to h_node
        return self.h_node_value(prob_dist, f, e, s, w, n)

    @functools.lru_cache()
    def create_h_node(self, prob_dist, f, compass_direction=None):
        """Return horizontal qubit tensor, i.e. has X plaquettes to left/right and Z plaquettes above/below."""

        def _shape(compass_direction=None):
            """Return shape of tensor including dummy indices."""
            return {  # (ne, se, sw, nw)
                'n': (2, 2, 2, 1),
                'ne': (1, 2, 2, 1),
                'e': (1, 2, 2, 2),
                'se': (1, 1, 2, 2),
                's': (2, 1, 2, 2),
                'sw': (2, 1, 1, 2),
                'w': (2, 2, 1, 2),
                'nw': (2, 2, 1, 1),
            }.get(compass_direction, (2, 2, 2, 2))

        # create bare h_node
        node = np.empty(_shape(compass_direction), dtype=np.float64)
        # fill values
        for n, e, s, w in np.ndindex(node.shape):
            node[(n, e, s, w)] = self.h_node_value(prob_dist, f, n, e, s, w)
        return node

    @functools.lru_cache()
    def create_v_node(self, prob_dist, f, compass_direction=None):
        """Return vertical qubit tensor, i.e. has Z plaquettes to left/right and X plaquettes above/below."""

        def _shape(compass_direction=None):
            """Return shape of tensor including dummy indices."""
            return {  # (ne, se, sw, nw)
                'n': (1, 2, 2, 2),
                'ne': (1, 1, 2, 2),
                'e': (2, 1, 2, 2),
                'se': (2, 1, 1, 2),
                's': (2, 2, 1, 2),
                # 'sw': (2, 2, 1, 1),  # cannot happen
                'w': (2, 2, 2, 1),
                'nw': (1, 2, 2, 1),
            }.get(compass_direction, (2, 2, 2, 2))

        # create bare v_node
        node = np.empty(_shape(compass_direction), dtype=np.float64)
        # fill values
        for n, e, s, w in np.ndindex(node.shape):
            node[(n, e, s, w)] = self.v_node_value(prob_dist, f, n, e, s, w)
        return node

    @functools.lru_cache()
    def create_s_node(self, compass_direction=None):
        """Return stabilizer tensor (delta tensor over its non-dummy indices)."""

        def _shape(compass_direction=None):
            """Return shape of tensor including dummy indices."""
            return {  # (ne, se, sw, nw)
                'n': (1, 2, 2, 1),
                'e': (1, 1, 2, 2),
                's': (2, 1, 1, 2),
                'w': (2, 2, 1, 1),
            }.get(compass_direction, (2, 2, 2, 2))

        node = tt.tsr.delta(_shape(compass_direction))
        return node

    def create_tn(self, prob_dist, sample_pauli):
        """Return a network (numpy.array 2d) of tensors (numpy.array 4d).

        Note: The network contracts to the coset probability of the given sample_pauli.
        """

        def _rotate_q_index(index, code):
            """Convert code site index in format (x, y) to tensor network q-node index in format (r, c)"""
            site_x, site_y = index  # qubit index in (x, y)
            site_r, site_c = code.site_bounds[1] - site_y, site_x  # qubit index in (r, c)
            return code.site_bounds[0] - site_c + site_r, site_r + site_c  # q-node index in (r, c)

        def _rotate_p_index(index, code):
            """Convert code plaquette index in format (x, y) to tensor network s-node index in format (r, c)"""
            q_node_r, q_node_c = _rotate_q_index(index, code)  # q-node index in (r, c)
            return q_node_r - 1, q_node_c  # s-node index in (r, c)

        def _compass_q_direction(index, code):
            """if the code site index lies on border of lattice then give that direction, else empty string."""
            direction = {code.site_bounds[1]: 'n', 0: 's'}.get(index[1], '')
            direction += {0: 'w', code.site_bounds[0]: 'e'}.get(index[0], '')
            return direction

        def _compass_p_direction(index, code):
            """if the code plaquette index lies on border of lattice then give that direction, else empty string."""
            direction = {code.site_bounds[1]: 'n', -1: 's'}.get(index[1], '')
            direction += {-1: 'w', code.site_bounds[0]: 'e'}.get(index[0], '')
            return direction

        # extract code
        code = sample_pauli.code
        # initialise empty tn
        tn_max_r, _ = _rotate_q_index((0, 0), code)
        _, tn_max_c = _rotate_q_index((code.site_bounds[0], 0), code)
        tn = np.empty((tn_max_r + 1, tn_max_c + 1), dtype=object)
        # iterate over site and plaquette indices (including the -1 boundary plaquettes)
        max_site_x, max_site_y = code.site_bounds
        for code_index in itertools.product(range(-1, max_site_x + 1), range(-1, max_site_y + 1)):
            is_z_plaquette = code.is_z_plaquette(code_index)
            if code.is_in_site_bounds(code_index):
                q_node_index = _rotate_q_index(code_index, code)
                q_pauli = sample_pauli.operator(code_index)
                # h_node next to Z plaquettes, v_node otherwise
                if is_z_plaquette:
                    q_node = self.create_h_node(prob_dist, q_pauli,
                                                _compass_q_direction(code_index, code))
                else:
                    q_node = self.create_v_node(prob_dist, q_pauli,
                                                _compass_q_direction(code_index, code))
                tn[q_node_index] = q_node
            if code.is_in_plaquette_bounds(code_index):
                s_node_index = _rotate_p_index(code_index, code)
                s_node = self.create_s_node(_compass_p_direction(code_index, code))
                tn[s_node_index] = s_node
        return tn

    def modify_tn(self, tn, had_prob_dist, hadamard_mat, sample_pauli):
        """Return the network with qubit tensors on Hadamard-rotated sites rebuilt
        using the Hadamard-conjugated probability distribution.

        FIX: original was missing ``self`` and assigned ``tn[q_node_index] = q_node``
        even when ``hadamard_mat[code_index]`` was falsy, where ``q_node`` was unbound
        (NameError) or stale from a previous iteration. Only Hadamard-rotated sites
        are touched now.

        Note: The network contracts to the coset probability of the given sample_pauli.
        """

        def _rotate_q_index(index, code):
            """Convert code site index in format (x, y) to tensor network q-node index in format (r, c)"""
            site_x, site_y = index  # qubit index in (x, y)
            site_r, site_c = code.site_bounds[1] - site_y, site_x  # qubit index in (r, c)
            return code.site_bounds[0] - site_c + site_r, site_r + site_c  # q-node index in (r, c)

        def _compass_q_direction(index, code):
            """if the code site index lies on border of lattice then give that direction, else empty string."""
            direction = {code.site_bounds[1]: 'n', 0: 's'}.get(index[1], '')
            direction += {0: 'w', code.site_bounds[0]: 'e'}.get(index[0], '')
            return direction

        code = sample_pauli.code
        max_site_x, max_site_y = code.site_bounds
        for code_index in itertools.product(range(-1, max_site_x + 1), range(-1, max_site_y + 1)):
            is_z_plaquette = code.is_z_plaquette(code_index)
            # only rebuild tensors for in-bounds, Hadamard-rotated sites
            if code.is_in_site_bounds(code_index) and hadamard_mat[code_index]:
                q_node_index = _rotate_q_index(code_index, code)
                q_pauli = sample_pauli.operator(code_index)
                if is_z_plaquette:
                    q_node = self.create_h_node(had_prob_dist, q_pauli,
                                                _compass_q_direction(code_index, code))
                else:
                    q_node = self.create_v_node(had_prob_dist, q_pauli,
                                                _compass_q_direction(code_index, code))
                tn[q_node_index] = q_node
        return tn
def _coset_probabilities(self, prob_dist, sample_pauli):
    r"""
    Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer
    group :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the
    cosets :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

    :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
    :type prob_dist: 4-tuple of float
    :param sample_pauli: Sample color 666 Pauli.
    :type sample_pauli: Color666Pauli
    :return: Coset probabilities, sample Paulis (both in order I, X, Y, Z).
        E.g. (0.20, 0.10, 0.05, 0.10),
        (Color666Pauli(...), Color666Pauli(...), Color666Pauli(...), Color666Pauli(...))
    :rtype: 4-tuple of mp.mpf, 4-tuple of Color666Pauli
    """
    # NOTE: all list/tuples in this method are ordered (i, x, y, z)
    # empty log warnings: collected so all contraction failures emit one log record
    log_warnings = []
    # sample_paulis: one representative per coset
    sample_paulis = [
        sample_pauli,
        sample_pauli.copy().logical_x(),
        sample_pauli.copy().logical_x().logical_z(),
        sample_pauli.copy().logical_z()
    ]
    # tensor networks
    tns = [
        self._tnc.create_tn(prob_dist, pauli)
        for pauli in sample_paulis
    ]
    # probabilities
    coset_ps = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
    # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
    try:
        # note: cosets differ only in the first column, so contract the shared
        # columns 1..end of the I network once (right to left) and reuse as ket
        ket_i, mult = tt.mps2d.contract(tns[0], chi=self._chi, tol=self._tol,
                                        start=-1, stop=0, step=-1)  # tns.i
        coset_ps[0] = tt.mps.inner_product(tns[0][:, 0], ket_i) * mult  # coset_ps.i
        coset_ps[1] = tt.mps.inner_product(tns[1][:, 0], ket_i) * mult  # coset_ps.x
        coset_ps[2] = tt.mps.inner_product(tns[2][:, 0], ket_i) * mult  # coset_ps.y
        coset_ps[3] = tt.mps.inner_product(tns[3][:, 0], ket_i) * mult  # coset_ps.z
    except (ValueError, np.linalg.LinAlgError) as ex:
        # failed contraction leaves the 0.0 defaults in place; warn below
        log_warnings.append(
            'CONTRACTION FOR I COSET FAILED: {!r}'.format(ex))
    # treat nan as inf so it doesn't get lost (e.g. in the max over cosets taken by the caller)
    coset_ps = [
        mp.inf if mp.isnan(coset_p) else coset_p for coset_p in coset_ps
    ]
    # logging
    if log_warnings:
        log_data = {
            # instance
            'decoder': repr(self),
            # method parameters
            'prob_dist': prob_dist,
            'sample_pauli': pt.pack(sample_pauli.to_bsf()),
            # variables (convert to string because mp.mpf)
            'coset_ps': [repr(p) for p in coset_ps],
        }
        logger.warning('{}: {}'.format(
            ' | '.join(log_warnings), json.dumps(log_data, sort_keys=True)))
    # results
    return tuple(coset_ps), tuple(sample_paulis)
def _coset_probabilities(self, prob_dist, sample_pauli, perm_mat):
    r"""
    Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer
    group :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the
    cosets :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

    :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
    :type prob_dist: 4-tuple of float
    :param sample_pauli: Sample planar Pauli.
    :type sample_pauli: PlanarPauli
    :param perm_mat: Permutation matrix forwarded to the tensor-network creator.
    :return: Coset probabilities, sample Paulis (both in order I, X, Y, Z).
        E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
    :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
    """
    # NOTE: all list/tuples in this method are ordered (i, x, y, z)
    # empty log warnings: collected so all contraction failures emit one log record
    log_warnings = []
    # sample_paulis: one representative per coset
    sample_paulis = [
        sample_pauli,
        sample_pauli.copy().logical_x(),
        sample_pauli.copy().logical_x().logical_z(),
        sample_pauli.copy().logical_z()
    ]
    # tensor networks
    tns = [
        self._tnc.create_tn(prob_dist, pauli, perm_mat)
        for pauli in sample_paulis
    ]
    mask = self._tnc.create_mask(self._stp, tns[0].shape)  # same mask for all tns
    # probabilities
    coset_ps = (0.0, 0.0, 0.0, 0.0)  # default coset probabilities
    coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
    # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
    if self._mode in ('c', 'a'):
        # evaluate coset probabilities by column
        coset_ps_col = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        # note: I,X and Z,Y cosets differ only in the last column (logical X),
        # so each shared partial contraction (bra) is reused for two cosets
        try:
            bra_i, mult = tt.mps2d.contract(tns[0], chi=self._chi, tol=self._tol,
                                            stop=-1, mask=mask)  # tns.i
            coset_ps_col[0] = tt.mps.inner_product(
                bra_i, tns[0][:, -1]) * mult  # coset_ps_col.i
            coset_ps_col[1] = tt.mps.inner_product(
                bra_i, tns[1][:, -1]) * mult  # coset_ps_col.x
        except (ValueError, np.linalg.LinAlgError) as ex:
            log_warnings.append(
                'CONTRACTION BY COL FOR I COSET FAILED: {!r}'.format(ex))
        try:
            bra_z, mult = tt.mps2d.contract(tns[3], chi=self._chi, tol=self._tol,
                                            stop=-1, mask=mask)  # tns.z
            coset_ps_col[2] = tt.mps.inner_product(
                bra_z, tns[2][:, -1]) * mult  # coset_ps_col.y
            coset_ps_col[3] = tt.mps.inner_product(
                bra_z, tns[3][:, -1]) * mult  # coset_ps_col.z
        except (ValueError, np.linalg.LinAlgError) as ex:
            log_warnings.append(
                'CONTRACTION BY COL FOR Z COSET FAILED: {!r}'.format(ex))
        # treat nan as inf so it doesn't get lost
        coset_ps_col = [
            mp.inf if mp.isnan(coset_p) else coset_p
            for coset_p in coset_ps_col
        ]
    if self._mode in ('r', 'a'):
        # evaluate coset probabilities by row
        coset_ps_row = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        # transpose tensor networks (and mask) so row contraction reuses the column routine
        tns = [tt.mps2d.transpose(tn) for tn in tns]
        mask = None if mask is None else mask.transpose()
        # note: I,Z and X,Y cosets differ only in the last row (logical Z)
        try:
            bra_i, mult = tt.mps2d.contract(tns[0], chi=self._chi, tol=self._tol,
                                            stop=-1, mask=mask)  # tns.i
            coset_ps_row[0] = tt.mps.inner_product(
                bra_i, tns[0][:, -1]) * mult  # coset_ps_row.i
            coset_ps_row[3] = tt.mps.inner_product(
                bra_i, tns[3][:, -1]) * mult  # coset_ps_row.z
        except (ValueError, np.linalg.LinAlgError) as ex:
            log_warnings.append(
                'CONTRACTION BY ROW FOR I COSET FAILED: {!r}'.format(ex))
        try:
            bra_x, mult = tt.mps2d.contract(tns[1], chi=self._chi, tol=self._tol,
                                            stop=-1, mask=mask)  # tns.x
            coset_ps_row[1] = tt.mps.inner_product(
                bra_x, tns[1][:, -1]) * mult  # coset_ps_row.x
            coset_ps_row[2] = tt.mps.inner_product(
                bra_x, tns[2][:, -1]) * mult  # coset_ps_row.y
        except (ValueError, np.linalg.LinAlgError) as ex:
            log_warnings.append(
                'CONTRACTION BY ROW FOR X COSET FAILED: {!r}'.format(ex))
        # treat nan as inf so it doesn't get lost
        coset_ps_row = [
            mp.inf if mp.isnan(coset_p) else coset_p
            for coset_p in coset_ps_row
        ]
    if self._mode == 'c':
        coset_ps = coset_ps_col
    elif self._mode == 'r':
        coset_ps = coset_ps_row
    elif self._mode == 'a':
        # average coset probabilities elementwise over column and row estimates
        coset_ps = [
            sum(coset_p) / len(coset_p)
            for coset_p in zip(coset_ps_col, coset_ps_row)
        ]
    # logging
    if log_warnings:
        log_data = {
            # instance
            'decoder': repr(self),
            # method parameters
            'prob_dist': prob_dist,
            'sample_pauli': pt.pack(sample_pauli.to_bsf()),
            # variables (convert to string because mp.mpf)
            'coset_ps_col': [repr(p) for p in coset_ps_col] if coset_ps_col else None,
            'coset_ps_row': [repr(p) for p in coset_ps_row] if coset_ps_row else None,
            'coset_ps': [repr(p) for p in coset_ps],
        }
        logger.warning('{}: {}'.format(
            ' | '.join(log_warnings), json.dumps(log_data, sort_keys=True)))
    # results
    return tuple(coset_ps), tuple(sample_paulis)
def _coset_probabilities(self, prob_dist, sample_pauli):
    r"""
    Return the (approximate) probability and sample Pauli for the left coset :math:`fG` of the stabilizer
    group :math:`G` of the planar code with respect to the given sample Pauli :math:`f`, as well as for the
    cosets :math:`f\bar{X}G`, :math:`f\bar{Y}G` and :math:`f\bar{Z}G`.

    :param prob_dist: Tuple of probability distribution in the format (P(I), P(X), P(Y), P(Z)).
    :type prob_dist: 4-tuple of float
    :param sample_pauli: Sample planar Pauli.
    :type sample_pauli: PlanarPauli
    :return: Coset probabilities, sample Paulis (both in order I, X, Y, Z).
        E.g. (0.20, 0.10, 0.05, 0.10), (PlanarPauli(...), PlanarPauli(...), PlanarPauli(...), PlanarPauli(...))
    :rtype: 4-tuple of mp.mpf, 4-tuple of PlanarPauli
    """
    # NOTE: OPTIMIZED CONTRACTION.
    # The four coset networks are chosen so they differ only on the major
    # diagonal (column mode) or minor diagonal (row mode) of the rotated
    # tensor network. The coset-I network is partially contracted once from
    # the left (bra) and once from the right (ket) up to the columns that
    # can differ between cosets; each coset probability is then obtained by
    # combining bra + that coset's middle columns + ket and contracting the
    # small remaining network. Row mode applies the same routine after
    # transposing the networks (and mask). Logicals are applied along the
    # major/minor diagonal, spilling down the rightmost column (X) or across
    # the bottom row (Z) for non-square codes.
    # (The original ASCII contraction diagrams were mangled in transit and
    # are summarized by the prose above.)

    def _logical_x(pauli, major=True):
        """return pauli after applying X along the major/minor diagonal"""
        max_row, max_col = pauli.code.bounds
        # define site indices
        site_indices = itertools.chain(
            zip(range(max_row + 1), range(max_col + 1)),  # along major diagonal
            ((r, max_col) for r in range(max_col + 2, max_row + 1, 2)),  # down rightmost column
        )
        # if not major, switch to minor diagonal
        if not major:
            site_indices = ((max_row - r, c) for r, c in site_indices)
        # apply X on sites
        return pauli.site('X', *site_indices)

    def _logical_z(pauli, major=True):
        """return pauli after applying Z along the major/minor diagonal"""
        max_row, max_col = pauli.code.bounds
        # define site indices
        site_indices = itertools.chain(
            zip(range(max_row + 1), range(max_col + 1)),  # along major diagonal
            ((max_row, c) for c in range(max_row + 2, max_col + 1, 2)),  # across bottom row
        )
        # if not major, switch to minor diagonal
        if not major:
            site_indices = ((max_row - r, c) for r, c in site_indices)
        # apply Z on sites (comment fixed: this applies Z, not X)
        return pauli.site('Z', *site_indices)

    def _tn_contract_optimized(code, coset_ps, tns, mask):
        """update coset_ps with optimized contraction of tns"""
        # left_stop: first column on which cosets may differ
        left_stop = min(code.size) - 1
        # note: for optimization we contract tn_i from left to left_stop as bra common to all cosets
        bra_i, bra_i_mult = tt.mps2d.contract(tns[0], chi=self._chi, tol=self._tol,
                                              mask=mask, stop=left_stop)
        # right_stop: last column on which cosets may differ
        right_stop = tns[0].shape[1] - min(code.size)
        # note: for optimization we contract tn_i from right to right_stop as ket common to all cosets
        bra_i_cols = None  # (no-op placeholder removed)
        ket_i, ket_i_mult = tt.mps2d.contract(tns[0], chi=self._chi, tol=self._tol,
                                              mask=mask, start=-1, stop=right_stop, step=-1)
        # for each tn, combine and contract to coset probability
        for j in range(len(tns)):
            # combine bra_i, tn_j[:, left_stop:right_stop + 1], ket_i as partially contracted tn
            partial_tn = np.column_stack(
                (bra_i, tns[j][:, left_stop:right_stop + 1], ket_i))
            # slice mask to match partially contracted tn (same width: 1 + middle + 1)
            partial_mask = None if mask is None else mask[:, left_stop - 1:right_stop + 2]
            # contract
            result = tt.mps2d.contract(partial_tn, chi=self._chi, tol=self._tol,
                                       mask=partial_mask, step=-1)
            # multiply by multipliers accumulated during the bra/ket contractions
            coset_ps[j] = result * bra_i_mult * ket_i_mult

    # NOTE: all list/tuples in this method are ordered (i, x, y, z)
    # empty log warnings: collected so all contraction failures emit one log record
    log_warnings = []
    # tensor networks: tn_i is common to both contraction by column and by row (after transposition)
    tn_i = self._tnc.create_tn(prob_dist, sample_pauli)
    mask = self._tnc.create_mask(self._stp, tn_i.shape)  # same mask for all tns
    # probabilities
    coset_ps = (0.0, 0.0, 0.0, 0.0)  # default coset probabilities
    coset_ps_col = coset_ps_row = None  # undefined coset probabilities by column and row
    # N.B. After multiplication by mult, coset_ps will be of type mp.mpf so don't process with numpy!
    if self._mode in ('c', 'a'):
        # note: for optimization we choose cosets to differ only on the major diagonal
        sample_x = _logical_x(sample_pauli.copy())
        tns = [
            tn_i,
            self._tnc.create_tn(prob_dist, sample_x),
            self._tnc.create_tn(prob_dist, _logical_z(sample_x.copy())),
            self._tnc.create_tn(prob_dist, _logical_z(sample_pauli.copy()))
        ]
        # evaluate coset probabilities by column
        coset_ps_col = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        try:
            _tn_contract_optimized(sample_pauli.code, coset_ps_col, tns, mask)
        except (ValueError, np.linalg.LinAlgError) as ex:
            log_warnings.append(
                'CONTRACTION BY COL FAILED: {!r}'.format(ex))
        # treat nan as inf so it doesn't get lost
        coset_ps_col = [
            mp.inf if mp.isnan(coset_p) else coset_p
            for coset_p in coset_ps_col
        ]
    if self._mode in ('r', 'a'):
        # note: for optimization we choose cosets to differ only on the minor diagonal
        sample_x = _logical_x(sample_pauli.copy(), major=False)
        tns = [
            tn_i,
            self._tnc.create_tn(prob_dist, sample_x),
            self._tnc.create_tn(prob_dist,
                                _logical_z(sample_x.copy(), major=False)),
            self._tnc.create_tn(
                prob_dist, _logical_z(sample_pauli.copy(), major=False))
        ]
        # evaluate coset probabilities by row
        coset_ps_row = [0.0, 0.0, 0.0, 0.0]  # default coset probabilities
        # transpose tensor networks (and mask) so row contraction reuses the column routine
        tns = [tt.mps2d.transpose(tn) for tn in tns]
        mask = None if mask is None else mask.transpose()
        try:
            _tn_contract_optimized(sample_pauli.code, coset_ps_row, tns, mask)
        except (ValueError, np.linalg.LinAlgError) as ex:
            log_warnings.append(
                'CONTRACTION BY ROW FAILED: {!r}'.format(ex))
        # treat nan as inf so it doesn't get lost
        coset_ps_row = [
            mp.inf if mp.isnan(coset_p) else coset_p
            for coset_p in coset_ps_row
        ]
    if self._mode == 'c':
        coset_ps = coset_ps_col
    elif self._mode == 'r':
        coset_ps = coset_ps_row
    elif self._mode == 'a':
        # average coset probabilities elementwise over column and row estimates
        coset_ps = [
            sum(coset_p) / len(coset_p)
            for coset_p in zip(coset_ps_col, coset_ps_row)
        ]
    # logging
    if log_warnings:
        log_data = {
            # instance
            'decoder': repr(self),
            # method parameters
            'prob_dist': prob_dist,
            'sample_pauli': pt.pack(sample_pauli.to_bsf()),
            # variables (convert to string because mp.mpf)
            'coset_ps_col': [repr(p) for p in coset_ps_col] if coset_ps_col else None,
            'coset_ps_row': [repr(p) for p in coset_ps_row] if coset_ps_row else None,
            'coset_ps': [repr(p) for p in coset_ps],
        }
        logger.warning('{}: {}'.format(
            ' | '.join(log_warnings), json.dumps(log_data, sort_keys=True)))
    # results
    # NOTE: the returned representatives use the standard logicals; the diagonal
    # logicals used above differ from them only by stabilizers, so the cosets match
    sample_paulis = (sample_pauli,
                     sample_pauli.copy().logical_x(),
                     sample_pauli.copy().logical_x().logical_z(),
                     sample_pauli.copy().logical_z())
    return tuple(coset_ps), sample_paulis