def test_decode_result():
    """DecodeResult requires at least one of success/recovery; any such combination is valid."""
    # valid combinations: at least one of success and recovery specified -> no error raised
    valid_kwargs_list = [
        dict(success=True),
        dict(success=True, logical_commutations=np.array([0, 0])),
        dict(recovery=np.array([0, 0, 0, 0])),
        dict(success=True, recovery=np.array([0, 0, 0, 0])),
        dict(logical_commutations=np.array([0, 0]), recovery=np.array([0, 0, 0, 0])),
        dict(success=True, logical_commutations=np.array([0, 0]), recovery=np.array([0, 0, 0, 0])),
    ]
    for valid_kwargs in valid_kwargs_list:
        DecodeResult(**valid_kwargs)
    # invalid: neither success nor recovery specified -> QecsimError
    with pytest.raises(QecsimError):
        DecodeResult()  # raises expected error
    with pytest.raises(QecsimError):
        DecodeResult(logical_commutations=np.array([0, 0]))  # raises expected error
def _run_once_defp(mode, code, time_steps, error_model, decoder, error_probability, perm_rates, perm_mat,
                   perm_vec, code_name, layout, measurement_error_probability, rng):
    """Implements run_once and run_once_ftp functions for permutation-deformed codes.

    :param mode: 'ideal' (time_steps must be 1) or 'ftp' (fault-tolerant, multiple time steps).
    :param code: Stabilizer code.
    :param time_steps: Number of time steps.
    :param error_model: Error model; must implement probability_distribution().
    :param decoder: Decoder; decode() is used in 'ideal' mode, decode_ftp() in 'ftp' mode.
    :param error_probability: Overall probability of an error on a single qubit.
    :param perm_rates: Permutation rates (used to re-deform when code_name starts with 'random').
    :param perm_mat: Permutation matrix, passed to the decoder in 'ideal' mode.
    :param perm_vec: Permutation vector applied to the sampled Pauli error.
    :param code_name: Code name; a 'random' prefix triggers re-deformation via deform_matsvecs.
    :param layout: Layout, passed to deform_matsvecs.
    :param measurement_error_probability: Probability of a syndrome-bit flip per measurement.
    :param rng: numpy random Generator, or None to create a default Generator.
    :return: dict with keys error_weight, success, max_coset_p, logical_commutations, custom_values.
    """
    # assumptions
    assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp'
    if code_name[:6] == 'random':
        perm_mat, perm_vec = deform_matsvecs(code, decoder, error_model, perm_rates, code_name, layout)
    n_qubits = code.n_k_d[0]
    # resolve default rng once (fix: previously re-evaluated inside the time-step loop)
    rng = np.random.default_rng() if rng is None else rng
    # generate step_error, step_syndrome and step_measurement_error for each time step
    # (fix: lists initialized once so all time steps accumulate; previously they were reset every
    # iteration, losing earlier steps and breaking step_measurement_errors[t - 1] in ftp mode)
    step_errors, step_syndromes, step_measurement_errors = [], [], []
    for _ in range(time_steps):
        # step_error: random Pauli error, permuted according to the code deformation
        error_Pauli = rng.choice(
            ('I', 'X', 'Y', 'Z'), size=n_qubits,
            p=error_model.probability_distribution(error_probability))
        step_error = permute_error_Pauli(error_Pauli, perm_vec)
        step_errors.append(step_error)
        # step_syndrome: stabilizers that do not commute with the error
        step_syndrome = pt.bsp(step_error, code.stabilizers.T)
        step_syndromes.append(step_syndrome)
        # step_measurement_error: random syndrome bit flips based on measurement_error_probability
        if measurement_error_probability:
            step_measurement_error = rng.choice(
                (0, 1), size=step_syndrome.shape,
                p=(1 - measurement_error_probability, measurement_error_probability))
        else:
            step_measurement_error = np.zeros(step_syndrome.shape, dtype=int)
        step_measurement_errors.append(step_measurement_error)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: step_errors={}'.format(step_errors))
        logger.debug('run: step_syndromes={}'.format(step_syndromes))
        logger.debug('run: step_measurement_errors={}'.format(step_measurement_errors))
    # error: sum of errors at each time step
    error = np.bitwise_xor.reduce(step_errors)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: error={}'.format(error))
    # syndrome: apply measurement_error at times t-1 and t to syndrome at time t
    syndrome = []
    for t in range(time_steps):
        syndrome.append(step_measurement_errors[t - 1] ^ step_syndromes[t] ^ step_measurement_errors[t])
    # convert syndrome to 2d numpy array
    syndrome = np.array(syndrome)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: syndrome={}'.format(syndrome))
    # decoding: boolean or best match recovery operation based on decoder
    ctx = {'error_model': error_model, 'error_probability': error_probability, 'error': error,
           'step_errors': step_errors, 'measurement_error_probability': measurement_error_probability,
           'step_measurement_errors': step_measurement_errors}
    if mode == 'ideal':
        # convert syndrome to 1d and call decode
        decoding = decoder.decode(code, perm_mat, syndrome[0], **ctx)
    if mode == 'ftp':
        # call decode_ftp
        decoding = decoder.decode_ftp(code, time_steps, syndrome, **ctx)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: decoding={}'.format(decoding))
    # if decoding is not DecodeResult, convert to DecodeResult
    if not isinstance(decoding, DecodeResult):
        # decoding is recovery, so wrap in DecodeResult
        decoding = DecodeResult(recovery=decoding)  # raises error if recovery is None
    # extract outcomes from decoding
    success = decoding.success
    logical_commutations = decoding.logical_commutations
    custom_values = decoding.custom_values
    # fix: default max_coset_p so the returned data is well-defined when no recovery is given
    # (previously NameError if decoding.recovery was None)
    max_coset_p = None
    # if recovery specified, resolve success and logical_commutations
    if decoding.recovery is not None:
        # NOTE(review): recovery appears to be a (max_coset_p, recovery_bsf) pair here, unlike the
        # plain-bsf convention in _run_once_defN — confirm the decoder contract.
        recovered = decoding.recovery[1] ^ error
        max_coset_p = decoding.recovery[0]
        # success checks
        commutes_with_stabilizers = np.all(pt.bsp(recovered, code.stabilizers.T) == 0)
        if not commutes_with_stabilizers:
            log_data = {  # enough data to recreate issue
                # models
                'code': repr(code), 'error_model': repr(error_model), 'decoder': repr(decoder),
                # variables
                'error': pt.pack(error), 'recovery': pt.pack(decoding.recovery),
                # step variables
                'step_errors': [pt.pack(v) for v in step_errors],
                'step_measurement_errors': [pt.pack(v) for v in step_measurement_errors],
            }
            logger.warning('RECOVERY DOES NOT RETURN TO CODESPACE: {}'.format(
                json.dumps(log_data, sort_keys=True)))
        resolved_logical_commutations = pt.bsp(recovered, code.logicals.T)
        commutes_with_logicals = np.all(resolved_logical_commutations == 0)
        resolved_success = commutes_with_stabilizers and commutes_with_logicals
        # fill in unspecified outcomes
        success = resolved_success if success is None else success
        logical_commutations = resolved_logical_commutations if logical_commutations is None else logical_commutations
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: success={}'.format(success))
        logger.debug('run: logical_commutations={!r}'.format(logical_commutations))
        logger.debug('run: custom_values={!r}'.format(custom_values))
    data = {
        'error_weight': pt.bsf_wt(np.array(step_errors)),
        'success': bool(success),
        'max_coset_p': max_coset_p,
        'logical_commutations': logical_commutations,
        'custom_values': custom_values,
    }
    return data
def decode_ftp(self, code, time_steps, syndrome,
               error_model=BitPhaseFlipErrorModel(),  # noqa: B008
               error_probability=0.1,
               measurement_error_probability=0.1,
               step_measurement_errors=None, **kwargs):
    """
    See :meth:`qecsim.model.DecoderFTP.decode_ftp`

    Note:

    * The optional ``error_model`` and ``error_probability`` parameters define the prior
      probability distribution used by the decoding algorithm. Any provided error model must
      implement :meth:`~qecsim.model.ErrorModel.probability_distribution`.
    * A ``DecodeResult`` is always returned with ``success=None`` and
      ``logical_commutations=None`` (i.e. to be evaluated by the app from the returned
      ``recovery``), except on a time-like logical failure where ``success=False`` overrides
      the app evaluation. ``custom_values`` is a length-2 array flagging time-like logical
      failure through X and Z plaquettes respectively: [0, 0] none, [1, 0] X, [0, 1] Z,
      [1, 1] both.

    :param code: Rotated toric code.
    :type code: RotatedToricCode
    :param time_steps: Number of time steps.
    :type time_steps: int
    :param syndrome: Syndrome as binary array.
    :type syndrome: numpy.array (2d)
    :param error_model: Error model. (default=BitPhaseFlipErrorModel())
    :type error_model: ErrorModel
    :param error_probability: Overall probability of an error on a single qubit. (default=0.1)
    :type error_probability: float
    :param measurement_error_probability: Overall probability of an error on a single measurement. (default=0.1)
    :type measurement_error_probability: float
    :param step_measurement_errors: list of measurement error bits applied to step-syndromes index by time-step.
    :type step_measurement_errors: list of numpy.array (1d)
    :return: Decode result.
    :rtype: DecodeResult
    """
    # deduce bias (potentially overridden by eta)
    bias = self._bias(error_model)
    # start from the identity recovery with zero X/Z t-parities
    recovery = code.new_pauli().to_bsf()
    x_tparity = 0
    z_tparity = 0
    # SYMMETRY MATCHING: build graphs, minimum-weight match, cluster, fuse within clusters
    sym_graphs = self._graphs(code, time_steps, syndrome, error_probability,
                              measurement_error_probability, bias)
    sym_matches = self._matching(sym_graphs)
    del sym_graphs  # release heavy object
    sym_clusters = self._clusters(sym_matches)
    del sym_matches  # release heavy object
    fused_recovery, fused_x_tp, fused_z_tp = self._recovery_tparities(code, time_steps, sym_clusters)
    # accumulate symmetry recovery and t-parities
    recovery ^= fused_recovery
    x_tparity ^= fused_x_tp
    z_tparity ^= fused_z_tp
    # RESIDUAL CLUSTER SYNDROME: defects the symmetry recovery leaves unexplained
    residual_syndrome = np.bitwise_xor.reduce(syndrome) ^ pt.bsp(recovery, code.stabilizers.T)
    if bias is None and np.any(residual_syndrome):
        # with infinite bias every defect should have been resolved within clusters
        logger.warning('UNEXPECTED CLUSTER SYNDROME WITH INFINITE BIAS')
    # CLUSTER RECOVERY: minimum-weight match between clusters and fuse across them
    fusion_graph = self._cluster_graph(code, time_steps, sym_clusters)
    del sym_clusters  # release heavy object
    fusion_matches = self._matching([fusion_graph])
    del fusion_graph  # release heavy object
    fusion_recovery, fusion_x_tp, fusion_z_tp = self._cluster_recovery_tparities(
        code, time_steps, fusion_matches)
    del fusion_matches  # release heavy object
    # accumulate cluster recovery and t-parities
    recovery ^= fusion_recovery
    x_tparity ^= fusion_x_tp
    z_tparity ^= fusion_z_tp
    # T-PARITY TEST (skipped when configured to ignore, or with a single time step)
    if self._itp or time_steps == 1:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('decode: ignoring t-parity. itp={}, time_steps={}'.format(self._itp, time_steps))
    else:
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('decode: testing t-parity. itp={}, time_steps={}'.format(self._itp, time_steps))
        if not step_measurement_errors:
            raise QecsimError('Failed to test t-parity. step_measurement_errors not provided.')
        # combine recovery t-parities with those of the final-step measurement errors
        measurement_error_tps = self._measurement_error_tparities(code, step_measurement_errors[-1])
        total_tps = np.array((x_tparity, z_tparity)) ^ measurement_error_tps
        if np.any(total_tps != 0):
            # time-like logical failure: report failure with the t-parities as custom values
            return DecodeResult(success=False, recovery=recovery, custom_values=total_tps)
    # no time-like failure detected: return recovery with zero t-parity custom values
    return DecodeResult(recovery=recovery, custom_values=np.array((0, 0)))
PlanarCode(2, 2).new_pauli().to_bsf(), {'success': True, 'logical_commutations': np.array([0, 0]), 'custom_values': None}), # logical X failure (PlanarCode(2, 2), PlanarCode(2, 2).new_pauli().to_bsf(), PlanarCode(2, 2).new_pauli().logical_x().to_bsf(), {'success': False, 'logical_commutations': np.array([0, 1]), 'custom_values': None}), # logical Z failure (PlanarCode(2, 2), PlanarCode(2, 2).new_pauli().to_bsf(), PlanarCode(2, 2).new_pauli().logical_z().to_bsf(), {'success': False, 'logical_commutations': np.array([1, 0]), 'custom_values': None}), # identity via decode-result (PlanarCode(2, 2), PlanarCode(2, 2).new_pauli().to_bsf(), DecodeResult(recovery=PlanarCode(2, 2).new_pauli().to_bsf()), {'success': True, 'logical_commutations': np.array([0, 0]), 'custom_values': None}), # logical X failure via decode-result (PlanarCode(2, 2), PlanarCode(2, 2).new_pauli().to_bsf(), DecodeResult(recovery=PlanarCode(2, 2).new_pauli().logical_x().to_bsf()), {'success': False, 'logical_commutations': np.array([0, 1]), 'custom_values': None}), # logical Z failure via decode-result (PlanarCode(2, 2), PlanarCode(2, 2).new_pauli().to_bsf(), DecodeResult(recovery=PlanarCode(2, 2).new_pauli().logical_z().to_bsf()), {'success': False, 'logical_commutations': np.array([1, 0]), 'custom_values': None}), # identity but override success (PlanarCode(2, 2), PlanarCode(2, 2).new_pauli().to_bsf(), DecodeResult(success=False, recovery=PlanarCode(2, 2).new_pauli().to_bsf()),
def _run_once_defN(mode, code, hadamard_mat, time_steps, error_model, decoder, n_errors_code,
                   measurement_error_probability, rng):
    """Implements run_once and run_once_ftp functions for a fixed number of errors.

    :param mode: 'ideal' (time_steps must be 1) or 'ftp' (fault-tolerant, multiple time steps).
    :param code: Stabilizer code.
    :param hadamard_mat: Binary matrix marking sites whose X/Z error components are swapped.
    :param time_steps: Number of time steps.
    :param error_model: Error model; must implement probability_distribution().
    :param decoder: Decoder; decode() is used in 'ideal' mode, decode_ftp() in 'ftp' mode.
    :param n_errors_code: Target number of errors on the code (scales the sampled distribution).
    :param measurement_error_probability: Probability of a syndrome-bit flip per measurement.
    :param rng: numpy random Generator, or None to create a default Generator.
    :return: dict with keys error_weight, success, logical_commutations, custom_values.
    """
    # assumptions
    assert (mode == 'ideal' and time_steps == 1) or mode == 'ftp'
    # resolve default rng once (fix: consistent with _run_once_defp; previously unhandled None)
    rng = np.random.default_rng() if rng is None else rng
    # fix: n_qubits must be assigned BEFORE the fixed-weight error list is padded with 'I'
    # (previously used before assignment -> NameError)
    n_qubits = code.n_k_d[0]
    # fixed-weight Pauli list: X/Y/Z counts proportional to the error model distribution
    error_probability_sample = 0.1
    (pI, pX, pY, pZ) = error_model.probability_distribution(error_probability_sample)
    error_Pauli = []
    error_Pauli.extend('X' * round(n_errors_code * pX / error_probability_sample))
    error_Pauli.extend('Y' * round(n_errors_code * pY / error_probability_sample))
    error_Pauli.extend('Z' * round(n_errors_code * pZ / error_probability_sample))
    error_Pauli.extend('I' * (n_qubits - len(error_Pauli)))
    # hadamard_vec: per-qubit flag derived from hadamard_mat
    # (matrix rows appear to count down from the top of the lattice — TODO confirm layout)
    hadamard_vec = np.zeros(n_qubits)
    Nx = code.site_bounds[0] + 1
    Ny = code.site_bounds[1] + 1
    for i, j in np.ndindex(hadamard_mat.shape):
        if hadamard_mat[i, j] == 1:
            hadamard_vec[j + (Ny - 1 - i) * Nx] = 1
    # generate step_error, step_syndrome and step_measurement_error for each time step
    # (fix: lists initialized once so all time steps accumulate; previously they were reset every
    # iteration, losing earlier steps and breaking step_measurement_errors[t - 1] in ftp mode)
    step_errors, step_syndromes, step_measurement_errors = [], [], []
    for _ in range(time_steps):
        # step_error: shuffled fixed-weight error
        # (fix: shuffle via the provided rng for reproducibility; previously the module-level
        # shuffle ignored rng)
        rng.shuffle(error_Pauli)
        step_error = pt.pauli_to_bsf(''.join(error_Pauli))
        # swap X and Z components on hadamard-deformed qubits
        for i in range(n_qubits):
            if hadamard_vec[i] == 1:
                step_error[i], step_error[n_qubits + i] = step_error[n_qubits + i], step_error[i]
        step_errors.append(step_error)
        # step_syndrome: stabilizers that do not commute with the error
        step_syndrome = pt.bsp(step_error, code.stabilizers.T)
        step_syndromes.append(step_syndrome)
        # step_measurement_error: random syndrome bit flips based on measurement_error_probability
        if measurement_error_probability:
            step_measurement_error = rng.choice(
                (0, 1), size=step_syndrome.shape,
                p=(1 - measurement_error_probability, measurement_error_probability))
        else:
            step_measurement_error = np.zeros(step_syndrome.shape, dtype=int)
        step_measurement_errors.append(step_measurement_error)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: step_errors={}'.format(step_errors))
        logger.debug('run: step_syndromes={}'.format(step_syndromes))
        logger.debug('run: step_measurement_errors={}'.format(step_measurement_errors))
    # error: sum of errors at each time step
    error = np.bitwise_xor.reduce(step_errors)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: error={}'.format(error))
    # syndrome: apply measurement_error at times t-1 and t to syndrome at time t
    syndrome = []
    for t in range(time_steps):
        syndrome.append(step_measurement_errors[t - 1] ^ step_syndromes[t] ^ step_measurement_errors[t])
    # convert syndrome to 2d numpy array
    syndrome = np.array(syndrome)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: syndrome={}'.format(syndrome))
    # decoding: boolean or best match recovery operation based on decoder
    ctx = {'error_model': error_model, 'n_errors_code': n_errors_code, 'error': error,
           'step_errors': step_errors, 'measurement_error_probability': measurement_error_probability,
           'step_measurement_errors': step_measurement_errors}
    if mode == 'ideal':
        # convert syndrome to 1d and call decode
        decoding = decoder.decode(code, hadamard_mat, syndrome[0], **ctx)
    if mode == 'ftp':
        # call decode_ftp
        decoding = decoder.decode_ftp(code, time_steps, syndrome, **ctx)
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: decoding={}'.format(decoding))
    # if decoding is not DecodeResult, convert to DecodeResult
    if not isinstance(decoding, DecodeResult):
        # decoding is recovery, so wrap in DecodeResult
        decoding = DecodeResult(recovery=decoding)  # raises error if recovery is None
    # extract outcomes from decoding
    success = decoding.success
    logical_commutations = decoding.logical_commutations
    custom_values = decoding.custom_values
    # if recovery specified, resolve success and logical_commutations
    if decoding.recovery is not None:
        # recovered code
        recovered = decoding.recovery ^ error
        # success checks
        commutes_with_stabilizers = np.all(pt.bsp(recovered, code.stabilizers.T) == 0)
        if not commutes_with_stabilizers:
            log_data = {  # enough data to recreate issue
                # models
                'code': repr(code), 'error_model': repr(error_model), 'decoder': repr(decoder),
                # variables
                'error': pt.pack(error), 'recovery': pt.pack(decoding.recovery),
                # step variables
                'step_errors': [pt.pack(v) for v in step_errors],
                'step_measurement_errors': [pt.pack(v) for v in step_measurement_errors],
            }
            logger.warning('RECOVERY DOES NOT RETURN TO CODESPACE: {}'.format(
                json.dumps(log_data, sort_keys=True)))
        resolved_logical_commutations = pt.bsp(recovered, code.logicals.T)
        commutes_with_logicals = np.all(resolved_logical_commutations == 0)
        resolved_success = commutes_with_stabilizers and commutes_with_logicals
        # fill in unspecified outcomes
        success = resolved_success if success is None else success
        logical_commutations = resolved_logical_commutations if logical_commutations is None else logical_commutations
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('run: success={}'.format(success))
        logger.debug('run: logical_commutations={!r}'.format(logical_commutations))
        logger.debug('run: custom_values={!r}'.format(custom_values))
    data = {
        'error_weight': pt.bsf_wt(np.array(step_errors)),
        'success': bool(success),
        'logical_commutations': logical_commutations,
        'custom_values': custom_values,
    }
    return data