Example No. 1
def test_planar_mps_decoder_positive_max_coset_probability(mode):
    # parameters
    code = PlanarCode(9, 9)
    decoder = PlanarMPSDecoder(chi=48, mode=mode)
    error_model = BiasedDepolarizingErrorModel(bias=100)
    error_probability = 0.41
    # logged run values
    error = pt.unpack([
        "c96aa012210dc2254031f15d9ce80c871fb864b510c91086e112a018f8aece7406638fdc00",
        290
    ])
    syndrome = pt.unpack(["8f59cd273bd1c027b3b925085af85f2aaf22", 144])
    assert np.array_equal(syndrome, pt.bsp(error, code.stabilizers.T))
    # debug
    # print(code.ascii_art(syndrome, code.new_pauli(error)))
    # decode
    prob_dist = error_model.probability_distribution(error_probability)
    any_recovery = decoder.sample_recovery(code, syndrome)
    # coset probabilities
    coset_ps, recoveries = decoder._coset_probabilities(
        prob_dist, any_recovery)
    print('mode={}, coset_ps={}'.format(mode, coset_ps))
    max_coset_p, max_recovery = max(
        zip(coset_ps, recoveries),
        key=lambda coset_p_recovery: coset_p_recovery[0])
    success = np.all(
        pt.bsp(max_recovery.to_bsf() ^ error, code.logicals.T) == 0)
    print('### success=', success)
    assert mp.isfinite(
        max_coset_p
    ) and max_coset_p > 0, 'Max coset probability not as expected'
Example No. 2
def test_planar_mps_decoder_small_code_negative_coset_probability(chi, mode):
    # parameters
    code = PlanarCode(3, 3)
    decoder = PlanarMPSDecoder(chi=chi, mode=mode)
    error_model = DepolarizingErrorModel()
    error_probability = 0.1
    # logged run values
    error = pt.unpack(["e0048000", 26])
    syndrome = pt.bsp(error, code.stabilizers.T)
    # debug
    print()
    print(code.ascii_art(syndrome, code.new_pauli(error)))
    # decode
    prob_dist = error_model.probability_distribution(error_probability)
    any_recovery = decoder.sample_recovery(code, syndrome)
    # coset probabilities
    coset_ps, recoveries = decoder._coset_probabilities(
        prob_dist, any_recovery)
    print('chi={}, mode={}, coset_ps={}'.format(chi, mode, coset_ps))
    max_coset_p, max_recovery = max(
        zip(coset_ps, recoveries),
        key=lambda coset_p_recovery: coset_p_recovery[0])
    success = np.all(
        pt.bsp(max_recovery.to_bsf() ^ error, code.logicals.T) == 0)
    print('### success=', success)
    assert mp.isfinite(
        max_coset_p
    ) and max_coset_p > 0, 'Max coset probability not as expected'
    assert np.all(
        np.array(coset_ps) >= 0), 'At least one coset probability is negative'
Example No. 3
    def _residual_recovery(cls, code, syndrome):
        """
        Return residual recovery consistent with (lower boundary) syndrome (if possible).

        :param code: Planar code
        :type code: PlanarCode
        :param syndrome: Lower boundary syndrome as binary vector.
        :type syndrome: numpy.array (1d)
        :return: Residual recovery operation in binary symplectic form.
        :rtype: numpy.array (1d)
        """
        try:
            # get residual recovery from map
            return pt.unpack(
                cls._residual_syndrome_to_recovery_map(code)[pt.pack(
                    syndrome)])
        except KeyError:
            # N.B. this should not happen if a pure Y-noise model is used
            log_data = {
                # parameters
                'code': repr(code),
                'syndrome': pt.pack(syndrome),
            }
            logger.warning('RESIDUAL RECOVERY NOT FOUND: {}'.format(
                json.dumps(log_data, sort_keys=True)))
            # return identity
            return code.new_pauli().to_bsf()
Example No. 4
def test_pack_unpack_random():
    rng = np.random.default_rng()
    for length in range(0, 5000):
        binary_array = rng.choice(2, length)
        packed_binary_array = pt.pack(binary_array)
        unpacked_binary_array = pt.unpack(packed_binary_array)
        assert np.array_equal(binary_array, unpacked_binary_array), (
            'Unpacked binary array {} does not equal expected {}.'.format(
                unpacked_binary_array, binary_array))
Example No. 5
def test_planar_mps_decoder_decode_logging_nonpositivefinite_max_coset_probability(
        caplog):
    # taken from corner case mode='a' of test_planar_mps_decoder_positive_max_coset_probability
    code = PlanarCode(9, 9)
    decoder = PlanarMPSDecoder(chi=48, mode='a')
    error_model = BiasedDepolarizingErrorModel(bias=100)
    error_probability = 0.41
    error = pt.unpack([
        "c96aa012210dc2254031f15d9ce80c871fb864b510c91086e112a018f8aece7406638fdc00",
        290
    ])
    syndrome = pt.unpack(["8f59cd273bd1c027b3b925085af85f2aaf22", 144])
    assert np.array_equal(syndrome, pt.bsp(error, code.stabilizers.T))
    decoder.decode(code,
                   syndrome,
                   error_model=error_model,
                   error_probability=error_probability)
    assert 'NON-POSITIVE-FINITE MAX COSET PROBABILITY' in caplog.text, (
        'Non-positive-finite max coset probability not logged')
Example No. 6
def test_pack_unpack_65bit():
    binary_array = np.array([
        0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0,
        0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1,
        1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1
    ])
    print(len(binary_array))
    packed_binary_array = pt.pack(binary_array)
    unpacked_binary_array = pt.unpack(packed_binary_array)
    assert np.array_equal(binary_array, unpacked_binary_array), (
        'Unpacked binary array {} does not equal expected {}.'.format(
            unpacked_binary_array, binary_array))
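
The two round-trip tests above suggest the basic usage of pt.pack and pt.unpack: pack compresses a binary (bsf) array into a compact, hashable value and unpack restores it exactly, which is what lets packed syndromes key the residual-recovery map in Example No. 3 and Example No. 9. A minimal sketch, assuming (as the literals elsewhere in these examples indicate) that the packed form is a (hex_string, length) pair:

import numpy as np
from qecsim import paulitools as pt  # import path assumed from the examples above

binary_array = np.array([1, 0, 1, 1, 0, 0, 1, 0, 1, 1])
packed = pt.pack(binary_array)      # compact, hashable (hex_string, length) pair
restored = pt.unpack(packed)        # exact round trip, as the tests above assert
assert np.array_equal(binary_array, restored)

# being hashable, the packed form can serve as a dict key, e.g. syndrome -> recovery
recovery_map = {pt.pack(binary_array): pt.pack(restored)}
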
Example No. 7
def test_planar_mps_decoder_svd_does_not_converge():
    code = PlanarCode(21, 21)
    decoder = PlanarMPSDecoder(chi=4)
    error = pt.unpack((
        '001281500200080080000000000080001000000c0000002012000000801040004000000100000000004000002100000800800000000000'
        '02000100028022001000002044841000080080008110020000400801200000801040112008010004400000000000000002000000402201'
        '10040000000000000481000200000601000080080000000820200020000000008820000100000010045000004000010000000000000000'
        '40010000840010200008000400024000880000000004000000004000200890040001082000000000000002000000',
        1682))
    syndrome = pt.bsp(error, code.stabilizers.T)
    recovery = decoder.decode(code, syndrome)  # no error raised
    assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0), (
        'recovery ^ error ({} ^ {}) does not commute with stabilizers.'.format(
            recovery, error))
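
Examples No. 1, 2, 5 and 7 all follow the same workflow: obtain an error in binary symplectic form, derive its syndrome with pt.bsp against the code stabilizers, decode to a recovery operation, and check that recovery ^ error commutes with the stabilizers (and, for success, with the logicals). A minimal end-to-end sketch using only calls that appear in these examples; the import paths and the assumption that DepolarizingErrorModel.generate shares the signature shown in Example No. 8 are mine:

import numpy as np
from qecsim import paulitools as pt
from qecsim.models.generic import DepolarizingErrorModel  # import paths assumed
from qecsim.models.planar import PlanarCode, PlanarMPSDecoder

code = PlanarCode(5, 5)
decoder = PlanarMPSDecoder(chi=8)
error_model = DepolarizingErrorModel()
error_probability = 0.1
# sample an error and derive its syndrome (generate signature as in Example No. 8)
error = error_model.generate(code, error_probability)
syndrome = pt.bsp(error, code.stabilizers.T)
# decode to a recovery operation in binary symplectic form
recovery = decoder.decode(code, syndrome,
                          error_model=error_model,
                          error_probability=error_probability)
# recovery ^ error must commute with all stabilizers (return to the codespace)...
assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0)
# ...and decoding succeeds if it also commutes with all logical operators
success = np.all(pt.bsp(recovery ^ error, code.logicals.T) == 0)
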
Example No. 8
    def generate(self, code, probability, rng=None):
        """
        Return next error from file.

        :param code: Stabilizer code.
        :type code: StabilizerCode
        :param probability: Overall probability of an error on a single qubit.
        :type probability: float
        :param rng: Random number generator. (default=None, ignored)
        :type rng: numpy.random.Generator
        :return: Next error as binary symplectic vector.
        :rtype: numpy.array (1d)
        :raises ValueError: if probability does not equal probability given in file header.
        :raises EOFError: if next error unavailable.
        :raises ValueError: if file parsing fails.
        :raises ValueError: if length of error is inconsistent with number of qubits in code.
        """
        if probability != self._probability:
            raise ValueError("Probability does not match probability given in file header")
        # pull error, unpack and validate
        error = pt.unpack(self._json_lines.pull())
        if len(error) != 2 * code.n_k_d[0]:
            raise ValueError("Length of error inconsistent with number of qubits in code")
        return error
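
The docstring above describes a replay-style error source: errors are pulled from a file in packed form, and EOFError signals that no further errors are available. A sketch of consuming such a source in a decoding loop; the enclosing class is not shown above, so the name FileErrorModel, its import path and its constructor argument are assumptions for illustration:

import numpy as np
from qecsim import paulitools as pt
from qecsim.models.generic import FileErrorModel  # assumed home of the class excerpted above
from qecsim.models.planar import PlanarCode, PlanarMPSDecoder

code = PlanarCode(5, 5)
decoder = PlanarMPSDecoder(chi=8)
error_model = FileErrorModel('errors.jsonl')  # hypothetical constructor argument
probability = 0.1  # must match the probability recorded in the file header
while True:
    try:
        error = error_model.generate(code, probability)  # next logged error in bsf
    except EOFError:
        break  # no further errors available in the file
    syndrome = pt.bsp(error, code.stabilizers.T)
    recovery = decoder.decode(code, syndrome)
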
Example No. 9
    def _residual_syndrome_to_recovery_map(cls, code):
        """
        Return map of residual syndromes to residual recovery operators.

        NOTE: In order to reduce the number of residual syndromes, they are placed on the smaller boundary. That is,
        for a p x q code, if p < q then syndrome bits are pushed to the right boundary, otherwise to the lower boundary.

        NOTE: For memory efficiency, syndromes and recoveries are stored in the map in packed format, see
        :func:`qecsim.paulitools.pack`.

        :param code: Planar code
        :type code: PlanarCode
        :return: Residual syndrome to recovery operator map.
        :rtype: dict of packed bsf to packed bsf
        """
        # initialise return values
        residual_map = {}

        def _add(residual_recoveries, skip_trivial):
            """
            Add residual recoveries to residual_map, keyed by packed syndrome.

            NOTE: This method processes recoveries in chunks so it is memory efficient to pass an iterator.

            :param residual_recoveries: Residual recoveries in bsf
            :type residual_recoveries: iterator of numpy.array (1d)
            :param skip_trivial: Do not add recoveries with trivial syndromes
            :type skip_trivial: bool
            """
            for chunk in util.chunker(residual_recoveries,
                                      PlanarYDecoder.CHUNK_LEN):
                residual_recoveries_chunk = np.array(tuple(chunk))
                # residual syndromes
                residual_syndromes_chunk = pt.bsp(residual_recoveries_chunk,
                                                  code.stabilizers.T)
                # add to map
                for syndrome, recovery in zip(residual_syndromes_chunk,
                                              residual_recoveries_chunk):
                    if not (skip_trivial and not np.any(syndrome)):
                        residual_map.setdefault(pt.pack(syndrome),
                                                pt.pack(recovery))

        # N.B. identity is added at the end so that it is not included in the products of all combinations below
        # snake fill to trigger syndrome bits on one boundary
        if code.size[0] < code.size[1]:  # push syndrome bits to right boundary
            # add snake-fill-right operators for each edge on left boundary
            _add((cls._snake_fill(code, (start_r, 0), down=False)
                  for start_r in range(0, code.bounds[0] + 1, 2)),
                 skip_trivial=True)
        else:  # push syndrome bits to lower boundary
            # add snake-fill-down operators for each edge on upper boundary
            _add((cls._snake_fill(code, (0, start_c), down=True)
                  for start_c in range(0, code.bounds[1] + 1, 2)),
                 skip_trivial=True)
        # add product of all combinations of operators (any length, without repetition)
        operators = list(
            pt.unpack(o)
            for o in residual_map.values())  # copy of operators in map so far
        _add((np.sum(operator_set, axis=0) % 2
              for operator_set in itertools.chain.from_iterable(
                  itertools.combinations(operators, n_operators)
                  for n_operators in range(1,
                                           len(operators) + 1))),
             skip_trivial=True)
        # add identity
        _add([code.new_pauli().to_bsf()], skip_trivial=False)
        # return map
        return residual_map