Example #1
def remove_swap(circuit: Circuit) -> tuple[Circuit, dict[any, any]]:
    """
    Iteratively remove SWAPs from the circuit by actually swapping qubits.
    The output map will have the form new_qubit -> old_qubit.
    """

    # Initialize map
    _qubits_map = {q: q for q in circuit.all_qubits()}

    # Initialize circuit
    _circ = Circuit()

    # Get ideal SWAP
    _SWAP = Gate('SWAP').matrix()

    # For each gate in circuit ..
    for gate in circuit:

        # Check if gate is close to SWAP
        if gate.n_qubits == 2 and gate.qubits and np.allclose(
                gate.matrix(), _SWAP):

            # If true, swap qubits
            _q0 = next(k for k, v in _qubits_map.items() if v == gate.qubits[0])
            _q1 = next(k for k, v in _qubits_map.items() if v == gate.qubits[1])
            _qubits_map[_q0], _qubits_map[_q1] = _qubits_map[_q1], _qubits_map[
                _q0]

        # Otherwise, remap qubits and append
        else:

            # Get the right qubits
            _qubits = [
                next(k
                     for k, v in _qubits_map.items()
                     if v == q)
                for q in gate.qubits
            ]

            # Append to the new circuit
            _circ.append(gate.on(_qubits))

    # Return circuit and map
    return _circ, _qubits_map
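# Usage sketch (illustrative; not part of the original source), assuming the
# HybridQ `Circuit`/`Gate` API used throughout these examples:
#
#     from hybridq.circuit import Circuit
#     from hybridq.gate import Gate
#
#     c = Circuit([Gate('H', qubits=[0]),
#                  Gate('SWAP', qubits=[0, 1]),
#                  Gate('X', qubits=[1])])
#     c_no_swap, qubits_map = remove_swap(c)
#
# `c_no_swap` contains only the H and X gates (the SWAP has been absorbed by
# relabeling qubits), and `qubits_map` maps each new qubit label back to the
# original one.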
Example #2
def _simulate_tn_mpi(circuit: Circuit, initial_state: any, final_state: any,
                     optimize: any, backend: any, complex_type: any,
                     tensor_only: bool, verbose: bool, **kwargs):
    import quimb.tensor as tn
    import cotengra as ctg

    # Get MPI
    _mpi_comm = MPI.COMM_WORLD
    _mpi_size = _mpi_comm.Get_size()
    _mpi_rank = _mpi_comm.Get_rank()

    # Set default parameters
    kwargs.setdefault('compress', 2)
    kwargs.setdefault('simplify_tn', 'RC')
    kwargs.setdefault('max_iterations', 1)
    kwargs.setdefault('methods', ['kahypar', 'greedy'])
    kwargs.setdefault('max_time', 120)
    kwargs.setdefault('max_repeats', 16)
    kwargs.setdefault('minimize', 'combo')
    kwargs.setdefault('target_largest_intermediate', 0)
    kwargs.setdefault('max_largest_intermediate', 2**26)
    kwargs.setdefault('temperatures', [1.0, 0.1, 0.01])
    kwargs.setdefault('parallel', None)
    kwargs.setdefault('cotengra', {})
    kwargs.setdefault('max_n_slices', None)
    kwargs.setdefault('return_info', False)

    # Get random leaves_prefix
    leaves_prefix = ''.join(
        np.random.choice(list('abcdefghijklmnopqrstuvwxyz'), size=20))

    # Initialize info
    _sim_info = {}

    # Alias for tn
    if optimize == 'tn':
        optimize = 'cotengra'

    if isinstance(circuit, Circuit):

        if not kwargs['parallel']:
            kwargs['parallel'] = 1
        else:
            # If an explicit number of threads is not provided, use half of
            # the available cpus
            if kwargs['parallel'] is True:
                kwargs['parallel'] = cpu_count() // 2

        if optimize is not None and kwargs['parallel'] and kwargs[
                'max_iterations'] == 1:
            warn("Parallelization for MPI works for multiple iterations only. "
                 "For a better performance, use: 'max_iterations' > 1")

        # Get number of qubits
        qubits = circuit.all_qubits()
        n_qubits = len(qubits)

        # If initial/final state is None, set to all .'s
        initial_state = '.' * n_qubits if initial_state is None else initial_state
        final_state = '.' * n_qubits if final_state is None else final_state

        # Initial and final states must be valid strings
        for state, sname in [(initial_state, 'initial_state'),
                             (final_state, 'final_state')]:
            # Get alphabet
            from string import ascii_letters

            # Check if string
            if not isinstance(state, str):
                raise ValueError(f"'{sname}' must be a valid string.")

            # Deprecation warning for 'x'/'X' tokens
            if any(x in 'xX' for x in state):
                from warnings import warn

                # Define a new DeprecationWarning (so the warning is always
                # printed)
                class DeprecationWarning(Warning):
                    pass

                # Warn the user that '.' is used to represent open qubits
                warn(
                    "Since '0.6.3', letters in the alphabet are used to "
                    "trace selected qubits (including 'x' and 'X'). "
                    "Instead, '.' is used to represent an open qubit.",
                    DeprecationWarning)

            # Check only valid symbols are present
            if set(state).difference('01+-.' + ascii_letters):
                raise ValueError(f"'{sname}' contains invalid symbols.")

            # Check number of qubits
            if len(state) != n_qubits:
                raise ValueError(f"'{sname}' has the wrong number of qubits "
                                 f"(expected {n_qubits}, got {len(state)})")

        # Check memory
        if 2**(initial_state.count('.') +
               final_state.count('.')) > kwargs['max_largest_intermediate']:
            raise MemoryError("Memory for the given number of open qubits "
                              "exceeds the 'max_largest_intermediate'.")

        # Compress circuit
        if kwargs['compress']:
            if verbose:
                print(
                    f"Compress circuit (max_n_qubits={kwargs['compress']}): ",
                    end='',
                    file=stderr)
                _time = time()

            circuit = utils.compress(
                circuit,
                kwargs['compress']['max_n_qubits'] if isinstance(
                    kwargs['compress'], dict) else kwargs['compress'],
                verbose=verbose,
                **({
                    k: v
                    for k, v in kwargs['compress'].items()
                    if k != 'max_n_qubits'
                } if isinstance(kwargs['compress'], dict) else {}))

            circuit = Circuit(
                utils.to_matrix_gate(c, complex_type=complex_type)
                for c in circuit)
            if verbose:
                print(f"Done! ({time()-_time:1.2f}s)", file=stderr)

        # Get tensor network representation of circuit
        tensor, tn_qubits_map = utils.to_tn(circuit,
                                            return_qubits_map=True,
                                            leaves_prefix=leaves_prefix)

        # Define basic MPS
        _mps = {
            '0': np.array([1, 0]),
            '1': np.array([0, 1]),
            '+': np.array([1, 1]) / np.sqrt(2),
            '-': np.array([1, -1]) / np.sqrt(2)
        }

        # Attach initial/final state
        for state, ext in [(initial_state, 'i'), (final_state, 'f')]:
            for s, q in ((s, q) for s, q in zip(state, qubits) if s in _mps):
                inds = [f'{leaves_prefix}_{tn_qubits_map[q]}_{ext}']
                tensor &= tn.Tensor(_mps[s], inds=inds, tags=inds)

        # For each unique letter, apply trace
        for x in set(initial_state + final_state).difference(''.join(_mps) +
                                                             '.'):
            # Get indexes
            inds = [
                f'{leaves_prefix}_{tn_qubits_map[q]}_i'
                for s, q in zip(initial_state, qubits) if s == x
            ]
            inds += [
                f'{leaves_prefix}_{tn_qubits_map[q]}_f'
                for s, q in zip(final_state, qubits) if s == x
            ]

            # Apply trace
            tensor &= tn.Tensor(np.reshape([1] + [0] * (2**len(inds) - 2) +
                                           [1], (2, ) * len(inds)),
                                inds=inds)

        # Simplify if requested
        if kwargs['simplify_tn']:
            tensor.full_simplify_(kwargs['simplify_tn']).astype_(complex_type)
        else:
            # Otherwise, just convert to the given complex_type
            tensor.astype_(complex_type)

        # Get contraction from heuristic
        if optimize == 'cotengra' and kwargs['max_iterations'] > 0:

            # Set cotengra parameters
            def cotengra_params():
                # Get HyperOptimizer
                q = ctg.HyperOptimizer(methods=kwargs['methods'],
                                       max_time=kwargs['max_time'],
                                       max_repeats=kwargs['max_repeats'],
                                       minimize=kwargs['minimize'],
                                       progbar=False,
                                       parallel=False,
                                       **kwargs['cotengra'])

                # For some optlib, HyperOptimizer._retrieve_params is not
                # picklable. Let's fix the problem by hand.
                q._retrieve_params = __FunctionWrap(q._retrieve_params)

                # Return HyperOptimizer
                return q

            # Get target size
            tli = kwargs['target_largest_intermediate']

            with Pool(kwargs['parallel']) as pool:

                # Submit jobs
                _opts = [
                    cotengra_params() for _ in range(kwargs['max_iterations'])
                ]
                _map = [
                    pool.apply_async(tensor.contract, (all, ),
                                     dict(optimize=_opt, get='path-info'))
                    for _opt in _opts
                ]

                with tqdm(total=len(_map),
                          disable=not verbose,
                          desc='Collecting contractions') as pbar:

                    _old_completed = 0
                    while 1:

                        # Count number of completed
                        _completed = 0
                        for _w in _map:
                            _completed += _w.ready()
                            if _w.ready() and not _w.successful():
                                _w.get()

                        # Update pbar
                        pbar.update(_completed - _old_completed)
                        _old_completed = _completed

                        if _completed == len(_map):
                            break

                        # Wait
                        sleep(1)

                # Collect results
                _infos = [_w.get() for _w in _map]

            if kwargs['minimize'] == 'size':
                opt, info = sort(
                    zip(_opts, _infos),
                    key=lambda w:
                    (w[1].largest_intermediate, w[0].best['flops']))[0]
            else:
                opt, info = sort(
                    zip(_opts, _infos),
                    key=lambda w:
                    (w[0].best['flops'], w[1].largest_intermediate))[0]

        if optimize == 'cotengra':

            # Gather best contractions
            _cost = _mpi_comm.gather(
                (info.largest_intermediate, info.opt_cost, _mpi_rank), root=0)
            if _mpi_rank == 0:
                if kwargs['minimize'] == 'size':
                    _best_rank = sort(_cost, key=lambda x: (x[0], x[1]))[0][-1]
                else:
                    _best_rank = sort(_cost, key=lambda x: (x[1], x[0]))[0][-1]
            else:
                _best_rank = None
            _best_rank = _mpi_comm.bcast(_best_rank, root=0)

            if hasattr(opt, '_pool'):
                del (opt._pool)

            # Distribute opt/info
            tensor, info, opt = _mpi_comm.bcast((tensor, info, opt),
                                                root=_best_rank)

        # Just return tensor if required
        if tensor_only:
            if optimize == 'cotengra' and kwargs['max_iterations'] > 0:
                return tensor, (info, opt)
            else:
                return tensor

    else:

        # Set tensor
        tensor = circuit

        if len(optimize) == 2 and isinstance(
                optimize[0], PathInfo) and isinstance(
                    optimize[1], ctg.hyper.HyperOptimizer):

            # Get info and opt from optimize
            info, opt = optimize

            # Set optimization
            optimize = 'cotengra'

        else:

            # Get tensor and path
            tensor = circuit

    # Print some info
    if verbose and _mpi_rank == 0:
        print(
            f'Largest Intermediate: 2^{np.log2(float(info.largest_intermediate)):1.2f}',
            file=stderr)
        print(
            f'Max Largest Intermediate: 2^{np.log2(float(kwargs["max_largest_intermediate"])):1.2f}',
            file=stderr)
        print(f'Flops: 2^{np.log2(float(info.opt_cost)):1.2f}', file=stderr)

    if optimize == 'cotengra':

        if _mpi_rank == 0:

            # Get indexes
            _inds = tensor.outer_inds()

            # Get input indexes and output indexes
            _i_inds = sort([x for x in _inds if x[-2:] == '_i'],
                           key=lambda x: int(x.split('_')[1]))
            _f_inds = sort([x for x in _inds if x[-2:] == '_f'],
                           key=lambda x: int(x.split('_')[1]))

            # Get order
            _inds = [_inds.index(x) for x in _i_inds + _f_inds]

            # Get slice finder
            sf = ctg.SliceFinder(
                info,
                target_size=kwargs['max_largest_intermediate'],
                allow_outer=False)

            # Find slices
            with tqdm(kwargs['temperatures'], disable=not verbose,
                      leave=False) as pbar:
                for _temp in pbar:
                    pbar.set_description(f'Find slices (T={_temp})')
                    ix_sl, cost_sl = sf.search(temperature=_temp)

            # Get slice contractor
            sc = sf.SlicedContractor([t.data for t in tensor])

            # Make sure that no open qubits are sliced
            assert (not {
                ix: i
                for i, ix in enumerate(sc.output) if ix in sc.sliced
            })

            # Print some info
            if verbose:
                print(
                    f'Number of slices: 2^{np.log2(float(cost_sl.nslices)):1.2f}',
                    file=stderr)
                print(
                    f'Flops+Cuts: 2^{np.log2(float(cost_sl.total_flops)):1.2f}',
                    file=stderr)

            # Update infos
            _sim_info.update({
                'flops': info.opt_cost,
                'largest_intermediate': info.largest_intermediate,
                'n_slices': cost_sl.nslices,
                'total_flops': cost_sl.total_flops
            })

            # Get slices
            if cost_sl.nslices < _mpi_size:
                slices = list(range(cost_sl.nslices + 1)) + [None] * (
                    _mpi_size - cost_sl.nslices)
            else:
                slices = [
                    cost_sl.nslices / _mpi_size * i for i in range(_mpi_size)
                ] + [cost_sl.nslices]
            if not np.alltrue(
                [int(x) == x
                 for x in slices if x is not None]) or not np.alltrue([
                     slices[i] < slices[i + 1] for i in range(_mpi_size)
                     if slices[i] is not None and slices[i + 1] is not None
                 ]):
                raise RuntimeError('Something went wrong')

            # Convert all to integers
            slices = [int(x) if x is not None else None for x in slices]

        else:

            sc = slices = None

        # Distribute slicer and slices
        sc, slices = _mpi_comm.bcast((sc, slices), root=0)

        _n_slices = max(x for x in slices if x)
        if kwargs['max_n_slices'] and _n_slices > kwargs['max_n_slices']:
            raise RuntimeError(
                f'Too many slices ({_n_slices} > {kwargs["max_n_slices"]})')

        # Contract slices
        _tensor = None
        if slices[_mpi_rank] is not None and slices[_mpi_rank + 1] is not None:
            for i in tqdm(range(slices[_mpi_rank], slices[_mpi_rank + 1]),
                          desc='Contracting slices',
                          disable=not verbose,
                          leave=False):
                if _tensor is None:
                    _tensor = np.copy(sc.contract_slice(i, backend=backend))
                else:
                    _tensor += sc.contract_slice(i, backend=backend)

        # Gather tensors
        if _mpi_rank != 0:
            _mpi_comm.send(_tensor, dest=0, tag=11)
        elif _mpi_rank == 0:
            for i in tqdm(range(1, _mpi_size),
                          desc='Collecting tensors',
                          disable=not verbose):
                _p_tensor = _mpi_comm.recv(source=i, tag=11)
                if _p_tensor is not None:
                    _tensor += _p_tensor

        if _mpi_rank == 0:

            # Create map
            _map = ''.join([get_symbol(x) for x in range(len(_inds))])
            _map += '->'
            _map += ''.join([get_symbol(x) for x in _inds])

            # Reorder tensor
            tensor = contract(_map, _tensor)

            # Deprecated
            ## Reshape tensor
            #if _inds:
            #    if _i_inds and _f_inds:
            #        tensor = np.reshape(tensor,
            #                            (2**len(_i_inds), 2**len(_f_inds)))
            #    else:
            #        tensor = np.reshape(tensor,
            #                            (2**max(len(_i_inds), len(_f_inds)),))

        else:

            tensor = None

    else:

        if _mpi_rank == 0:

            # Contract tensor
            tensor = tensor.contract(optimize=optimize, backend=backend)

            if hasattr(tensor, 'inds'):

                # Get input indexes and output indexes
                _i_inds = sort([x for x in tensor.inds if x[-2:] == '_i'],
                               key=lambda x: int(x.split('_')[1]))
                _f_inds = sort([x for x in tensor.inds if x[-2:] == '_f'],
                               key=lambda x: int(x.split('_')[1]))

                # Transpose tensor
                tensor.transpose(*(_i_inds + _f_inds), inplace=True)

                # Deprecated
                ## Reshape tensor
                #if _i_inds and _f_inds:
                #    tensor = np.reshape(tensor,
                #                        (2**len(_i_inds), 2**len(_f_inds)))
                #else:
                #    tensor = np.reshape(tensor,
                #                        (2**max(len(_i_inds), len(_f_inds)),))

        else:

            tensor = None

    if kwargs['return_info']:
        return tensor, _sim_info
    else:
        return tensor
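# Illustrative sketch (not part of the original source) of how the slice
# boundaries computed above partition the sliced contraction across MPI ranks:
# rank r contracts slices[r] .. slices[r + 1] - 1, and ranks whose boundary is
# None do no work. This is a simplified version that assumes `n_slices` is a
# multiple of `mpi_size` in the second branch (the original uses true division
# and then checks that all boundaries are integers).
def _partition_slices_sketch(n_slices: int, mpi_size: int) -> list:
    if n_slices < mpi_size:
        # One slice per rank; the remaining ranks get None (no work)
        return list(range(n_slices + 1)) + [None] * (mpi_size - n_slices)
    # Otherwise, split the slices evenly among the ranks
    return [n_slices // mpi_size * i for i in range(mpi_size)] + [n_slices]

# For instance, 16 slices over 4 ranks gives the boundaries [0, 4, 8, 12, 16],
# while 2 slices over 4 ranks gives [0, 1, 2, None, None].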
Example #3
def add_dephasing_noise(circuit: Circuit,
                        probs: {float, list[float, ...], dict[any, float]},
                        pauli_indexes: {int, list[int, ...], dict[any,
                                                                  int]} = 3,
                        where: {'before', 'after'} = 'after',
                        verbose: bool = False):
    """
    Given a `Circuit`, add dephasing noise before or after each instance of a
    `Gate` (depending on `where`), acting independently on the qubits of the
    gate. Note, noise will not be added after an instance of `BaseChannel`.

    Parameters
    ----------
    circuit: Circuit
        The `Circuit` which will be modified. Note, a new `Circuit` is
        returned (this is not in place).
    probs: {float, list[float, ...], dict[any, float]}
        Dephasing probabilities for `circuit`. If `probs` is a single `float`,
        the same probability is applied to all qubits. If `probs` is a list,
        the k-th value is used as the probability for the k-th qubit. If `probs`
        is a `dict`, `probs[q]` will be used as probability for the qubit `q`.
        If `probs[q]` is missing, the probability for a qubit `q` will fall
        back to `probs[any]` (if provided).
    pauli_indexes: {int, list[int, ...], dict[any, int]}
        Pauli indexes representing the dephasing axis (Pauli matrix).  If
        `pauli_indexes` is a single `int`, the same axis is applied to all
        qubits. If `pauli_indexes` is a list, the k-th value is used as the
        axis for the k-th qubit. If `pauli_indexes` is a `dict`,
        `pauli_indexes[q]` will be used as axis for the qubit `q`. If
        `pauli_indexes[q]` is missing, the axis for a qubit `q` will fall back
        to `pauli_indexes[any]` (if provided).
    where: {'before', 'after'}
        Add noise either `'before'` or `'after'` every gate (default: `'after'`).
    verbose: bool, optional
        Verbose output.
    """
    from hybridq.circuit import Circuit

    # Check 'where'
    if where not in ['before', 'after']:
        raise ValueError("'where' can only be either 'before' or 'after'")

    # Convert circuit
    circuit = Circuit(circuit)

    # Get all qubits
    qubits = circuit.all_qubits()

    # Convert gammas and probs
    probs = channel.__get_params(qubits, probs, value_type=float)
    pauli_indexes = channel.__get_params(qubits, pauli_indexes, value_type=int)

    # Define how to add noise
    def _add_noise(g):
        # Get gammas and probs
        _probs = {q: probs[q] for q in g.qubits}
        _axis = {q: pauli_indexes[q] for q in g.qubits}

        # Get noise
        noise = channel.LocalDephasingChannel(g.qubits,
                                              p=_probs,
                                              pauli_index=_axis)

        # Return gates
        return [g] if isinstance(
            g, BaseChannel) else ((g, ) +
                                  noise if where == 'after' else noise + (g, ))

    # Update circuit
    return SuperCircuit(g for w in tqdm(
        circuit, disable=not verbose, desc='Add dephasing noise')
                        for g in _add_noise(w))
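# Usage sketch (illustrative; not part of the original source), assuming the
# HybridQ `Circuit`/`Gate` API used throughout these examples:
#
#     from hybridq.circuit import Circuit
#     from hybridq.gate import Gate
#
#     c = Circuit([Gate('H', qubits=[0]), Gate('CX', qubits=[0, 1])])
#
#     # Same dephasing probability on every qubit, along the default Z axis
#     # (pauli_indexes=3), with noise added after every gate
#     noisy = add_dephasing_noise(c, probs=1e-2)
#
#     # Per-qubit probabilities via a dict, with `probs[any]` as the fallback,
#     # adding the noise before every gate instead
#     noisy = add_dephasing_noise(c, probs={0: 1e-2, any: 1e-3}, where='before')
#
# Every gate in the returned circuit is then preceded/followed by a
# `LocalDephasingChannel` acting independently on the gate's qubits.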
Example #4
def expectation_value(state: Array,
                      op: Circuit,
                      qubits_order: iter[any],
                      complex_type: any = 'complex64',
                      backend: any = 'numpy',
                      verbose: bool = False,
                      **kwargs) -> complex:
    """
    Compute expectation value of an operator given a quantum state.

    Parameters
    ----------
    state: Array
        Quantum state to use to compute the expectation value of the operator
        `op`.
    op: Circuit
        Quantum operator to use to compute the expectation value.
    qubits_order: iter[any]
        Order of qubits used to map `Circuit.qubits` to `state`.
    complex_type: any, optional
        Complex type to use to compute the expectation value.
    backend: any, optional
        Backend used to compute the quantum state. Backend must have
        `tensordot`, `transpose` and `einsum` methods.
    verbose: bool, optional
        Verbose output.

    Returns
    -------
    complex
        The expectation value of the operator `op` given `state`.

    Other Parameters
    ----------------
    `expectation_value` accepts all valid parameters for `simulate`.

    See Also
    --------
    `simulate`

    Example
    -------
    >>> op = Circuit([
    >>>     Gate('H', qubits=[32]),
    >>>     Gate('CX', qubits=[32, 42]),
    >>>     Gate('RX', qubits=[12], params=[1.32])
    >>> ])
    >>> expectation_value(
    >>>     state=prepare_state('+0-'),
    >>>     op=op,
    >>>     qubits_order=[12, 42, 32],
    >>> )
    array(0.55860883-0.43353909j)
    """

    # Fix remove_id_gates
    kwargs['remove_id_gates'] = False

    # Convert state to a numpy array
    state = np.asarray(state)

    # Get number of qubits
    n_qubits = state.ndim

    # Convert qubits_order to a list
    qubits_order = list(qubits_order)

    # Check length of qubits_order
    if len(qubits_order) != n_qubits:
        raise ValueError(
            "'qubits_order' must have the same number of qubits as 'state'.")

    # Check that qubits in op are a subset of qubits_order
    if set(op.all_qubits()).difference(qubits_order):
        raise ValueError("'op' has qubits not included in 'qubits_order'.")

    # Add Gate('I') to op for unused qubits
    op = op + [
        Gate('I', qubits=[q])
        for q in set(qubits_order).difference(op.all_qubits())
    ]

    # Simulate op with given state
    _state = simulate(op,
                      initial_state=state,
                      optimize='evolution',
                      complex_type=complex_type,
                      backend=backend,
                      verbose=verbose,
                      **kwargs)

    # Return expectation value
    return np.real_if_close(np.sum(_state * state.conj()))
Example #5
def to_qasm(circuit: Circuit, qubits_map: dict[any, int] = None) -> str:
    """
    Convert a `Circuit` to QASM language.

    Parameters
    ----------
    circuit: Circuit
        `Circuit` to convert to QASM language.
    qubits_map: dict[any, int], optional
        If provided, `qubits_map` maps qubit indexes in `Circuit` to qubit
        indexes in QASM. Otherwise, indexes are assigned to QASM qubits using
        the `Circuit.all_qubits()` order.

    Returns
    -------
    str
        String representing the QASM circuit.

    Notes
    -----

    The QASM language used in HybridQ is compatible with the standard QASM.
    However, HybridQ introduces a few extensions, which are recognized by the
    parser using ``#@`` at the beginning of the line (``#`` at the beginning of
    the line represents a general comment in QASM). At the moment, the
    following QASM extensions are supported:

    * **qubits**, used to store `qubits_map`,
    * **power**, used to store the power of the gate,
    * **tags**, used to store the tags associated with the gate,
    * **U**, used to store the matrix representation of the gate if the gate name is `MATRIX`.

    If `Gate.qubits` are not specified, a single `.` is used to represent the
    missing qubits. If `Gate.params` are missing, parameters are just omitted.

    Example
    -------
    >>> from hybridq.extras.qasm import to_qasm
    >>> print(to_qasm(Circuit(Gate('H', [q]) for q in range(10))))
    10
    #@ qubits =
    #@ {
    #@   "0": "0",
    #@   "1": "1",
    #@   "2": "2",
    #@   "3": "3",
    #@   "4": "4",
    #@   "5": "5",
    #@   "6": "6",
    #@   "7": "7",
    #@   "8": "8",
    #@   "9": "9"
    #@ }
    h 0
    h 1
    h 2
    h 3
    h 4
    h 5
    h 6
    h 7
    h 8
    h 9
    >>> c = Circuit()
    >>> c.append(Gate('RX', tags={'params': False, 'qubits': False}))
    >>> c.append(Gate('RY', params=[1.23], tags={'params': True, 'qubits': False}))
    >>> c.append(Gate('RZ', qubits=[42], tags={'params': False, 'qubits': True})**1.23)
    >>> c.append(Gate('MATRIX', U=Gate('H').matrix()))
    >>> print(to_qasm(c))
    1
    #@ qubits =
    #@ {
    #@   "0": "42"
    #@ }
    #@ tags =
    #@ {
    #@   "params": false,
    #@   "qubits": false
    #@ }
    rx .
    #@ tags =
    #@ {
    #@   "params": true,
    #@   "qubits": false
    #@ }
    ry . 1.23
    #@ tags =
    #@ {
    #@   "params": false,
    #@   "qubits": true
    #@ }
    #@ power = 1.23
    rz 0
    #@ U =
    #@ [
    #@   [
    #@     "0.7071067811865475",
    #@     "0.7071067811865475"
    #@   ],
    #@   [
    #@     "0.7071067811865475",
    #@     "-0.7071067811865475"
    #@   ]
    #@ ]
    matrix .
    """

    # Initialize output
    _out = ''

    # Get qubits map
    if qubits_map is None:
        qubits_map = {
            q: x
            for x, q in enumerate(
                circuit.all_qubits(ignore_missing_qubits=True))
        }

    # Get inverse map
    inv_qubits_map = {x: str(q) for q, x in qubits_map.items()}

    # Dump number of qubits
    _out += f'{len(qubits_map)}\n'

    # Dump map
    _out += '#@ qubits = \n'
    _out += '\n'.join(
        ['#@ ' + x for x in json.dumps(inv_qubits_map, indent=2).split('\n')])
    _out += '\n'

    for gate in circuit:

        # Store matrix
        if gate.name == 'MATRIX':
            _out += '#@ U = \n'
            _out += '\n'.join('#@ ' + x
                              for x in json.dumps([[str(y) for y in x]
                                                   for x in gate.Matrix],
                                                  indent=2).split('\n'))
            _out += '\n'

        # Dump tags
        if gate.provides('tags') and gate.tags is not None:
            _out += '#@ tags = \n'
            _out += '\n'.join([
                '#@ ' + x for x in json.dumps(gate.tags, indent=2).split('\n')
            ])
            _out += '\n'

        # Dump power
        if gate.provides('power') and gate.power != 1:
            _out += f'#@ power = {gate.power}\n'

        # Dump conjugation
        if gate.provides('is_conjugated') and gate.is_conjugated():
            _out += f'#@ conj\n'

        # Dump transposition
        if gate.provides('is_transposed') and gate.is_transposed():
            _out += f'#@ T\n'

        # Dump name
        _out += gate.name.lower()

        # Dump qubits
        if gate.provides('qubits') and gate.qubits is not None:
            _out += ' ' + ' '.join((str(qubits_map[x]) for x in gate.qubits))
        else:
            _out += ' .'

        # Dump params
        if gate.provides('params'):
            if gate.params is not None:
                _out += ' ' + ' '.join((str(x) for x in gate.params))
            else:
                raise ValueError('Not yet implemented.')

        # Add newline
        _out += '\n'

    return _out
Example #6
def update_pauli_string(circuit: Circuit,
                        pauli_string: {Circuit, dict[str, float]},
                        phase: float = 1,
                        parallel: {bool, int} = False,
                        return_info: bool = False,
                        use_mpi: bool = None,
                        compress: int = 4,
                        simplify: bool = True,
                        remove_id_gates: bool = True,
                        float_type: any = 'float32',
                        verbose: bool = False,
                        **kwargs) -> defaultdict:
    """
    Evolve the density matrix according to `circuit` using `pauli_string` as
    the initial product state. The evolved density matrix will be represented
    as a set of different Pauli strings, each of them with a different phase,
    such that their sum corresponds to the evolved density matrix. The number
    of branches depends on the number of non-Clifford gates in `circuit`.

    Parameters
    ----------
    circuit: Circuit
        Circuit to use to evolve `pauli_string`.
    pauli_string: {Circuit, dict[str, float]}
        Pauli string to be evolved. `pauli_string` must be a `Circuit` composed
        of single qubit Pauli `Gate`s (that is, either `Gate('I')`, `Gate('X')`,
        `Gate('Y')` or `Gate('Z')`), each one acting on every qubit of
        `circuit`. If a dictionary is provided, every key of `pauli_string` must
        be a valid Pauli string. The size of each Pauli string must be equal to
        the number of qubits in `circuit`. Values in `pauli_string` will be
        used as the initial phase for the given string.
    phase: float, optional
        Initial phase for `pauli_string`.
    atol: float, optional
        Discard all Pauli strings that have an absolute amplitude smaller than
        `atol`.
    parallel: int, optional
        Parallelize simulation (where possible). If `True`, the number of
        available cpus is used. Otherwise, a `parallel` number of threads is
        used.
    return_info: bool
        Return extra information collected during the evolution.
    use_mpi: bool, optional
        Use `MPI` if available. Unless `use_mpi=False`, `MPI` will be used if
        detected (for instance, if `mpiexec` is used to call HybridQ). If
        `use_mpi=True`, force the use of `MPI` (in case `MPI` is not
        automatically detected).
    compress: int, optional
        Compress `Circuit` using `utils.compress` prior to the simulation.
    simplify: bool, optional
        Simplify `Circuit` using `utils.simplify` prior to the simulation.
    remove_id_gates: bool, optional
        Remove `ID` gates prior to the simulation.
    float_type: any, optional
        Float type to use for the simulation.
    verbose: bool, optional
        Verbose output.

    Returns
    -------
    dict[str, float] [, dict[any, any]]
        If `return_info=False`, `update_pauli_string` returns a `dict` of Pauli
        strings and the corresponding amplitudes. The full density matrix can
        be reconstructed by summing over all the Pauli strings, weighted with
        the corresponding amplitudes. If `return_info=True`, information
        gathered during the simulation is also returned.

    Other Parameters
    ----------------
    eps: float, optional (default: auto)
        Do not branch if the branch weight for the given non-Clifford operation
        is smaller than `eps`. `eps=1e-7` if `float_type=float32`, otherwise
        `eps=1e-8` if `float_type=float64`.
    atol: float, optional (default: auto)
        Remove elements from the final state if such element has an absolute
        amplitude smaller than `atol`. `atol=1e-8` if `float_type=float32`,
        otherwise `atol=1e-12` if `float_type=float64`.
    branch_atol: float, optional
        Stop branching if the branch absolute amplitude is smaller than
        `branch_atol`. If not specified, it will be equal to `atol`.
    max_breadth_first_branches: int (default: auto)
        Max number of branches to collect using breadth-first search. The
        number of branches collected during the breadth-first phase will be
        split among the different threads (or nodes if using `MPI`).
    n_chunks: int (default: auto)
        Number of chunks to divide the branches obtained during the
        breadth-first phase. The default value is twelve times the number of
        threads.
    max_virtual_memory: float (default: 80)
        Max virtual memory (%) that can be used during the simulation. If the
        used virtual memory is above `max_virtual_memory`, `update_pauli_string`
        will raise an error.
    sleep_time: float (default: 0.1)
        Completion of parallel processes is checked every `sleep_time`
        seconds.

    Example
    -------
    >>> from hybridq.circuit import utils
    >>> import numpy as np
    >>>
    >>> # Define circuit
    >>> circuit = Circuit(
    >>>     [Gate('X', qubits=[0])**1.2,
    >>>      Gate('ISWAP', qubits=[0, 1])**2.3])
    >>>
    >>> # Define Pauli string
    >>> pauli_string = Circuit([Gate('Z', qubits=[1])])
    >>>
    >>> # Get density matrix decomposed in Pauli strings
    >>> dm = clifford.update_pauli_string(circuit=circuit,
    >>>                                   pauli_string=pauli_string,
    >>>                                   float_type='float64')
    >>>
    >>> dm
    defaultdict(<function hybridq.circuit.simulation.clifford.update_pauli_string.<locals>._db_init.<locals>.<lambda>()>,
                {'IZ': 0.7938926261462365,
                 'YI': -0.12114687473997318,
                 'ZI': -0.166744368113685,
                 'ZX': 0.2377641290737882,
                 'YX': -0.3272542485937367,
                 'XY': -0.40450849718747345})
    >>> # Reconstruct density matrix
    >>> U = sum(phase * np.kron(Gate(g1).matrix(),
    >>>                         Gate(g2).matrix()) for (g1, g2), phase in dm.items())
    >>>
    >>> U
    array([[ 0.62714826+0.j        ,  0.23776413+0.j        ,
             0.        +0.12114687j,  0.        +0.73176275j],
           [ 0.23776413+0.j        , -0.96063699+0.j        ,
             0.        -0.07725425j,  0.        +0.12114687j],
           [ 0.        -0.12114687j,  0.        +0.07725425j,
             0.96063699+0.j        , -0.23776413+0.j        ],
           [ 0.        -0.73176275j,  0.        -0.12114687j,
            -0.23776413+0.j        , -0.62714826+0.j        ]])
    >>> np.allclose(utils.matrix(circuit + pauli_string + circuit.inv()),
    >>>             U,
    >>>             atol=1e-8)
    True
    >>> U[0b11, 0b11]
    (-0.6271482580325515+0j)
    """

    # ==== Set default parameters ====

    # If use_mpi is None, use MPI if it has been detected
    if use_mpi is None and _detect_mpi:

        # Warn that MPI is used because detected
        warn("MPI has been detected. Using MPI.")

        # Set MPI to true
        use_mpi = True

    # If parallel==True, use number of cpus
    if type(parallel) is bool:
        parallel = cpu_count() if parallel else 1
    else:
        parallel = int(parallel)
        if parallel <= 0:
            warn("'parallel' must be a positive integer. Setting parallel=1")
            parallel = 1

    # utils.globalize may not work properly on MacOSX systems. For now, let's
    # disable parallelization for MacOSX
    if parallel > 1:
        from platform import system
        from warnings import warn

        if system() == 'Darwin':
            warn(
                "'utils.globalize' may not work on MacOSX. Disabling parallelization."
            )
            parallel = 1

    # Fix atol
    if 'atol' in kwargs:
        atol = kwargs['atol']
        del (kwargs['atol'])
    else:
        float_type = np.dtype(float_type)
        if float_type == np.float64:
            atol = 1e-12
        elif float_type == np.float32:
            atol = 1e-8
        else:
            raise ValueError(f'Unsupported array dtype: {float_type}')

    # Fix branch_atol
    if 'branch_atol' in kwargs:
        branch_atol = kwargs['branch_atol']
        del (kwargs['branch_atol'])
    else:
        branch_atol = atol

    # Fix eps
    if 'eps' in kwargs:
        eps = kwargs['eps']
        del (kwargs['eps'])
    else:
        float_type = np.dtype(float_type)
        if float_type == np.float64:
            eps = 1e-8
        elif float_type == np.float32:
            eps = 1e-7
        else:
            raise ValueError(f'Unsupported array dtype: {float_type}')

    # Set default db initialization
    def _db_init():
        return defaultdict(int)

    # Set default transform
    def _transform(ps):

        # Join bitstring
        return ''.join({_X: 'X', _Y: 'Y', _Z: 'Z', _I: 'I'}[op] for op in ps)

    # Set default collect
    def _collect(db, ps, ph):

        # Update final paulis
        db[ps] += ph

        # Remove elements close to zero
        if abs(db[ps]) < atol:
            del (db[ps])

    # Set default merge
    def _merge(db, db_new, use_tuple=False):

        # Update final paulis
        for ps, ph in db_new if use_tuple else db_new.items():

            # Collect results
            kwargs['collect'](db, ps, ph)

    kwargs.setdefault('max_breadth_first_branches', min(4 * 12 * parallel,
                                                        2**14))
    kwargs.setdefault('n_chunks', 12 * parallel)
    kwargs.setdefault('max_virtual_memory', 80)
    kwargs.setdefault('sleep_time', 0.1)
    kwargs.setdefault('collect', _collect)
    kwargs.setdefault('transform', _transform)
    kwargs.setdefault('merge', _merge)
    kwargs.setdefault('db_init', _db_init)

    # Get MPI info
    if use_mpi:
        from mpi4py import MPI
        _mpi_comm = MPI.COMM_WORLD
        _mpi_size = _mpi_comm.Get_size()
        _mpi_rank = _mpi_comm.Get_rank()
        kwargs.setdefault('max_breadth_first_branches_mpi',
                          min(_mpi_size * 2**9, 2**14))
        kwargs.setdefault('mpi_chunk_max_size', 2**20)
        kwargs.setdefault('mpi_merge', True)

    # Get complex_type from float_type
    complex_type = (np.array([1], dtype=float_type) +
                    1j * np.array([1], dtype=float_type)).dtype

    # Local verbose
    _verbose = verbose and (not use_mpi or _mpi_rank == 0)

    # =========== CHECKS =============

    if type(pauli_string) == Circuit:
        from collections import Counter

        # Initialize error message
        _err_msg = "'pauli_string' must contain only I, X, Y and Z gates acting on different qubits."

        # Check qubits match with circuit
        if any(g.n_qubits != 1 or not g.qubits for g in pauli_string) or set(
                pauli_string.all_qubits()).difference(
                    circuit.all_qubits()) or set(
                        Counter(gate.qubits[0]
                                for gate in pauli_string).values()).difference(
                                    [1]):
            raise ValueError(_err_msg)

        # Get ideal paulis
        _ig = list(map(lambda n: Gate(n).matrix(), 'IXYZ'))

        # Get the correct pauli
        def _get_pauli(gate):
            # Get matrix
            U = gate.matrix()

            # Get right pauli
            p = next(
                (p for x, p in enumerate('IXYZ') if np.allclose(_ig[x], U)),
                None)

            # If not found, raise error
            if not p:
                raise ValueError(_err_msg)

            # Otherwise, return pauli
            return Gate(p, qubits=gate.qubits)

        # Reconstruct paulis
        pauli_string = Circuit(map(_get_pauli, pauli_string))

    else:

        # Check that all strings only have I,X,Y,Z tokens
        _n_qubits = len(circuit.all_qubits())
        if any(
                set(p).difference('IXYZ') or len(p) != _n_qubits
                for p in pauli_string):
            raise ValueError(
                f"'pauli_string' must contain only I, X, Y and Z gates acting on different qubits."
            )

    # ================================

    # Start pre-processing time
    _prep_time = time()

    # Get qubits
    _qubits = circuit.all_qubits()

    # Remove ID gates
    if remove_id_gates:
        circuit = Circuit(gate for gate in circuit if gate.name != 'I')

    # Simplify circuit
    if simplify:
        # Get qubits to pin
        if type(pauli_string) == Circuit:
            # Pinned qubits
            _pinned_qubits = pauli_string.all_qubits()

        else:
            # Find qubits to pin
            _pinned_qubits = set.union(
                *({q
                   for q, g in zip(_qubits, p)
                   if g != 'I'}
                  for p in pauli_string))

        # Simplify
        circuit = utils.simplify(circuit,
                                 remove_id_gates=remove_id_gates,
                                 verbose=_verbose)
        circuit = utils.popright(utils.simplify(circuit),
                                 pinned_qubits=set(_pinned_qubits).intersection(
                                     circuit.all_qubits()),
                                 verbose=_verbose)

    # Compress circuit
    circuit = Circuit(
        utils.to_matrix_gate(c, complex_type=complex_type)
        for c in tqdm(utils.compress(circuit, max_n_qubits=compress),
                      disable=not _verbose,
                      desc=f"Compress ({int(compress)})"))

    # Pad missing qubits
    circuit += Circuit(
        Gate('MATRIX', [q], U=np.eye(2))
        for q in set(_qubits).difference(circuit.all_qubits()))

    # Get qubits map
    qubits_map = kwargs['qubits_map'] if 'qubits_map' in kwargs else {
        q: x for x, q in enumerate(circuit.all_qubits())
    }

    # Pre-process circuit
    _LS_cache = {}
    _P_cache = {}
    circuit = [
        g for gate in tqdm(reversed(circuit),
                           total=len(circuit),
                           disable=not _verbose,
                           desc='Pre-processing')
        for g in _process_gate(gate, LS_cache=_LS_cache, P_cache=_P_cache)
    ]
    _LS_cache.clear()
    _P_cache.clear()
    del (_LS_cache)
    del (_P_cache)

    # Get maximum number of qubits and parameters
    _max_n_qubits = max(max(len(gate[1]) for gate in circuit), 2)
    _max_n_params = max(len(gate[2]) for gate in circuit)

    # Get qubits
    qubits = np.array([
        np.pad([qubits_map[q]
                for q in gate[1]], (0, _max_n_qubits - len(gate[1])))
        for gate in circuit
    ],
                      dtype='int32')

    # Get parameters
    params = np.round(
        np.array([
            np.pad(gate[2], (0, _max_n_params - len(gate[2])))
            for gate in circuit
        ],
                 dtype=float_type),
        -int(np.floor(np.log10(atol))) if atol < 1 else 0)

    # Remove -0
    params[np.abs(params) == 0] = 0

    # Quick check
    assert (all('_' + gate[0] in globals() for gate in circuit))

    # Get gates
    gates = np.array([globals()['_' + gate[0]] for gate in circuit],
                     dtype='int')

    # Compute expected number of paths
    _log2_n_expected_branches = 0
    for _idx in np.where(np.isin(gates, _MATRIX_SET))[0]:
        _nq = (gates[_idx] // _gate_mul) + 1
        _p = params[_idx][:4**(2 * _nq)]
        _log2_n_expected_branches += np.sum(
            np.log2(
                np.sum(np.abs(np.reshape(_p, (4**_nq, 4**_nq))) > eps,
                       axis=1))) / 4**_nq

    # Check
    assert (len(gates) == len(qubits) and len(gates) == len(params))

    # Initialize branches
    if type(pauli_string) == Circuit:

        # Convert Pauli string
        _pauli_string = np.array([_I] * len(qubits_map), dtype='int')
        for gate in pauli_string:
            if gate.name != 'I':
                _pauli_string[qubits_map[gate.qubits[0]]] = {
                    'X': _X,
                    'Y': _Y,
                    'Z': _Z
                }[gate.name]

        # Initialize branches
        branches = [(_pauli_string, phase, 0)]

    else:

        # Initialize branches
        branches = [(np.array([{
            'I': _I,
            'X': _X,
            'Y': _Y,
            'Z': _Z
        }[g]
                               for g in p],
                              dtype='int'), phase, 0)
                    for p, phase in pauli_string.items()
                    if abs(phase) > atol]

    # Initialize final Pauli strings
    db = kwargs['db_init']()

    # Define update function
    _update = partial_func(_update_pauli_string,
                           gates,
                           qubits,
                           params,
                           eps=eps,
                           atol=branch_atol)

    # End pre-processing time
    _prep_time = time() - _prep_time

    # Initialize infos
    _info_init = lambda: {
        'n_explored_branches': 0,
        'largest_n_branches_in_memory': 0,
        'peak_virtual_memory (GB)': virtual_memory().used / 2**30,
        'average_virtual_memory (GB)': (virtual_memory().used / 2**30, 1),
        'n_threads': parallel,
        'n_cpus': cpu_count(),
        'eps': eps,
        'atol': atol,
        'branch_atol': branch_atol,
        'float_type': str(float_type),
        'log2_n_expected_branches': _log2_n_expected_branches
    }
    infos = _info_init()
    infos['memory_baseline (GB)'] = virtual_memory().used / 2**30
    if not use_mpi or _mpi_rank == 0:
        infos['n_explored_branches'] = 1
        infos['largest_n_branches_in_memory'] = 1

    # Start clock
    _init_time = time()

    # Scatter first batch of branches to different MPI nodes
    if use_mpi and _mpi_size > 1:

        if _mpi_rank == 0:

            # Explore branches (breadth-first search)
            branches = _breadth_first_search(
                _update,
                db,
                branches,
                max_n_branches=kwargs['max_breadth_first_branches_mpi'],
                infos=infos,
                verbose=verbose,
                mpi_rank=_mpi_rank,
                **kwargs)

        # Distribute branches
        branches = _mpi_comm.scatter(
            [list(x) for x in distribute(_mpi_size, branches)], root=0)

    # Explore branches (breadth-first search)
    branches = _breadth_first_search(
        _update,
        db,
        branches,
        max_n_branches=kwargs['max_breadth_first_branches'],
        infos=infos,
        verbose=verbose if not use_mpi or _mpi_rank == 0 else False,
        **kwargs)

    # If there are remaining branches, use depth-first search
    if branches:

        _depth_first_search(_update,
                            db,
                            branches,
                            parallel=parallel,
                            infos=infos,
                            info_init=_info_init,
                            verbose=verbose,
                            mpi_rank=_mpi_rank if use_mpi else 0,
                            mpi_size=_mpi_size if use_mpi else 1,
                            **kwargs)

    # Update infos
    infos['average_virtual_memory (GB)'] = infos['average_virtual_memory (GB)'][
        0] / infos['average_virtual_memory (GB)'][1] - infos[
            'memory_baseline (GB)']
    infos['peak_virtual_memory (GB)'] -= infos['memory_baseline (GB)']

    # Update branching time
    infos['branching_time (s)'] = time() - _init_time

    # Collect results
    if use_mpi and _mpi_size > 1 and kwargs['mpi_merge']:

        for _k in infos:
            infos[_k] = [infos[_k]]

        # Initialize pbar
        if _mpi_rank == 0:
            pbar = tqdm(total=int(np.ceil(np.log2(_mpi_size))),
                        disable=not verbose,
                        desc='Collect results')

        # Initialize tag and size
        _tag = 0
        _size = _mpi_size
        while _size > 1:
            # Update progressbar
            if _mpi_rank == 0:
                pbar.set_description(
                    f'Collect results (Mem={virtual_memory().percent}%)')

            # Get shift
            _shift = (_size // 2) + (_size % 2)

            if _mpi_rank < (_size // 2):
                # Get infos
                _infos = _mpi_comm.recv(source=_mpi_rank + _shift, tag=_tag)

                # Update infos
                for _k in infos:
                    infos[_k].extend(_infos[_k])

                # Get number of chunks
                _n_chunks = _mpi_comm.recv(source=_mpi_rank + _shift,
                                           tag=_tag + 1)

                if _n_chunks > 1:
                    # Initialize _process
                    with tqdm(range(_n_chunks),
                              desc='Get db',
                              leave=False,
                              disable=_mpi_rank != 0) as pbar:
                        for _ in pbar:
                            # Receive db
                            _db = _mpi_comm.recv(source=_mpi_rank + _shift,
                                                 tag=_tag + 2)

                            # Merge datasets
                            kwargs['merge'](db, _db, use_tuple=True)

                            # Update description
                            pbar.set_description(
                                f'Get db (Mem={virtual_memory().percent}%)')

                            # Clear dataset
                            _db.clear()

                else:
                    # Receive db
                    _db = _mpi_comm.recv(source=_mpi_rank + _shift,
                                         tag=_tag + 2)

                    # Merge datasets
                    kwargs['merge'](db, _db)

                    # Clear dataset
                    _db.clear()

            elif _shift <= _mpi_rank < _size:
                # Remove default_factory because pickle is picky regarding local objects
                db.default_factory = None

                # Send infos
                _mpi_comm.send(infos, dest=_mpi_rank - _shift, tag=_tag)

                # Compute chunks
                _n_chunks = kwargs['mpi_chunk_max_size']
                _n_chunks = (len(db) // _n_chunks) + (
                    (len(db) % _n_chunks) != 0)

                # Send number of chunks
                _mpi_comm.send(_n_chunks, dest=_mpi_rank - _shift, tag=_tag + 1)

                if _n_chunks > 1:
                    # Split db in chunks
                    for _db in chunked(db.items(),
                                       kwargs['mpi_chunk_max_size']):
                        _mpi_comm.send(_db,
                                       dest=_mpi_rank - _shift,
                                       tag=_tag + 2)
                else:
                    # Send db
                    _mpi_comm.send(db, dest=_mpi_rank - _shift, tag=_tag + 2)

                # Reset db and infos
                db.clear()
                infos.clear()

            # update size
            _tag += 3
            _size = _shift
            _mpi_comm.barrier()

            # Update progressbar
            if _mpi_rank == 0:
                pbar.set_description(
                    f'Collect results (Mem={virtual_memory().percent}%)')
                pbar.update()

    # Update runtime
    if not use_mpi or _mpi_rank == 0 or not kwargs['mpi_merge']:
        infos['runtime (s)'] = time() - _init_time
        infos['pre-processing (s)'] = _prep_time

    # Check that all the other dbs/infos (excluding rank==0) have been cleared up
    if use_mpi and _mpi_rank > 0 and kwargs['mpi_merge']:
        assert (not len(db) and not len(infos))

    if return_info:
        return db, infos
    else:
        return db
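# Illustrative sketch (not part of the original source) of the pairwise
# reduction pattern used above to collect results across MPI ranks: at every
# round the upper half of the active ranks sends its db/infos to the lower
# half, so the number of active ranks roughly halves until only rank 0 is
# left.
def _reduction_rounds_sketch(mpi_size: int) -> list:
    rounds = []
    size = mpi_size
    while size > 1:
        shift = (size // 2) + (size % 2)
        # (receiver, sender) pairs for this round
        rounds.append([(r, r + shift) for r in range(size // 2)])
        size = shift
    return rounds

# For instance, with 5 ranks the rounds are
# [[(0, 3), (1, 4)], [(0, 2)], [(0, 1)]]: after ceil(log2(5)) = 3 rounds,
# everything has been merged into rank 0.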
Example #7
def expectation_value(circuit: Circuit, op: Circuit, initial_state: str,
                      **kwargs) -> float:
    """
    Compute the expectation value of `op` for the given `circuit`, using
    `initial_state` as initial state.

    Parameters
    ----------
    circuit: Circuit
        Circuit to simulate.
    op: Circuit
        Operator used to compute the expectation value. `op` must be a valid
        `Circuit` containing only Pauli gates (that is, either `I`, `X`, `Y` or
        `Z` gates) acting on different qubits.
    initial_state: str
        Initial state used to compute the expectation value. Valid tokens for
        `initial_state` are:

        - `0`: qubit is set to `0` in the computational basis,
        - `1`: qubit is set to `1` in the computational basis,
        - `+`: qubit is set to the `+` state (Hadamard basis),
        - `-`: qubit is set to the `-` state (Hadamard basis).

    Returns
    -------
    float [, dict[any, any]]
        The expectation value of the operator `op`. If `return_info=True`,
        information gathered during the simulation is also returned.

    Other Parameters
    ----------------
    `expectation_value` uses all valid parameters for `update_pauli_string`.

    See Also
    --------
    `update_pauli_string`

    Example
    -------
    >>> # Define circuit
    >>> circuit = Circuit(
    >>>     [Gate('X', qubits=[0])**1.2,
    >>>      Gate('ISWAP', qubits=[0, 1])**2.3])
    >>>
    >>> # Define operator
    >>> op = Circuit([Gate('Z', qubits=[1])])
    >>>
    >>> # Get expectation value
    >>> clifford.expectation_value(circuit=circuit,
    >>>                            op=op,
    >>>                            initial_state='11',
    >>>                            float_type='float64')
    -0.6271482580325515
    """

    # ==== Set default parameters ====

    def _db_init():
        return [0]

    def _collect(db, ops, ph):
        # Compute expectation value given pauli string
        if next((False for x in ops if x in 'XY'), True):
            db[0] += ph

    def _merge(db, db_new):
        db[0] += db_new[0]

    def _prepare_state(state, qubits):
        c = Circuit()
        for q, s in zip(qubits, state):
            if s == '0':
                pass
            elif s == '1':
                c.append(Gate('X', [q]))
            elif s == '+':
                c.append(Gate('H', [q]))
            elif s == '-':
                c.extend([Gate('X', [q]), Gate('H', [q])])
            else:
                raise ValueError(f"Unexpected token '{s}'")
        return c

    kwargs.setdefault('use_mpi', None)
    kwargs.setdefault('return_info', False)

    # If use_mpi is None, use MPI if it has been detected
    if kwargs['use_mpi'] is None and _detect_mpi:

        # Warn that MPI is used because detected
        warn("MPI has been detected. Using MPI.")

        # Set MPI to true
        kwargs['use_mpi'] = True

    # Get MPI info
    if kwargs['use_mpi']:
        from mpi4py import MPI
        _mpi_comm = MPI.COMM_WORLD
        _mpi_size = _mpi_comm.Get_size()
        _mpi_rank = _mpi_comm.Get_rank()

    # ================================

    # Prepare initial state
    if type(initial_state) == str:
        # Check
        if len(initial_state) != len(circuit.all_qubits()):
            raise ValueError(
                f"'initial_state' has the wrong number of qubits "
                f"(expected {len(circuit.all_qubits())}, got {len(initial_state)})."
            )
        # Get state
        initial_state = _prepare_state(initial_state, circuit.all_qubits())
    else:
        raise ValueError(
            f"'{type(initial_state)}' not supported for 'initial_state'.")

    # Get expectation value
    _res = update_pauli_string(initial_state + circuit,
                               op,
                               db_init=_db_init,
                               collect=_collect,
                               merge=_merge,
                               mpi_merge=False,
                               **kwargs)

    # Collect results
    if kwargs['use_mpi'] and _mpi_size > 1:
        _all_res = _mpi_comm.gather(_res, root=0)
        if _mpi_rank == 0:
            if kwargs['return_info']:
                infos = {}
                for _, _infos in _all_res:
                    for _k, _v in _infos.items():
                        if _k not in infos:
                            infos[_k] = [_v]
                        else:
                            infos[_k].append(_v)
                exp_value = sum(ev[0] for ev, _ in _all_res)
            else:
                exp_value = sum(ev[0] for ev in _all_res)
        else:
            exp_value = infos = None
    else:
        if kwargs['return_info']:
            exp_value = _res[0][0]
            infos = _res[1]
        else:
            exp_value = _res[0]

    # Return expectation value
    if kwargs['return_info']:
        return exp_value, infos
    else:
        return exp_value
Example #8
def to_cirq(circuit: Circuit,
            qubits_map: dict[any, any] = None,
            verbose: bool = False) -> cirq.Circuit:
    """
    Convert `Circuit` to `cirq.Circuit`.

    Parameters
    ----------
    circuit: Circuit
        `Circuit` to convert to `cirq.Circuit`.
    qubits_map: dict[any, any], optional
        How to map qubits in `Circuit` to `cirq.Circuit`. If not provided,
        `cirq.LineQubit`s are used and automatically mapped to `Circuit`'s
        qubits.
    verbose: bool, optional
        Verbose output.

    Returns
    -------
    cirq.Circuit
        `cirq.Circuit` obtained from `Circuit`.

    Example
    -------
    >>> from hybridq.extras.cirq import to_cirq
    >>> c = Circuit(Gate('H', qubits=[q]) for q in range(3))
    >>> c.append(Gate('CX', qubits=[0, 1]))
    >>> c.append(Gate('CX', qubits=[2, 0]))
    >>> c.append(Gate('CX', qubits=[1, 2]))
    >>> to_cirq(c)
    0: ───H───@───X───────
              │   │
    1: ───H───X───┼───@───
                  │   │
    2: ───H───────@───X───
    """

    _to_cirq_naming = {
        'I': lambda params: cirq.I,
        'P': lambda params: cirq.S,
        'T': lambda params: cirq.T,
        'X': lambda params: cirq.X,
        'Y': lambda params: cirq.Y,
        'Z': lambda params: cirq.Z,
        'H': lambda params: cirq.H,
        'RX': lambda params: cirq.rx(*params),
        'RY': lambda params: cirq.ry(*params),
        'RZ': lambda params: cirq.rz(*params),
        'CZ': lambda params: cirq.CZ,
        'ZZ': lambda params: cirq.ZZ,
        'CX': lambda params: cirq.CNOT,
        'SWAP': lambda params: cirq.SWAP,
        'ISWAP': lambda params: cirq.ISWAP,
        'FSIM': lambda params: cirq.FSimGate(*params),
        'CPHASE': lambda params: cirq.CZ**(params[0] / np.pi)
    }

    # Get circuit
    circ = cirq.Circuit()

    # If not provided, create trivial map
    if not qubits_map:
        try:
            sorted(circuit.all_qubits())
            _standard_sortable = True
        except Exception:
            _standard_sortable = False

        if _standard_sortable:
            qubits_map = {q: cirq.LineQubit(q) for q in circuit.all_qubits()}
        else:
            qubits_map = {
                q: cirq.LineQubit(x)
                for x, q in enumerate(circuit.all_qubits())
            }

    # Apply gates
    for gate in tqdm(circuit, disable=not verbose):

        # Check if qubits are missing
        if not gate.provides('qubits') or gate.qubits is None:
            raise ValueError(
                f"Gate(name='{gate.name}') requires {gate.n_qubits} qubits.")

        # Check if params are missing
        if gate.provides('params') and gate.params is None:
            raise ValueError(
                f"Gate(name='{gate.name}') requires {gate.n_params} parameters."
            )

        # Get mapped qubits
        qubits = [qubits_map[q] for q in gate.qubits]

        # Get params
        params = gate.params if gate.provides('params') else None

        if gate.name in {
                'MATRIX', 'U3', 'R_PI_2'
        } or (gate.provides('is_conjugated')
              and gate.is_conjugated()) or (gate.provides('is_transposed')
                                            and gate.is_transposed()):
            cirq_gate = cirq.MatrixGate(gate.matrix())
        elif gate.name[:5] == 'SQRT_':
            cirq_gate = _to_cirq_naming[gate.name[5:]](params)**(0.5 *
                                                                 gate.power)
        elif gate.name in _to_cirq_naming:
            cirq_gate = _to_cirq_naming[gate.name](params)**gate.power
        else:
            raise ValueError(f"{gate} not yet supported.")

        # Append
        circ.append(cirq_gate.on(*qubits))

    return circ