Example 1
def test_hyper_reconf(parallel):
    if parallel:
        pytest.importorskip('distributed')

    eq, shapes = oe.helpers.rand_equation(30, reg=5, seed=42, d_max=3)
    optimizer = ctg.HyperOptimizer(
        max_repeats=16, parallel=parallel, optlib='random',
        reconf_opts={'subtree_size': 6}, progbar=True,
    )
    oe.contract_path(eq, *shapes, shapes=True, optimize=optimizer)
    assert optimizer.best['flops'] < optimizer.best['original_flops']
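
These test snippets are fragments of a pytest suite; `parallel`, `optlib`, `requires` and `contraction_20_5` are fixtures/parameters supplied by that suite and are not defined here. A minimal sketch of the imports the snippets assume, with the module aliases inferred from the calls above:

import pytest
import opt_einsum as oe
import cotengra as ctg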
Example 2
def test_hyper_slicer(parallel):
    if parallel:
        pytest.importorskip('distributed')

    eq, shapes = oe.helpers.rand_equation(30, reg=5, seed=42, d_max=3)
    optimizer = ctg.HyperOptimizer(
        max_repeats=16, parallel=parallel, optlib='random',
        slicing_opts={'target_slices': 1000}, progbar=True,
    )
    oe.contract_path(eq, *shapes, shapes=True, optimize=optimizer)
    assert optimizer.best['tree'].multiplicity >= 1000
    assert optimizer.best['flops'] > optimizer.best['original_flops']
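
After the search finishes, the winning sliced tree can be inspected; a small sketch using only the attributes that already appear in these snippets:

tree = optimizer.best['tree']
print("number of slices:", tree.multiplicity)
print("sliced flops:", optimizer.best['flops'])
print("unsliced flops:", optimizer.best['original_flops'])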
Example 3
def test_hyper(contraction_20_5):
    pytest.importorskip('btb')
    pytest.importorskip('psutil')
    pytest.importorskip('kahypar')
    eq, _, _, arrays = contraction_20_5
    optimizer = ctg.HyperOptimizer(
        max_repeats=32,
        parallel=False,
    )
    _, path_info = oe.contract_path(eq, *arrays, optimize=optimizer)
    assert path_info.speedup > 1
    assert {x[0] for x in optimizer.get_trials()} == {'greedy', 'kahypar'}
Example 4
def test_hyper(contraction_20_5, optlib, requires, parallel):
    pytest.importorskip('kahypar')
    pytest.importorskip(requires)
    if parallel:
        pytest.importorskip('distributed')

    eq, _, _, arrays = contraction_20_5
    optimizer = ctg.HyperOptimizer(
        max_repeats=16, parallel=parallel, optlib=optlib,
    )
    _, path_info = oe.contract_path(eq, *arrays, optimize=optimizer)
    assert path_info.speedup > 1
    assert {x[0] for x in optimizer.get_trials()} == {'greedy', 'kahypar'}
    optimizer.print_trials()
Example 5
            def cotengra_params():
                # Get HyperOptimizer
                q = ctg.HyperOptimizer(methods=kwargs['methods'],
                                       max_time=kwargs['max_time'],
                                       max_repeats=kwargs['max_repeats'],
                                       minimize=kwargs['minimize'],
                                       progbar=False,
                                       parallel=False,
                                       **kwargs['cotengra'])

                # For some optlib choices, HyperOptimizer._retrieve_params is
                # not picklable. Fix the problem by hand.
                q._retrieve_params = __FunctionWrap(q._retrieve_params)

                # Return HyperOptimizer
                return q
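
The `__FunctionWrap` helper is not shown in this fragment. A hypothetical sketch of such a wrapper, serializing the callable with cloudpickle so it survives pickling (this is not the original implementation):

import cloudpickle

class _FunctionWrap:
    # hypothetical helper: wraps a callable so it can be pickled
    def __init__(self, fn):
        self._payload = cloudpickle.dumps(fn)

    def __call__(self, *args, **kwargs):
        fn = cloudpickle.loads(self._payload)
        return fn(*args, **kwargs)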
Example 6
def test_hyper_slicer_reconf(parallel):
    if parallel:
        pytest.importorskip('distributed')

    eq, shapes = oe.helpers.rand_equation(30, reg=5, seed=42, d_max=3)
    optimizer = ctg.HyperOptimizer(
        max_repeats=16, parallel=parallel, optlib='random',
        slicing_reconf_opts={
            'target_size': 2**19,
            'reconf_opts': {
                'subtree_size': 6,
            },
        }, progbar=True,
    )
    oe.contract_path(eq, *shapes, shapes=True, optimize=optimizer)
    assert optimizer.best['tree'].max_size() <= 2**19
Example 7
def test_insane_nested():
    pytest.importorskip('distributed')

    eq, shapes = oe.helpers.rand_equation(30, reg=5, seed=42, d_max=3)
    optimizer = ctg.HyperOptimizer(
        max_repeats=16, parallel=True, optlib='random', progbar=True,
        slicing_reconf_opts={
            'target_size': 2**20,
            'forested': True,
            'max_repeats': 8,
            'num_trees': 2,
            'reconf_opts': {
                'forested': True,
                'num_trees': 2,
                'subtree_size': 6,
            }
        }
    )
    oe.contract_path(eq, *shapes, shapes=True, optimize=optimizer)
    assert optimizer.best['tree'].max_size() <= 2**20
Example 8
def edge_simulate(args):
    circ, kwargs, edge = args

    ZZ = qu.pauli('Z') & qu.pauli('Z')
    opt_type = kwargs.get('ordering_algo', 'uniform')
    if opt_type == 'hyper':
        optimizer = ctg.HyperOptimizer(parallel=False,
                                       max_repeats=10000,
                                       max_time=kwargs.get(
                                           'optimizer_time', 1))
    elif opt_type == 'uniform':
        optimizer = ctg.UniformOptimizer(parallel=False,
                                         methods=['greedy'],
                                         max_repeats=1_000_000,
                                         max_time=kwargs.get(
                                             'optimizer_time', 1))
    else:
        raise ValueError('Ordering algorithm not supported')
    return circ.local_expectation(ZZ, edge, optimize=optimizer)
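
A hypothetical driver for `edge_simulate`, assuming `circ` is a quimb circuit and `edges` is a list of qubit pairs (both names are illustrative and not taken from the fragment):

from concurrent.futures import ProcessPoolExecutor

kwargs = {'ordering_algo': 'hyper', 'optimizer_time': 1}
with ProcessPoolExecutor() as pool:
    # map the per-edge expectation over all edges in parallel
    expectations = list(pool.map(
        edge_simulate, [(circ, kwargs, edge) for edge in edges]))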
Example 9
    140,
    3,
    n_out=2,
    seed=666,
)
arrays = [np.random.randn(*s) for s in shapes]

# ------------------------ Find the contraction tree ------------------------ #

print("Finding tree...")

# find a contraction tree
opt = ctg.HyperOptimizer(
    parallel=True,
    # make sure contractions fit onto GPU
    slicing_reconf_opts={'target_size': 2**28},
    max_repeats=32,
    progbar=True,
)

# run the optimizer and extract the contraction tree
tree = opt.search(inputs, output, size_dict)

# ------------------------- Perform the contraction ------------------------- #

print("1: Contracting slices with jax...")

# we'll run the GPU contraction on a separate single thread, which mostly
# serves as an example of how one might distribute contractions across
# multiple GPUs
pool = ThreadPoolExecutor(1)
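
The fragment stops right after the pool is created; a minimal sketch of how the contraction might then be dispatched to it (assuming the jax backend is installed and that `tree.contract` accepts the arrays plus a `backend` keyword in this cotengra version):

# submit the full sliced contraction to the single GPU thread
future = pool.submit(tree.contract, arrays, backend='jax')
result = future.result()
print("contraction result:", result)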
Example 10
def _simulate_tn(circuit: any, initial_state: any, final_state: any,
                 optimize: any, backend: any, complex_type: any,
                 tensor_only: bool, verbose: bool, **kwargs):
    import quimb.tensor as tn
    import cotengra as ctg

    # Get random leaves_prefix
    leaves_prefix = ''.join(
        np.random.choice(list('abcdefghijklmnopqrstuvwxyz'), size=20))

    # Initialize info
    _sim_info = {}

    # Alias for tn
    if optimize == 'tn':
        optimize = 'cotengra'

    if isinstance(circuit, Circuit):

        # Get number of qubits
        qubits = circuit.all_qubits()
        n_qubits = len(qubits)

        # If initial/final state is None, set to all .'s
        initial_state = '.' * n_qubits if initial_state is None else initial_state
        final_state = '.' * n_qubits if final_state is None else final_state

        # Initial and final states must be valid strings
        for state, sname in [(initial_state, 'initial_state'),
                             (final_state, 'final_state')]:
            # Get alphabet
            from string import ascii_letters

            # Check if string
            if not isinstance(state, str):
                raise ValueError(f"'{sname}' must be a valid string.")

            # Deprecated error
            if any(x in 'xX' for x in state):
                from hybridq.utils import DeprecationWarning
                from warnings import warn

                # Warn the user that '.' is used to represent open qubits
                warn(
                    "Since '0.6.3', letters in the alphabet are used to "
                    "trace selected qubits (including 'x' and 'X'). "
                    "Instead, '.' is used to represent an open qubit.",
                    DeprecationWarning)

            # Check only valid symbols are present
            if set(state).difference('01+-.' + ascii_letters):
                raise ValueError(f"'{sname}' contains invalid symbols.")

            # Check number of qubits
            if len(state) != n_qubits:
                raise ValueError(f"'{sname}' has the wrong number of qubits "
                                 f"(expected {n_qubits}, got {len(state)})")

        # Check memory
        if 2**(initial_state.count('.') +
               final_state.count('.')) > kwargs['max_largest_intermediate']:
            raise MemoryError("Memory for the given number of open qubits "
                              "exceeds the 'max_largest_intermediate'.")

        # Compress circuit
        if kwargs['compress']:
            if verbose:
                print(
                    f"Compress circuit (max_n_qubits={kwargs['compress']}): ",
                    end='',
                    file=stderr)
                _time = time()

            circuit = utils.compress(
                circuit,
                kwargs['compress']['max_n_qubits'] if isinstance(
                    kwargs['compress'], dict) else kwargs['compress'],
                verbose=verbose,
                **({
                    k: v
                    for k, v in kwargs['compress'].items()
                    if k != 'max_n_qubits'
                } if isinstance(kwargs['compress'], dict) else {}))

            circuit = Circuit(
                utils.to_matrix_gate(c, complex_type=complex_type)
                for c in circuit)
            if verbose:
                print(f"Done! ({time()-_time:1.2f}s)", file=stderr)

        # Get tensor network representation of circuit
        tensor, tn_qubits_map = utils.to_tn(circuit,
                                            return_qubits_map=True,
                                            leaves_prefix=leaves_prefix)

        # Define basic MPS
        _mps = {
            '0': np.array([1, 0]),
            '1': np.array([0, 1]),
            '+': np.array([1, 1]) / np.sqrt(2),
            '-': np.array([1, -1]) / np.sqrt(2)
        }

        # Attach initial/final state
        for state, ext in [(initial_state, 'i'), (final_state, 'f')]:
            for s, q in ((s, q) for s, q in zip(state, qubits) if s in _mps):
                inds = [f'{leaves_prefix}_{tn_qubits_map[q]}_{ext}']
                tensor &= tn.Tensor(_mps[s], inds=inds, tags=inds)

        # For each unique letter, apply trace
        for x in set(initial_state + final_state).difference(''.join(_mps) +
                                                             '.'):
            # Get indexes
            inds = [
                f'{leaves_prefix}_{tn_qubits_map[q]}_i'
                for s, q in zip(initial_state, qubits) if s == x
            ]
            inds += [
                f'{leaves_prefix}_{tn_qubits_map[q]}_f'
                for s, q in zip(final_state, qubits) if s == x
            ]

            # Apply trace
            tensor &= tn.Tensor(np.reshape([1] + [0] * (2**len(inds) - 2) +
                                           [1], (2, ) * len(inds)),
                                inds=inds)

        # Simplify if requested
        if kwargs['simplify_tn']:
            tensor.full_simplify_(kwargs['simplify_tn']).astype_(complex_type)
        else:
            # Otherwise, just convert to the given complex_type
            tensor.astype_(complex_type)

        # Get contraction from heuristic
        if optimize == 'cotengra' and kwargs['max_iterations'] > 0:

            # Create local client if MPI has been detected (not compatible with Dask at the moment)
            if _mpi_env and kwargs['parallel']:

                from distributed import Client, LocalCluster
                _client = Client(LocalCluster(processes=False))

            else:

                _client = None

            # Set cotengra parameters
            cotengra_params = lambda: ctg.HyperOptimizer(
                methods=kwargs['methods'],
                max_time=kwargs['max_time'],
                max_repeats=kwargs['max_repeats'],
                minimize=kwargs['minimize'],
                progbar=verbose,
                parallel=kwargs['parallel'],
                **kwargs['cotengra'])

            # Get optimized path
            opt = cotengra_params()
            info = tensor.contract(all, optimize=opt, get='path-info')

            # Get target size
            tli = kwargs['target_largest_intermediate']

            # Repeat for the requested number of iterations
            for _ in range(1, kwargs['max_iterations']):

                # Break if largest intermediate is equal or smaller than target
                if info.largest_intermediate <= tli:
                    break

                # Otherwise, restart
                _opt = cotengra_params()
                _info = tensor.contract(all, optimize=_opt, get='path-info')

                # Store the best
                if kwargs['minimize'] == 'size':

                    if _info.largest_intermediate < info.largest_intermediate or (
                            _info.largest_intermediate
                            == info.largest_intermediate
                            and _opt.best['flops'] < opt.best['flops']):
                        info = _info
                        opt = _opt

                else:

                    if _opt.best['flops'] < opt.best['flops'] or (
                            _opt.best['flops'] == opt.best['flops']
                            and _info.largest_intermediate <
                            info.largest_intermediate):
                        info = _info
                        opt = _opt

            # Close client if exists
            if _client:

                _client.shutdown()
                _client.close()

        # Just return tensor if required
        if tensor_only:
            if optimize == 'cotengra' and kwargs['max_iterations'] > 0:
                return tensor, (info, opt)
            else:
                return tensor

    else:

        # Set tensor
        tensor = circuit

        if len(optimize) == 2 and isinstance(
                optimize[0], PathInfo) and isinstance(
                    optimize[1], ctg.hyper.HyperOptimizer):

            # Get info and opt from optimize
            info, opt = optimize

            # Set optimization
            optimize = 'cotengra'

        else:

            # Get tensor and path
            tensor = circuit

    # Print some info
    if verbose:
        print(
            f'Largest Intermediate: 2^{np.log2(float(info.largest_intermediate)):1.2f}',
            file=stderr)
        print(
            f'Max Largest Intermediate: 2^{np.log2(float(kwargs["max_largest_intermediate"])):1.2f}',
            file=stderr)
        print(f'Flops: 2^{np.log2(float(info.opt_cost)):1.2f}', file=stderr)

    if optimize == 'cotengra':

        # Get indexes
        _inds = tensor.outer_inds()

        # Get input indexes and output indexes
        _i_inds = sort([x for x in _inds if x[-2:] == '_i'],
                       key=lambda x: int(x.split('_')[1]))
        _f_inds = sort([x for x in _inds if x[-2:] == '_f'],
                       key=lambda x: int(x.split('_')[1]))

        # Get order
        _inds = [_inds.index(x) for x in _i_inds + _f_inds]

        # Get slice finder
        sf = ctg.SliceFinder(info,
                             target_size=kwargs['max_largest_intermediate'])

        # Find slices
        with tqdm(kwargs['temperatures'], disable=not verbose,
                  leave=False) as pbar:
            for _temp in pbar:
                pbar.set_description(f'Find slices (T={_temp})')
                ix_sl, cost_sl = sf.search(temperature=_temp)

        # Get slice contractor
        sc = sf.SlicedContractor([t.data for t in tensor])

        # Update infos
        _sim_info.update({
            'flops': info.opt_cost,
            'largest_intermediate': info.largest_intermediate,
            'n_slices': cost_sl.nslices,
            'total_flops': cost_sl.total_flops
        })

        # Print some infos
        if verbose:
            print(
                f'Number of slices: 2^{np.log2(float(cost_sl.nslices)):1.2f}',
                file=stderr)
            print(f'Flops+Cuts: 2^{np.log2(float(cost_sl.total_flops)):1.2f}',
                  file=stderr)

        if kwargs['max_n_slices'] and sc.nslices > kwargs['max_n_slices']:
            raise RuntimeError(
                f'Too many slices ({sc.nslices} > {kwargs["max_n_slices"]})')

        # Contract tensor
        _li = np.log2(float(info.largest_intermediate))
        _mli = np.log2(float(kwargs["max_largest_intermediate"]))
        _tensor = sc.gather_slices((sc.contract_slice(
            i, backend=backend
        ) for i in tqdm(
            range(sc.nslices),
            desc=f'Contracting tensor (li=2^{_li:1.0f}, mli=2^{_mli:1.1f})',
            leave=False)))

        # Create map
        _map = ''.join([get_symbol(x) for x in range(len(_inds))])
        _map += '->'
        _map += ''.join([get_symbol(x) for x in _inds])

        # Reorder tensor
        tensor = contract(_map, _tensor)

        # Deprecated
        ## Reshape tensor
        #if _inds:
        #    if _i_inds and _f_inds:
        #        tensor = np.reshape(tensor, (2**len(_i_inds), 2**len(_f_inds)))
        #    else:
        #        tensor = np.reshape(tensor,
        #                            (2**max(len(_i_inds), len(_f_inds)),))

    else:

        # Contract tensor
        tensor = tensor.contract(optimize=optimize, backend=backend)

        if hasattr(tensor, 'inds'):

            # Get input indexes and output indexes
            _i_inds = sort([x for x in tensor.inds if x[-2:] == '_i'],
                           key=lambda x: int(x.split('_')[1]))
            _f_inds = sort([x for x in tensor.inds if x[-2:] == '_f'],
                           key=lambda x: int(x.split('_')[1]))

            # Transpose tensor
            tensor.transpose(*(_i_inds + _f_inds), inplace=True)

            # Deprecated
            ## Reshape tensor
            #if _i_inds and _f_inds:
            #    tensor = np.reshape(tensor, (2**len(_i_inds), 2**len(_f_inds)))
            #else:
            #    tensor = np.reshape(tensor,
            #                        (2**max(len(_i_inds), len(_f_inds)),))

    if kwargs['return_info']:
        return tensor, _sim_info
    else:
        return tensor
Example 11
        gate_opts = {'contract': 'swap-split-gate', 'max_bond': 2}
    else:
        gate_opts = {}
    
    # instantiate the `Circuit` object that 
    # constructs the initial tensor network:
    return qtn.Circuit.from_qasm_file(file, gate_opts=gate_opts)

circ = load_circuit(depth=10)
psi_f = qtn.MPS_computational_state('0' * (circ.N))
tn = circ.psi & psi_f
output_inds = []


# inplace full simplify and cast to single precision
tn.full_simplify_(output_inds=output_inds)
tn.astype_('complex64')

opt = ctg.HyperOptimizer(
    # methods=['kahypar', 'greedy', 'walktrap'],
    methods=['greedy', 'kahypar'],
    max_repeats=128,
    progbar=True,
    minimize='flops',
    score_compression=0.5,  # deliberately make the optimizer try many methods
)

info = tn.contract(all, optimize=opt, get='path-info')

print(tn.contract(all, optimize=opt.path, backend='pycompss'))
Example 12
    3,
    n_out=2,
    seed=666,
)
# numpy seeded by    ^^^^ so these are synced
arrays = [np.random.randn(*s) for s in shapes]

# ------ STAGE 1: find contraction tree with many independent searches ------ #

print(f"{comm.rank}:: Finding tree SPMD style ...")

# each worker will be running a *separate* contraction optimizer
opt = ctg.HyperOptimizer(
    # make sure we generate at least 1 slice per process
    slicing_opts={'target_slices': comm.size},
    # each worker optimizes its own trials so we don't need a fast sampler
    optlib='optuna',
    # since the load is not balanced, it makes more sense to limit by time
    max_repeats=1_000_000,
    max_time=5,
)
# perform the search
tree = opt.search(inputs, output, size_dict)
score = opt.best['score']

# need to get the best tree from across all processes
print(f"{comm.rank}:: Sharing best tree ...")
_, tree = comm.allreduce((score, tree), op=MPI.MIN)

# -------------- STAGE 2: use SPMD mode to perform contraction -------------- #
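
The fragment stops before stage 2. A rough sketch of what it might look like, striping slices across ranks and summing the partial results (here `tree.contract_slice(arrays, i)` is assumed as the per-slice call; the exact method name may differ between cotengra versions):

# every rank contracts its own stripe of slices
results = [
    tree.contract_slice(arrays, i)  # assumed per-slice API
    for i in range(comm.rank, tree.multiplicity, comm.size)
]
partial = sum(results)

# sum the partial results across all ranks
total = comm.allreduce(partial, op=MPI.SUM)
if comm.rank == 0:
    print("contraction result:", total)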