def get_all_couplings(qpu_layout: QpuLayout) -> list[Coupling]:
    """
    Given `qpu_layout` of `Qubit`s, return all couplings between nearest
    neighbors.

    Parameters
    ----------
    qpu_layout: QpuLayout
        List of `Qubit`s to use as `QpuLayout`.

    Returns
    -------
    list[Coupling]
        List of all possible couplings between nearest neighbor `Qubit`s.

    Example
    -------
    >>> get_all_couplings(qpu_layout=((0, 0), (0, 1), (1, 0), (1, 1)))
    [((0, 0), (0, 1)), ((0, 0), (1, 0)), ((0, 1), (1, 1)), ((1, 0), (1, 1))]
    """
    # Two qubits are coupled if they are nearest neighbors on the grid
    return sort({
        tuple(sort(((x1, y1), (x2, y2))))
        for x1, y1 in qpu_layout
        for x2, y2 in qpu_layout
        if (x1 == x2 and abs(y1 - y2) == 1) or (y1 == y2 and abs(x1 - x2) == 1)
    })
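# A minimal standalone sketch of the same nearest-neighbor pairing, using only
# the standard library (builtin `sorted` instead of `hybridq.utils.sort`). The
# helper name `_grid_couplings` is hypothetical and not part of the library.
def _grid_couplings(qpu_layout):
    """Return all nearest-neighbor pairs of a layout of (x, y) qubits."""
    couplings = {
        tuple(sorted((q1, q2)))
        for q1 in qpu_layout
        for q2 in qpu_layout
        # Nearest neighbors differ by 1 in exactly one coordinate
        if abs(q1[0] - q2[0]) + abs(q1[1] - q2[1]) == 1
    }
    return sorted(couplings)

# Example: a 2x2 grid gives the four couplings listed in the docstring above.
assert _grid_couplings(((0, 0), (0, 1), (1, 0), (1, 1))) == [
    ((0, 0), (0, 1)), ((0, 0), (1, 0)), ((0, 1), (1, 1)), ((1, 0), (1, 1))
]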
def isclose(self, gate: Gate, atol: float = 1e-8) -> bool:
    """
    Determine if the matrix of `gate` is close within an absolute tolerance.
    If the gates are acting on a different set of qubits, `isclose` will
    return `False`.

    Parameters
    ----------
    gate: PowerMatrixGate
        Gate to compare with.
    atol: float, optional
        Absolute tolerance.

    Returns
    -------
    bool
        `True` if the two gates are close within the given absolute
        tolerance, otherwise `False`.

    Example
    -------
    >>> g1 = PowerMatrixGate(U=[[1, 2], [3, 4]])
    >>> g2 = PowerMatrixGate(U=[[4, 5], [3, 4]])
    >>> g1.isclose(g1)
    True
    >>> g1.isclose(g2)
    False
    >>> g1.on([3]).isclose(g1)
    False
    >>> g1.on([3]).isclose(g1.on([3]))
    True
    """
    # The two gates must either both provide qubits or both be virtual
    if not (self.qubits is None) ^ (gate.qubits is None):
        # The gates differ if they act on a different set of qubits
        if self.qubits is not None and sort(self.qubits) != sort(gate.qubits):
            return False

        # Get unitaries (using the same qubit order for both gates)
        _U1 = self.matrix(order=self.qubits if self.qubits else None)
        _U2 = gate.matrix(order=self.qubits if self.qubits else None)

        # Compare the two matrices within the given absolute tolerance
        return np.allclose(_U1, _U2, atol=atol)

    # Otherwise, only one of the two gates provides qubits and the gates differ
    else:
        return False
def moments(
        circuit: iter[{BaseGate, Circuit}]) -> list[list[{BaseGate, Circuit}]]:
    """
    Split `circuit` into moments, that is, groups of gates acting on
    disjoint sets of qubits.
    """
    from hybridq.gate import TupleGate

    # Convert iterable to list
    circuit = list(circuit)

    # If circuit is empty, return a single empty TupleGate
    if not circuit:
        return [TupleGate()]

    # Get qubits
    def _get_qubits(x):
        if isinstance(x, BaseGate):
            return x.qubits if x.n_qubits else tuple()
        elif isinstance(x, Circuit):
            return x.all_qubits()
        else:
            raise ValueError(f"'{x}' is not valid.")

    # Get all used qubits
    qubits = sort({q for x in circuit for q in _get_qubits(x)})

    # Get map of levels
    level_map = {q: 0 for q in qubits}
    level = [0] * len(circuit)

    # Get the right level for each object ..
    for i, x in enumerate(circuit):
        # Get qubits
        _qubits = _get_qubits(x)

        # If gate is acting on qubits, add gate to the right level
        if _qubits:
            # Get max level
            level[i] = np.max([level_map[q] for q in _qubits]) + 1

            # Update level_map
            level_map.update({q: level[i] for q in _qubits})

        # .. otherwise, simply update all qubits to create a new moment
        else:
            level[i] = np.max(level) + 1
            level_map = {q: level[i] for q in qubits}

    # Initialize moments
    moments = [[] for _ in range(np.max(level))]

    # Update moments
    for i, x in enumerate(circuit):
        moments[level[i] - 1].append(x)

    # Return moments
    return list(map(TupleGate, moments))
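# A minimal standalone sketch of the same greedy scheduling used by `moments`,
# written for plain (name, qubits) pairs instead of HybridQ gates. The helper
# name `_split_in_moments` is hypothetical and for illustration only.
def _split_in_moments(gates):
    """Greedily assign each gate to the earliest moment after the last
    moment that touched any of its qubits."""
    last_level = {}  # qubit -> last moment index using that qubit
    moments = []
    for name, qubits in gates:
        # The gate goes one level past the deepest of its qubits
        level = max((last_level.get(q, 0) for q in qubits), default=0) + 1
        for q in qubits:
            last_level[q] = level
        # Grow the list of moments if needed
        while len(moments) < level:
            moments.append([])
        moments[level - 1].append(name)
    return moments

# Example: the two CZs cannot share a moment since they both act on qubit 1.
assert _split_in_moments([('H', (0,)), ('CZ', (0, 1)), ('CZ', (1, 2))]) == \
    [['H'], ['CZ'], ['CZ']]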
def qubits(self) -> tuple[any, ...]:
    from hybridq.utils import sort

    # If empty, return empty tuple
    if not len(self):
        return tuple()

    # Get all qubits
    _qubits = tuple(g.qubits if g.provides('qubits') else None for g in self)

    # If any None is present, return None
    if any(q is None for q in _qubits):
        return None

    # Otherwise, flatten list and remove duplicates
    else:
        return tuple(sort(set(y for x in _qubits for y in x)))
def _simulate_tn_mpi(circuit: Circuit, initial_state: any, final_state: any, optimize: any, backend: any, complex_type: any, tensor_only: bool, verbose: bool, **kwargs): import quimb.tensor as tn import cotengra as ctg # Get MPI _mpi_comm = MPI.COMM_WORLD _mpi_size = _mpi_comm.Get_size() _mpi_rank = _mpi_comm.Get_rank() # Set default parameters kwargs.setdefault('compress', 2) kwargs.setdefault('simplify_tn', 'RC') kwargs.setdefault('max_iterations', 1) kwargs.setdefault('methods', ['kahypar', 'greedy']) kwargs.setdefault('max_time', 120) kwargs.setdefault('max_repeats', 16) kwargs.setdefault('minimize', 'combo') kwargs.setdefault('target_largest_intermediate', 0) kwargs.setdefault('max_largest_intermediate', 2**26) kwargs.setdefault('temperatures', [1.0, 0.1, 0.01]) kwargs.setdefault('parallel', None) kwargs.setdefault('cotengra', {}) kwargs.setdefault('max_n_slices', None) kwargs.setdefault('return_info', False) # Get random leaves_prefix leaves_prefix = ''.join( np.random.choice(list('abcdefghijklmnopqrstuvwxyz'), size=20)) # Initialize info _sim_info = {} # Alias for tn if optimize == 'tn': optimize = 'cotengra' if isinstance(circuit, Circuit): if not kwargs['parallel']: kwargs['parallel'] = 1 else: # If number of threads not provided, just use half of the number of available cpus if isinstance(kwargs['parallel'], bool) and kwargs['parallel'] == True: kwargs['parallel'] = cpu_count() // 2 if optimize is not None and kwargs['parallel'] and kwargs[ 'max_iterations'] == 1: warn("Parallelization for MPI works for multiple iterations only. " "For a better performance, use: 'max_iterations' > 1") # Get number of qubits qubits = circuit.all_qubits() n_qubits = len(qubits) # If initial/final state is None, set to all .'s initial_state = '.' * n_qubits if initial_state is None else initial_state final_state = '.' * n_qubits if final_state is None else final_state # Initial and final states must be valid strings for state, sname in [(initial_state, 'initial_state'), (final_state, 'final_state')]: # Get alphabet from string import ascii_letters # Check if string if not isinstance(state, str): raise ValueError(f"'{sname}' must be a valid string.") # Deprecated error if any(x in 'xX' for x in state): from warnings import warn # Define new DeprecationWarning (to always print the warning # signal) class DeprecationWarning(Warning): pass # Warn the user that '.' is used to represent open qubits warn( "Since '0.6.3', letters in the alphabet are used to " "trace selected qubits (including 'x' and 'X'). " "Instead, '.' is used to represent an open qubit.", DeprecationWarning) # Check only valid symbols are present if set(state).difference('01+-.' 
+ ascii_letters): raise ValueError(f"'{sname}' contains invalid symbols.") # Check number of qubits if len(state) != n_qubits: raise ValueError(f"'{sname}' has the wrong number of qubits " f"(expected {n_qubits}, got {len(state)})") # Check memory if 2**(initial_state.count('.') + final_state.count('.')) > kwargs['max_largest_intermediate']: raise MemoryError("Memory for the given number of open qubits " "exceeds the 'max_largest_intermediate'.") # Compress circuit if kwargs['compress']: if verbose: print( f"Compress circuit (max_n_qubits={kwargs['compress']}): ", end='', file=stderr) _time = time() circuit = utils.compress( circuit, kwargs['compress']['max_n_qubits'] if isinstance( kwargs['compress'], dict) else kwargs['compress'], verbose=verbose, **({ k: v for k, v in kwargs['compress'].items() if k != 'max_n_qubits' } if isinstance(kwargs['compress'], dict) else {})) circuit = Circuit( utils.to_matrix_gate(c, complex_type=complex_type) for c in circuit) if verbose: print(f"Done! ({time()-_time:1.2f}s)", file=stderr) # Get tensor network representation of circuit tensor, tn_qubits_map = utils.to_tn(circuit, return_qubits_map=True, leaves_prefix=leaves_prefix) # Define basic MPS _mps = { '0': np.array([1, 0]), '1': np.array([0, 1]), '+': np.array([1, 1]) / np.sqrt(2), '-': np.array([1, -1]) / np.sqrt(2) } # Attach initial/final state for state, ext in [(initial_state, 'i'), (final_state, 'f')]: for s, q in ((s, q) for s, q in zip(state, qubits) if s in _mps): inds = [f'{leaves_prefix}_{tn_qubits_map[q]}_{ext}'] tensor &= tn.Tensor(_mps[s], inds=inds, tags=inds) # For each unique letter, apply trace for x in set(initial_state + final_state).difference(''.join(_mps) + '.'): # Get indexes inds = [ f'{leaves_prefix}_{tn_qubits_map[q]}_i' for s, q in zip(initial_state, qubits) if s == x ] inds += [ f'{leaves_prefix}_{tn_qubits_map[q]}_f' for s, q in zip(final_state, qubits) if s == x ] # Apply trace tensor &= tn.Tensor(np.reshape([1] + [0] * (2**len(inds) - 2) + [1], (2, ) * len(inds)), inds=inds) # Simplify if requested if kwargs['simplify_tn']: tensor.full_simplify_(kwargs['simplify_tn']).astype_(complex_type) else: # Otherwise, just convert to the given complex_type tensor.astype_(complex_type) # Get contraction from heuristic if optimize == 'cotengra' and kwargs['max_iterations'] > 0: # Set cotengra parameters def cotengra_params(): # Get HyperOptimizer q = ctg.HyperOptimizer(methods=kwargs['methods'], max_time=kwargs['max_time'], max_repeats=kwargs['max_repeats'], minimize=kwargs['minimize'], progbar=False, parallel=False, **kwargs['cotengra']) # For some optlib, HyperOptimizer._retrieve_params is not # pickeable. Let's fix the problem by hand. 
q._retrieve_params = __FunctionWrap(q._retrieve_params) # Return HyperOptimizer return q # Get target size tli = kwargs['target_largest_intermediate'] with Pool(kwargs['parallel']) as pool: # Sumbit jobs _opts = [ cotengra_params() for _ in range(kwargs['max_iterations']) ] _map = [ pool.apply_async(tensor.contract, (all, ), dict(optimize=_opt, get='path-info')) for _opt in _opts ] with tqdm(total=len(_map), disable=not verbose, desc='Collecting contractions') as pbar: _old_completed = 0 while 1: # Count number of completed _completed = 0 for _w in _map: _completed += _w.ready() if _w.ready() and not _w.successful(): _w.get() # Update pbar pbar.update(_completed - _old_completed) _old_completed = _completed if _completed == len(_map): break # Wait sleep(1) # Collect results _infos = [_w.get() for _w in _map] if kwargs['minimize'] == 'size': opt, info = sort( zip(_opts, _infos), key=lambda w: (w[1].largest_intermediate, w[0].best['flops']))[0] else: opt, info = sort( zip(_opts, _infos), key=lambda w: (w[0].best['flops'], w[1].largest_intermediate))[0] if optimize == 'cotengra': # Gather best contractions _cost = _mpi_comm.gather( (info.largest_intermediate, info.opt_cost, _mpi_rank), root=0) if _mpi_rank == 0: if kwargs['minimize'] == 'size': _best_rank = sort(_cost, key=lambda x: (x[0], x[1]))[0][-1] else: _best_rank = sort(_cost, key=lambda x: (x[1], x[0]))[0][-1] else: _best_rank = None _best_rank = _mpi_comm.bcast(_best_rank, root=0) if hasattr(opt, '_pool'): del (opt._pool) # Distribute opt/info tensor, info, opt = _mpi_comm.bcast((tensor, info, opt), root=_best_rank) # Just return tensor if required if tensor_only: if optimize == 'cotengra' and kwargs['max_iterations'] > 0: return tensor, (info, opt) else: return tensor else: # Set tensor tensor = circuit if len(optimize) == 2 and isinstance( optimize[0], PathInfo) and isinstance( optimize[1], ctg.hyper.HyperOptimizer): # Get info and opt from optimize info, opt = optimize # Set optimization optimize = 'cotengra' else: # Get tensor and path tensor = circuit # Print some info if verbose and _mpi_rank == 0: print( f'Largest Intermediate: 2^{np.log2(float(info.largest_intermediate)):1.2f}', file=stderr) print( f'Max Largest Intermediate: 2^{np.log2(float(kwargs["max_largest_intermediate"])):1.2f}', file=stderr) print(f'Flops: 2^{np.log2(float(info.opt_cost)):1.2f}', file=stderr) if optimize == 'cotengra': if _mpi_rank == 0: # Get indexes _inds = tensor.outer_inds() # Get input indexes and output indexes _i_inds = sort([x for x in _inds if x[-2:] == '_i'], key=lambda x: int(x.split('_')[1])) _f_inds = sort([x for x in _inds if x[-2:] == '_f'], key=lambda x: int(x.split('_')[1])) # Get order _inds = [_inds.index(x) for x in _i_inds + _f_inds] # Get slice finder sf = ctg.SliceFinder( info, target_size=kwargs['max_largest_intermediate'], allow_outer=False) # Find slices with tqdm(kwargs['temperatures'], disable=not verbose, leave=False) as pbar: for _temp in pbar: pbar.set_description(f'Find slices (T={_temp})') ix_sl, cost_sl = sf.search(temperature=_temp) # Get slice contractor sc = sf.SlicedContractor([t.data for t in tensor]) # Make sure that no open qubits are sliced assert (not { ix: i for i, ix in enumerate(sc.output) if ix in sc.sliced }) # Print some infos if verbose: print( f'Number of slices: 2^{np.log2(float(cost_sl.nslices)):1.2f}', file=stderr) print( f'Flops+Cuts: 2^{np.log2(float(cost_sl.total_flops)):1.2f}', file=stderr) # Update infos _sim_info.update({ 'flops': info.opt_cost, 'largest_intermediate': 
info.largest_intermediate, 'n_slices': cost_sl.nslices, 'total_flops': cost_sl.total_flops }) # Get slices slices = list(range(cost_sl.nslices + 1)) + [None] * ( _mpi_size - cost_sl.nslices) if cost_sl.nslices < _mpi_size else [ cost_sl.nslices / _mpi_size * i for i in range(_mpi_size) ] + [cost_sl.nslices] if not np.alltrue( [int(x) == x for x in slices if x is not None]) or not np.alltrue([ slices[i] < slices[i + 1] for i in range(_mpi_size) if slices[i] is not None and slices[i + 1] is not None ]): raise RuntimeError('Something went wrong') # Convert all to integers slices = [int(x) if x is not None else None for x in slices] else: sc = slices = None # Distribute slicer and slices sc, slices = _mpi_comm.bcast((sc, slices), root=0) _n_slices = max(x for x in slices if x) if kwargs['max_n_slices'] and _n_slices > kwargs['max_n_slices']: raise RuntimeError( f'Too many slices ({_n_slices} > {kwargs["max_n_slices"]})') # Contract slices _tensor = None if slices[_mpi_rank] is not None and slices[_mpi_rank + 1] is not None: for i in tqdm(range(slices[_mpi_rank], slices[_mpi_rank + 1]), desc='Contracting slices', disable=not verbose, leave=False): if _tensor is None: _tensor = np.copy(sc.contract_slice(i, backend=backend)) else: _tensor += sc.contract_slice(i, backend=backend) # Gather tensors if _mpi_rank != 0: _mpi_comm.send(_tensor, dest=0, tag=11) elif _mpi_rank == 0: for i in tqdm(range(1, _mpi_size), desc='Collecting tensors', disable=not verbose): _p_tensor = _mpi_comm.recv(source=i, tag=11) if _p_tensor is not None: _tensor += _p_tensor if _mpi_rank == 0: # Create map _map = ''.join([get_symbol(x) for x in range(len(_inds))]) _map += '->' _map += ''.join([get_symbol(x) for x in _inds]) # Reorder tensor tensor = contract(_map, _tensor) # Deprecated ## Reshape tensor #if _inds: # if _i_inds and _f_inds: # tensor = np.reshape(tensor, # (2**len(_i_inds), 2**len(_f_inds))) # else: # tensor = np.reshape(tensor, # (2**max(len(_i_inds), len(_f_inds)),)) else: tensor = None else: if _mpi_rank == 0: # Contract tensor tensor = tensor.contract(optimize=optimize, backend=backend) if hasattr(tensor, 'inds'): # Get input indexes and output indexes _i_inds = sort([x for x in tensor.inds if x[-2:] == '_i'], key=lambda x: int(x.split('_')[1])) _f_inds = sort([x for x in tensor.inds if x[-2:] == '_f'], key=lambda x: int(x.split('_')[1])) # Transpose tensor tensor.transpose(*(_i_inds + _f_inds), inplace=True) # Deprecated ## Reshape tensor #if _i_inds and _f_inds: # tensor = np.reshape(tensor, # (2**len(_i_inds), 2**len(_f_inds))) #else: # tensor = np.reshape(tensor, # (2**max(len(_i_inds), len(_f_inds)),)) else: tensor = None if kwargs['return_info']: return tensor, _sim_info else: return tensor
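# A minimal standalone sketch of how slice indices can be distributed among MPI
# ranks as contiguous [start, stop) ranges, in the spirit of the boundary list
# built above. It is illustrative only: the helper name `_partition_slices` is
# hypothetical and the even split below does not reproduce the exact code path.
def _partition_slices(n_slices, n_ranks):
    """Return a boundary list `b` such that rank r contracts slices b[r]:b[r+1],
    with `None` marking ranks that receive no work."""
    if n_slices < n_ranks:
        # One slice per rank; the remaining ranks stay idle
        return list(range(n_slices + 1)) + [None] * (n_ranks - n_slices)
    # Otherwise, split the slices as evenly as possible
    return [n_slices * r // n_ranks for r in range(n_ranks)] + [n_slices]

# Example: 8 slices over 3 ranks -> ranks get slices [0, 2), [2, 5), [5, 8).
assert _partition_slices(8, 3) == [0, 2, 5, 8]
# Example: 2 slices over 4 ranks -> the last two ranks are idle.
assert _partition_slices(2, 4) == [0, 1, 2, None, None]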
def commutes_with(self, gate: PowerMatrixGate, atol: float = 1e-7) -> bool:
    """
    Return `True` if the calling gate commutes with `gate`.

    Parameters
    ----------
    gate: PowerMatrixGate
        Gate to check commutation with.
    atol: float, optional
        Absolute tolerance.

    Returns
    -------
    bool
        `True` if the calling gate commutes with `gate`, otherwise `False`.
    """
    from string import ascii_lowercase as alc, ascii_uppercase as auc

    # Check both gates have qubits
    if self.qubits is None or gate.qubits is None:
        raise ValueError("Cannot check commutation between virtual gates.")

    # Get shared qubits
    shared_qubits = sort(set(self.qubits).intersection(gate.qubits))

    # If no qubits are shared, the gates definitely commute
    if not shared_qubits:
        return True

    # Rename
    g1, g2 = self, gate

    # Get all qubits
    q12 = tuple(sort(set(g1.qubits + g2.qubits)))

    # Get number of qubits
    n12 = len(q12)

    # Get unitaries
    U1 = np.reshape(g1.matrix(), (2, ) * 2 * g1.n_qubits)
    U2 = np.reshape(g2.matrix(), (2, ) * 2 * g2.n_qubits)

    # Define how to multiply matrices
    def _mul(w1, w2):
        # Get qubits and unitaries
        q1, U1 = w1
        q2, U2 = w2

        # Get number of qubits
        n1 = len(q1)
        n2 = len(q2)

        # Construct map
        _map = ''
        _map += ''.join(alc[q12.index(q)] for q in q1)
        _map += ''.join(auc[-shared_qubits.index(q) -
                            1 if q in shared_qubits else q12.index(q)]
                        for q in q1)
        _map += ','
        _map += ''.join(auc[-shared_qubits.index(q) -
                            1] if q in shared_qubits else alc[q12.index(q)]
                        for q in q2)
        _map += ''.join(auc[q12.index(q)] for q in q2)
        _map += '->'
        _map += ''.join(alc[x] for x in range(n12))
        _map += ''.join(auc[x] for x in range(n12))

        # Multiply map
        return np.einsum(_map, U1, U2)

    # Compute products
    P1 = _mul((g1.qubits, U1), (g2.qubits, U2))
    P2 = _mul((g2.qubits, U2), (g1.qubits, U1))

    # Check if the two products coincide within the given absolute tolerance
    return np.allclose(P1, P2, atol=atol)
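# A minimal standalone sketch of the commutation check for gates sharing qubits:
# pad both gates to the joint two-qubit register with an identity and compare the
# two products. Illustrative only; the method above avoids the full Kronecker
# product by contracting only the shared legs with `np.einsum`.
import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]])
Z = np.diag([1, -1])
CZ = np.diag([1, 1, 1, -1])

def _commute(A, B, atol=1e-7):
    """Return True if A @ B is close to B @ A."""
    return np.allclose(A @ B, B @ A, atol=atol)

# Z acting on qubit 1 (padded as I (x) Z) commutes with CZ ...
assert _commute(CZ, np.kron(I2, Z))
# ... while X acting on qubit 1 does not.
assert not _commute(CZ, np.kron(I2, X))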
def matrix(self, order: iter[any] = None) -> np.ndarray:
    """
    Return matrix representing `PowerMatrixGate`. If `order` is provided,
    the given order of qubits is used to output its matrix.

    Parameters
    ----------
    order: iter[any], optional
        Order of qubits used to output the matrix.

    Returns
    -------
    array_like
        Matrix representing `PowerMatrixGate`.

    Example
    -------
    >>> g = PowerMatrixGate(qubits=[0, 1],
    ...                     U=[[1, 0, 0, 0],
    ...                        [0, 1, 0, 0],
    ...                        [0, 0, 0, 1],
    ...                        [0, 0, 1, 0]])
    >>> g.matrix()
    array([[1, 0, 0, 0],
           [0, 1, 0, 0],
           [0, 0, 0, 1],
           [0, 0, 1, 0]])

    Here the order of qubits is `[0, 1]`. On the contrary:

    >>> g.on().matrix(order=[1, 0])
    array([[1, 0, 0, 0],
           [0, 0, 0, 1],
           [0, 0, 1, 0],
           [0, 1, 0, 0]])

    outputs the matrix with the qubits ordered as `[1, 0]`.
    """
    # Get unitary
    _U = np.asarray(self.Matrix)

    if order is not None:
        # Convert order to list
        order = list(order)

        # Order is allowed only if gate.qubits are specified
        if self.qubits is None or sort(order) != sort(self.qubits):
            raise ValueError("'order' is not a permutation of 'gate.qubits'.")

        # Reorder matrix in case qubits are out-of-order
        if order and tuple(order) != tuple(self.qubits):
            # Transpose
            _U = np.reshape(
                np.transpose(
                    np.reshape(_U, (2, ) * (2 * self.n_qubits)),
                    [self.qubits.index(q) for q in order] +
                    [self.n_qubits + self.qubits.index(q) for q in order]),
                (2**self.n_qubits, 2**self.n_qubits))

    # Apply power
    if self.power != 1:
        _U = powm(_U, float(self.power))

    # Apply conjugation
    if self.__conj:
        _U = _U.conj()

    # Apply transposition
    if self.__T:
        _U = _U.T

    # Return matrix
    return _U
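# A minimal standalone sketch of the reordering performed by `matrix(order=...)`:
# permuting the qubits of a k-qubit unitary amounts to permuting the row and the
# column axes of its (2,)*2k tensor reshape. The helper `_reorder_unitary` is
# hypothetical and for illustration only.
import numpy as np

def _reorder_unitary(U, qubits, order):
    """Return `U` (acting on `qubits`) expressed with its qubits listed as `order`."""
    n = len(qubits)
    perm = [qubits.index(q) for q in order]
    # Permute the output axes and the input axes consistently
    axes = perm + [n + p for p in perm]
    return np.reshape(np.transpose(np.reshape(U, (2, ) * 2 * n), axes),
                      (2**n, 2**n))

# Example: a CNOT written for qubits (0, 1) and re-expressed in the order (1, 0).
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
assert np.array_equal(
    _reorder_unitary(CNOT, (0, 1), (1, 0)),
    np.array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]]))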
def _unique_flatten(l):
    from hybridq.utils import sort

    # Flatten the iterable of iterables and remove duplicates
    return sort(set(y for x in l for y in x))
def pad(gate: Gate,
        qubits: iter[any],
        order: iter[any] = None,
        return_matrix_only: bool = False) -> {MatrixGate, np.ndarray}:
    """
    Pad `gate` to act on `qubits`. More precisely, if `gate` is acting on a
    subset of `qubits`, extend `gate` with identities to act on all `qubits`.

    Parameters
    ----------
    gate: Gate
        The gate to pad.
    qubits: iter[any]
        Qubits used to pad `gate`. If `gate.qubits` is not a subset of
        `qubits`, raise an error.
    order: iter[any], optional
        If provided, reorder the qubits of the final gate according to
        `order`.
    return_matrix_only: bool, optional
        If `True`, the matrix representing the padded gate is returned
        instead of a `MatrixGate` (default: `False`).

    Returns
    -------
    MatrixGate
        The padded gate acting on `qubits` (or its matrix, if
        `return_matrix_only` is `True`).
    """
    from hybridq.gate import MatrixGate
    from hybridq.utils import sort

    # Convert qubits to tuple
    qubits = tuple(qubits)

    # Convert order to tuple if provided
    order = None if order is None else tuple(order)

    # Check that order is a permutation of qubits
    if order and sort(qubits) != sort(order):
        raise ValueError("'order' must be a permutation of 'qubits'")

    # 'gate' must have qubits and it must be a subset of 'qubits'
    if not gate.provides('qubits') or set(gate.qubits).difference(qubits):
        raise ValueError("'gate' must provide qubits and those "
                         "qubits must be a subset of 'qubits'.")

    # Get matrix
    M = gate.matrix()

    # Pad matrix with identity
    if gate.n_qubits != len(qubits):
        M = np.kron(M, np.eye(2**(len(qubits) - gate.n_qubits)))

    # Get new qubits
    qubits = gate.qubits + tuple(set(qubits).difference(gate.qubits))

    # Reorder if required
    if order and order != qubits:
        # Get new matrix
        M = MatrixGate(M, qubits=qubits).matrix(order=order)

        # Set new qubits
        qubits = order

    # Return gate
    return M if return_matrix_only else MatrixGate(
        M, qubits=qubits, tags=gate.tags if gate.provides('tags') else {})
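# A minimal standalone sketch of the padding step: a gate on a subset of qubits
# is extended with an identity on the missing qubits, after which its qubit list
# is the gate's own qubits followed by the added ones. Illustrative only; the
# helper name `_pad_matrix` is hypothetical and not the HybridQ API.
import numpy as np

def _pad_matrix(U, gate_qubits, qubits):
    """Pad `U` (acting on `gate_qubits`) with identities to act on all `qubits`."""
    missing = tuple(q for q in qubits if q not in gate_qubits)
    # The identity on the missing qubits is appended on the right of the product
    return np.kron(U, np.eye(2**len(missing))), gate_qubits + missing

X = np.array([[0, 1], [1, 0]])
# Padding X (on qubit 1) to the register (0, 1) yields a two-qubit matrix whose
# qubit order is (1, 0): qubit 1 first, then the padded qubit 0.
M, new_qubits = _pad_matrix(X, (1,), (0, 1))
assert new_qubits == (1, 0)
assert np.array_equal(M, np.kron(X, np.eye(2)))

# Re-expressing it in the order (0, 1) gives identity-on-0 tensor X-on-1.
M01 = np.reshape(np.transpose(np.reshape(M, (2, 2, 2, 2)), (1, 0, 3, 2)), (4, 4))
assert np.array_equal(M01, np.kron(np.eye(2), X))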
def _simulate_tn(circuit: any, initial_state: any, final_state: any, optimize: any, backend: any, complex_type: any, tensor_only: bool, verbose: bool, **kwargs): import quimb.tensor as tn import cotengra as ctg # Get random leaves_prefix leaves_prefix = ''.join( np.random.choice(list('abcdefghijklmnopqrstuvwxyz'), size=20)) # Initialize info _sim_info = {} # Alias for tn if optimize == 'tn': optimize = 'cotengra' if isinstance(circuit, Circuit): # Get number of qubits qubits = circuit.all_qubits() n_qubits = len(qubits) # If initial/final state is None, set to all .'s initial_state = '.' * n_qubits if initial_state is None else initial_state final_state = '.' * n_qubits if final_state is None else final_state # Initial and final states must be valid strings for state, sname in [(initial_state, 'initial_state'), (final_state, 'final_state')]: # Get alphabet from string import ascii_letters # Check if string if not isinstance(state, str): raise ValueError(f"'{sname}' must be a valid string.") # Deprecated error if any(x in 'xX' for x in state): from hybridq.utils import DeprecationWarning from warnings import warn # Warn the user that '.' is used to represent open qubits warn( "Since '0.6.3', letters in the alphabet are used to " "trace selected qubits (including 'x' and 'X'). " "Instead, '.' is used to represent an open qubit.", DeprecationWarning) # Check only valid symbols are present if set(state).difference('01+-.' + ascii_letters): raise ValueError(f"'{sname}' contains invalid symbols.") # Check number of qubits if len(state) != n_qubits: raise ValueError(f"'{sname}' has the wrong number of qubits " f"(expected {n_qubits}, got {len(state)})") # Check memory if 2**(initial_state.count('.') + final_state.count('.')) > kwargs['max_largest_intermediate']: raise MemoryError("Memory for the given number of open qubits " "exceeds the 'max_largest_intermediate'.") # Compress circuit if kwargs['compress']: if verbose: print( f"Compress circuit (max_n_qubits={kwargs['compress']}): ", end='', file=stderr) _time = time() circuit = utils.compress( circuit, kwargs['compress']['max_n_qubits'] if isinstance( kwargs['compress'], dict) else kwargs['compress'], verbose=verbose, **({ k: v for k, v in kwargs['compress'].items() if k != 'max_n_qubits' } if isinstance(kwargs['compress'], dict) else {})) circuit = Circuit( utils.to_matrix_gate(c, complex_type=complex_type) for c in circuit) if verbose: print(f"Done! 
({time()-_time:1.2f}s)", file=stderr) # Get tensor network representation of circuit tensor, tn_qubits_map = utils.to_tn(circuit, return_qubits_map=True, leaves_prefix=leaves_prefix) # Define basic MPS _mps = { '0': np.array([1, 0]), '1': np.array([0, 1]), '+': np.array([1, 1]) / np.sqrt(2), '-': np.array([1, -1]) / np.sqrt(2) } # Attach initial/final state for state, ext in [(initial_state, 'i'), (final_state, 'f')]: for s, q in ((s, q) for s, q in zip(state, qubits) if s in _mps): inds = [f'{leaves_prefix}_{tn_qubits_map[q]}_{ext}'] tensor &= tn.Tensor(_mps[s], inds=inds, tags=inds) # For each unique letter, apply trace for x in set(initial_state + final_state).difference(''.join(_mps) + '.'): # Get indexes inds = [ f'{leaves_prefix}_{tn_qubits_map[q]}_i' for s, q in zip(initial_state, qubits) if s == x ] inds += [ f'{leaves_prefix}_{tn_qubits_map[q]}_f' for s, q in zip(final_state, qubits) if s == x ] # Apply trace tensor &= tn.Tensor(np.reshape([1] + [0] * (2**len(inds) - 2) + [1], (2, ) * len(inds)), inds=inds) # Simplify if requested if kwargs['simplify_tn']: tensor.full_simplify_(kwargs['simplify_tn']).astype_(complex_type) else: # Otherwise, just convert to the given complex_type tensor.astype_(complex_type) # Get contraction from heuristic if optimize == 'cotengra' and kwargs['max_iterations'] > 0: # Create local client if MPI has been detected (not compatible with Dask at the moment) if _mpi_env and kwargs['parallel']: from distributed import Client, LocalCluster _client = Client(LocalCluster(processes=False)) else: _client = None # Set cotengra parameters cotengra_params = lambda: ctg.HyperOptimizer( methods=kwargs['methods'], max_time=kwargs['max_time'], max_repeats=kwargs['max_repeats'], minimize=kwargs['minimize'], progbar=verbose, parallel=kwargs['parallel'], **kwargs['cotengra']) # Get optimized path opt = cotengra_params() info = tensor.contract(all, optimize=opt, get='path-info') # Get target size tli = kwargs['target_largest_intermediate'] # Repeat for the requested number of iterations for _ in range(1, kwargs['max_iterations']): # Break if largest intermediate is equal or smaller than target if info.largest_intermediate <= tli: break # Otherwise, restart _opt = cotengra_params() _info = tensor.contract(all, optimize=_opt, get='path-info') # Store the best if kwargs['minimize'] == 'size': if _info.largest_intermediate < info.largest_intermediate or ( _info.largest_intermediate == info.largest_intermediate and _opt.best['flops'] < opt.best['flops']): info = _info opt = _opt else: if _opt.best['flops'] < opt.best['flops'] or ( _opt.best['flops'] == opt.best['flops'] and _info.largest_intermediate < info.largest_intermediate): info = _info opt = _opt # Close client if exists if _client: _client.shutdown() _client.close() # Just return tensor if required if tensor_only: if optimize == 'cotengra' and kwargs['max_iterations'] > 0: return tensor, (info, opt) else: return tensor else: # Set tensor tensor = circuit if len(optimize) == 2 and isinstance( optimize[0], PathInfo) and isinstance( optimize[1], ctg.hyper.HyperOptimizer): # Get info and opt from optimize info, opt = optimize # Set optimization optimize = 'cotengra' else: # Get tensor and path tensor = circuit # Print some info if verbose: print( f'Largest Intermediate: 2^{np.log2(float(info.largest_intermediate)):1.2f}', file=stderr) print( f'Max Largest Intermediate: 2^{np.log2(float(kwargs["max_largest_intermediate"])):1.2f}', file=stderr) print(f'Flops: 2^{np.log2(float(info.opt_cost)):1.2f}', file=stderr) if optimize 
== 'cotengra': # Get indexes _inds = tensor.outer_inds() # Get input indexes and output indexes _i_inds = sort([x for x in _inds if x[-2:] == '_i'], key=lambda x: int(x.split('_')[1])) _f_inds = sort([x for x in _inds if x[-2:] == '_f'], key=lambda x: int(x.split('_')[1])) # Get order _inds = [_inds.index(x) for x in _i_inds + _f_inds] # Get slice finder sf = ctg.SliceFinder(info, target_size=kwargs['max_largest_intermediate']) # Find slices with tqdm(kwargs['temperatures'], disable=not verbose, leave=False) as pbar: for _temp in pbar: pbar.set_description(f'Find slices (T={_temp})') ix_sl, cost_sl = sf.search(temperature=_temp) # Get slice contractor sc = sf.SlicedContractor([t.data for t in tensor]) # Update infos _sim_info.update({ 'flops': info.opt_cost, 'largest_intermediate': info.largest_intermediate, 'n_slices': cost_sl.nslices, 'total_flops': cost_sl.total_flops }) # Print some infos if verbose: print( f'Number of slices: 2^{np.log2(float(cost_sl.nslices)):1.2f}', file=stderr) print(f'Flops+Cuts: 2^{np.log2(float(cost_sl.total_flops)):1.2f}', file=stderr) if kwargs['max_n_slices'] and sc.nslices > kwargs['max_n_slices']: raise RuntimeError( f'Too many slices ({sc.nslices} > {kwargs["max_n_slices"]})') # Contract tensor _li = np.log2(float(info.largest_intermediate)) _mli = np.log2(float(kwargs["max_largest_intermediate"])) _tensor = sc.gather_slices((sc.contract_slice( i, backend=backend ) for i in tqdm( range(sc.nslices), desc=f'Contracting tensor (li=2^{_li:1.0f}, mli=2^{_mli:1.1f})', leave=False))) # Create map _map = ''.join([get_symbol(x) for x in range(len(_inds))]) _map += '->' _map += ''.join([get_symbol(x) for x in _inds]) # Reorder tensor tensor = contract(_map, _tensor) # Deprecated ## Reshape tensor #if _inds: # if _i_inds and _f_inds: # tensor = np.reshape(tensor, (2**len(_i_inds), 2**len(_f_inds))) # else: # tensor = np.reshape(tensor, # (2**max(len(_i_inds), len(_f_inds)),)) else: # Contract tensor tensor = tensor.contract(optimize=optimize, backend=backend) if hasattr(tensor, 'inds'): # Get input indexes and output indexes _i_inds = sort([x for x in tensor.inds if x[-2:] == '_i'], key=lambda x: int(x.split('_')[1])) _f_inds = sort([x for x in tensor.inds if x[-2:] == '_f'], key=lambda x: int(x.split('_')[1])) # Transpose tensor tensor.transpose(*(_i_inds + _f_inds), inplace=True) # Deprecated ## Reshape tensor #if _i_inds and _f_inds: # tensor = np.reshape(tensor, (2**len(_i_inds), 2**len(_f_inds))) #else: # tensor = np.reshape(tensor, # (2**max(len(_i_inds), len(_f_inds)),)) if kwargs['return_info']: return tensor, _sim_info else: return tensor
def _simulate_evolution(circuit: iter[Gate], initial_state: any, final_state: any, optimize: any, backend: any, complex_type: any, verbose: bool, **kwargs): """ Perform simulation of the circuit by using the direct evolution of the quantum state. """ if _detect_mpi: warn("Detected MPI but optimize='evolution' does not support MPI.") # Initialize info _sim_info = {} # Convert iterable to circuit circuit = Circuit(circuit) # Get number of qubits qubits = circuit.all_qubits() n_qubits = len(qubits) # Check if core libraries have been loaded properly if any(not x for x in [_swap_core, _dot_core, _to_complex_core, _log2_pack_size]): warn("Cannot find C++ HybridQ core. " "Falling back to optimize='evolution-einsum' instead.") optimize = 'einsum' # If the system is too small, fallback to einsum if optimize == 'hybridq' and n_qubits <= max(10, _log2_pack_size): warn("The system is too small to use optimize='evolution-hybridq'. " "Falling back to optimize='evolution-einsum'") optimize = 'einsum' if verbose: print(f'# Optimization: {optimize}', file=stderr) # Check memory if 2**n_qubits > kwargs['max_largest_intermediate']: raise MemoryError( "Memory for the given number of qubits exceeds the 'max_largest_intermediate'." ) # If final_state is specified, warn user if final_state is not None: warn( f"'final_state' cannot be specified in optimize='{optimize}'. Ignoring 'final_state'." ) # Initial state must be provided if initial_state is None: raise ValueError( "'initial_state' must be specified for optimize='evolution'.") # Convert complex_type to np.dtype complex_type = np.dtype(complex_type) # Print info if verbose: print(f"Compress circuit (max_n_qubits={kwargs['compress']}): ", end='', file=stderr) _time = time() # Compress circuit circuit = utils.compress( circuit, kwargs['compress']['max_n_qubits'] if isinstance( kwargs['compress'], dict) else kwargs['compress'], verbose=verbose, skip_compression=[pr.FunctionalGate], **({ k: v for k, v in kwargs['compress'].items() if k != 'max_n_qubits' } if isinstance(kwargs['compress'], dict) else {})) # Check that FunctionalGate's are not compressed assert (all(not isinstance(g, pr.FunctionalGate) if len(x) > 1 else True for x in circuit for g in x)) # Compress everything which is not a FunctionalGate circuit = Circuit(g for c in (c if any( isinstance(g, pr.FunctionalGate) for g in c) else [utils.to_matrix_gate(c, complex_type=complex_type)] for c in circuit) for g in c) # Get state initial_state = prepare_state(initial_state, complex_type=complex_type) if isinstance( initial_state, str) else initial_state if verbose: print(f"Done! ({time()-_time:1.2f}s)", file=stderr) if optimize == 'hybridq': if complex_type not in ['complex64', 'complex128']: warn( "optimize=evolution-hybridq only support ['complex64', 'complex128']. Using 'complex64'." 
) complex_type = np.dtype('complex64') # Get float_type float_type = np.real(np.array(1, dtype=complex_type)).dtype # Get C float_type c_float_type = { np.dtype('float32'): ctypes.c_float, np.dtype('float64'): ctypes.c_double }[float_type] # Load libraries _apply_U = _dot_core[float_type] # Get swap core _swap = _swap_core[float_type] # Get to_complex core _to_complex = _to_complex_core[complex_type] # Get states _psi = aligned.empty(shape=(2, ) + initial_state.shape, dtype=float_type, order='C', alignment=32) # Split in real and imaginary part _psi_re = _psi[0] _psi_im = _psi[1] # Check alignment assert (_psi_re.ctypes.data % 32 == 0) assert (_psi_im.ctypes.data % 32 == 0) # Get C-pointers _psi_re_ptr = _psi_re.ctypes.data_as(ctypes.POINTER(c_float_type)) _psi_im_ptr = _psi_im.ctypes.data_as(ctypes.POINTER(c_float_type)) # Initialize np.copyto(_psi_re, np.real(initial_state)) np.copyto(_psi_im, np.imag(initial_state)) # Create index maps _map = {q: n_qubits - x - 1 for x, q in enumerate(qubits)} _inv_map = [q for q, _ in sort(_map.items(), key=lambda x: x[1])] # Set largest swap_size _max_swap_size = 0 # Start clock _ini_time = time() # Apply all gates for gate in tqdm(circuit, disable=not verbose): # FunctionalGate if isinstance(gate, pr.FunctionalGate): # Get order order = tuple( q for q, _ in sorted(_map.items(), key=lambda x: x[1])[::-1]) # Apply gate to state new_psi, new_order = gate.apply(psi=_psi, order=order) # Copy back if needed if new_psi is not _psi: # Align if needed _psi = aligned.asarray(new_psi, order='C', alignment=32, dtype=_psi.dtype) # Redefine real and imaginary part _psi_re = _psi[0] _psi_im = _psi[1] # Get C-pointers _psi_re_ptr = _psi_re.ctypes.data_as( ctypes.POINTER(c_float_type)) _psi_im_ptr = _psi_im.ctypes.data_as( ctypes.POINTER(c_float_type)) # This can be eventually fixed ... 
if any(x != y for x, y in zip(order, new_order)): raise RuntimeError("'order' has changed.") elif gate.provides(['qubits', 'matrix']): # Check if any qubits is withing the pack_size if any(q in _inv_map[:_log2_pack_size] for q in gate.qubits): #@@@ Alternative way to always use the smallest swap size #@@@ #@@@ # Get positions #@@@ _pos = np.fromiter((_map[q] for q in gate.qubits), #@@@ dtype=int) #@@@ # Get smallest swap size #@@@ _swap_size = 0 if np.all(_pos >= _log2_pack_size) else next( #@@@ k #@@@ for k in range(_log2_pack_size, 2 * #@@@ max(len(_pos), _log2_pack_size) + 1) #@@@ if sum(_pos < k) <= k - _log2_pack_size) #@@@ # Get new order #@@@ _order = [ #@@@ x for x, q in enumerate(_inv_map[:_swap_size]) #@@@ if q not in gate.qubits #@@@ ] #@@@ _order += [ #@@@ x for x, q in enumerate(_inv_map[:_swap_size]) #@@@ if q in gate.qubits #@@@ ] if len(gate.qubits) <= 4: # Get new order _order = [ x for x, q in enumerate(_inv_map[:8]) if q not in gate.qubits ] _order += [ x for x, q in enumerate(_inv_map[:8]) if q in gate.qubits ] else: # Get qubit indexes for gate _gate_idxs = [_inv_map.index(q) for q in gate.qubits] # Get new order _order = [ x for x in range(n_qubits) if x not in _gate_idxs ][:_log2_pack_size] _order += [x for x in _gate_idxs if x < max(_order)] # Get swap size _swap_size = len(_order) # Update max swap size if _swap_size > _max_swap_size: _max_swap_size = _swap_size # Update maps _inv_map[:_swap_size] = [ _inv_map[:_swap_size][x] for x in _order ] _map.update( {q: x for x, q in enumerate(_inv_map[:_swap_size])}) # Apply swap _order = np.array(_order, dtype='uint32') _swap( _psi_re_ptr, _order.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)), n_qubits, len(_order)) _swap( _psi_im_ptr, _order.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)), n_qubits, len(_order)) # Get positions _pos = np.array([_map[q] for q in reversed(gate.qubits)], dtype='uint32') # Get matrix _U = np.asarray(gate.matrix(), dtype=complex_type, order='C') # Apply matrix if _apply_U( _psi_re_ptr, _psi_im_ptr, _U.ctypes.data_as(ctypes.POINTER(c_float_type)), _pos.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)), n_qubits, len(_pos)): raise RuntimeError('something went wrong') else: raise RuntimeError(f"'{gate}' not supported") # Check maps are still consistent assert (all(_inv_map[_map[q]] == q for q in _map)) # Swap back to the correct order _order = np.array([_inv_map.index(q) for q in reversed(qubits)][:_max_swap_size], dtype='uint32') _swap(_psi_re_ptr, _order.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)), n_qubits, len(_order)) _swap(_psi_im_ptr, _order.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)), n_qubits, len(_order)) # Stop clock _end_time = time() # Copy the results if kwargs['return_numpy_array']: _complex_psi = np.empty(_psi.shape[1:], dtype=complex_type) _to_complex( _psi_re_ptr, _psi_im_ptr, _complex_psi.ctypes.data_as(ctypes.POINTER(c_float_type)), 2**n_qubits) _psi = _complex_psi # Update info _sim_info['runtime (s)'] = _end_time - _ini_time elif optimize.split('-')[0] == 'einsum': optimize = '-'.join(optimize.split('-')[1:]) if not optimize: optimize = 'auto' # Split circuits to separate FunctionalGate's circuit = utils.compress( circuit, max_n_qubits=len(qubits), skip_compression=[pr.FunctionalGate], **({ k: v for k, v in kwargs['compress'].items() if k != 'max_n_qubits' } if isinstance(kwargs['compress'], dict) else {})) # Check that FunctionalGate's are not compressed assert (all( not isinstance(g, pr.FunctionalGate) if len(x) > 1 else True for x in circuit for g in x)) # Prepare 
initial_state _psi = initial_state # Initialize time _ini_time = time() for circuit in circuit: # Check assert (all(not isinstance(g, pr.FunctionalGate) for g in circuit) or len(circuit) == 1) # Apply gate if functional if len(circuit) == 1 and isinstance(circuit[0], pr.FunctionalGate): # Apply gate to state _psi, qubits = circuit[0].apply(psi=_psi, order=qubits) else: # Get gates and corresponding qubits _qubits, _gates = zip( *((c.qubits, np.reshape(c.matrix().astype(complex_type), (2, ) * (2 * len(c.qubits)))) for c in circuit)) # Initialize map _map = {q: get_symbol(x) for x, q in enumerate(qubits)} _count = n_qubits _path = ''.join((_map[q] for q in qubits)) # Generate map for _qs in _qubits: # Initialize local paths _path_in = _path_out = '' # Add incoming legs for _q in _qs: _path_in += _map[_q] # Add outcoming legs for _q in _qs: _map[_q] = get_symbol(_count) _count += 1 _path_out += _map[_q] # Update path _path = _path_out + _path_in + ',' + _path # Make sure that qubits order is preserved _path += '->' + ''.join([_map[q] for q in qubits]) # Contracts _psi = contract(_path, *reversed(_gates), _psi, backend=backend, optimize=optimize) # Block JAX until result is ready (for a more precise runtime) if backend == 'jax' and kwargs['block_until_ready']: _psi.block_until_ready() # Stop time _end_time = time() # Update info _sim_info['runtime (s)'] = _end_time - _ini_time else: raise ValueError(f"optimize='{optimize}' not implemented.") if verbose: print(f'# Runtime (s): {_sim_info["runtime (s)"]:1.2f}', file=stderr) # Return state if kwargs['return_info']: return _psi, _sim_info else: return _psi
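# A minimal standalone sketch of the einsum-style evolution used above: the state
# is kept as a (2,)*n tensor and each gate is contracted onto the legs of its
# qubits. Illustrative only; the actual code builds a full contraction path with
# opt_einsum, and the helper name `_apply_gate` is hypothetical.
import numpy as np

def _apply_gate(psi, U, targets):
    """Contract the k-qubit gate `U` onto the legs `targets` of the state tensor `psi`."""
    k = len(targets)
    U = np.reshape(U, (2, ) * 2 * k)
    # Contract the gate's input legs with the targeted state legs ...
    psi = np.tensordot(U, psi, axes=(range(k, 2 * k), targets))
    # ... and move the gate's output legs back to the targeted positions.
    return np.moveaxis(psi, range(k), targets)

# Example: H on qubit 0 followed by CNOT(0, 1) turns |00> into a Bell state.
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
psi = np.zeros((2, 2))
psi[0, 0] = 1
psi = _apply_gate(psi, H, (0,))
psi = _apply_gate(psi, CNOT, (0, 1))
assert np.allclose(psi.ravel(), np.array([1, 0, 0, 1]) / np.sqrt(2))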
def to_nx(circuit: iter[BaseGate], add_final_nodes: bool = True, node_tags: dict = None, edge_tags: dict = None, return_qubits_map: bool = False, leaves_prefix: str = 'q') -> networkx.Graph: """ Return graph representation of circuit. `to_nx` is deterministic, so it can be reused elsewhere. Parameters ---------- circuit: iter[BaseGate] Circuit to get graph representation from. add_final_nodes: bool, optional Add final nodes for each qubit to the graph representation of `circuit`. node_tags: dict, optional Add specific tags to nodes. edge_tags: dict, optional Add specific tags to edges. return_qubits_map: bool, optional Return map associated to the Circuit qubits. leaves_prefix: str, optional Specify prefix to use for leaves. Returns ------- networkx.Graph Graph representing `circuit`. Example ------- >>> import networkx as nx >>> >>> # Define circuit >>> circuit = Circuit( >>> [Gate('X', qubits=[0])**1.2, >>> Gate('ISWAP', qubits=[0, 1])**2.3], Gate('H', [1])) >>> >>> # Draw graph >>> nx.draw_planar(utils.to_nx(circuit)) .. image:: ../../images/circuit_nx.png """ import networkx as nx # Initialize if node_tags is None: node_tags = {} if edge_tags is None: edge_tags = {} # Check if node is a leaf def _is_leaf(node): return type(node) == str and node[:len(leaves_prefix)] == leaves_prefix # Convert iterable to Circuit circuit = Circuit(circuit) # Get graph graph = nx.DiGraph() # Get qubits qubits = circuit.all_qubits() # Get qubits_map qubits_map = {q: i for i, q in enumerate(qubits)} # Check that no qubits is 'confused' as leaf if any(_is_leaf(q) for q in qubits): raise ValueError( f"No qubits must start with 'leaves_prefix'={leaves_prefix}.") # Add first layer for q in qubits: graph.add_node(f'{leaves_prefix}_{qubits_map[q]}_i', qubits=[q], **node_tags) # Last leg last_leg = {q: f'{leaves_prefix}_{qubits_map[q]}_i' for q in qubits} # Build network for x, gate in enumerate(circuit): # Add node graph.add_node(x, circuit=Circuit([gate]), qubits=sort(gate.qubits), **node_tags) # Add edges (time directed) graph.add_edges_from([(last_leg[q], x) for q in gate.qubits], **edge_tags) # Update last_leg last_leg.update({q: x for q in gate.qubits}) # Add last indexes if required if add_final_nodes: for q in qubits: graph.add_node(f'{leaves_prefix}_{qubits_map[q]}_f', qubits=[q], **node_tags) graph.add_edges_from([(x, f'{leaves_prefix}_{qubits_map[q]}_f') for q, x in last_leg.items()], **edge_tags) if return_qubits_map: return graph, qubits_map else: return graph
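# A minimal standalone sketch of the graph construction above: one initial leaf
# per qubit, one node per gate, and a time-directed edge from the last node that
# touched each of the gate's qubits. Illustrative only; plain (name, qubits)
# tuples stand in for HybridQ gates and `_circuit_graph` is a hypothetical name.
import networkx as nx

def _circuit_graph(gates, qubits):
    """Build a DAG for `gates`, given as (name, qubits) pairs."""
    graph = nx.DiGraph()
    # One leaf per qubit, tracked as the last node acting on that qubit
    last = {q: f'q_{q}_i' for q in qubits}
    graph.add_nodes_from(last.values())
    for x, (name, qs) in enumerate(gates):
        graph.add_node(x, name=name, qubits=qs)
        graph.add_edges_from((last[q], x) for q in qs)
        last.update({q: x for q in qs})
    return graph

# Example: H(0) then CZ(0, 1) gives 4 nodes (2 leaves + 2 gates) and 3 edges.
g = _circuit_graph([('H', (0,)), ('CZ', (0, 1))], qubits=(0, 1))
assert g.number_of_nodes() == 4
assert set(g.edges()) == {('q_0_i', 0), ('q_1_i', 1), (0, 1)}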
def generate_OTOC(layout: dict[any, list[Coupling]], depth: int, sequence: list[any], one_qb_gates: iter[Gate], two_qb_gates: iter[Gate], butterfly_op: str, ancilla: Qubit, targets: list[Qubit], qubits_order: list[Qubit] = None) -> Circuit: # Get all qubits all_qubits = { q for s in sequence[:min(depth, len(sequence))] for gate in layout[s] for q in gate } # Get order of qubits qubits_order = sort(all_qubits) if qubits_order is None else qubits_order # Get list if single butterfly is provided butterfly_op = list(butterfly_op) # Check order of qubits if sort(all_qubits) != sort(qubits_order): raise ValueError( "'qubits_order' must be a valid permutation of all qubits.") # Check if butterfly op has valid strings if set(butterfly_op).difference(['I', 'X', 'Y', 'Z']): raise ValueError('Only {I, X, Y, Z} are valid butterfly operators') # Check if ancilla/targets are in layout if set(targets).union([ancilla]).difference(all_qubits): raise ValueError(f"Ancilla/Targets must be in layout.") # Check if targets are unique if len(set(targets)) != len(targets): raise ValueError('Targets must be unique.') # Check that ancilla is not in targets if ancilla in targets: raise ValueError('Ancilla must be different from targets') # Check if the number of targets corresponds to the number of butterfly ops if len(targets) != len(butterfly_op) + 1: raise ValueError( f"Number of butterfly operators does not match number " f"of targets (expected {len(targets)-1}, got {len(butterfly_op)})." ) # Check that there is a coupling between the ancilla qubit and the measurement qubit if next((False for s in sequence[:min(depth, len(sequence))] for w in layout[s] if sort(w) == sort([ancilla, targets[0]])), True): raise ValueError( f"No available two-qubit gate between ancilla {ancilla} " f"and qubit {targets[0]}.") # Initialize Circuit circ = Circuit() # Add initial layer of single qubit gates circ.extend([ Gate('SQRT_Y' if q != ancilla else 'SQRT_X', qubits=[q], tags={ 'depth': 0, 'sequence': 'initial' }) for q in sort(all_qubits) ]) # Add CZ between ancilla and first target qubit circ.append( Gate('CZ', [ancilla, targets[0]], tags={ 'depth': 0, 'sequence': 'first_control' })) # Generate U U = generate_U(layout=layout, qubits_order=qubits_order, depth=depth, sequence=sequence, one_qb_gates=one_qb_gates, two_qb_gates=two_qb_gates, exclude_qubits=[ancilla]).update_all_tags({'U': True}) # Add U to circuit circ += U # Add butterfly operator circ.extend([ Gate(_b, qubits=[_t], tags={ 'depth': depth - 1, 'sequence': 'butterfly' }) for _b, _t in zip(butterfly_op, targets[1:]) ]) # Add U* to circuit and update depth circ += Circuit( gate.update_tags({ 'depth': 2 * depth - gate.tags['depth'] - 1, 'U^-1': True }) for gate in U.inv().remove_all_tags(['U'])) # Add CZ between ancilla and first target qubit circ.append( Gate('CZ', [ancilla, targets[0]], tags={ 'depth': 2 * depth - 1, 'sequence': 'second_control' })) return circ
def map(self, order: iter[any] = None): """ Return map. Parameters ---------- order: tuple[any, ...], optional If provided, Kraus' map is ordered accordingly to `order`. """ # Get left and right qubits l_qubits, r_qubits = self.qubits # Get order if order is not None: from hybridq.utils import sort # Get order order = tuple(order) try: # Split l_order, r_order = order # Convert to tuples l_order = tuple(l_order) r_order = tuple(r_order) # Check that qubits are consistent if sort(l_order) != sort(l_qubits) or sort(r_order) != sort( r_qubits): raise RuntimeError("Something went wrong.") # Get order order = (l_order, r_order) except: if l_qubits != r_qubits or sort(order) != sort(l_qubits): raise ValueError( "'order' is not a valid permutation of qubits.") # Get order order = (order, order) # Get Matrix representing the map _U = self.Matrix # Transpose if order is provided if order is not None and (order[0] != self.l_qubits or order[1] != self.r_qubits): from hybridq.gate import MatrixGate # Get gate _g = MatrixGate(_U, qubits=tuple((0, q) for q in l_qubits) + tuple( (1, q) for q in r_qubits), copy=False) # Transpose _U = _g.matrix(order=tuple((0, q) for q in order[0]) + tuple( (1, q) for q in order[1])) # Return map return _U
def _get_state(state, name): # If None, return None if state is None: return None # Check if string elif isinstance(state, str): # If single char, extend to full size state = state * (nl + nr) if len(state) == 1 else state # Check that state has the right number of chars if not (len(state) == (nl + nr) or (l_qubits == r_qubits and len(state) == nl)): raise ValueError( f"'{name}' has the wrong number of qubits.") # Extend if needed state = state + state if len(state) == nl else state # Return return state # Check if Circuit elif isinstance(state, Circuit): from hybridq.circuit.utils import matrix # Check that qubits are consistent if l_qubits != r_qubits or sort(l_qubits) != sort( state.all_qubits()): raise ValueError( f"Qubits in '{name}' are not consistent with 'circuit'." ) # Get matrix U = matrix(state, order=l_qubits) # Swap input/output return np.transpose(np.reshape(U, (2, ) * 2 * nl), list(range(nl, 2 * nl)) + list(range(nl))) else: # Try to convert to numpy array state = np.asarray(state) # At the moment, only 2-dimensional qubits are allowed if set(state.shape) != {2}: raise NotImplementedError( "Only 2-dimensional qubits are allowed.") # Check if the number of dimensions matches if not (state.ndim == (nl + nr) or (l_qubits == r_qubits and state.ndim == nl)): raise ValueError( f"'{name}' has the wrong number of qubits.") # Extend if needed if state.ndim == nl: state = np.reshape(np.kron(state.ravel(), state.ravel()), (2, ) * 2 * nl) # Return state return state
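# A minimal standalone sketch of the string handling in `_get_state`: a single
# character is broadcast to the whole register, and a half-length string (when
# the left and right registers coincide) is doubled. Illustrative only; the
# helper name `_expand_state` is hypothetical.
def _expand_state(state, nl, nr, same_registers):
    if len(state) == 1:
        state = state * (nl + nr)
    if same_registers and len(state) == nl:
        state = state + state
    if len(state) != nl + nr:
        raise ValueError("wrong number of qubits")
    return state

assert _expand_state('0', nl=2, nr=2, same_registers=True) == '0000'
assert _expand_state('+-', nl=2, nr=2, same_registers=True) == '+-+-'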