def makemats(self, index_groups: Sequence[Sequence[int]]) -> Mapping[IndexType, MatrixType]:
    # Build the wrapped op's matrices, then prepend the control indices
    # (index_groups[0]) to each index tuple and wrap each matrix in CMat.
    opm = self.op.makemats(index_groups[1:])
    newdict = {}
    for indices in opm:
        newindices = tuple(flatten([index_groups[0], indices]))
        newdict[newindices] = CMat(opm[indices])
    return newdict
def feed_indices(self, state: StateType,
                 index_groups: Sequence[Sequence[int]],
                 n: int) -> Tuple[int, object]:
    # Get indices and make measurement
    indices = numpy.array(flatten(index_groups), dtype=numpy.int32)
    probs = state.measure_probabilities(indices, top_k=self.top_k)[:]
    return 0, probs
def __call__(self, *inputs: Qubit, **kwargs) -> Union[Qubit, Tuple[Qubit, ...]]:
    # Extract consumed ops in reverse order
    input_list = list(inputs)
    ops_and_qubits = []
    for opconstructor, consumed_indices in reversed(self.wrapper_funcs):
        # Remove in reverse order to preserve index values.
        consumed_qubits = reversed(
            [input_list.pop(i) for i in reversed(consumed_indices)])
        ops_and_qubits.append(
            (opconstructor, consumed_qubits, consumed_indices))
    ops_and_qubits = list(reversed(ops_and_qubits))

    # Qubits left in input_list are destined to go to the circuit func.
    # Use func to construct the circuit from the remaining args.
    with QubitWrapperContext(*flatten(ops_and_qubits)) as context:
        outputs = self.op(*input_list, **kwargs)
        if isinstance(outputs, Qubit):
            outputs = (outputs, )
        in_context_qubits = context.put_qubits_in_local_context_order(*outputs)
        if len(in_context_qubits) == 1:
            return in_context_qubits[0]
        else:
            return in_context_qubits
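# Standalone illustration (not library code) of the reversed-pop pattern used in
# __call__ above: popping positions in descending order keeps the indices of the
# not-yet-popped elements valid, and the outer reversed() restores input order.
def _reversed_pop_demo():
    items = ['a', 'b', 'c', 'd']
    consumed_indices = [0, 2]
    consumed = list(reversed([items.pop(i) for i in reversed(consumed_indices)]))
    assert consumed == ['a', 'c']
    assert items == ['b', 'd']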
def feed_indices(self, state: StateType,
                 index_groups: Sequence[Sequence[int]],
                 n: int) -> Tuple[int, Any]:
    # Get indices and make measurement
    indices = numpy.array(flatten(index_groups), dtype=numpy.int32)
    bits, prob = state.measure(indices)
    # We measure but don't remove any qubits.
    return 0, (bits, prob)
def feed(self, qbitindex, node):
    indices = flatten([qbitindex[qubit] for qubit in node.inputs])
    if len(indices) > 0:
        nodestr = repr(node)
        paren_pos = nodestr.find('(')
        if paren_pos > 0:
            nodestr = nodestr[:paren_pos]
        if nodestr not in PrintFeeder.BLACKLIST:
            # Print the node box at the relevant wire positions.
            default_line = [self.qubit_line] * self.n
            self.outputfn((" " * self.linespacing).join(default_line))

            # Top border of the op box.
            for index in indices:
                default_line[index] = "-" * (2 * self.opwidth + 1)
            self.outputfn((" " * self.linespacing).join(default_line))

            # One row per character of the node's name.
            for nodechr in nodestr:
                default_line = [self.qubit_line] * self.n
                for index in indices:
                    default_line[index] = "|" + (" " * (self.opwidth - 1)) + \
                        nodechr + (" " * (self.opwidth - 1)) + "|"
                self.outputfn((" " * self.linespacing).join(default_line))

            # Label each wire with its position within the op, printed one
            # character per row (labels are space-padded to equal width).
            max_len = max(len(str(i)) for i in range(len(indices)))
            index_strs = []
            for i in range(len(indices)):
                index_str = str(i)
                difflen = max_len - len(index_str)
                if difflen > 0:
                    index_str = " " * difflen + index_str
                index_strs.append(index_str)
            for l in range(max_len):
                for i, index in enumerate(indices):
                    default_line[index] = "|" + (
                        " " * (self.opwidth - 1)) + index_strs[i][l] + (
                        " " * (self.opwidth - 1)) + "|"
                self.outputfn((" " * self.linespacing).join(default_line))

            # Bottom border of the op box.
            for index in indices:
                default_line[index] = "-" * (2 * self.opwidth + 1)
            self.outputfn((" " * self.linespacing).join(default_line))
def run_graph(frontier: Sequence[PipelineObject],
              graphnodes: AbstractSet[PipelineObject],
              graphacc: GraphAccumulator):
    """
    Apply the feed function from graphacc to each node in the graph in the
    order necessary to run the circuit.
    :param frontier: top-level nodes of the graph
    :param graphnodes: all nodes in the graph
    :param graphacc: graph accumulator object with a
        "feed(qbitindex, node) -> None" method.
    """
    # Condition: if a node is in the frontier, either all of its inputs have
    # already been fed, or it is itself a top-level input.
    qbitindex = {}
    seen = set()
    n = 0
    for qbit in sorted(frontier, key=lambda q: q.qid):
        qbitindex[qbit] = [i for i in range(n, n + qbit.n)]
        n += qbit.n

    while len(frontier) > 0:
        frontier = list(sorted(frontier, key=lambda q: q.qid))
        node, frontier = frontier[0], frontier[1:]
        graphacc.feed(qbitindex, node)

        # Manage measurements
        qbitindex = node.remap_index(qbitindex, n)
        seen.add(node)

        # Iterate sink for special cases where "cloning" takes place
        # (i.e. splitting qubits after an operation).
        for nextnode in node.sink:
            if nextnode in graphnodes and nextnode not in seen and nextnode not in frontier:
                if all(prevnode in seen for prevnode in nextnode.inputs):
                    frontier.append(nextnode)
                    qbitindex[nextnode] = nextnode.select_index(
                        flatten(qbitindex[j] for j in nextnode.inputs))
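# A minimal sketch of a graph-accumulator object usable with run_graph, relying
# only on the "feed(qbitindex, node)" interface documented above; the class name
# and the recorded fields are illustrative, not part of the library API.
class TraceAccumulator:
    """Record the order in which run_graph feeds nodes, plus the indices touched."""

    def __init__(self):
        self.order = []

    def feed(self, qbitindex, node):
        # Flat list of circuit indices this node acts on.
        indices = flatten([qbitindex[qubit] for qubit in node.inputs])
        self.order.append((node, indices))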
def QFFT(*inputs: Qubit, rev: bool = True) -> Union[Qubit, Tuple[Qubit, ...]]:
    # Split all inputs into individual qubits and run the recursive QFFT on them.
    qarr = flatten([inp.split(range(inp.n)) for inp in inputs])
    recQFFT(qarr)

    # The QFT leaves the output in bit-reversed order; optionally swap it back.
    if rev:
        half = len(qarr) // 2
        for i in range(half):
            qarr[i], qarr[-1 - i] = Swap(qarr[i], qarr[-1 - i])

    # Regroup the single qubits to match the shapes of the original inputs.
    if len(inputs) < len(qarr):
        outputqarr = []
        index = 0
        for qubit in inputs:
            outputqarr.append(Qubit(*qarr[index:index + qubit.n]))
            index += qubit.n
    else:
        outputqarr = qarr

    if len(outputqarr) > 1:
        return tuple(outputqarr)
    else:
        return outputqarr[0]
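# Hypothetical usage sketch for QFFT (not from the library's own examples),
# assuming a Qubit(n=...) constructor consistent with the rest of this module;
# exact constructor arguments may differ.
def _qfft_example():
    q = Qubit(n=3)
    q_fwd = QFFT(q)              # QFT with the output qubit order reversed back
    q_raw = QFFT(q, rev=False)   # leave the output in bit-reversed order
    return q_fwd, q_raw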
def get_context() -> Sequence[Tuple[OpConstructor, Sequence[Qubit], Optional[Sequence[int]]]]:
    """Get full set of contexts."""
    return flatten(QubitWrapperContext.CONTEXT_STACK)
def makemats(self, index_groups: Sequence[Sequence[int]]) -> Mapping[IndexType, MatrixType]:
    # Single-qubit Hadamard applied independently to each index.
    return {i: (1 / numpy.sqrt(2)) * numpy.array([[1, 1], [1, -1]])
            for i in flatten(index_groups)}
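# Quick numpy-only sanity check (not part of the library): the single-qubit
# Hadamard matrix returned above is unitary and squares to the identity.
def _check_hadamard():
    h = (1 / numpy.sqrt(2)) * numpy.array([[1, 1], [1, -1]])
    assert numpy.allclose(h @ h.conj().T, numpy.eye(2))
    assert numpy.allclose(h @ h, numpy.eye(2))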
def makemats(self, index_groups: Sequence[Sequence[int]]) -> Mapping[IndexType, MatrixType]:
    # diag(1, -1), i.e. the Pauli-Z matrix; the 0.0j entry forces a complex dtype.
    return {i: numpy.array([[1.0, 0.0], [0.0j, -1.0]])
            for i in flatten(index_groups)}
def makemats(self, index_groups: Sequence[Sequence[int]]) -> Mapping[IndexType, MatrixType]:
    # Flipping the rows of the 2x2 identity gives the Pauli-X (NOT) matrix.
    return {i: numpy.flip(numpy.eye(2), 0) for i in flatten(index_groups)}
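# For reference (numpy-only, not library code): the flipped identity used above
# is exactly the Pauli-X / NOT matrix.
def _check_not_matrix():
    assert numpy.array_equal(numpy.flip(numpy.eye(2), 0),
                             numpy.array([[0.0, 1.0], [1.0, 0.0]]))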
def makemats(self, index_groups: Sequence[Sequence[int]]) -> Mapping[IndexType, MatrixType]:
    # Swap two equally sized registers: the first index group against the second.
    swapn = self.inputs[0].n
    a_indices = index_groups[0]
    b_indices = index_groups[1]
    return {tuple(flatten([a_indices, b_indices])): SwapMat(swapn)}
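# For intuition only (this is not the library's SwapMat): swapping two 1-qubit
# registers corresponds to the standard 4x4 permutation matrix below, which maps
# the basis state |a>|b> to |b>|a>.
def _swap_matrix_example():
    swap_1 = numpy.array([[1, 0, 0, 0],
                          [0, 0, 1, 0],
                          [0, 1, 0, 0],
                          [0, 0, 0, 1]])
    ket_01 = numpy.array([0, 1, 0, 0])
    ket_10 = numpy.array([0, 0, 1, 0])
    assert numpy.array_equal(swap_1 @ ket_01, ket_10)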
def makemats(self, index_groups: Sequence[Sequence[int]]) -> Mapping[IndexType, MatrixType]:
    # Single-qubit phase gate diag(1, self.exponented) applied to each index.
    return {i: numpy.array([[1, 0], [0, self.exponented]])
            for i in flatten(index_groups)}
def run(*args: PipelineObject,
        feed: Mapping[Union[PipelineObject, Tuple[PipelineObject, ...]],
                      InitialState] = None,
        strict: bool = False,
        backend_constructor: Callable[
            [int, Sequence[Tuple[int, ...]], Sequence[Sequence[complex]]],
            StateType] = None,
        statetype: Type = numpy.complex128,
        **kwargs):
    """
    Run the pipeline using all qubits in *args and produce an output state
    based on the input.
    :param args: list of qubits to evaluate
    :param feed: feed of individual qubit states - qubit/qubits : state
    :param strict: require all qubits to have states in feed; none implicitly
        defined as |0>
    :param backend_constructor: callable that builds the backend state from
        (n, index_groups, states); defaults to CythonBackend.make_state
    :param statetype: type of state data (should be a complex numpy type)
    :return: state handle, output dictionary
    """
    if feed is None:
        feed = {}
    else:
        # Copy to a dictionary, converting non-tuple keys to tuples.
        feed = {(k if type(k) == tuple else (k, )): feed[k] for k in feed}

    # Flatten keys for the ones which are qubits.
    all_qubits_in_feed = list(flatten(feed.keys()))

    if backend_constructor is None:
        backend_constructor = CythonBackend.make_state

    # Frontier contains all qubits required for execution.
    # Assume |0> unless present in feed.
    frontier, graphnodes = get_deps(*args, feed=feed)
    for qubit in frontier:
        if qubit not in all_qubits_in_feed and qubit.default is not None:
            if type(qubit.default) == int:
                if 0 < qubit.default < 2**qubit.n:
                    feed[(qubit, )] = numpy.zeros((2**qubit.n, ))
                    feed[(qubit, )][qubit.default] = 1.0
                # if it's 0 then not defining it is faster
            else:
                feed[(qubit, )] = qubit.default
    frontier = list(sorted(frontier, key=lambda q: q.qid))

    if strict:
        missing_qubits = [
            qubit for qubit in frontier if qubit not in all_qubits_in_feed
        ]
        if len(missing_qubits):
            raise ValueError(
                "Missing Qubit states for: {}".format(missing_qubits))

    qbitindex = {}
    n = 0
    for qbit in frontier:
        qbitindex[qbit] = [i for i in range(n, n + qbit.n)]
        n += qbit.n

    feed_index_groups = [sum((qbitindex[q] for q in qs), []) for qs in feed]
    feed_states = [feed[qs] for qs in feed]

    # In case the deprecated state argument was used:
    # convert the state into a single index group covering all indices.
    if 'state' in kwargs and kwargs['state']:
        feed_index_groups = [tuple(range(n))]
        feed_states = [kwargs['state']]

    state = backend_constructor(n, feed_index_groups, feed_states,
                                statetype=statetype)

    return feed_forward(frontier, state, graphnodes)
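# Hypothetical usage sketch for run (not taken from the library's examples):
# Qubit(n=...) and an H op constructor wrapping the Hadamard makemats above are
# assumed; exact constructor signatures may differ.
def _run_example():
    q = Qubit(n=1)
    h = H(q)
    # Feed an explicit |0> state for q; without a feed entry the default |0> is used.
    state, measured = run(h, feed={q: numpy.array([1.0, 0.0])})
    return state, measured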