def test_sequence_output_formats(self) -> None:
    dimension = 2
    rx_entry = UnitarySequenceEntry(UnitaryDefinitions.rx(np.pi / 3), [0])
    ry_entry = UnitarySequenceEntry(UnitaryDefinitions.ry(np.pi / 3), [0])
    sequence = UnitarySequence(dimension, [rx_entry, ry_entry])
    assert sequence.get_qasm()
    assert sequence.get_jaqal()
    assert sequence.get_display_output()
def test_identity_roots_incorrect(self) -> None:
    dimension = 2
    t = Unitary(dimension, np.array([[1, 0], [0, np.exp(1j * np.pi / 4)]]))
    t_entry = UnitarySequenceEntry(t, [0])
    sequence = UnitarySequence(dimension, np.repeat(t_entry, 7))
    assert sequence.get_dimension() == dimension
    assert sequence.get_length() == 7
    assert not sequence.product().close_to(np.identity(dimension))
def _make_random_change(self, compiled_sequence: UnitarySequence) -> None:
    '''
    Chooses and implements a random change to the specified unitary
    sequence.
    '''
    count_append = np.count_nonzero([
        CompilerAction.is_append(action)
        for action in list(CompilerAction)])
    count_non_append = len(CompilerAction) - count_append
    p_append = self.append_probability / count_append
    p_non_append = (1 - self.append_probability) / count_non_append
    action = np.random.choice(
        list(CompilerAction), 1,
        p=[p_append if CompilerAction.is_append(action) else p_non_append
           for action in list(CompilerAction)])

    new_sequence_entry = None
    if CompilerAction.is_append(action):
        new_sequence_entry = Compiler.create_random_sequence_entry(
            self.dimension, self.unitary_primitives,
            self.unitary_primitive_probabilities)

    if action == CompilerAction.AppendFirst:
        compiled_sequence.append_first(new_sequence_entry)
    elif action == CompilerAction.AppendLast:
        compiled_sequence.append_last(new_sequence_entry)
    elif action == CompilerAction.RemoveFirst:
        compiled_sequence.remove_first()
    elif action == CompilerAction.RemoveLast:
        compiled_sequence.remove_last()
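# Worked example of the proposal probabilities computed above (illustrative only,
# not part of the library). With the four CompilerAction members referenced in the
# method (AppendFirst, AppendLast, RemoveFirst, RemoveLast), two actions are
# appends, so for append_probability = 0.5:
#   p_append     = 0.5 / 2       = 0.25 for each append action
#   p_non_append = (1 - 0.5) / 2 = 0.25 for each remove action
# Raising append_probability biases the random walk toward growing the sequence.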
def test_inverse(self) -> None:
    dimension = 2
    rx_entry = UnitarySequenceEntry(UnitaryDefinitions.rx(np.pi / 3), [0])
    ry_entry = UnitarySequenceEntry(UnitaryDefinitions.ry(np.pi / 3), [0])
    sequence = UnitarySequence(dimension, [rx_entry, ry_entry])
    product = sequence.product()
    inverse_sequence = sequence.inverse()
    inverse_product = inverse_sequence.product()
    assert inverse_product.close_to(product.inverse())

    # Clearing the cached product should force it to be recomputed
    inverse_sequence.sequence_product = None
    inverse_product = inverse_sequence.product()
    assert inverse_product.close_to(product.inverse())
def get_qdrift_sequence(
        self,
        time: float,
        num_repetitions: int
) -> UnitarySequence:
    '''
    Returns a sequence of unitaries using a QDRIFT decomposition of the
    time-evolution under this Hamiltonian, as per Campbell, PRL 123,
    070503 (2019). The sequence approximately implements the ideal time
    evolution of the system.

    :param time: The total time to evolve the system.
    :type time: float
    :param num_repetitions: The number of QDRIFT repetitions to use.
    :type num_repetitions: int
    :return: A sequence of unitaries implementing the QDRIFT decomposition
        of the time evolution of the system.
    :rtype: UnitarySequence
    '''
    sequence_entries = []
    coefficients = [term.get_coefficient() for term in self.terms]
    sum_coefficients = np.sum(coefficients)
    prob_coefficients = coefficients / sum_coefficients
    time_per_step = sum_coefficients * time / num_repetitions
    apply_to = list(range(self.get_qubit_count()))
    for _ in range(num_repetitions):
        # Sample each term with probability proportional to its coefficient
        term = np.random.choice(self.terms, p=prob_coefficients)
        display_suffix = str(self.terms.index(term)) + ' λ/N'
        u = UnitaryDefinitions.time_evolution(
            term.get_normalized_matrix(), time_per_step, display_suffix)
        entry = UnitarySequenceEntry(u, apply_to)
        sequence_entries.append(entry)
    return UnitarySequence(self.get_dimension(), sequence_entries)
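# Usage sketch for get_qdrift_sequence (illustrative, not library code). It assumes
# `hamiltonian` is an instance of the class defining the methods above; with N
# repetitions, each sampled term is evolved for time lambda * t / N, where lambda
# is the sum of the term coefficients.
total_time = 1.0
num_repetitions = 100
qdrift_sequence = hamiltonian.get_qdrift_sequence(total_time, num_repetitions)
assert qdrift_sequence.get_length() == num_repetitions
# Compare against the exact evolution; the distance should shrink as
# num_repetitions grows.
exact_evolution = hamiltonian.get_time_evolution_operator(total_time)
print(exact_evolution.distance_from(qdrift_sequence.product()))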
def get_ideal_sequence(
        self,
        time: float,
        num_steps: int
) -> UnitarySequence:
    '''
    Returns a sequence of identical unitaries, where each unitary is the
    time-evolution operator under this Hamiltonian for time / num_steps,
    and the length of the sequence is num_steps.

    :param time: The total time to evolve the system.
    :type time: float
    :param num_steps: The number of steps in which to break up the time
        evolution of the system.
    :type num_steps: int
    :return: A sequence of num_steps identical unitaries implementing the
        time evolution of the system.
    :rtype: UnitarySequence
    '''
    sequence_entries = []
    time_per_step = time / num_steps
    u = self.get_time_evolution_operator(time_per_step)
    apply_to = list(range(self.get_qubit_count()))
    for _ in range(num_steps):
        entry = UnitarySequenceEntry(u, apply_to)
        sequence_entries.append(entry)
    return UnitarySequence(self.get_dimension(), sequence_entries)
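# Usage sketch for get_ideal_sequence (illustrative, not library code); assumes
# `hamiltonian` is an instance of the class defining the method above. The product
# of num_steps identical steps reproduces the full time-evolution operator.
ideal_sequence = hamiltonian.get_ideal_sequence(time=1.0, num_steps=10)
assert ideal_sequence.get_length() == 10
assert ideal_sequence.product().close_to(
    hamiltonian.get_time_evolution_operator(1.0))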
def test_combine(self) -> None:
    dimension = 2
    t = Unitary(dimension, np.array([[1, 0], [0, np.exp(1j * np.pi / 4)]]))
    t_entry = UnitarySequenceEntry(t, [0])
    sequence_1 = UnitarySequence(dimension, np.repeat(t_entry, 3))
    sequence_2 = UnitarySequence(
        dimension, [UnitarySequenceEntry(UnitaryDefinitions.sigmay(), [0])])
    combined_sequence = UnitarySequence.combine(sequence_1, sequence_2)
    assert (combined_sequence.get_length()
            == sequence_1.get_length() + sequence_2.get_length())
    assert combined_sequence.product().close_to(
        sequence_1.product().left_multiply(sequence_2.product()))
def _compile_layered(
        self,
        target_unitary: Unitary,
        unitary_primitive_counts: Dict[UnitaryPrimitive, int],
        threshold: float,
        max_step_count: int
) -> Tuple[UnitarySequence, List[float]]:
    '''
    Internal implementation of layered STOQ compilation. See documentation
    for Compiler.compile_layered() for full details.
    '''
    compiled_sequence = UnitarySequence(self.dimension)
    cost_by_step = []
    while (not compiled_sequence.product().close_to(
                target_unitary, threshold)
           and len(cost_by_step) < max_step_count):
        self.beta = min(self.beta + self.annealing_rate, self.max_beta)

        product_before_change = compiled_sequence.product()
        self._make_random_change_layered(
            compiled_sequence, unitary_primitive_counts)
        current_cost = target_unitary.distance_from(product_before_change)
        proposed_cost = target_unitary.distance_from(
            compiled_sequence.product())

        accept = self._accept_proposed_change(
            target_unitary, current_cost, proposed_cost)
        if accept:
            cost_by_step.append(proposed_cost)
        else:
            compiled_sequence.undo()
            cost_by_step.append(current_cost)

    return compiled_sequence, cost_by_step
def compile_rav_sequence(
        self,
        time: float,
        max_t_step: float,
        threshold: float,
        allow_simultaneous_terms: bool = False
) -> CompilerResult:
    '''
    Returns a randomized analog verification (RAV) sequence as per
    Shaffer et al., arXiv:2003.04500 (2020). The sequence of unitaries is
    built from terms of this Hamiltonian by first generating a random
    sequence and then using STOQ to compile the inverse, such that the
    full sequence approximately implements the identity operation.

    :param time: The total time to evolve the system in the initial
        randomly-generated sequence.
    :type time: float
    :param max_t_step: The maximum time to use for a single Hamiltonian
        term at each step of the sequence.
    :type max_t_step: float
    :param threshold: The overlap with the target unitary at which to stop
        the STOQ compilation. A value of 1.0 implies an exact compilation.
    :type threshold: float
    :param allow_simultaneous_terms: Whether to allow multiple Hamiltonian
        terms to be executed simultaneously in the resulting sequence,
        defaults to False.
    :type allow_simultaneous_terms: bool, optional
    :return: A sequence of unitaries implementing RAV.
    :rtype: CompilerResult
    '''
    # Generate a random sequence, mostly forward in time
    forward_probability = 0.8
    unitary_primitives = self._get_unitary_primitives(
        max_t_step, allow_simultaneous_terms)
    apply_to = list(range(self.get_qubit_count()))
    random_sequence = UnitarySequence(self.get_dimension())
    total_time = 0.0
    while total_time < time:
        t_step = (max_t_step * np.random.random_sample()) * (
            1 if np.random.random_sample() < forward_probability else -1)
        u_step = np.random.choice(
            unitary_primitives).get_unitary().as_unitary([t_step])
        random_sequence.append_last(UnitarySequenceEntry(u_step, apply_to))
        total_time += np.abs(t_step)

    # Calculate the product of this sequence and invert it
    target_unitary = random_sequence.product().inverse()

    # Call _compile_stoq_sequence_for_target_unitary to compile a new
    # sequence implementing the inverse
    result = self._compile_stoq_sequence_for_target_unitary(
        target_unitary, max_t_step, threshold, allow_simultaneous_terms)

    # Return the CompilerResult with the combined sequence
    result.compiled_sequence = UnitarySequence.combine(
        random_sequence, result.compiled_sequence)
    return result
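# Usage sketch for compile_rav_sequence (illustrative, not library code); assumes
# `hamiltonian` is an instance of the class defining the method above. The combined
# random-plus-inverse sequence should be close to the identity at the requested
# overlap threshold.
result = hamiltonian.compile_rav_sequence(
    time=1.0, max_t_step=0.1, threshold=0.99, allow_simultaneous_terms=False)
identity = Unitary.identity(hamiltonian.get_dimension())
assert result.compiled_sequence.product().close_to(identity, 0.99)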
def _make_random_change_layered(
        self,
        compiled_sequence: UnitarySequence,
        unitary_primitive_counts: Dict[UnitaryPrimitive, int]
) -> None:
    '''
    Chooses and implements a random change to the specified layered
    unitary sequence.
    '''
    count_append = np.count_nonzero([
        CompilerAction.is_append(action)
        for action in list(CompilerAction)])
    count_non_append = len(CompilerAction) - count_append
    p_append = self.append_probability / count_append
    p_non_append = (1 - self.append_probability) / count_non_append
    action = np.random.choice(
        list(CompilerAction), 1,
        p=[p_append if CompilerAction.is_append(action) else p_non_append
           for action in list(CompilerAction)])

    layer_length = sum(unitary_primitive_counts.values())
    new_layer = None
    if CompilerAction.is_append(action):
        new_layer = Compiler.create_random_layer(
            self.dimension, unitary_primitive_counts)

    # Only save the undo state for the first modification to the sequence,
    # so that a future undo() call will reverse the entire action
    if action == CompilerAction.AppendFirst:
        for i, sequence_entry in enumerate(new_layer):
            compiled_sequence.append_first(
                sequence_entry, save_undo=(i == 0))
    elif action == CompilerAction.AppendLast:
        for i, sequence_entry in enumerate(new_layer):
            compiled_sequence.append_last(
                sequence_entry, save_undo=(i == 0))
    elif action == CompilerAction.RemoveFirst:
        for i in range(layer_length):
            compiled_sequence.remove_first(save_undo=(i == 0))
    elif action == CompilerAction.RemoveLast:
        for i in range(layer_length):
            compiled_sequence.remove_last(save_undo=(i == 0))
def get_trotter_sequence(
        self,
        time: float,
        num_trotter_steps: int,
        randomize: bool = False
) -> UnitarySequence:
    '''
    Returns a sequence of unitaries using a Suzuki-Trotter decomposition
    of the time-evolution under this Hamiltonian. The sequence
    approximately implements the ideal time evolution of the system.

    :param time: The total time to evolve the system.
    :type time: float
    :param num_trotter_steps: The number of Trotter steps to use.
    :type num_trotter_steps: int
    :param randomize: Whether to randomize the order of Hamiltonian terms
        in each step of the Suzuki-Trotter decomposition, defaults to
        False.
    :type randomize: bool, optional
    :return: A sequence of unitaries implementing the Suzuki-Trotter
        decomposition of the time evolution of the system.
    :rtype: UnitarySequence
    '''
    sequence_entries = []
    time_per_step = time / num_trotter_steps
    apply_to = list(range(self.get_qubit_count()))
    term_indices = list(range(len(self.terms)))
    for _ in range(num_trotter_steps):
        if randomize:
            random.shuffle(term_indices)
        for term_index in term_indices:
            term = self.terms[term_index]
            u = UnitaryDefinitions.time_evolution(
                term.get_matrix(), time_per_step, term_index)
            entry = UnitarySequenceEntry(u, apply_to)
            sequence_entries.append(entry)
    return UnitarySequence(self.get_dimension(), sequence_entries)
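# Usage sketch for get_trotter_sequence (illustrative, not library code); assumes
# `hamiltonian` is an instance of the class defining the method above. Each Trotter
# step contributes one entry per Hamiltonian term, and the product approaches the
# exact evolution as the number of steps grows.
trotter_sequence = hamiltonian.get_trotter_sequence(
    time=1.0, num_trotter_steps=20, randomize=False)
exact_evolution = hamiltonian.get_time_evolution_operator(1.0)
print(exact_evolution.distance_from(trotter_sequence.product()))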
def test_compile_two_qubits(self) -> None:
    num_qubits = 2
    system_dimension = qubit_dimension**num_qubits
    unitary_primitives = [
        UnitaryPrimitive(UnitaryDefinitions.rx(np.pi / 2)),
        UnitaryPrimitive(UnitaryDefinitions.cnot())
    ]
    compiler = Compiler(system_dimension, unitary_primitives)

    # Ensure determinism by setting the random seed
    np.random.seed(123456)

    target_unitary = UnitarySequence(system_dimension, [
        UnitarySequenceEntry(UnitaryDefinitions.cnot(), [0, 1]),
        UnitarySequenceEntry(UnitaryDefinitions.rx(np.pi), [0])
    ]).product()
    result = compiler.compile(target_unitary)
    assert result.compiled_sequence.product().close_to(target_unitary)
    assert result.compiled_sequence.get_qasm()
    assert result.compiled_sequence.get_display_output()
    assert isinstance(result.cost_by_step, list)
    assert result.total_elapsed_time >= 0.0
def generate_rav_sequence(
        dimension: int,
        unitary_primitives: List[UnitaryPrimitive],
        sequence_length: int,
        threshold: float,
        stoq_append_probability: float = 0.5,
        unitary_primitive_probabilities: Optional[List[float]] = None,
        max_step_count: int = 10000
) -> CompilerResult:
    '''
    Implements randomized analog verification (RAV) as per Shaffer et al.,
    arXiv:2003.04500 (2020).

    :param dimension: The dimension of the state space. For an n-qubit
        system, dimension should be set to 2**n.
    :type dimension: int
    :param unitary_primitives: The unitary primitives to be used for the
        compilation.
    :type unitary_primitives: List[UnitaryPrimitive]
    :param sequence_length: The length of the initial randomly-generated
        sequence.
    :type sequence_length: int
    :param threshold: The overlap with the target unitary at which to stop
        compilation. A value of 1.0 implies an exact compilation.
    :type threshold: float
    :param stoq_append_probability: Probability of appending a new gate at
        each step in the compilation, defaults to 0.5.
    :type stoq_append_probability: float, optional
    :param unitary_primitive_probabilities: The probability for STOQ to
        choose each of the primitives specified in unitary_primitives when
        proposing new gates at each step of the compilation process,
        defaults to None. If not specified, each unitary primitive is
        chosen with uniform probability.
    :type unitary_primitive_probabilities: Optional[List[float]], optional
    :param max_step_count: Maximum number of steps to perform while
        attempting to perform the approximate compilation, defaults to
        10000. Compilation of the inversion sequence will terminate after
        this number of steps regardless of whether the threshold has been
        reached.
    :type max_step_count: int, optional
    :return: The result of the compilation, including the RAV sequence.
    :rtype: CompilerResult
    '''
    assert isinstance(unitary_primitives, (list, np.ndarray))
    assert np.all([
        isinstance(primitive, UnitaryPrimitive)
        for primitive in unitary_primitives])
    assert np.all([
        primitive.get_unitary().get_dimension() <= dimension
        for primitive in unitary_primitives])
    assert sequence_length >= 0
    assert 0.0 <= threshold <= 1.0

    # Generate a random sequence of the desired length
    random_sequence = UnitarySequence(dimension)
    for _ in range(sequence_length):
        new_sequence_entry = Compiler.create_random_sequence_entry(
            dimension, unitary_primitives, unitary_primitive_probabilities)
        random_sequence.append_last(new_sequence_entry)

    # Skip inverse compilation if threshold or max_step_count is zero
    if threshold == 0.0 or max_step_count == 0:
        return CompilerResult(random_sequence, [], 0.0)

    # Calculate the product of this sequence and invert it
    target_unitary = random_sequence.product().inverse()

    # Use Compiler to compile a new sequence implementing the inverse
    compiler = Compiler(
        dimension, unitary_primitives, unitary_primitive_probabilities,
        append_probability=stoq_append_probability)
    result = compiler.compile(target_unitary, threshold, max_step_count)

    # Return the CompilerResult with the combined sequence
    result.compiled_sequence = UnitarySequence.combine(
        random_sequence, result.compiled_sequence)
    return result
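# Usage sketch for generate_rav_sequence (illustrative, not library code), reusing
# the single-qubit primitives that appear in the tests above.
dimension = 2
unitary_primitives = [
    UnitaryPrimitive(UnitaryDefinitions.rx(np.pi / 2)),
    UnitaryPrimitive(UnitaryDefinitions.ry(np.pi / 2)),
]
result = generate_rav_sequence(
    dimension, unitary_primitives, sequence_length=10, threshold=0.99)
# The full RAV sequence approximately implements the identity.
assert result.compiled_sequence.product().close_to(
    Unitary.identity(dimension), 0.99)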
def generate_layered_rav_sequence(
        dimension: int,
        unitary_primitive_counts: Dict[UnitaryPrimitive, int],
        layer_count: int,
        threshold: float,
        stoq_append_probability: float = 0.5,
        max_step_count: int = 10000
) -> CompilerResult:
    '''
    Implements layered randomized analog verification (RAV).

    :param dimension: The dimension of the state space. For an n-qubit
        system, dimension should be set to 2**n.
    :type dimension: int
    :param unitary_primitive_counts: Specifies the fixed set of unitary
        primitives to be contained in each layer of the compilation. Each
        key is the unitary primitive to be included, and each value is the
        count of that unitary primitive per layer.
    :type unitary_primitive_counts: Dict[UnitaryPrimitive, int]
    :param layer_count: The number of layers to create in the initial
        randomly-generated sequence.
    :type layer_count: int
    :param threshold: The overlap with the target unitary at which to stop
        compilation. A value of 1.0 implies an exact compilation.
    :type threshold: float
    :param stoq_append_probability: Probability of appending a new gate at
        each step in the compilation, defaults to 0.5.
    :type stoq_append_probability: float, optional
    :param max_step_count: Maximum number of steps to perform while
        attempting to perform the approximate compilation, defaults to
        10000. Compilation of the inversion sequence will terminate after
        this number of steps regardless of whether the threshold has been
        reached.
    :type max_step_count: int, optional
    :return: The result of the compilation, including the layered RAV
        sequence.
    :rtype: CompilerResult
    '''
    assert isinstance(unitary_primitive_counts, dict)
    assert np.all([
        isinstance(primitive, UnitaryPrimitive)
        for primitive in unitary_primitive_counts.keys()])
    assert np.all([
        primitive.get_unitary().get_dimension() <= dimension
        for primitive in unitary_primitive_counts.keys()])
    assert np.all([
        isinstance(count, int)
        for count in unitary_primitive_counts.values()])
    assert layer_count >= 0
    assert 0.0 <= threshold <= 1.0

    # Generate a random sequence of the desired number of layers.
    # Total sequence length will therefore be
    # sum(unitary_primitive_counts.values()) * layer_count
    random_sequence = UnitarySequence(dimension)
    for _ in range(layer_count):
        layer = Compiler.create_random_layer(
            dimension, unitary_primitive_counts)
        for sequence_entry in layer:
            random_sequence.append_last(sequence_entry)

    # Skip inverse compilation if threshold or max_step_count is zero
    if threshold == 0.0 or max_step_count == 0:
        return CompilerResult(random_sequence, [], 0.0)

    # Calculate the product of this sequence and invert it
    target_unitary = random_sequence.product().inverse()

    # Use Compiler to compile a new sequence implementing the inverse
    compiler = Compiler(
        dimension, list(unitary_primitive_counts.keys()),
        append_probability=stoq_append_probability)
    result = compiler.compile_layered(
        target_unitary, unitary_primitive_counts, threshold, max_step_count)

    # Return the CompilerResult with the combined sequence
    result.compiled_sequence = UnitarySequence.combine(
        random_sequence, result.compiled_sequence)
    return result
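# Usage sketch for generate_layered_rav_sequence (illustrative, not library code),
# using a two-qubit layer built from the primitives in the tests above: one
# rx(pi/2) and one cnot per layer.
dimension = 4  # two qubits
unitary_primitive_counts = {
    UnitaryPrimitive(UnitaryDefinitions.rx(np.pi / 2)): 1,
    UnitaryPrimitive(UnitaryDefinitions.cnot()): 1,
}
result = generate_layered_rav_sequence(
    dimension, unitary_primitive_counts, layer_count=5, threshold=0.99)
# The initial random part alone contributes
# sum(unitary_primitive_counts.values()) * layer_count = 10 entries.
assert result.compiled_sequence.get_length() >= 10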
def test_default(self) -> None:
    dimension = 2
    sequence = UnitarySequence(dimension)
    assert sequence.get_dimension() == dimension
    assert sequence.product().close_to(np.identity(dimension))
def test_append_and_remove(self) -> None:
    dimension = 2
    identity = Unitary.identity(dimension)
    sequence = UnitarySequence(dimension)
    assert sequence.get_length() == 0
    assert sequence.product().close_to(identity)

    sequence.append_first(
        UnitarySequenceEntry(UnitaryDefinitions.sigmax(), [0]))
    assert sequence.get_length() == 1
    assert sequence.product().close_to(UnitaryDefinitions.sigmax())

    sequence.append_last(
        UnitarySequenceEntry(UnitaryDefinitions.sigmay(), [0]))
    assert sequence.get_length() == 2
    assert sequence.product().close_to(UnitaryDefinitions.sigmaz())

    sequence.append_first(
        UnitarySequenceEntry(UnitaryDefinitions.sigmaz(), [0]))
    assert sequence.get_length() == 3
    assert sequence.product().close_to(identity)

    sequence.remove_last()
    assert sequence.get_length() == 2
    assert sequence.product().close_to(UnitaryDefinitions.sigmay())

    sequence.remove_first()
    assert sequence.get_length() == 1
    assert sequence.product().close_to(UnitaryDefinitions.sigmax())

    sequence.remove_first()
    assert sequence.get_length() == 0
    assert sequence.product().close_to(identity)
def test_undo(self) -> None:
    dimension = 2
    identity = Unitary.identity(dimension)
    sequence = UnitarySequence(dimension)
    assert sequence.get_length() == 0
    with pytest.raises(Exception):
        sequence.undo()

    sequence.append_first(
        UnitarySequenceEntry(UnitaryDefinitions.sigmax(), [0]))
    assert sequence.get_length() == 1
    assert sequence.product().close_to(UnitaryDefinitions.sigmax())

    sequence.undo()
    assert sequence.get_length() == 0
    assert sequence.product().close_to(identity)
    with pytest.raises(Exception):
        sequence.undo()

    sequence.append_first(
        UnitarySequenceEntry(UnitaryDefinitions.sigmay(), [0]))
    sequence.append_first(
        UnitarySequenceEntry(UnitaryDefinitions.sigmay(), [0]))
    assert sequence.get_length() == 2
    assert sequence.product().close_to(identity)

    sequence.remove_last()
    assert sequence.get_length() == 1
    assert sequence.product().close_to(UnitaryDefinitions.sigmay())

    sequence.undo()
    assert sequence.get_length() == 2
    assert sequence.product().close_to(identity)
    with pytest.raises(Exception):
        sequence.undo()