Example #1
    def test_invalid_url_loop_till_timeout(self, monkeypatch):
        """Test that when receiving an invalid url, looping continues until the
        timeout."""
        decoded_data = "Decoded Data"

        class MockDecodableObj:
            """A mock class that can be decoded."""
            def decode(self):
                return decoded_data

        class MockURL:
            """A mock class for URLs that can serve as a context manager."""
            def __enter__(self):
                return self

            def __exit__(self, *args):
                pass

            def read(self):
                return MockDecodableObj()

        def raise_urllib_error(*args):
            """Auxiliary function that raises a URLError."""
            raise urllib.error.URLError("Test error due to an incorrect URL.")

        with monkeypatch.context() as m:
            status = "Status:              Failed\n"
            result_message = ["Some message2", "Some location"]

            m.setattr(pennylane_orquestra.cli_actions, "workflow_results",
                      lambda *args: result_message)
            m.setattr(urllib.request, "urlopen", raise_urllib_error)

            with pytest.raises(TimeoutError, match="were not obtained after"):
                loop_until_finished("Some ID", timeout=1)
Example #2
    def test_unexpected_datatype_result(self, monkeypatch, tmpdir):
        """Test that an error is raised when the result is not a tarfile."""
        decoded_data = {"res": "Decoded Data"}
        test_file = os.path.join(tmpdir, "workflow_result.json")
        test_tar = os.path.join(tmpdir, "test.tgz")

        with open(test_file, "w") as outfile:
            json.dump(decoded_data, outfile)

        tar = tarfile.open(test_tar, mode="w:gz")
        tar.add(test_file)
        tar.close()

        # Change to the test directory
        os.chdir(tmpdir)
        with monkeypatch.context() as m:
            result_message = ["Some message2", "Some location"]

            m.setattr(pennylane_orquestra.cli_actions, "workflow_results",
                      lambda *args: result_message)
            m.setattr(urllib.request, "urlopen", lambda arg: arg)

            # test_file is a json file instead of a tarfile
            m.setattr(urllib.request, "urlretrieve", lambda *args, **kwargs:
                      (test_file, ))
            with pytest.raises(ValueError, match="not a tarfile"):
                loop_until_finished("Some ID", timeout=1)
Example #3
    def test_invalid_url_loop_till_timeout(self, monkeypatch):
        """Test that when receiving an invalid url, looping continues until the
        timeout."""
        def raise_urllib_error(*args):
            """Auxiliary function that raises a URLError."""
            raise urllib.error.URLError("Test error due to an incorrect URL.")

        with monkeypatch.context() as m:
            result_message = ["Some message2", "Some location"]

            m.setattr(pennylane_orquestra.cli_actions, "workflow_results",
                      lambda *args: result_message)
            m.setattr(urllib.request, "urlopen", raise_urllib_error)

            with pytest.raises(TimeoutError, match="were not obtained after"):
                loop_until_finished("Some ID", timeout=1)
Example #4
    def test_valid_url(self, monkeypatch, tmpdir):
        """Test that when receiving a valid url, data will be decoded and
        returned."""
        decoded_data = {"res": "Decoded Data"}
        test_id = "some_test_ID"
        test_file = os.path.join(tmpdir, f"{test_id}_workflow_result.json")
        test_tar = os.path.join(tmpdir, "test.tgz")

        with open(test_file, "w") as outfile:
            json.dump(decoded_data, outfile)

        tar = tarfile.open(test_tar, mode="w:gz")
        tar.add(test_file)
        tar.close()

        # Change to the test directory
        os.chdir(tmpdir)
        with monkeypatch.context() as m:
            result_message = ["Some message2", "Some location"]

            m.setattr(pennylane_orquestra.cli_actions, "workflow_results",
                      lambda *args: result_message)
            m.setattr(urllib.request, "urlopen", lambda arg: arg)
            m.setattr(urllib.request, "urlretrieve", lambda *args, **kwargs:
                      (test_tar, ))
            assert loop_until_finished(test_id, timeout=1) == decoded_data
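The happy path above relies on the mocked urlretrieve returning a local tarball that contains the JSON results. Here is a minimal sketch of the assumed extract-and-load step (which would also explain the "not a tarfile" ValueError tested in Example #2); the real loop_until_finished may differ.

import json
import tarfile

def load_result_sketch(local_tar, result_file):
    """Hypothetical download post-processing: extract the tarball and
    load the JSON result file."""
    if not tarfile.is_tarfile(local_tar):
        # The case tested in Example #2, where a plain JSON file is served
        raise ValueError(f"The file {local_tar} is not a tarfile.")
    with tarfile.open(local_tar, mode="r:gz") as tar:
        tar.extractall()
    with open(result_file) as f:
        return json.load(f)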
Example #5
    def multiple_steps_results(self, workflow_id):
        """Extracts the results of multiple steps obtained for a workflow.

        This method assumes that the workflow had multiple steps and that the
        structure of the result corresponds to results sent by Orquestra API
        v1.0.0.

        Orquestra doesn't necessarily execute parallel steps in the order
        they were defined in the workflow file, so results might have been
        written in any order. They are therefore sorted by step name.

        Args:
            workflow_id (str): the ID of the workflow to extract results for

        Returns:
            results (list): a list of workflow results for each step
        """
        data = loop_until_finished(workflow_id, timeout=self._timeout)
        try:
            # Sort results by step name
            get_step_name = lambda entry: entry[1]["stepName"]
            data = dict(sorted(data.items(), key=get_step_name))

            # Obtain the results for each step
            result_dicts = list(data.values())
            results = [dct["expval"]["list"] for dct in result_dicts]
        except (IndexError, KeyError, TypeError, AttributeError) as e:
            current_status = workflow_details(workflow_id)
            raise ValueError(
                f"Unexpected result format for workflow {workflow_id}.\n "
                f"{''.join(current_status)}") from e
        return results
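A toy payload illustrating the sort above. The key names ("stepName", "expval", "list") come from the code; the IDs, step names, and values are invented for illustration.

data = {
    "id-b": {"stepName": "step-1", "expval": {"list": [0.2]}},
    "id-a": {"stepName": "step-0", "expval": {"list": [0.1]}},
}

# Sorting by step name restores the order the steps were defined in,
# regardless of the order the results were written in
data = dict(sorted(data.items(), key=lambda entry: entry[1]["stepName"]))
results = [v["expval"]["list"] for v in data.values()]
assert results == [[0.1], [0.2]]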
Example #6
    def test_loop_until_finished_raises(self, res_msg, monkeypatch):
        """Check that certain errors are raised and handled correctly by the
        loop_until_finished function."""
        with monkeypatch.context() as m:
            m.setattr(pennylane_orquestra.cli_actions, "workflow_details",
                      lambda *args: "Some message1")
            m.setattr(pennylane_orquestra.cli_actions, "workflow_results",
                      lambda *args: res_msg)

            # Check that indexing into the message raises an IndexError
            # (this shows that it will be handled internally)
            with pytest.raises(IndexError, match="list index out of range"):
                res_msg[1].split()[1]

            # Check that looping eventually times out
            with pytest.raises(TimeoutError, match="were not obtained after"):
                loop_until_finished("Some ID", timeout=1)
Example #7
    def test_loop_raises_workflow_fail(self, monkeypatch):
        """Check that an error is raised if the workflow exeuction failed."""
        with monkeypatch.context() as m:
            status = "Status:              Failed\n"
            result_message = "Some message2"

            m.setattr(pennylane_orquestra.cli_actions, "workflow_details",
                      lambda *args: status)
            m.setattr(
                pennylane_orquestra.cli_actions,
                "workflow_results",
                lambda *args: result_message,
            )

            # Check that looping raises an error if the workflow details
            # contain a failed status
            with pytest.raises(
                ValueError,
                match=f"Something went wrong with executing the workflow. {status}",
            ):
                loop_until_finished("Some ID", timeout=1)
Example #8
    def single_step_results(self, workflow_id):
        """Extracts the results of a single step obtained for a workflow.

        This method assumes that the workflow had a single step and that the
        structure of the result corresponds to results sent by Orquestra API
        v1.0.0.

        Args:
            workflow_id (str): the ID of the workflow to extract results for

        Returns:
            results (list): a list of workflow results
        """
        data = loop_until_finished(workflow_id, timeout=self._timeout)
        try:
            step_result = list(data.values())[0]
            results = step_result["expval"]["list"]
        except (IndexError, KeyError, TypeError, AttributeError) as e:
            current_status = workflow_details(workflow_id)
            raise ValueError(
                f"Unexpected result format for workflow {workflow_id}.\n "
                f"{''.join(current_status)}") from e
        return results
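The single-step case reduces to extracting the only value of the result dictionary; a toy example with invented values:

data = {"some-id": {"expval": {"list": [0.42, -0.1]}}}
step_result = list(data.values())[0]
assert step_result["expval"]["list"] == [0.42, -0.1]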
Example #9
    def _batch_execute(self, circuits, file_id, **kwargs):
        """Creates a multi-step workflow for executing a batch of circuits.

        Args:
            circuits (list[QuantumTape]): circuits to execute on the device
            file_id (str): the file id to be used for naming the workflow file

        Returns:
            list[array[float]]: list of measured value(s) for the batch
        """
        for circuit in circuits:
            # Input checks
            not_all_expval = any(obs.return_type is not Expectation for obs in circuit.observables)
            if not_all_expval:
                raise NotImplementedError(
                    f"The {self.short_name} device only supports returning expectation values."
                )

            self.check_validity(circuit.operations, circuit.observables)

        # 1. Create qasm strings from the circuits
        # Extract the CircuitGraph object from QuantumTape
        circuits = [circ.graph for circ in circuits]
        qasm_circuits = [self.serialize_circuit(circuit) for circuit in circuits]

        # 2. Create the qubit operators of observables for each circuit
        ops = []
        identity_indices = {}
        empty_obs_list = []

        for idx, circuit in enumerate(circuits):
            processed_observables, current_id_indices = self.process_observables(
                circuit.observables
            )
            ops.append(processed_observables)
            if not processed_observables:
                # Keep track of empty observable lists
                empty_obs_list.append(idx)

            identity_indices[idx] = current_id_indices

        if not all(ops):
            # There were batches which had only identity observables

            if not any(ops):
                # All the batches only had identity observables, no workflow submission needed
                return [self._asarray([1] * len(circuit.observables)) for circuit in circuits]

            # Remove the empty operator lists so that those are not submitted,
            # and keep the qasm circuits aligned with the remaining operators
            ops = [o for o in ops if o]
            qasm_circuits = [q for i, q in enumerate(qasm_circuits) if i not in empty_obs_list]

        # Multiple steps: need to create json strings as elements of the list
        ops = [json.dumps(o) for o in ops]

        # 3-4. Create the backend specs & workflow file
        workflow = gen_expval_workflow(
            self.qe_component,
            self.backend_specs,
            qasm_circuits,
            ops,
            resources=self._resources,
            **kwargs,
        )

        filename = f"expval-{file_id}.yaml"
        filepath = write_workflow_file(filename, workflow)

        # 5. Submit the workflow
        workflow_id = qe_submit(filepath, keep_file=self._keep_files)
        self._latest_id = workflow_id

        if self._keep_files:
            self._filenames.append(filename)

        # 6. Loop until finished
        data = loop_until_finished(workflow_id, timeout=self._timeout)

        # Due to parallel execution, results might have been written in any order
        # Sort the results by the step name
        get_step_name = lambda entry: entry[1]["stepName"]
        data = dict(sorted(data.items(), key=get_step_name))

        # There are multiple steps
        # Obtain the results for each step
        result_dicts = list(data.values())
        results = [dct["expval"]["list"] for dct in result_dicts]

        results = self.insert_identity_res_batch(results, empty_obs_list, identity_indices)
        results = [self._asarray(res) for res in results]

        return results
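insert_identity_res_batch is not shown here. The following is a hypothetical sketch of the bookkeeping it would have to perform, inferred from how empty_obs_list and identity_indices are populated above (the real helper may differ): circuits whose observables were all identities get an all-ones result re-inserted, and the value 1 is inserted at each identity position of the remaining results.

def insert_identity_res_batch_sketch(results, empty_obs_list, identity_indices):
    """Hypothetical re-implementation; identity observables always have
    expectation value 1, so their results are filled in without execution."""
    results = list(results)

    # Re-insert an all-ones result for circuits that were not submitted
    for idx in empty_obs_list:
        results.insert(idx, [1] * len(identity_indices[idx]))

    # Insert 1 at each identity position of the executed circuits' results
    for idx, res in enumerate(results):
        if idx in empty_obs_list:
            continue
        for pos in identity_indices[idx]:
            res.insert(pos, 1)

    return results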
Example #10
    def execute(self, circuit, **kwargs):
        """Executes a circuit on the device by creating and submitting an
        Orquestra workflow, then extracting the measured expectation values.

        Args:
            circuit (QuantumTape or CircuitGraph): circuit to execute on the
                device

        Returns:
            array[float]: the measured expectation value(s)
        """
        # Input checks
        not_all_expval = any(obs.return_type is not Expectation for obs in circuit.observables)
        if not_all_expval:
            raise NotImplementedError(
                f"The {self.short_name} device only supports returning expectation values."
            )

        self.check_validity(circuit.operations, circuit.observables)

        # 1. Create qasm strings from the circuits
        try:
            qasm_circuit = self.serialize_circuit(circuit)
        except AttributeError:
            # QuantumTape case: need to extract the CircuitGraph
            qasm_circuit = self.serialize_circuit(circuit.graph)

        # 2. Create the qubit operators
        ops, identity_indices = self.process_observables(circuit.observables)

        if not ops:
            # All the observables were identity, no workflow submission needed
            return self._asarray([1] * len(identity_indices))

        # 3. Create the json string of the operators
        ops_json = json.dumps(ops)

        # Single step: need to nest the operators into a list
        ops = [ops_json]
        qasm_circuit = [qasm_circuit]

        # 4-5. Create the backend specs & workflow file
        workflow = gen_expval_workflow(
            self.qe_component,
            self.backend_specs,
            qasm_circuit,
            ops,
            resources=self._resources,
            **kwargs,
        )
        file_id = str(uuid.uuid4())
        filename = f"expval-{file_id}.yaml"
        filepath = write_workflow_file(filename, workflow)

        # 6. Submit the workflow
        workflow_id = qe_submit(filepath, keep_file=self._keep_files)

        if self._keep_files:
            self._filenames.append(filename)

        self._latest_id = workflow_id

        # 7. Loop until finished
        data = loop_until_finished(workflow_id, timeout=self._timeout)

        # Assume that there's only one step
        results = list(data.values())[0]["expval"]["list"]

        # Insert the theoretical value for the expectation value of the
        # identity operator
        for idx in identity_indices:
            results.insert(idx, 1)

        res = self._asarray(results)

        return res
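A toy illustration of the identity insertion at the end of execute; the numbers are invented. Since the identity's expectation value is always 1, it is spliced back in at the positions recorded by process_observables:

identity_indices = [0, 2]     # identity observables sat at positions 0 and 2
results = [0.3, -0.7]         # values returned by the workflow
for idx in identity_indices:
    results.insert(idx, 1)
assert results == [1, 0.3, 1, -0.7]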