Example no. 1
    @property
    def properties(self) -> DeviceCapabilities:
        """DeviceCapabilities: Return the device properties

        Please see `braket.device_schema` in amazon-braket-schemas-python_

        .. _amazon-braket-schemas-python: https://github.com/aws/amazon-braket-schemas-python"""
        return BraketSchemaBase.parse_raw_schema(self._properties)
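A minimal usage sketch for the property above (the SV1 simulator ARN is one possible choice, and `shotsRange`/`action` are assumed fields of the parsed capabilities; a sketch, not a definitive API tour):

from braket.aws import AwsDevice

# One possible device ARN (Amazon's SV1 on-demand simulator); any accessible device works.
device = AwsDevice("arn:aws:braket:::device/quantum-simulator/amazon/sv1")
capabilities = device.properties           # parsed DeviceCapabilities schema object
print(capabilities.service.shotsRange)     # assumed service-level field (allowed shot counts)
print(list(capabilities.action.keys()))    # assumed map of supported action types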
Example no. 2
    async def _wait_for_completion(
        self,
    ) -> Union[GateModelQuantumTaskResult, AnnealingQuantumTaskResult]:
        """
        Waits for the quantum task to be completed, then returns the result from the S3 bucket.

        Returns:
            Union[GateModelQuantumTaskResult, AnnealingQuantumTaskResult]: If the task is in the
                `AwsQuantumTask.RESULTS_READY_STATES` state within the specified time limit,
                the result from the S3 bucket is loaded and returned.
                `None` is returned if a timeout occurs or task state is in
                `AwsQuantumTask.NO_RESULT_TERMINAL_STATES`.
        Note:
            Timeout and sleep intervals are defined in the constructor fields
                `poll_timeout_seconds` and `poll_interval_seconds` respectively.
                If `poll_outside_execution_window` is set to `False`, it will
                not poll the API for the current task status when the current time
                is outside of the associated device's execution window.
        """
        self._logger.debug(f"Task {self._arn}: start polling for completion")
        start_time = time.time()

        while (time.time() - start_time) < self._poll_timeout_seconds:
            is_polling_time = self._is_polling_time()
            if not is_polling_time:
                self._logger.debug(
                    f"Task {self._arn}: Current time {datetime.now()} is outside of"
                    f" associated device's execution windows "
                    f"{self._get_device_execution_windows()}. Skipping polling for now."
                )
                # Sleep before re-checking so the loop does not spin while the
                # device's execution window is closed.
                await asyncio.sleep(self._poll_interval_seconds)
                continue
            current_metadata = self.metadata()
            task_status = current_metadata["status"]
            self._logger.debug(f"Task {self._arn}: task status {task_status}")
            if task_status in AwsQuantumTask.RESULTS_READY_STATES:
                result_string = self._aws_session.retrieve_s3_object_body(
                    current_metadata["outputS3Bucket"],
                    current_metadata["outputS3Directory"] + f"/{AwsQuantumTask.RESULTS_FILENAME}",
                )
                self._result = _format_result(BraketSchemaBase.parse_raw_schema(result_string))
                return self._result
            elif task_status in AwsQuantumTask.NO_RESULT_TERMINAL_STATES:
                self._logger.warning(
                    f"Task is in terminal state {task_status} and no result is available"
                )
                self._result = None
                return None
            else:
                await asyncio.sleep(self._poll_interval_seconds)

        # Timed out
        self._logger.warning(
            f"Task {self._arn}: polling for task completion timed out after "
            + f"{time.time()-start_time} seconds. Please increase the timeout; "
            + "this can be done by creating a new AwsQuantumTask with this task's ARN "
            + "and a higher value for the `poll_timeout_seconds` parameter."
        )
        self._result = None
        return None
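The same poll-until-terminal loop can be illustrated outside the SDK. A self-contained sketch, assuming made-up state names and a caller-supplied get_status callable (these are not Braket API calls):

import asyncio
import time

RESULTS_READY_STATES = {"COMPLETED"}
NO_RESULT_TERMINAL_STATES = {"FAILED", "CANCELLED"}

async def wait_for_completion(get_status, poll_timeout_seconds=10.0, poll_interval_seconds=1.0):
    """Poll get_status() until a result is ready, a terminal failure occurs, or the timeout hits."""
    start_time = time.time()
    while (time.time() - start_time) < poll_timeout_seconds:
        status = get_status()
        if status in RESULTS_READY_STATES:
            return status                      # a real caller would download the result here
        if status in NO_RESULT_TERMINAL_STATES:
            return None                        # terminal state with no result
        await asyncio.sleep(poll_interval_seconds)
    return None                                # timed out

# Usage with a fake status source that completes on the third poll.
_statuses = iter(["QUEUED", "RUNNING", "COMPLETED"])
print(asyncio.run(wait_for_completion(lambda: next(_statuses), poll_interval_seconds=0.01)))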
def test_parse_raw_schema():
    schema = TaskMetadata(
        id="test_id",
        deviceId="device_id",
        shots=1000,
    )
    assert schema == BraketSchemaBase.parse_raw_schema(schema.json())
    assert isinstance(schema, TaskMetadata)
def test_import_schema_module():
    schema = TaskMetadata(
        id="test_id",
        deviceId="device_id",
        shots=1000,
    )
    module = BraketSchemaBase.import_schema_module(schema)
    assert schema == module.TaskMetadata.parse_raw(schema.json())
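For intuition, the header-to-module lookup that `import_schema_module` performs can be sketched with importlib; the name/version-to-module mapping below is an assumption for illustration, not the library's actual implementation:

import importlib

def import_module_for_header(name: str, version: str):
    # Guess the defining module from a schema header, e.g.
    # ("braket.task_result.task_metadata", "1") -> braket.task_result.task_metadata_v1.
    major_version = version.split(".")[0]
    return importlib.import_module(f"{name}_v{major_version}")

# module = import_module_for_header("braket.task_result.task_metadata", "1")
# module.TaskMetadata.parse_raw(payload) would then round-trip the payload, as in the test above.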
Example no. 5
    def _download_result(self):
        current_metadata = self.metadata(True)
        result_string = self._aws_session.retrieve_s3_object_body(
            current_metadata["outputS3Bucket"],
            current_metadata["outputS3Directory"] + f"/{AwsQuantumTask.RESULTS_FILENAME}",
        )
        self._result = _format_result(BraketSchemaBase.parse_raw_schema(result_string))
        return self._result
    def _populate_properties(self, session):
        metadata = session.get_device(self._arn)
        self._name = metadata.get("deviceName")
        self._status = metadata.get("deviceStatus")
        self._type = AwsDeviceType(metadata.get("deviceType"))
        self._provider_name = metadata.get("providerName")
        qpu_properties = metadata.get("deviceCapabilities")
        self._properties = BraketSchemaBase.parse_raw_schema(qpu_properties)
        self._topology_graph = self._construct_topology_graph()
    def refresh_metadata(self) -> None:
        """
        Refresh the `AwsDevice` object with the most recent Device metadata.
        """
        metadata = self._aws_session.get_device(self._arn)
        self._name = metadata.get("deviceName")
        self._status = metadata.get("deviceStatus")
        self._type = AwsDeviceType(metadata.get("deviceType"))
        self._provider_name = metadata.get("providerName")
        qpu_properties = metadata.get("deviceCapabilities")
        self._properties = BraketSchemaBase.parse_raw_schema(qpu_properties)
        self._topology_graph = self._construct_topology_graph()
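A hedged usage sketch for refresh_metadata (the device ARN is one possible choice; `name` and `status` are assumed public accessors backed by the private fields set above):

from braket.aws import AwsDevice

device = AwsDevice("arn:aws:braket:::device/quantum-simulator/amazon/sv1")  # one possible ARN
device.refresh_metadata()            # re-fetch deviceName, deviceStatus, deviceCapabilities, ...
print(device.name, device.status)    # assumed accessors for the refreshed _name / _status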
    async def _wait_for_completion(
        self,
    ) -> Union[GateModelQuantumTaskResult, AnnealingQuantumTaskResult]:
        """
        Waits for the quantum task to be completed, then returns the result from the S3 bucket.

        Returns:
            Union[GateModelQuantumTaskResult, AnnealingQuantumTaskResult]: If the task is in the
                `AwsQuantumTask.RESULTS_READY_STATES` state within the specified time limit,
                the result from the S3 bucket is loaded and returned.
                `None` is returned if a timeout occurs or task state is in
                `AwsQuantumTask.NO_RESULT_TERMINAL_STATES`.
        Note:
            Timeout and sleep intervals are defined in the constructor fields
                `poll_timeout_seconds` and `poll_interval_seconds` respectively.
        """
        self._logger.debug(f"Task {self._arn}: start polling for completion")
        start_time = time.time()

        while (time.time() - start_time) < self._poll_timeout_seconds:
            # Use cached metadata if the cached status is terminal
            task_status = self._update_status_if_nonterminal()
            current_metadata = self.metadata(True)
            self._logger.debug(f"Task {self._arn}: task status {task_status}")
            if task_status in AwsQuantumTask.RESULTS_READY_STATES:
                result_string = self._aws_session.retrieve_s3_object_body(
                    current_metadata["outputS3Bucket"],
                    current_metadata["outputS3Directory"] + f"/{AwsQuantumTask.RESULTS_FILENAME}",
                )
                self._result = _format_result(BraketSchemaBase.parse_raw_schema(result_string))
                return self._result
            elif task_status in AwsQuantumTask.NO_RESULT_TERMINAL_STATES:
                self._result = None
                return None
            else:
                await asyncio.sleep(self._poll_interval_seconds)

        # Timed out
        self._logger.warning(
            f"Task {self._arn}: polling for task completion timed out after "
            f"{time.time() - start_time} seconds. Please increase the timeout; "
            "this can be done by creating a new AwsQuantumTask with this task's ARN "
            "and a higher value for the `poll_timeout_seconds` parameter."
        )
        self._result = None
        return None
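This variant stops calling the service once the cached status is terminal (`_update_status_if_nonterminal`). A self-contained sketch of that caching idea, with made-up state names and a caller-supplied fetch_status callable:

TERMINAL_STATES = {"COMPLETED", "FAILED", "CANCELLED"}

class StatusCache:
    """Cache the last seen status and stop calling the backend once it is terminal."""

    def __init__(self, fetch_status):
        self._fetch_status = fetch_status
        self._status = None

    def update_if_nonterminal(self):
        if self._status not in TERMINAL_STATES:
            self._status = self._fetch_status()   # only hit the backend while still running
        return self._status

# Usage with a fake backend that finishes on the third call and is never queried again.
_states = iter(["QUEUED", "RUNNING", "COMPLETED", "NEVER_REQUESTED"])
cache = StatusCache(lambda: next(_states))
print([cache.update_if_nonterminal() for _ in range(5)])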
    def _download_result(
        self,
    ) -> Union[
        GateModelQuantumTaskResult, AnnealingQuantumTaskResult, PhotonicModelQuantumTaskResult
    ]:
        current_metadata = self.metadata(True)
        result_string = self._aws_session.retrieve_s3_object_body(
            current_metadata["outputS3Bucket"],
            current_metadata["outputS3Directory"] + f"/{AwsQuantumTask.RESULTS_FILENAME}",
        )
        self._result = _format_result(BraketSchemaBase.parse_raw_schema(result_string))
        task_event = {"arn": self.id, "status": self.state(), "execution_duration": None}
        try:
            task_event["execution_duration"] = (
                self._result.additional_metadata.simulatorMetadata.executionDuration
            )
        except AttributeError:
            pass
        broadcast_event(_TaskCompletionEvent(**task_event))
        return self._result
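The try/except AttributeError above tolerates results without simulator metadata; the same optional nested lookup, sketched with stand-in objects (names invented for illustration):

from types import SimpleNamespace

def execution_duration_or_none(result):
    """Return the nested simulator execution duration, or None when any level is missing."""
    try:
        return result.additional_metadata.simulatorMetadata.executionDuration
    except AttributeError:
        return None

with_metadata = SimpleNamespace(
    additional_metadata=SimpleNamespace(simulatorMetadata=SimpleNamespace(executionDuration=42))
)
without_metadata = SimpleNamespace(additional_metadata=None)
print(execution_duration_or_none(with_metadata))     # 42
print(execution_duration_or_none(without_metadata))  # None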
def test_import_schema_module_error():
    schema = BraketSchemaBase(
        braketSchemaHeader=BraketSchemaHeader(name="braket.task_result.task_metadata", version="0.0"),
    )
    # Version "0.0" has no matching schema module, so the import is expected to fail.
    with pytest.raises(ModuleNotFoundError):
        BraketSchemaBase.import_schema_module(schema)
def test_header_name_incorrect():
    # An integer is not a valid header, so construction should fail validation.
    with pytest.raises(ValidationError):
        BraketSchemaBase(braketSchemaHeader=120)
def test_schema_base_correct(braket_schema_header):
    schema = BraketSchemaBase(braketSchemaHeader=braket_schema_header)
    assert schema.braketSchemaHeader == braket_schema_header
    assert BraketSchemaBase.parse_raw(schema.json()) == schema
def test_missing_properties():
    # All required fields are missing, so construction should fail validation.
    with pytest.raises(ValidationError):
        BraketSchemaBase()
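test_schema_base_correct expects a braket_schema_header pytest fixture that is not shown in this listing; a plausible sketch, with assumed name/version values:

import pytest
from braket.schema_common import BraketSchemaHeader

@pytest.fixture
def braket_schema_header():
    # Hypothetical values; any valid schema name/version pair would satisfy the test.
    return BraketSchemaHeader(name="braket.task_result.task_metadata", version="1")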