def _copy_aws_session(
    aws_session: Optional[AwsSession],
    region: Optional[str] = None,
    max_connections: Optional[int] = None,
) -> AwsSession:
    config = Config(max_pool_connections=max_connections) if max_connections else None
    if aws_session:
        session_region = aws_session.boto_session.region_name
        new_region = region or session_region
        if session_region == new_region and not config:
            return aws_session
        else:
            creds = aws_session.boto_session.get_credentials()
            boto_session = boto3.Session(
                aws_access_key_id=creds.access_key,
                aws_secret_access_key=creds.secret_key,
                aws_session_token=creds.token,
                region_name=new_region,
            )
            return AwsSession(boto_session=boto_session, config=config)
    else:
        boto_session = boto3.Session(region_name=region) if region else None
        return AwsSession(boto_session=boto_session, config=config)

def _initialize_non_regional_device_session(
    aws_session: AwsSession, device: AwsDevice, logger: Logger
) -> AwsSession:
    original_region = aws_session.region
    try:
        aws_session.get_device(device)
        return aws_session
    except ClientError as e:
        if e.response["Error"]["Code"] == "ResourceNotFoundException":
            if "qpu" not in device:
                raise ValueError(f"Simulator '{device}' not found in '{original_region}'")
        else:
            raise e

    for region in frozenset(AwsDevice.REGIONS) - {original_region}:
        device_session = aws_session.copy_session(region=region)
        try:
            device_session.get_device(device)
            logger.info(
                f"Changed session region from '{original_region}' to '{device_session.region}'"
            )
            return device_session
        except ClientError as e:
            if e.response["Error"]["Code"] != "ResourceNotFoundException":
                raise e
    raise ValueError(f"QPU '{device}' not found.")

def _aws_session_for_qpu(device_arn: str, aws_session: AwsSession) -> AwsSession:
    """
    Get an AwsSession for the device ARN. QPUs are physically located in specific AWS Regions.
    The AWS session should connect to the Region that the QPU is located in.

    See `braket.aws.aws_qpu.AwsDevice.QPU_REGIONS` for the AWS Regions the QPUs are located in.
    """
    region_key = device_arn.split("/")[-2]
    qpu_regions = AwsDevice.QPU_REGIONS.get(region_key, [])
    if aws_session:
        if aws_session.boto_session.region_name in qpu_regions:
            return aws_session
        else:
            creds = aws_session.boto_session.get_credentials()
            boto_session = boto3.Session(
                aws_access_key_id=creds.access_key,
                aws_secret_access_key=creds.secret_key,
                aws_session_token=creds.token,
                region_name=qpu_regions[0],
            )
            return AwsSession(boto_session=boto_session)
    else:
        boto_session = boto3.Session(region_name=qpu_regions[0])
        return AwsSession(boto_session=boto_session)

def run_batch(
    self,
    task_specifications: List[Union[Circuit, Problem, OpenQasmProgram, BlackbirdProgram]],
    s3_destination_folder: Optional[AwsSession.S3DestinationFolder] = None,
    shots: Optional[int] = None,
    max_parallel: Optional[int] = None,
    max_connections: int = AwsQuantumTaskBatch.MAX_CONNECTIONS_DEFAULT,
    poll_timeout_seconds: float = AwsQuantumTask.DEFAULT_RESULTS_POLL_TIMEOUT,
    poll_interval_seconds: float = AwsQuantumTask.DEFAULT_RESULTS_POLL_INTERVAL,
    *aws_quantum_task_args,
    **aws_quantum_task_kwargs,
) -> AwsQuantumTaskBatch:
    """Executes a batch of tasks in parallel.

    Args:
        task_specifications (List[Union[Circuit, Problem, OpenQasmProgram, BlackbirdProgram]]):
            List of circuits or annealing problems to run on device.
        s3_destination_folder (Optional[S3DestinationFolder]): The S3 location to save the
            tasks' results to. Default is `<default_bucket>/tasks` if invoked outside of a
            Braket Job, `<Job Bucket>/jobs/<job name>/tasks` if invoked inside of a Braket Job.
        shots (Optional[int]): The number of times to run the circuit or annealing problem.
            Default is 1000 for QPUs and 0 for simulators.
        max_parallel (Optional[int]): The maximum number of tasks to run on AWS in parallel.
            Batch creation will fail if this value is greater than the maximum allowed
            concurrent tasks on the device. Default: 10
        max_connections (int): The maximum number of connections in the Boto3 connection pool.
            Also the maximum number of thread pool workers for the batch. Default: 100
        poll_timeout_seconds (float): The polling timeout for `AwsQuantumTask.result()`,
            in seconds. Default: 5 days.
        poll_interval_seconds (float): The polling interval for results in seconds.
            Default: 1 second.

    Returns:
        AwsQuantumTaskBatch: A batch containing all of the tasks run.

    See Also:
        `braket.aws.aws_quantum_task_batch.AwsQuantumTaskBatch`
    """
    return AwsQuantumTaskBatch(
        AwsSession.copy_session(self._aws_session, max_connections=max_connections),
        self._arn,
        task_specifications,
        s3_destination_folder
        or (
            AwsSession.parse_s3_uri(os.environ.get("AMZN_BRAKET_TASK_RESULTS_S3_URI"))
            if "AMZN_BRAKET_TASK_RESULTS_S3_URI" in os.environ
            else None
        )
        or (self._aws_session.default_bucket(), "tasks"),
        shots if shots is not None else self._default_shots,
        max_parallel=max_parallel if max_parallel is not None else self._default_max_parallel,
        max_workers=max_connections,
        poll_timeout_seconds=poll_timeout_seconds,
        poll_interval_seconds=poll_interval_seconds,
        *aws_quantum_task_args,
        **aws_quantum_task_kwargs,
    )

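# Usage sketch for run_batch above. Everything here is illustrative: the SV1 simulator ARN is
# public, but the circuit, shot count, and parallelism values are assumptions, and the results
# are fetched with AwsQuantumTaskBatch.results().
def _example_run_batch_usage():
    from braket.aws import AwsDevice
    from braket.circuits import Circuit

    device = AwsDevice("arn:aws:braket:::device/quantum-simulator/amazon/sv1")
    bell = Circuit().h(0).cnot(0, 1)
    batch = device.run_batch([bell] * 10, shots=100, max_parallel=5)
    return batch.results()  # blocks until all tasks in the batch complete
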
def _aws_session_for_device(device_arn: str, aws_session: AwsSession) -> AwsSession:
    """AwsSession: Returns an AwsSession for the device ARN."""
    if "qpu" in device_arn:
        return AwsDevice._aws_session_for_qpu(device_arn, aws_session)
    else:
        return aws_session or AwsSession()

def _(
    circuit: Circuit,
    aws_session: AwsSession,
    create_task_kwargs: Dict[str, Any],
    device_parameters: Union[dict, BraketSchemaBase],
    device_arn: str,
    *args,
    **kwargs,
) -> AwsQuantumTask:
    validate_circuit_and_shots(circuit, create_task_kwargs["shots"])
    # TODO: Update this to use `deviceCapabilities` from Amazon Braket's GetDevice operation
    # in order to decide what parameters to build.
    paradigm_parameters = GateModelParameters(qubitCount=circuit.qubit_count)
    if "ionq" in device_arn:
        device_parameters = IonqDeviceParameters(paradigmParameters=paradigm_parameters)
    elif "rigetti" in device_arn:
        device_parameters = RigettiDeviceParameters(paradigmParameters=paradigm_parameters)
    else:  # default to use simulator
        device_parameters = GateModelSimulatorDeviceParameters(
            paradigmParameters=paradigm_parameters
        )
    create_task_kwargs.update(
        {
            "action": circuit.to_ir().json(),
            "deviceParameters": device_parameters.json(),
        }
    )
    task_arn = aws_session.create_quantum_task(**create_task_kwargs)
    return AwsQuantumTask(task_arn, aws_session, *args, **kwargs)

def _get_arn_sessions(arns, names, types, statuses, provider_names, aws_session):
    aws_session = aws_session if aws_session else AwsSession()
    sessions_for_arns = {}
    session_region = aws_session.boto_session.region_name
    device_regions_set = AwsDevice._get_devices_regions_set(types, arns, session_region)
    for region in device_regions_set:
        session_for_region = AwsDevice._copy_aws_session(aws_session, region)
        # Simulators are only instantiated in the same region as the AWS session
        types_for_region = sorted(
            types if region == session_region else types - {AwsDeviceType.SIMULATOR}
        )
        region_device_arns = [
            result["deviceArn"]
            for result in session_for_region.search_devices(
                arns=arns,
                names=names,
                types=types_for_region,
                statuses=statuses,
                provider_names=provider_names,
            )
        ]
        sessions_for_arns.update(
            {
                arn: session_for_region
                for arn in region_device_arns
                if arn not in sessions_for_arns
            }
        )
    return sessions_for_arns

def _initialize_regional_device_session(
    aws_session: AwsSession, device: AwsDevice, logger: Logger
) -> AwsSession:
    device_region = AwsDevice.get_device_region(device)
    current_region = aws_session.region
    if current_region != device_region:
        aws_session = aws_session.copy_session(region=device_region)
        logger.info(f"Changed session region from '{current_region}' to '{device_region}'")
    try:
        aws_session.get_device(device)
        return aws_session
    except ClientError as e:
        raise ValueError(f"'{device}' not found.") if (
            e.response["Error"]["Code"] == "ResourceNotFoundException"
        ) else e

def __init__(
    self,
    image_uri: str,
    aws_session: AwsSession = None,
    logger: Logger = getLogger(__name__),
    force_update: bool = False,
):
    """Represents and provides functions for interacting with a Braket Jobs docker container.

    The function "end_session" must be called when the container is no longer needed.

    Args:
        image_uri (str): The URI of the container image to run.
        aws_session (AwsSession): AwsSession for connecting to AWS Services.
            Default: AwsSession()
        logger (Logger): Logger object with which to write logs.
            Default: `getLogger(__name__)`
        force_update (bool): Try to update the container if an update is available.
            Default: False
    """
    self._aws_session = aws_session or AwsSession()
    self.image_uri = image_uri
    self.run_result = None
    self._container_name = None
    self._logger = logger
    self._force_update = force_update

def _get_env_default_vars(aws_session: AwsSession, **creation_kwargs) -> Dict[str, str]:
    """This function gets the remaining 'simple' env variables that don't require any
    additional logic to determine what they are or when they should be added as env variables.

    Returns:
        Dict[str, str]: The set of key/value pairs that should be added as environment
        variables to the running container.
    """
    job_name = creation_kwargs["jobName"]
    bucket, location = AwsSession.parse_s3_uri(creation_kwargs["outputDataConfig"]["s3Path"])
    return {
        "AWS_DEFAULT_REGION": aws_session.region,
        "AMZN_BRAKET_JOB_NAME": job_name,
        "AMZN_BRAKET_DEVICE_ARN": creation_kwargs["deviceConfig"]["device"],
        "AMZN_BRAKET_JOB_RESULTS_DIR": "/opt/braket/model",
        "AMZN_BRAKET_CHECKPOINT_DIR": creation_kwargs["checkpointConfig"]["localPath"],
        "AMZN_BRAKET_OUT_S3_BUCKET": bucket,
        "AMZN_BRAKET_TASK_RESULTS_S3_URI": f"s3://{bucket}/jobs/{job_name}/tasks",
        "AMZN_BRAKET_JOB_RESULTS_S3_PATH": str(Path(location, job_name, "output").as_posix()),
    }

def _populate_properties(self, session: AwsSession) -> None:
    metadata = session.get_device(self._arn)
    self._name = metadata.get("deviceName")
    self._status = metadata.get("deviceStatus")
    self._type = AwsDeviceType(metadata.get("deviceType"))
    self._provider_name = metadata.get("providerName")
    self._properties = metadata.get("deviceCapabilities")

def _download_input_data(
    aws_session: AwsSession,
    download_dir: str,
    input_data: Dict[str, Any],
) -> None:
    """Downloads input data for a job.

    Args:
        aws_session (AwsSession): AwsSession for connecting to AWS Services.
        download_dir (str): The directory path to download to.
        input_data (Dict[str, Any]): One of the input data in the boto3 input parameters
            for running a Braket Job.
    """
    # If s3 prefix is the full name of a directory and all keys are inside
    # that directory, the contents of said directory will be copied into a
    # directory with the same name as the channel. This behavior is the same
    # whether or not s3 prefix ends with a "/". Moreover, if s3 prefix ends
    # with a "/", this is certainly the behavior to expect, since it can only
    # match a directory.
    # If s3 prefix matches any files exactly, or matches as a prefix of any
    # files or directories, then all files and directories matching s3 prefix
    # will be copied into a directory with the same name as the channel.
    channel_name = input_data["channelName"]
    s3_uri_prefix = input_data["dataSource"]["s3DataSource"]["s3Uri"]
    bucket, prefix = AwsSession.parse_s3_uri(s3_uri_prefix)
    s3_keys = aws_session.list_keys(bucket, prefix)
    top_level = prefix if _is_dir(prefix, s3_keys) else str(Path(prefix).parent)
    found_item = False
    try:
        Path(download_dir, channel_name).mkdir()
    except FileExistsError:
        raise ValueError(
            f"Duplicate channel names not allowed for input data: {channel_name}"
        )
    for s3_key in s3_keys:
        relative_key = Path(s3_key).relative_to(top_level)
        download_path = Path(download_dir, channel_name, relative_key)
        if not s3_key.endswith("/"):
            download_path.parent.mkdir(parents=True, exist_ok=True)
            aws_session.download_from_s3(
                AwsSession.construct_s3_uri(bucket, s3_key), str(download_path)
            )
            found_item = True
    if not found_item:
        raise RuntimeError(f"No data found for channel '{channel_name}'")

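# Illustration of the layout produced by _download_input_data above (bucket, keys, and channel
# name are assumptions): for an S3 prefix "s3://my-bucket/input/train" that is a directory
# containing keys "input/train/a.csv" and "input/train/sub/b.csv", and a channel named "train",
# the files are downloaded to
#     <download_dir>/train/a.csv
#     <download_dir>/train/sub/b.csv
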
def _copy_aws_session(aws_session: Optional[AwsSession], regions: List[str]) -> AwsSession:
    if aws_session:
        if aws_session.boto_session.region_name in regions:
            return aws_session
        else:
            creds = aws_session.boto_session.get_credentials()
            boto_session = boto3.Session(
                aws_access_key_id=creds.access_key,
                aws_secret_access_key=creds.secret_key,
                aws_session_token=creds.token,
                region_name=regions[0],
            )
            return AwsSession(boto_session=boto_session)
    else:
        boto_session = boto3.Session(region_name=regions[0])
        return AwsSession(boto_session=boto_session)

def _aws_session_for_task_arn(task_arn: str) -> AwsSession:
    """
    Get an AwsSession for the Task ARN. The AWS session should be in the region of the task.

    Returns:
        AwsSession: `AwsSession` object with default `boto_session` in task's region
    """
    task_region = task_arn.split(":")[3]
    boto_session = boto3.Session(region_name=task_region)
    return AwsSession(boto_session=boto_session)

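# Sketch of the region extraction used above; the ARN is illustrative, not a real task ARN:
#
#     "arn:aws:braket:us-west-2:123456789012:quantum-task/1234".split(":")[3]
#     # -> "us-west-2" (the fourth colon-separated ARN field is the region)
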
def _tar_and_upload_to_code_location(
    source_module_path: Path, aws_session: AwsSession, code_location: str
) -> None:
    """
    Tar and upload source module to code location.

    Args:
        source_module_path (Path): Path to source module.
        aws_session (AwsSession): AwsSession for uploading source module.
        code_location (str): S3 URI pointing to the location where the tarred source module
            will be uploaded to.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        with tarfile.open(f"{temp_dir}/source.tar.gz", "w:gz", dereference=True) as tar:
            tar.add(source_module_path, arcname=source_module_path.name)
        aws_session.upload_to_s3(f"{temp_dir}/source.tar.gz", f"{code_location}/source.tar.gz")

def _initialize_session(
    session_value: AwsSession, device: AwsDevice, logger: Logger
) -> AwsSession:
    aws_session = session_value or AwsSession()
    if device.startswith("local:"):
        return aws_session
    device_region = AwsDevice.get_device_region(device)
    return (
        AwsQuantumJob._initialize_regional_device_session(aws_session, device, logger)
        if device_region
        else AwsQuantumJob._initialize_non_regional_device_session(aws_session, device, logger)
    )

def _default_session_for_job_arn(job_arn: str) -> AwsSession:
    """Get an AwsSession for the Job ARN. The AWS session should be in the region of the job.

    Args:
        job_arn (str): The ARN for the quantum job.

    Returns:
        AwsSession: `AwsSession` object with default `boto_session` in job's region.
    """
    job_region = job_arn.split(":")[3]
    boto_session = boto3.Session(region_name=job_region)
    return AwsSession(boto_session=boto_session)

def _(
    blackbird_program: BlackbirdProgram,
    aws_session: AwsSession,
    create_task_kwargs: Dict[str, Any],
    device_arn: str,
    _device_parameters: Union[dict, BraketSchemaBase],
    _disable_qubit_rewiring: bool,
    *args,
    **kwargs,
) -> AwsQuantumTask:
    create_task_kwargs.update({"action": blackbird_program.json()})
    task_arn = aws_session.create_quantum_task(**create_task_kwargs)
    return AwsQuantumTask(task_arn, aws_session, *args, **kwargs)

def _get_regional_device_session(self, session: AwsSession) -> AwsSession:
    device_region = AwsDevice.get_device_region(self._arn)
    region_session = (
        session
        if session.region == device_region
        else AwsSession.copy_session(session, device_region)
    )
    try:
        self._populate_properties(region_session)
        return region_session
    except ClientError as e:
        raise ValueError(f"'{self._arn}' not found") if (
            e.response["Error"]["Code"] == "ResourceNotFoundException"
        ) else e

def _process_s3_source_module(
    source_module: str, entry_point: str, aws_session: AwsSession, code_location: str
) -> None:
    """
    Check that the source module is an S3 URI of the correct type and that entry point
    is provided.

    Args:
        source_module (str): S3 URI pointing to the tarred source module.
        entry_point (str): Entry point for the job.
        aws_session (AwsSession): AwsSession to copy source module to code location.
        code_location (str): S3 URI pointing to the location where the code will be copied to.
    """
    if entry_point is None:
        raise ValueError("If source_module is an S3 URI, entry_point must be provided.")
    if not source_module.lower().endswith(".tar.gz"):
        raise ValueError(
            "If source_module is an S3 URI, it must point to a tar.gz file. "
            f"Not a valid S3 URI for parameter `source_module`: {source_module}"
        )
    aws_session.copy_s3_object(source_module, f"{code_location}/source.tar.gz")

def _(
    circuit: Circuit,
    aws_session: AwsSession,
    create_task_kwargs: Dict[str, Any],
    device_arn: str,
    device_parameters: Union[dict, BraketSchemaBase],  # Not currently used for circuits
    disable_qubit_rewiring: bool,
    *args,
    **kwargs,
) -> AwsQuantumTask:
    validate_circuit_and_shots(circuit, create_task_kwargs["shots"])
    # TODO: Update this to use `deviceCapabilities` from Amazon Braket's GetDevice operation
    # in order to decide what parameters to build.
    paradigm_parameters = GateModelParameters(
        qubitCount=circuit.qubit_count, disableQubitRewiring=disable_qubit_rewiring
    )
    if "ionq" in device_arn:
        device_parameters = IonqDeviceParameters(paradigmParameters=paradigm_parameters)
    elif "rigetti" in device_arn:
        device_parameters = RigettiDeviceParameters(paradigmParameters=paradigm_parameters)
    elif "oqc" in device_arn:
        device_parameters = OqcDeviceParameters(paradigmParameters=paradigm_parameters)
    else:  # default to use simulator
        device_parameters = GateModelSimulatorDeviceParameters(
            paradigmParameters=paradigm_parameters
        )
    qubit_reference_type = QubitReferenceType.VIRTUAL
    if disable_qubit_rewiring or Instruction(StartVerbatimBox()) in circuit.instructions:
        qubit_reference_type = QubitReferenceType.PHYSICAL
    serialization_properties = OpenQASMSerializationProperties(
        qubit_reference_type=qubit_reference_type
    )
    create_task_kwargs.update(
        {
            "action": circuit.to_ir(
                ir_type=IRType.OPENQASM,
                serialization_properties=serialization_properties,
            ).json(),
            "deviceParameters": device_parameters.json(),
        }
    )
    task_arn = aws_session.create_quantum_task(**create_task_kwargs)
    return AwsQuantumTask(task_arn, aws_session, *args, **kwargs)

def _process_channel(
    location: str, job_name: str, aws_session: AwsSession, channel_name: str
) -> S3DataSourceConfig:
    """
    Convert a location to an S3DataSourceConfig, uploading local data to S3, if necessary.

    Args:
        location (str): Local prefix or S3 prefix.
        job_name (str): Job name.
        aws_session (AwsSession): AwsSession to be used for uploading local data.
        channel_name (str): Name of the channel.

    Returns:
        S3DataSourceConfig: S3DataSourceConfig for the channel.
    """
    if AwsSession.is_s3_uri(location):
        return S3DataSourceConfig(location)
    else:
        # local prefix "path/to/prefix" will be mapped to
        # s3://bucket/jobs/job-name/data/channel-name/prefix
        location_name = Path(location).name
        s3_prefix = AwsSession.construct_s3_uri(
            aws_session.default_bucket(), "jobs", job_name, "data", channel_name, location_name
        )
        aws_session.upload_local_data(location, s3_prefix)
        return S3DataSourceConfig(s3_prefix)

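# Illustrative sketch of the mapping performed by _process_channel above (job name, local
# prefix, and channel name are assumptions):
#
#     _process_channel("data/train", "my-job", aws_session, "training")
#
# uploads the local prefix "data/train" to
#     s3://<default_bucket>/jobs/my-job/data/training/train
# and returns an S3DataSourceConfig pointing at that prefix, while an S3 URI passed as
# `location` is wrapped in an S3DataSourceConfig unchanged.
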
def _(
    problem: Problem,
    aws_session: AwsSession,
    create_task_kwargs: Dict[str, Any],
    device_parameters: Union[dict, DwaveDeviceParameters],
    device_arn: str,
    *args,
    **kwargs,
) -> AwsQuantumTask:
    create_task_kwargs.update(
        {
            "action": problem.to_ir().json(),
            "deviceParameters": DwaveDeviceParameters.parse_obj(device_parameters).json(),
        }
    )
    task_arn = aws_session.create_quantum_task(**create_task_kwargs)
    return AwsQuantumTask(task_arn, aws_session, *args, **kwargs)

def __init__(
    self,
    image_uri: str,
    aws_session: AwsSession = None,
    logger: Logger = getLogger(__name__),
):
    """Represents and provides functions for interacting with a Braket Jobs docker container.

    The function "end_session" must be called when the container is no longer needed.

    Args:
        image_uri (str): The URI of the container image to run.
        aws_session (AwsSession, Optional): AwsSession for connecting to AWS Services.
            Default: AwsSession()
        logger (Logger): Logger object with which to write logs.
            Default: `getLogger(__name__)`
    """
    self._aws_session = aws_session or AwsSession()
    self.image_uri = image_uri
    self.run_log = None
    self._container_name = None
    self._logger = logger

def _(
    open_qasm_program: OpenQasmProgram,
    aws_session: AwsSession,
    create_task_kwargs: Dict[str, Any],
    device_arn: str,
    _device_parameters: Union[dict, BraketSchemaBase],  # Not currently used for OpenQasmProgram
    _disable_qubit_rewiring: bool,
    *args,
    **kwargs,
) -> AwsQuantumTask:
    if open_qasm_program.inputs is not None:
        raise ValueError(
            "OpenQASM Program inputs are only currently supported in the LocalSimulator."
        )
    create_task_kwargs.update({"action": open_qasm_program.json()})
    task_arn = aws_session.create_quantum_task(**create_task_kwargs)
    return AwsQuantumTask(task_arn, aws_session, *args, **kwargs)

def _initialize_session(session_value, device, logger):
    aws_session = session_value or AwsSession()
    current_region = aws_session.region
    try:
        aws_session.get_device(device)
        return aws_session
    except ClientError as e:
        if e.response["Error"]["Code"] == "ResourceNotFoundException":
            if "qpu" not in device:
                raise ValueError(f"Simulator '{device}' not found in '{current_region}'")
        else:
            raise e

    return AwsQuantumJob._find_device_session(aws_session, device, current_region, logger)

def log_stream(
    aws_session: AwsSession,
    log_group: str,
    stream_name: str,
    start_time: int = 0,
    skip: int = 0,
) -> Dict:
    """A generator for log items in a single stream. This yields all the items that are
    available at the current moment.

    Args:
        aws_session (AwsSession): The AwsSession for interfacing with CloudWatch.
        log_group (str): The name of the log group.
        stream_name (str): The name of the specific stream.
        start_time (int): The time stamp value to start reading the logs from. Default: 0.
        skip (int): The number of log entries to skip at the start. Default: 0
            (This is for when there are multiple entries at the same timestamp.)

    Yields:
        Dict: A CloudWatch log event with the following key-value pairs:
            'timestamp' (int): The time of the event.
            'message' (str): The log event data.
            'ingestionTime' (int): The time the event was ingested.
    """
    next_token = None
    event_count = 1
    while event_count > 0:
        response = aws_session.get_log_events(
            log_group,
            stream_name,
            start_time,
            start_from_head=True,
            next_token=next_token,
        )
        next_token = response["nextForwardToken"]
        events = response["events"]
        event_count = len(events)
        if event_count > skip:
            events = events[skip:]
            skip = 0
        else:
            skip = skip - event_count
            events = []
        for ev in events:
            yield ev

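# Usage sketch for log_stream above. The log group and stream name are assumptions for
# illustration; the printed keys match the event shape documented in the docstring.
def _example_log_stream_usage(aws_session: AwsSession):
    for event in log_stream(aws_session, "/aws/braket/jobs", "my-job/algo-1-0000000000"):
        print(event["timestamp"], event["message"])
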
def _copy_aws_session(
    aws_session: AwsSession,
    region: Optional[str] = None,
    max_connections: Optional[int] = None,
) -> AwsSession:
    config = Config(max_pool_connections=max_connections) if max_connections else None
    session_region = aws_session.boto_session.region_name
    new_region = region or session_region
    creds = aws_session.boto_session.get_credentials()
    if creds.method == "explicit":
        boto_session = boto3.Session(
            aws_access_key_id=creds.access_key,
            aws_secret_access_key=creds.secret_key,
            aws_session_token=creds.token,
            region_name=new_region,
        )
    else:
        boto_session = boto3.Session(region_name=new_region)
    return AwsSession(boto_session=boto_session, config=config)

def _get_session_and_initialize(self, session):
    current_region = session.region
    try:
        self._populate_properties(session)
        return session
    except ClientError as e:
        if e.response["Error"]["Code"] == "ResourceNotFoundException":
            if "qpu" not in self._arn:
                raise ValueError(f"Simulator '{self._arn}' not found in '{current_region}'")
        else:
            raise e

    # Search remaining regions for QPU
    for region in frozenset(AwsDevice.REGIONS) - {current_region}:
        region_session = AwsSession.copy_session(session, region)
        try:
            self._populate_properties(region_session)
            return region_session
        except ClientError as e:
            if e.response["Error"]["Code"] != "ResourceNotFoundException":
                raise e
    raise ValueError(f"QPU '{self._arn}' not found")

def __init__(self, arn: str, aws_session: Optional[AwsSession] = None):
    """
    Args:
        arn (str): The ARN of the device
        aws_session (AwsSession, optional): An AWS session object. Default is `None`.

    Note:
        Some devices (QPUs) are physically located in specific AWS Regions. In some cases,
        the current `aws_session` connects to a Region other than the Region in which the QPU
        is physically located. When this occurs, a cloned `aws_session` is created for the
        Region the QPU is located in.

        See `braket.aws.aws_device.AwsDevice.DEVICE_REGIONS` for the AWS Regions provider
        devices are located in.
    """
    super().__init__(name=None, status=None)
    self._arn = arn
    self._properties = None
    self._provider_name = None
    self._topology_graph = None
    self._type = None
    self._aws_session = self._get_session_and_initialize(aws_session or AwsSession())

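# Usage sketch for the constructor above (assumes AWS credentials are configured; the ARN is
# the public Amazon Braket SV1 simulator):
#
#     device = AwsDevice("arn:aws:braket:::device/quantum-simulator/amazon/sv1")
#     print(device.name, device.status)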