Code example #1
    def create_or_update_compute_environment(self, cf_output):
        path = get_full_path('AWS/compute_environment.json')
        with open(path, 'r') as f:
            data = json.load(f)

            compute_env_name = self.get_compute_name()
            desc_json = self.batch_client.describe_compute_environments(
                computeEnvironments=[compute_env_name])
            if desc_json['computeEnvironments']:
                logging.info(
                    'Skipping creation of AWS Batch Compute environment %s as it already exists',
                    compute_env_name)
                return compute_env_name

            compute_resources = data['computeResources']
            data['computeEnvironmentName'] = compute_env_name
            compute_resources['instanceTypes'].append(self.machine.name)
            compute_resources['launchTemplate'] = {
                'launchTemplateName': self.get_compute_name()
            }
            if 'EC2KeyPair' in cf_output and cf_output['EC2KeyPair']:
                compute_resources['ec2KeyPair'] = cf_output['EC2KeyPair']

            data['serviceRole'] = cf_output['BatchServiceRoleARN']
            compute_resources['subnets'] = [
                cf_output['PrivateSubnet1'], cf_output['PrivateSubnet2']
            ]
            compute_resources['securityGroupIds'] = [
                cf_output['BatchEC2SecurityGroup']
            ]
            compute_resources['instanceRole'] = cf_output[
                'ECSInstanceProfileRoleARN']

        data['tags'] = {'Name': compute_env_name}
        logging.info('Attempting to create AWS Batch Compute environment: %s',
                     compute_env_name)
        self.batch_client.create_compute_environment(**data)

        import botocore.waiter
        try:
            logging.info(
                'Waiting for AWS Batch Compute environment %s to provision...',
                compute_env_name)
            waiter = self.get_compute_environment_waiter(compute_env_name)
            waiter.wait(computeEnvironments=[compute_env_name])
        except botocore.waiter.WaiterError as e:
            msg = f"There was an error with the AWS Batch Compute Environment: {compute_env_name}"
            logging.exception(msg)
            raise SchedulerException(msg) from e

        logging.info('Successfully created AWS Batch Compute environment: %s',
                     compute_env_name)
        return compute_env_name
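
The `get_compute_environment_waiter` helper called above is not shown. Since boto3 ships no built-in waiters for AWS Batch compute environments, it most likely builds a custom one from a `botocore.waiter.WaiterModel`. The sketch below is an assumption about how such a factory could look, not the project's actual implementation; the waiter name, delay, and attempt counts are illustrative.

    def get_compute_environment_waiter(self, compute_env_name):
        # Sketch only: poll DescribeComputeEnvironments until the environment
        # status becomes VALID, or fail fast if it turns INVALID.
        # ``compute_env_name`` is accepted to match the call site above but is
        # not needed to construct the waiter itself.
        import botocore.waiter
        model = botocore.waiter.WaiterModel({
            "version": 2,
            "waiters": {
                "ComputeEnvironmentReady": {
                    "operation": "DescribeComputeEnvironments",
                    "delay": 5,
                    "maxAttempts": 120,
                    "acceptors": [
                        {"matcher": "pathAll",
                         "argument": "computeEnvironments[].status",
                         "expected": "VALID",
                         "state": "success"},
                        {"matcher": "pathAny",
                         "argument": "computeEnvironments[].status",
                         "expected": "INVALID",
                         "state": "failure"},
                    ],
                }
            },
        })
        return botocore.waiter.create_waiter_with_client(
            "ComputeEnvironmentReady", model, self.batch_client)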
Code example #2
    def _wait_for_task_ended(self):
        """
        Try to use a waiter from the pull request below:

            * https://github.com/boto/botocore/pull/1307

        If the waiter is not available, apply an exponential backoff:

            * docs.aws.amazon.com/general/latest/gr/api-retries.html
        """
        try:
            waiter = self.client.get_waiter("job_execution_complete")
            waiter.config.max_attempts = sys.maxsize  # timeout is managed by airflow
            waiter.wait(jobs=[self.jobId])
        except ValueError:
            self._poll_for_task_ended()
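
The `_poll_for_task_ended` fallback is not included in the snippet. A minimal sketch of an exponential-backoff poll loop consistent with the docstring might look like this (an assumption, not the actual Airflow implementation; the backoff cap and jitter are illustrative):

    def _poll_for_task_ended(self):
        # Sketch only: poll describe_jobs with capped exponential backoff
        # until the AWS Batch job reaches a terminal state.
        import random
        import time
        retries = 0
        while True:
            response = self.client.describe_jobs(jobs=[self.jobId])
            status = response["jobs"][0]["status"]
            if status in ("SUCCEEDED", "FAILED"):
                return
            delay = min(2 ** retries + random.uniform(0, 1), 30)
            time.sleep(delay)
            retries += 1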
Code example #3
File: IAM.py  Project: owasp-sbot/OSBot-AWS
    def wait_for_waiter(self,
                        waiter_name,
                        arg_name,
                        arg_value,
                        delay=None,
                        max_attempts=None):
        kwargs = {arg_name: arg_value}
        kwargs['WaiterConfig'] = {
            'Delay': delay or 1,  # 1 is the default boto3 value for Delay
            'MaxAttempts': max_attempts or 20  # 20 is the default boto3 value for MaxAttempts
        }

        waiter = self.client().get_waiter(waiter_name)
        waiter.wait(**kwargs)
        return waiter
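
A hypothetical call of this helper, using boto3's built-in IAM `user_exists` waiter (which polls GetUser and accepts a `UserName` argument). Only `wait_for_waiter` comes from the snippet above; the `IAM()` constructor and user name are assumed for illustration.

    # Block until a freshly created IAM user is visible to the API.
    iam = IAM()
    iam.wait_for_waiter(waiter_name='user_exists',
                        arg_name='UserName',
                        arg_value='ci-bot',
                        delay=2,
                        max_attempts=10)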
Code example #4
File: aws_fixtures.py  Project: dazza-codes/aio-aws
def delete_s3_bucket(bucket_name, s3_client):
    """Recursively delete a bucket and all of its contents."""

    try:
        # - ensure the s3-client is loaded with moto mocks
        # - never allow this to apply to live s3 resources
        # - the event-name mocks are dynamically generated after calling the method
        assert has_moto_mocks(s3_client, "before-send.s3.HeadBucket")

        paginator = s3_client.get_paginator("list_object_versions")

        for n in paginator.paginate(Bucket=bucket_name, Prefix=""):
            for obj in chain(
                    n.get("Versions", []),
                    n.get("DeleteMarkers", []),
                    n.get("Contents", []),
                    n.get("CommonPrefixes", []),
            ):
                kwargs = dict(Bucket=bucket_name, Key=obj["Key"])
                if "VersionId" in obj:
                    kwargs["VersionId"] = obj["VersionId"]
                resp = s3_client.delete_object(**kwargs)
                assert response_success(resp)

        resp = s3_client.delete_bucket(Bucket=bucket_name)
        assert response_success(resp)
        # Ensure the bucket is gone
        waiter = s3_client.get_waiter("bucket_not_exists")
        waiter.wait(Bucket=bucket_name)
        try:
            head = s3_client.head_bucket(Bucket=bucket_name)
            assert_status_code(head, 404)
        except ClientError as err:
            resp = err.response
            assert_status_code(resp, 404)

    except ClientError as err:
        print(f"COULD NOT CLEANUP S3 BUCKET: {bucket_name}")
        print(err)
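
The `response_success` and `assert_status_code` helpers (and the `has_moto_mocks` guard) come from the project's test fixtures and are not shown. Presumably they just inspect the `ResponseMetadata` of the boto3 response; a minimal sketch under that assumption:

def response_success(response):
    # Assumed behavior: a boto3 call succeeded if it returned a 2xx status.
    status = response["ResponseMetadata"]["HTTPStatusCode"]
    return 200 <= status < 300


def assert_status_code(response, status_code):
    # Assumed behavior: fail the test if the response carries a different status.
    assert response["ResponseMetadata"]["HTTPStatusCode"] == status_code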
Code example #5
    def wait_for_job(self,
                     job_id: str,
                     delay: Union[int, float, None] = None) -> None:
        """
        Wait for batch job to complete.  This assumes that the ``.waiter_model`` is configured
        using some variation of the ``.default_config`` so that it can generate waiters with the
        following names: "JobExists", "JobRunning" and "JobComplete".

        :param job_id: a batch job ID
        :type job_id: str

        :param delay:  A delay before polling for job status
        :type delay: Union[int, float, None]

        :raises: AirflowException

        .. note::
            This method adds a small random jitter to the ``delay`` (+/- 2 sec, >= 1 sec).
            Using a random interval helps to avoid AWS API throttle limits when many
            concurrent tasks request job-descriptions.

            It also modifies the ``max_attempts`` to use the ``sys.maxsize``,
            which allows Airflow to manage the timeout on waiting.
        """
        self.delay(delay)
        try:
            waiter = self.get_waiter("JobExists")
            waiter.config.delay = self.add_jitter(waiter.config.delay,
                                                  width=2,
                                                  minima=1)
            waiter.config.max_attempts = sys.maxsize  # timeout is managed by Airflow
            waiter.wait(jobs=[job_id])

            waiter = self.get_waiter("JobRunning")
            waiter.config.delay = self.add_jitter(waiter.config.delay,
                                                  width=2,
                                                  minima=1)
            waiter.config.max_attempts = sys.maxsize  # timeout is managed by Airflow
            waiter.wait(jobs=[job_id])

            waiter = self.get_waiter("JobComplete")
            waiter.config.delay = self.add_jitter(waiter.config.delay,
                                                  width=2,
                                                  minima=1)
            waiter.config.max_attempts = sys.maxsize  # timeout is managed by Airflow
            waiter.wait(jobs=[job_id])

        except (botocore.exceptions.ClientError,
                botocore.exceptions.WaiterError) as err:
            raise AirflowException(err)
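
The `add_jitter` helper referenced above is not shown; based on the docstring it shifts the waiter delay by a small random amount while enforcing a minimum. A sketch under that assumption (the default `width` and `minima` values are illustrative):

    def add_jitter(self, delay, width=2, minima=1):
        # Sketch only: randomize the polling delay by +/- ``width`` seconds,
        # never returning less than ``minima`` seconds.
        import random
        return max(float(minima), float(delay) + random.uniform(-width, width))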
Code example #6
    def wait(self, id_='', status="WorkspaceRunning"):
        if id_ == '':
            id_ = self.get_workspace()[0]
        # ``model`` is a module-level botocore WaiterModel (see the sketch below);
        # ``self.wss`` is presumably the boto3 WorkSpaces client.
        waiter = botocore.waiter.create_waiter_with_client(
            status, model, self.wss)
        waiter.wait(WorkspaceIds=[id_])
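
The module-level `model` passed to `create_waiter_with_client` is not part of the snippet. boto3 has no built-in WorkSpaces waiters, so it is presumably a hand-written `botocore.waiter.WaiterModel` whose waiter names (such as "WorkspaceRunning") match the `status` argument, following the same pattern as the compute-environment sketch under code example #1. A guess at its shape, with illustrative timings:

import botocore.waiter

# Assumed shape of the module-level waiter model (names and timings illustrative).
model = botocore.waiter.WaiterModel({
    "version": 2,
    "waiters": {
        "WorkspaceRunning": {
            "operation": "DescribeWorkspaces",
            "delay": 15,
            "maxAttempts": 60,
            "acceptors": [
                {"matcher": "pathAll",
                 "argument": "Workspaces[].State",
                 "expected": "AVAILABLE",
                 "state": "success"},
                {"matcher": "pathAny",
                 "argument": "Workspaces[].State",
                 "expected": "ERROR",
                 "state": "failure"},
            ],
        }
    },
})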