Code example #1
def build_region_list(service, chosen_regions=None, partition_name='aws'):
    """
    Build the list of target region names

    :param service:                     Service targeted, e.g. ec2
    :param chosen_regions:              Regions desired, e.g. us-east-2
    :param partition_name:              Name of the partition, default is aws

    :return:
    """
    if chosen_regions is None:
        chosen_regions = []
    service = 'ec2containerservice' if service == 'ecs' else service
    # Get list of regions from botocore
    regions = Session().get_available_regions(service,
                                              partition_name=partition_name)
    if chosen_regions:
        return list((Counter(regions) & Counter(chosen_regions)).elements())
    else:
        return regions
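This snippet assumes "from collections import Counter" and "from botocore.session import Session". A minimal self-contained sketch of the same Counter-intersection idea (the region names here are only examples):

from collections import Counter

from botocore.session import Session

regions = Session().get_available_regions('ec2', partition_name='aws')
chosen = ['us-east-2', 'eu-west-1']
# The Counter intersection keeps only the chosen regions that actually exist.
print(list((Counter(regions) & Counter(chosen)).elements()))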
Code example #2
    def test_assume_role_uses_same_region_as_client(self):
        config = ('[profile A]\n'
                  'sts_regional_endpoints = regional\n'
                  'role_arn = arn:aws:iam::123456789:role/RoleA\n'
                  'source_profile = B\n\n'
                  '[profile B]\n'
                  'aws_access_key_id = abc123\n'
                  'aws_secret_access_key = def456\n')
        self.write_config(config)

        session = Session(profile='A')
        with SessionHTTPStubber(session) as stubber:
            self.add_assume_role_http_response(stubber)
            # Make an arbitrary client and API call as we are really only
            # looking to make sure the STS assume role call uses the correct
            # endpoint.
            self.make_stubbed_client_call_to_region(session, stubber,
                                                    'us-west-2')
            self.assertEqual(stubber.requests[0].url,
                             'https://sts.us-west-2.amazonaws.com/')
Code example #3
File: factory.py Project: datamaranai/chalice
def create_botocore_session(profile=None, debug=False,
                            connection_timeout=None,
                            read_timeout=None,
                            max_retries=None):
    # type: (_OPT_STR, bool, _OPT_INT, _OPT_INT, _OPT_INT) -> Session
    s = Session(profile=profile)
    _add_chalice_user_agent(s)
    if debug:
        _inject_large_request_body_filter()
    config_args = {}  # type: Dict[str, Any]
    if connection_timeout is not None:
        config_args['connect_timeout'] = connection_timeout
    if read_timeout is not None:
        config_args['read_timeout'] = read_timeout
    if max_retries is not None:
        config_args['retries'] = {'max_attempts': max_retries}
    if config_args:
        config = BotocoreConfig(**config_args)
        s.set_default_client_config(config)
    return s
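A hedged usage sketch of the same pattern without Chalice's helpers: attach a default client Config to a botocore session, then create a client from it (the timeout and retry values are placeholders):

from botocore.config import Config as BotocoreConfig
from botocore.session import Session

s = Session(profile=None)
s.set_default_client_config(BotocoreConfig(
    connect_timeout=5,
    read_timeout=60,
    retries={'max_attempts': 3}))
client = s.create_client('lambda', region_name='us-east-1')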
Code example #4
    def test_remove_credentials_default_profile(self):
        """ Removes default entry in ~/.aws/credentials """
        self.profile = 'default'
        self.aws_credentials = """
[default]
aws_access_key_id = foo
aws_secret_access_key = bar
aws_session_token = yep
aws_security_token = yep
"""

        credentials = "\n[default]\n" \
                      "aws_access_key_id = \n" \
                      "aws_secret_access_key = \n" \
                      "aws_session_token = \n" \
                      "aws_security_token = \n"

        session = Session()
        remove_credentials(session)
        self.assertAwsCredentialsEquals(credentials)
Code example #5
def post_to_es(payload):
    '''Post data to ES cluster with exponential backoff'''

    # Get aws_region and credentials to post signed URL to ES
    es_region = ES_REGION or os.environ['AWS_REGION']
    session = Session({'region': es_region})
    creds = get_credentials(session)
    es_url = urlparse(ES_ENDPOINT)
    # Extract the domain name in ES_ENDPOINT
    es_endpoint = es_url.netloc or es_url.path

    # Post data with exponential backoff
    retries = 0
    while retries < ES_MAX_RETRIES:
        if retries > 0:
            seconds = (2 ** retries) * .1
            logger.debug('Waiting for %.1f seconds', seconds)
            time.sleep(seconds)

        try:
            es_ret_str = post_data_to_es(
                payload, es_region, creds, es_endpoint, '/_bulk')
            logger.debug('Return from ES: %s', es_ret_str)
            es_ret = json.loads(es_ret_str)

            if es_ret['errors']:
                logger.error(
                    'ES post unsuccessful, errors present, took=%sms', es_ret['took'])
                # Filter errors
                es_errors = [item for item in es_ret['items']
                            if item.get('index', {}).get('error')]
                logger.error('List of items with errors: %s',
                            json.dumps(es_errors))
            else:
                logger.info('ES post successful, took=%sms', es_ret['took'])
            break  # Sending to ES was ok, break retry loop
        except ES_Exception as e:
            if (e.status_code >= 500) and (e.status_code <= 599):
                retries += 1  # Candidate for retry
            else:
                raise  # Stop retrying, re-raise exception
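The retry policy above in generic form: a compact sketch (not from any of these projects) that retries a callable with the same exponential delays on 5xx status codes. ES_Exception is assumed to be the exception class used by these snippets, carrying a status_code attribute.

import time

def with_backoff(do_post, max_retries=3):
    for attempt in range(max_retries):
        if attempt > 0:
            time.sleep((2 ** attempt) * 0.1)  # 0.2s, then 0.4s, then 0.8s...
        try:
            return do_post()
        except ES_Exception as e:  # assumed: carries a status_code attribute
            if not 500 <= e.status_code <= 599:
                raise  # non-5xx errors are not retried
    raise RuntimeError('gave up after %d attempts' % max_retries)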
Code example #6
def calculate_implementation_coverage():
    service_names = Session().get_available_services()
    coverage = {}
    for service_name in service_names:
        moto_client = get_moto_implementation(service_name)
        real_client = boto3.client(service_name, region_name='us-east-1')
        implemented = []
        not_implemented = []

        operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names]
        for op in operation_names:
            if moto_client and op in dir(moto_client):
                implemented.append(op)
            else:
                not_implemented.append(op)

        coverage[service_name] = {
            'implemented': implemented,
            'not_implemented': not_implemented,
        }
    return coverage
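A small follow-up sketch showing one way to turn the coverage dict into a readable report (assumes calculate_implementation_coverage above is in scope):

coverage = calculate_implementation_coverage()
for service, ops in sorted(coverage.items()):
    total = len(ops['implemented']) + len(ops['not_implemented'])
    if total:
        print('{}: {}/{} operations implemented ({:.0f}%)'.format(
            service, len(ops['implemented']), total,
            100.0 * len(ops['implemented']) / total))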
Code example #7
    def get_spark_context(self, env_context):
        if env_context == 'local':
            session = Session()
            credentials = session.get_credentials()
            current_credentials = credentials.get_frozen_credentials()
            config = (SparkConf().setMaster(env_context).setAppName("Myapp"))
        else:
            config = (SparkConf().setAppName("Myapp"))

        sc = SparkContext(conf=config)

        if env_context == 'local':
            sc._jsc.hadoopConfiguration().set("fs.s3a.access.key",
                                              current_credentials.access_key)
            sc._jsc.hadoopConfiguration().set("fs.s3a.secret.key",
                                              current_credentials.secret_key)
        else:
            pass
        sql_context = SQLContext(sc)
        sc.setLogLevel("ERROR")
        return sc, sql_context
Code example #8
    def test_assume_role_uses_correct_region(self):
        config = ('[profile A]\n'
                  'role_arn = arn:aws:iam::123456789:role/RoleA\n'
                  'source_profile = B\n\n'
                  '[profile B]\n'
                  'aws_access_key_id = abc123\n'
                  'aws_secret_access_key = def456\n')
        self.write_config(config)
        session = Session(profile='A')
        # Verify that when we configure the session with a specific region
        # that we use that region when creating the sts client.
        session.set_config_variable('region', 'cn-north-1')

        create_client, expected_creds = self.create_stubbed_sts_client(session)
        session.create_client = create_client

        resolver = create_credential_resolver(session)
        provider = resolver.get_provider('assume-role')
        creds = provider.load()
        self.assert_creds_equal(creds, expected_creds)
        self.assertEqual(self.actual_client_region, 'cn-north-1')
Code example #9
def put_data_to_es(payload, path, method='PUT', proto='https://'):
    '''Post data to ES endpoint with SigV4 signed http headers'''
    es_url = urlparse.urlparse(ES_ENDPOINT)
    es_endpoint = es_url.netloc or es_url.path  # Extract the domain name in ES_ENDPOINT
    req = AWSRequest(method=method,
                     url=proto + es_endpoint + '/' + urllib.quote(path),
                     data=payload,
                     headers={
                         'Host': es_endpoint,
                         'Content-Type': 'application/json'
                     })
    es_region = ES_REGION or os.environ['AWS_REGION']
    session = Session()
    SigV4Auth(get_credentials(session), 'es', es_region).add_auth(req)
    http_session = URLLib3Session()
    res = http_session.send(req.prepare())
    if res.status_code >= 200 and res.status_code <= 299:
        return res._content
    else:
        raise ES_Exception(res.status_code, res._content)
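The snippet above is Python 2 (urlparse.urlparse, urllib.quote). A hedged Python 3 sketch of the same SigV4 signing flow; the function name is hypothetical, and the endpoint and region come from placeholder environment variables:

import os
from urllib.parse import quote, urlparse

from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest
from botocore.httpsession import URLLib3Session
from botocore.session import Session

def put_data_to_es_py3(payload, path, method='PUT', proto='https://'):
    es_url = urlparse(os.environ['ES_ENDPOINT'])  # placeholder env var
    es_endpoint = es_url.netloc or es_url.path
    req = AWSRequest(method=method,
                     url=proto + es_endpoint + '/' + quote(path),
                     data=payload,
                     headers={'Host': es_endpoint,
                              'Content-Type': 'application/json'})
    creds = Session().get_credentials()
    SigV4Auth(creds, 'es', os.environ['AWS_REGION']).add_auth(req)
    return URLLib3Session().send(req.prepare())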
Code example #10
File: spark_run.py Project: kkellyy/paasta
def get_spark_env(
    args,
    spark_conf,
):
    spark_env = {}

    creds = Session().get_credentials()
    spark_env['AWS_ACCESS_KEY_ID'] = creds.access_key
    spark_env['AWS_SECRET_ACCESS_KEY'] = creds.secret_key

    # Run spark (and mesos framework) as root.
    spark_env['SPARK_USER'] = '******'
    spark_env['SPARK_OPTS'] = spark_conf

    # Default configs to start the jupyter notebook server
    if args.cmd == 'jupyter':
        dirs = args.work_dir.split(':')
        spark_env['JUPYTER_RUNTIME_DIR'] = dirs[1] + '/.jupyter'
        spark_env['JUPYTER_DATA_DIR'] = dirs[1] + '/.jupyter'

    return spark_env
Code example #11
File: scaffold.py Project: screwnet/moto
def select_service_and_operation():
    service_names = Session().get_available_services()
    service_completer = WordCompleter(service_names)
    service_name = prompt("Select service: ", completer=service_completer)
    if service_name not in service_names:
        click.secho("{} is not valid service".format(service_name), fg="red")
        raise click.Abort()
    moto_client = get_moto_implementation(service_name)
    real_client = boto3.client(service_name, region_name="us-east-1")
    implemented = []
    not_implemented = []

    operation_names = [
        xform_name(op) for op in real_client.meta.service_model.operation_names
    ]
    for operation in operation_names:
        if moto_client and operation in dir(moto_client):
            implemented.append(operation)
        else:
            not_implemented.append(operation)
    operation_completer = WordCompleter(operation_names)

    click.echo("==Current Implementation Status==")
    for operation_name in operation_names:
        check = "X" if operation_name in implemented else " "
        click.secho("[{}] {}".format(check, operation_name))
    click.echo("=================================")
    operation_name = prompt("Select Operation: ",
                            completer=operation_completer)

    if operation_name not in operation_names:
        click.secho("{} is not valid operation".format(operation_name),
                    fg="red")
        raise click.Abort()

    if operation_name in implemented:
        click.secho("{} is already implemented".format(operation_name),
                    fg="red")
        raise click.Abort()
    return service_name, operation_name
Code example #12
def select_service_and_operation():
    service_names = Session().get_available_services()
    service_completer = WordCompleter(service_names)
    service_name = prompt(u'Select service: ', completer=service_completer)
    if service_name not in service_names:
        click.secho(u'{} is not a valid service'.format(service_name), fg='red')
        raise click.Abort()
    moto_client = get_moto_implementation(service_name)
    real_client = boto3.client(service_name, region_name='us-east-1')
    implemented = []
    not_implemented = []

    operation_names = [
        xform_name(op) for op in real_client.meta.service_model.operation_names
    ]
    for op in operation_names:
        if moto_client and op in dir(moto_client):
            implemented.append(op)
        else:
            not_implemented.append(op)
    operation_completer = WordCompleter(operation_names)

    click.echo('==Current Implementation Status==')
    for operation_name in operation_names:
        check = 'X' if operation_name in implemented else ' '
        click.secho('[{}] {}'.format(check, operation_name))
    click.echo('=================================')
    operation_name = prompt(u'Select Operation: ',
                            completer=operation_completer)

    if operation_name not in operation_names:
        click.secho('{} is not a valid operation'.format(operation_name),
                    fg='red')
        raise click.Abort()

    if operation_name in implemented:
        click.secho('{} is already implemented'.format(operation_name),
                    fg='red')
        raise click.Abort()
    return service_name, operation_name
Code example #13
def post_to_es(payload):
    '''Post data to ES cluster'''

    # Get aws_region and credentials to post signed URL to ES
    es_region = ES_REGION or os.environ['AWS_REGION']
    session = Session({'region': es_region})
    creds = get_credentials(session)
    es_url = urlparse.urlparse(ES_ENDPOINT)
    es_endpoint = es_url.netloc or es_url.path  # Extract the domain name in ES_ENDPOINT

    # Post data
    retries = 0
    while retries < ES_MAX_RETRIES:
        if retries > 0:
            seconds = (2**retries) * .1
            time.sleep(seconds)

        try:
            es_ret_str = post_data_to_es(payload, es_region, creds,
                                         es_endpoint, '/_bulk')
            es_ret = json.loads(es_ret_str)

            if es_ret['errors']:
                logger.error('ES post unsuccessful, errors present, took=%sms',
                             es_ret['took'])
                es_errors = [
                    item for item in es_ret['items']
                    if item.get('index', {}).get('error')
                ]
                logger.error('List of items with errors: %s',
                             json.dumps(es_errors))
            else:
                logger.info('ES post successful, took=%sms', es_ret['took'])
            break
        except ES_Exception as e:
            if (e.status_code >= 500) and (e.status_code <= 599):
                retries += 1
            else:
                raise
Code example #14
        def __init__(self, name, access_key=None, secret_key=None,
                     security_token=None, profile_name=None, **kwargs):
            """
            Create a new BotoCredentialAdapter.
            """
            # TODO: We take kwargs because new boto2 versions have an 'anon'
            # argument and we want to be future proof

            if (name == 'aws' or name is None) and access_key is None and not kwargs.get('anon', False):
                # We are on AWS, no credentials were passed along, and we aren't anonymous.
                # We will fall back to a boto3 resolver for getting credentials.
                # Make sure to enable boto3's own caching, so we can share that
                # cache with pure boto3 code elsewhere in Toil.
                self._boto3_resolver = create_credential_resolver(Session(profile=profile_name), cache=JSONFileCache())
            else:
                # We will use the normal flow
                self._boto3_resolver = None

            # Pass along all the arguments
            super(BotoCredentialAdapter, self).__init__(name, access_key=access_key,
                                                        secret_key=secret_key, security_token=security_token,
                                                        profile_name=profile_name, **kwargs)
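A hedged standalone sketch of the resolver-plus-cache construction used above; in the botocore versions this code targets, both names import from botocore.credentials:

from botocore.credentials import JSONFileCache, create_credential_resolver
from botocore.session import Session

session = Session(profile=None)
resolver = create_credential_resolver(session, cache=JSONFileCache())
creds = resolver.load_credentials()  # walks the usual provider chain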
Code example #15
    def test_remove_credentials_non_default_profile(self):
        """ Removes non-default entry in ~/.aws/credentials """
        self.profile = 'foo'

        credentials = self.aws_credentials
        credentials += "\n[foo]\n" \
                       "aws_access_key_id = \n" \
                       "aws_secret_access_key = \n" \
                       "aws_session_token = \n" \
                       "aws_security_token = \n"

        self.aws_credentials += """
[foo]
aws_access_key_id = a
aws_secret_access_key = b
aws_session_token = c
aws_security_token = c
"""

        session = Session()
        remove_credentials(session)
        self.assertAwsCredentialsEquals(credentials)
Code example #16
    def __init__(
        self,
        *,
        max_concurrent_requests: int = _DEFAULT_MAX_CONCURRENT_REQUESTS,
        max_attempts: int = _DEFAULT_MAX_ATTEMPTS,
        timeout: aiohttp.ClientTimeout = _DEFAULT_TIMEOUT,
        session: aiohttp.ClientSession = None,
    ):
        self._max_concurrent_requests = max_concurrent_requests
        self._max_attempts = max_attempts

        # Fetch the credentials and default region from botocore's session.
        # This will automatically find configuration in the user's .aws folder,
        # or in instance metadata.
        boto_session = Session()
        self._credentials = boto_session.get_credentials()
        self._region = boto_session.get_config_variable("region")

        if session is None:
            self._session = aiohttp.ClientSession(raise_for_status=True, timeout=timeout)
        else:
            self._session = session
Code example #17
def write_image_to_s3(img, output, time, file_name):
    # Create an IoT client for sending messages to the cloud.
    client = greengrasssdk.client('iot-data')
    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
    session = Session()
    s3 = session.create_client('s3')
    record = 'json/record_at_' + time + '.json'
    # You can control the size and quality of the image
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    _, jpg_data = cv2.imencode('.jpg', img, encode_param)
    response = s3.put_object(Body=jpg_data.tostring(),
                             Bucket='YOUR-BUCKET-NAME',
                             Key=file_name)
    response2 = s3.put_object(Body=json.dumps(output),
                              Bucket='YOUR-BUCKET-NAME',
                              Key=record)
    #client.publish(topic=iot_topic, payload="Response: {}".format(response))
    client.publish(topic=iot_topic, payload="Response: {}".format(response2))
    client.publish(topic=iot_topic, payload="Data pushed to S3")

    image_url = 'https://s3.amazonaws.com/YOUR-BUCKET-NAME/' + file_name
    return image_url
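Hand-building an https://s3.amazonaws.com/... URL as above only works for public objects. A hedged alternative sketch using botocore's presigned URLs (bucket and key are placeholders):

from botocore.session import Session

s3 = Session().create_client('s3', region_name='us-east-1')
url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': 'YOUR-BUCKET-NAME', 'Key': 'some/file.jpg'},
    ExpiresIn=3600)  # link stays valid for one hour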
Code example #18
File: cli.py Project: venkiahn/python-aada
    def main(self):
        parser = self._create_parser()
        self._parsed_args = parser.parse_args(self.args)

        if self._parsed_args.profile:
            self._session = Session(profile=self._parsed_args.profile)
        else:
            self._session = get_session()

        if self._parsed_args.debug:
            self._debug = True

        if self._parsed_args.no_headless:
            self._headless = False

        if self._parsed_args.role:
            self._role = self._parsed_args.role

        if self._parsed_args.account:
            self._account = self._parsed_args.account

        return self.__getattribute__('_{}'.format(self._parsed_args.command))()
Code example #19
    def run(self):
        root_path = os.path.abspath('.')
        package_path = os.path.join(root_path, 'dist', 'awslambdalayers')
        if os.path.exists(package_path):
            shutil.rmtree(package_path)
        os.makedirs(package_path)

        os.environ['DOCKER_BUILDKIT'] = '1'

        proc = Popen(('docker', 'build', '--output', package_path, '-'),
                     stdin=PIPE)
        proc.communicate(self.create_dockerfile().encode())

        if proc.returncode != 0:
            raise RuntimeError('build return %s' % proc.returncode)

        client = Session().create_client('lambda', region_name=self.region)
        arns = []

        for layername, packages in self.layers:
            zip_filename = os.path.join(package_path, f'{layername}.zip')
            os.chdir(os.path.join(package_path, layername))
            self.make_archive(zip_filename[:-4], 'zip', None, '.')
            with open(os.path.join(package_path, f'{layername}.txt'),
                      'r') as f:
                description = f.read()
            with open(os.path.join(package_path, f'{layername}.zip'),
                      'rb') as f:
                zipbuf = f.read()

            print(f'Uploading {layername} ...')
            ret = client.publish_layer_version(
                LayerName=layername,
                Description=description,
                Content={'ZipFile': zipbuf},
                CompatibleRuntimes=[self.build_params['RUNTIME']])
            arns.append(ret.get('LayerArn'))
        for arn in arns:
            print(f'Layer created for {arn}')
Code example #20
    def getEnvironment(self, profile=None):
        """Return environment variables that should be set for the profile."""
        eventHooks = HierarchicalEmitter()
        session = Session(event_hooks=eventHooks)

        if profile:
            session.set_config_variable('profile', profile)

        eventHooks.register('session-initialized',
                            inject_assume_role_provider_cache,
                            unique_id='inject_assume_role_cred_provider_cache')

        session.emit('session-initialized', session=session)
        creds = session.get_credentials()

        env = {}

        def set(key, value):
            if value:
                env[key] = value

        if profile:
            set('AWS_PROFILE', profile)

        set('AWS_ACCESS_KEY_ID', creds.access_key)
        set('AWS_SECRET_ACCESS_KEY', creds.secret_key)

        # AWS_SESSION_TOKEN is ostensibly the standard:
        # http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs
        # http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-environment
        set('AWS_SESSION_TOKEN', creds.token)

        # ...but boto expects AWS_SECURITY_TOKEN. Set both for compatibility.
        # https://github.com/boto/boto/blob/b016c07d834df5bce75141c4b9d2f3d30352e1b8/boto/connection.py#L438
        set('AWS_SECURITY_TOKEN', creds.token)

        set('AWS_DEFAULT_REGION', session.get_config_variable('region'))

        return env
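A minimal sketch of the event-hook mechanics used above: register a handler on a HierarchicalEmitter, hand the emitter to the session, and fire the event. The handler here is a stand-in for the provider-cache injector in the snippet.

from botocore.hooks import HierarchicalEmitter
from botocore.session import Session

hooks = HierarchicalEmitter()

def on_session_initialized(session, **kwargs):
    # Stand-in handler; botocore passes event metadata via **kwargs.
    print('region:', session.get_config_variable('region'))

hooks.register('session-initialized', on_session_initialized,
               unique_id='demo-handler')
session = Session(event_hooks=hooks)
session.emit('session-initialized', session=session)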
Code example #21
def get_spark_configuration(
    args,
    container_name,
    spark_ui_port,
    docker_img,
    system_paasta_config,
):
    spark_conf = {}
    spark_conf['APP_NAME'] = container_name
    spark_conf['SPARK_UI_PORT'] = spark_ui_port

    creds = Session().get_credentials()
    spark_conf['AWS_ACCESS_KEY_ID'] = creds.access_key
    spark_conf['AWS_SECRET_ACCESS_KEY'] = creds.secret_key

    cluster_fqdn = system_paasta_config.get_cluster_fqdn_format().format(
        cluster=args.cluster)
    mesos_address = '{}:{}'.format(
        find_mesos_leader(cluster_fqdn),
        MESOS_MASTER_PORT,
    )
    spark_conf['SPARK_MASTER'] = 'mesos://%s' % mesos_address
    spark_conf['SPARK_CORES_MAX'] = args.max_cores
    spark_conf['SPARK_EXECUTOR_CORES'] = args.executor_cores
    spark_conf['SPARK_EXECUTOR_MEMORY'] = '%dg' % args.executor_memory

    if args.driver_max_result_size:
        spark_conf[
            'SPARK_DRIVER_MAX_RESULT_SIZE'] = '%dg' % args.driver_max_result_size
    if args.driver_memory:
        spark_conf['SPARK_DRIVER_MEMORY'] = '%dg' % args.driver_memory
    if args.driver_cores:
        spark_conf['SPARK_DRIVER_CORES'] = args.driver_cores

    if args.build:
        spark_conf['SPARK_EXECUTOR_IMAGE'] = docker_img

    return spark_conf
Code example #22
    def setUp(self):
        super(TestAssumeRoleCredentials, self).setUp()
        self.environ = os.environ.copy()
        self.parent_session = Session()
        self.iam = self.parent_session.create_client('iam')
        self.sts = self.parent_session.create_client('sts')
        self.tempdir = tempfile.mkdtemp()
        self.config_file = os.path.join(self.tempdir, 'config')

        # A role trust policy that allows the current account to call assume
        # role on itself.
        account_id = self.sts.get_caller_identity()['Account']
        self.role_policy = {
            "Version":
            "2012-10-17",
            "Statement": [{
                "Effect": "Allow",
                "Principal": {
                    "AWS": "arn:aws:iam::%s:root" % account_id
                },
                "Action": "sts:AssumeRole"
            }]
        }
Code example #23
def main():
    docker_base_name = 'aws-py37-base'
    proc = Popen(('docker', 'build', '-t', docker_base_name, '-'), stdin=PIPE)
    proc.communicate(PY37_BASE_DOCKERFILE)
    if proc.returncode != 0:
        raise RuntimeError('Docker execution error')

    config = configparser.ConfigParser()
    config.read('packages.cfg')

    aws_options = config['aws']
    aws_region = aws_options['region']

    for layer_name, packages in config['packages'].items():
        sts_token = Session().create_client('sts').get_session_token()

        full_layer_name = 'py37_%s' % layer_name
        dockerfile = PY_PKG_DOCKERFILE % {
            'base_image_name': docker_base_name,
            'packages': packages,
            'region': aws_region,
            'full_layer_name': full_layer_name
        }

        proc = Popen(
            ('docker', 'build', '-t', 'aws-py37-%s' % layer_name,
             '--build-arg', 'RUNTIMES=python3.7', '--build-arg',
             'AWS_ACCESS_KEY_ID=%s' % sts_token['Credentials']['AccessKeyId'],
             '--build-arg', 'AWS_SECRET_ACCESS_KEY=%s' %
             sts_token['Credentials']['SecretAccessKey'], '--build-arg',
             'AWS_SESSION_TOKEN=%s' % sts_token['Credentials']['SessionToken'],
             '-'),
            stdin=PIPE)
        proc.communicate(dockerfile.encode())

        if proc.returncode != 0:
            raise RuntimeError('Docker execution error')
Code example #24
def post_to_es(payload):

    # Get aws_region and credentials to post signed URL to ES
    es_region = os.environ['AWS_REGION']
    session = Session({'region': es_region})
    creds = get_credentials(session)

    # Post data with exponential backoff
    retries = 0
    while retries < ES_MAX_RETRIES:
        if retries > 0:
            seconds = (2**retries) * .1
            time.sleep(seconds)

        try:
            es_ret_str = post_data_to_es(payload, es_region, creds,
                                         elasticsearch_endpoint, '/_bulk')
            es_ret = json.loads(es_ret_str)

            if es_ret['errors']:
                logger.error('ES post unsuccessful, errors present, took=%sms',
                             es_ret['took'])
                # Filter errors
                es_errors = [item for item in es_ret['items']
                             if item.get('index', {}).get('error')]
                logger.error('List of items with errors: %s',
                             json.dumps(es_errors))
            else:
                logger.info('ES post successful, took=%sms', es_ret['took'])
            break  # Sending to ES was ok, break retry loop
        except ES_Exception as e:
            if (e.status_code >= 500) and (e.status_code <= 599):
                retries += 1  # Candidate for retry
            else:
                raise  # Stop retrying, re-raise exception
Code example #25
def get_aws_credentials(
    service: str = DEFAULT_SPARK_SERVICE,
    no_aws_credentials: bool = False,
    aws_credentials_yaml: Optional[str] = None,
    profile_name: Optional[str] = None,
) -> Tuple[Optional[str], Optional[str]]:
    if no_aws_credentials:
        return None, None
    elif aws_credentials_yaml:
        return _load_aws_credentials_from_yaml(aws_credentials_yaml)
    elif service != DEFAULT_SPARK_SERVICE:
        service_credentials_path = os.path.join(AWS_CREDENTIALS_DIR, f"{service}.yaml")
        if os.path.exists(service_credentials_path):
            return _load_aws_credentials_from_yaml(service_credentials_path)
        else:
            paasta_print(
                PaastaColors.yellow(
                    "Did not find service AWS credentials at %s.  Falling back to "
                    "user credentials." % (service_credentials_path)
                )
            )

    creds = Session(profile=profile_name).get_credentials()
    return creds.access_key, creds.secret_key
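The final fallback above is plain botocore profile resolution. As a standalone sketch (the profile name is a placeholder):

from botocore.session import Session

creds = Session(profile='default').get_credentials()
access_key = creds.access_key if creds else None
secret_key = creds.secret_key if creds else None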
Code example #26
    def run(self):
        root_path = os.path.abspath('.')
        package_path = os.path.join(root_path, 'dist', 'aws_package')
        if os.path.exists(package_path):
            shutil.rmtree(package_path)
        os.makedirs(package_path)

        proc = Popen(('pip3', 'install', '--no-deps', '-t', package_path, '.'))
        proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError('pip3 return %s' % proc.returncode)

        os.chdir(package_path)
        for item in os.listdir('.'):
            if item.endswith('.egg-info') or item.endswith('.dist-info'):
                shutil.rmtree(os.path.join(package_path, item))
        zip_filename = os.path.join(root_path, 'dist', 'aws_package.zip')
        self.make_archive(zip_filename[:-4], 'zip', None, '.')

        print('Uploading...')
        client = Session().create_client('lambda', region_name=self.region)
        with open(zip_filename, 'rb') as f:
            buf = f.read()
        ret = client.update_function_code(FunctionName=self.name, ZipFile=buf)
        revision_id = ret['RevisionId']
        print(' >> RevisionId: %s' % revision_id)
        print(' >> CodeSha256: %s' % ret['CodeSha256'])

        for i in range(20):
            info_ret = client.get_function(FunctionName=self.name)
            if info_ret['Configuration']['RevisionId'] == revision_id:
                break
            else:
                sleep(0.2)
        else:
            raise RuntimeError('Wait amz apply the code timeout')
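The manual polling loop above can often be replaced by botocore's built-in waiters; a hedged sketch, assuming a botocore recent enough to ship Lambda's function_updated waiter (the function name is a placeholder):

from botocore.session import Session

client = Session().create_client('lambda', region_name='us-east-1')
# Polls get_function until the new code revision is applied.
client.get_waiter('function_updated').wait(FunctionName='my-function')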
Code example #27
File: main.py Project: jessemyers/botoenv
def parse_args():
    session = Session()
    parser = ArgumentParser()
    parser.add_argument("-p", "--profile", choices=session.available_profiles)
    parser.add_argument("-t", "--template", default=DEFAULT_TEMPLATE)
    return parser.parse_args()
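available_profiles simply lists the profiles botocore finds in the shared config and credentials files; a quick standalone check:

from botocore.session import Session

print(sorted(Session().available_profiles))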
Code example #28
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This face detection model is implemented as a single shot detector (ssd).
        model_type = 'ssd'
        output_map = {1: 'face'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        thing_name = os.environ['AWS_IOT_THING_NAME']
        iot_topic = '$aws/things/{}/infer'.format(thing_name)
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # Create an S3 background uploader
        session = Session()
        s3 = session.create_client('s3',
                                   region_name=os.getenv(
                                       'REGION_NAME', 'ap-southeast-2'))
        bucket = os.getenv('FRAMES_BUCKET',
                           'virtual-concierge-frames-ap-southeast-2')
        uploader = ImageUploader(s3, bucket, client, iot_topic)
        uploader.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_dir = '/opt/awscam/artifacts/'
        model_path = model_dir + 'mxnet_deploy_ssd_FP16_FUSED.xml'
        # Load the model onto the GPU.
        msg = 'Loading face detection model for {}'.format(thing_name)
        client.publish(topic=iot_topic, payload=msg)
        model_start = time.time()
        model = awscam.Model(model_path, {'GPU': 1})
        msg = 'Face detection model loaded in {}s'.format(time.time() -
                                                          model_start)
        client.publish(topic=iot_topic, payload=msg)
        # Attempt to load scorer library
        try:
            model_start = time.time()
            scorer = Scorer(model_dir)
            msg = 'Image classification model loaded {} in {}s'.format(
                scorer.vecs.shape[0],
                time.time() - model_start)
            client.publish(topic=iot_topic, payload=msg)
        except Exception as e:
            print('Failed to load scorer', e)
        # Set the threshold for detection
        detection_threshold = float(os.getenv('DETECT_THRESHOLD', '0.7'))
        # This is the similarity threshold
        sim_threshold = float(os.getenv('DETECT_THRESHOLD', '0.99'))
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Do inference until the lambda is killed.
        while True:
            # get thing shadow state, to see if we should register
            cloud_output = {}
            # Get a frame from the video stream
            cloud_output["frame_start"] = time.time()
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Future: integrate the shadow callback
            if False:
                cloud_output["shadow_start"] = time.time()
                shadow = client.get_thing_shadow(thingName=thing_name)
                jsonState = json.loads(shadow["payload"])
                register = jsonState['state']['desired'].get('register')
                cloud_output["shadow_register"] = register
                cloud_output["shadow_latency"] = time.time(
                ) - cloud_output["shadow_start"]
            # Resize frame to the same size as the training set.
            cloud_output["detect_start"] = time.time()
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API. Note that it is possible to get the output of doInference
            # and do the parsing manually, but since it is an ssd model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize))
            cloud_output["detect_latency"] = time.time(
            ) - cloud_output["detect_start"]
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0] / input_height)
            xscale = float(frame.shape[1] / input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            # Get the detected faces and probabilities
            for i, obj in enumerate(parsed_inference_results[model_type]):
                if obj['prob'] > detection_threshold:
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin']) \
                           + int((obj['xmin'] - input_width/2) + input_width/2)
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width/2) + input_width/2)
                    ymax = int(yscale * obj['ymax'])
                    # Set the default title and color
                    title = '{:.2f}%'.format(obj['prob'] * 100)
                    color = (255, 0, 0)  # blue
                    upload = False
                    if scorer:
                        try:
                            # Attempt to find similar face
                            cloud_output['classify_start'] = time.time()
                            bbox = [xmin, ymin, xmax, ymax]
                            vec = scorer.vectorize(frame, bbox)
                            sim, z_score, prob, name = scorer.similar(vec)
                            if prob >= sim_threshold:
                                title = name
                                if round(prob, 3) < 1.0:
                                    title += ' ({:.2f}%)'.format(prob * 100)
                                color = (0, 255, 0)  # green
                                upload = True
                            cloud_output['classify'] = {
                                'name': name,
                                'sim': float(sim),
                                'zscore': float(z_score),
                                'prob': float(prob)
                            }
                            cloud_output['classify_latency'] = (
                                time.time() - cloud_output['classify_start'])
                        except Exception as e:
                            msg = "Face similarity error: " + str(e)
                            client.publish(topic=iot_topic, payload=msg)
                    if upload:
                        try:
                            metadata = {
                                'ThingName': thing_name,
                                'FullName': title,
                                'Confidence': str(obj['prob']),
                                'Similarity': str(cloud_output['classify']['sim']),
                                'Probability': str(cloud_output['classify']['prob']),
                                'FaceHeight': str(xmax - xmin),
                                'FaceWidth': str(ymax - ymin),
                            }
                            crop_img = uploader.crop(frame, xmin, ymin, xmax,
                                                     ymax)
                            item = uploader.upload(crop_img,
                                                   i,
                                                   metadata=metadata)
                            if item:
                                cloud_output['upload_key'] = item['key']
                            else:
                                cloud_output['upload_skip'] = True
                        except Exception as e:
                            msg = "Upload error: " + str(e)
                            client.publish(topic=iot_topic, payload=msg)
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cloud_output["draw_start"] = time.time()
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 12
                    cv2.putText(frame, title, (xmin, ymin - text_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 2.5, color, 6)
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']
                    cloud_output["draw_latency"] = time.time(
                    ) - cloud_output["draw_start"]
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            cloud_output["frame_end"] = time.time()
            cloud_output["frame_latency"] = cloud_output[
                "frame_end"] - cloud_output["frame_start"]
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        print('Error in face detection lambda: {}'.format(ex))
Code example #29
File: production.py Project: zwaluw/bakerydemo2
    if AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY:
        from requests_aws4auth import AWS4Auth
        WAGTAILSEARCH_BACKENDS['default']['http_auth'] = AWS4Auth(
            AWS_ACCESS_KEY_ID,
            AWS_SECRET_ACCESS_KEY,
            AWS_REGION,
            'es'
        )
    elif AWS_REGION:
        # No API keys in the environ, so attempt to discover them with Boto instead, per:
        # http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials
        # This may be useful if your credentials are obtained via EC2 instance meta data.
        from botocore.session import Session
        from requests_aws4auth import AWS4Auth
        aws_creds = Session().get_credentials()
        if aws_creds:
            WAGTAILSEARCH_BACKENDS['default']['http_auth'] = AWS4Auth(
                aws_creds.access_key,
                aws_creds.secret_key,
                AWS_REGION,
                'es',
                aws_creds.token,
            )

# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/

MIDDLEWARE.append('whitenoise.middleware.WhiteNoiseMiddleware')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
Code example #30
def greengrass_infinite_infer_run():
    try:
        modelPath = "/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml"
        modelType = "ssd"
        input_width = 300
        input_height = 300
        prob_thresh = 0.1
        results_thread = FIFO_Thread()
        results_thread.start()

        # Send a starting message to IoT console
        client.publish(topic=iotTopic, payload="Face detection starts now")

        # Load model to GPU (use {"GPU": 0} for CPU)
        mcfg = {"GPU": 1}
        model = awscam.Model(modelPath, mcfg)
        client.publish(topic=iotTopic, payload="Model loaded")
        ret, frame = awscam.getLastFrame()
        if ret == False:
            raise Exception("Failed to get frame from the stream")

        yscale = float(frame.shape[0] / input_height)
        xscale = float(frame.shape[1] / input_width)
        font = cv2.FONT_HERSHEY_SIMPLEX
        rgb_color = (255, 165, 20)
        # Timers for cooldown and countdown
        cooldown = datetime.datetime.now()
        countdown = datetime.datetime.now()
        doInfer = True
        onCountdown = False

        while doInfer:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            # Raise an exception if failing to get a frame
            if ret == False:
                raise Exception("Failed to get frame from the stream")

            # Resize frame to fit model input requirement
            frameResize = cv2.resize(frame, (input_width, input_height))
            # Run model inference on the resized frame
            inferOutput = model.doInference(frameResize)
            # Output inference result to the fifo file so it can be viewed with mplayer
            parsed_results = model.parseResult(modelType, inferOutput)['ssd']

            label = '{'
            msg = 'false'

            time_now = datetime.datetime.now()

            for obj in parsed_results:
                if (obj['prob'] < prob_thresh):
                    break
                xmin = int(xscale * obj['xmin']) + int(
                    (obj['xmin'] - input_width / 2) + input_width / 2)
                ymin = int(yscale * obj['ymin'])
                xmax = int(xscale * obj['xmax']) + int(
                    (obj['xmax'] - input_width / 2) + input_width / 2)
                ymax = int(yscale * obj['ymax'])
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), rgb_color, 4)
                label += '"{}": {:.2f},'.format("prob", obj['prob'])
                label_show = '{}: {:.2f}'.format(str(obj['label']),
                                                 obj['prob'])
                cv2.putText(frame, label_show, (xmin, ymin - 15), font, 0.5,
                            rgb_color, 4)
                msg = "true"

                if (time_now >= cooldown) and obj['prob'] >= 0.60:
                    # Uploading to Amazon S3 if cooldown and countdown allow it
                    if onCountdown and time_now >= countdown:
                        message = "uploading to s3..."
                        client.publish(topic=iotTopic, payload=message)

                        key = 'images/frame-' + time.strftime(
                            "%Y%m%d-%H%M%S") + '.jpg'
                        session = Session()
                        s3 = session.create_client('s3')

                        _, jpg_data = cv2.imencode('.jpg', frame)
                        result = s3.put_object(Body=jpg_data.tostring(),
                                               Bucket=bucket_name,
                                               Key=key)

                        message = "uploaded to s3: " + key
                        client.publish(topic=iotTopic, payload=message)
                        cooldown = time_now + datetime.timedelta(seconds=10)
                        onCountdown = False
                    # Starting countdown
                    elif not onCountdown:
                        onCountdown = True
                        countdown = time_now + datetime.timedelta(seconds=4)

            if not onCountdown:
                cv2.putText(
                    frame, "Wait for picture: " +
                    str(max(0, int(
                        (cooldown - time_now).total_seconds()))) + " seconds",
                    (950, 100), font, 2, rgb_color, 4)
                if int((cooldown - time_now).total_seconds()) >= 5:
                    cv2.putText(frame, "Image Uploaded! ", (1150, 200), font,
                                2, rgb_color, 4)
                    cv2.putText(frame, "Please check the leaderboard",
                                (900, 300), font, 2, rgb_color, 4)
            else:
                if int((countdown - time_now).total_seconds()) >= -5:
                    cv2.putText(frame, "Say Cheese!", (1000, 1000), font, 3,
                                rgb_color, 4)
                    cv2.putText(
                        frame,
                        str(max(0, int(
                            (countdown - time_now).total_seconds()))) + "...",
                        (1200, 1100), font, 3, rgb_color, 4)
                else:
                    onCountdown = False

            label += '"face": "' + msg + '"'
            label += '}'
            client.publish(topic=iotTopic, payload=label)
            global jpeg
            ret, jpeg = cv2.imencode('.jpg', frame)

    except Exception as e:
        msg = "Test failed: " + str(e)
        client.publish(topic=iotTopic, payload=msg)

    # Asynchronously schedule this function to be run again in 15 seconds
    Timer(15, greengrass_infinite_infer_run).start()