Example #1
    def test_credential_process_returns_error(self):
        config = ('[profile processcreds]\n'
                  'credential_process = %s --raise-error\n')
        config = config % self.credential_process
        with temporary_file('w') as f:
            f.write(config)
            f.flush()
            self.environ['AWS_CONFIG_FILE'] = f.name

            session = Session(profile='processcreds')

            # This regex validates that there is no substring: b'
            # The reason why we want to validate that is that we want to
            # make sure that stderr is actually decoded so that in
            # exceptional cases the error is properly formatted.
            # As for how the regex works:
            # `(?!b').` is a negative lookahead, meaning that it will only
            # match if it is not followed by the pattern `b'`. Since it is
            # followed by a `.` it will match any character not followed by
            # that pattern. `((?!b').)*` does that zero or more times. The
            # final pattern adds `^` and `$` to anchor the beginning and end
            # of the string so we can know the whole string is consumed.
            # Finally `(?s)` at the beginning makes dots match newlines so
            # we can handle a multi-line string.
            reg = r"(?s)^((?!b').)*$"
            with self.assertRaisesRegex(CredentialRetrievalError, reg):
                session.get_credentials()
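
The lookahead trick is easier to trust with a tiny standalone check; this sketch assumes nothing beyond the pattern itself:

    import re

    reg = r"(?s)^((?!b').)*$"
    # A properly decoded, multi-line error message matches...
    assert re.match(reg, "Error: credential process\nfailed") is not None
    # ...while any string containing the repr of undecoded bytes does not.
    assert re.match(reg, "Error: b'credential process failed'") is None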
Example #2
    def test_credential_process_returns_error(self):
        config = (
            '[profile processcreds]\n'
            'credential_process = %s --raise-error\n'
        )
        config = config % self.credential_process
        with temporary_file('w') as f:
            f.write(config)
            f.flush()
            self.environ['AWS_CONFIG_FILE'] = f.name

            session = Session(profile='processcreds')

            # This regex validates that there is no substring: b'
            # The reason why we want to validate that is that we want to
            # make sure that stderr is actually decoded so that in
            # exceptional cases the error is properly formatted.
            # As for how the regex works:
            # `(?!b').` is a negative lookahead, meaning that it will only
            # match if it is not followed by the pattern `b'`. Since it is
            # followed by a `.` it will match any character not followed by
            # that pattern. `((?!b').)*` does that zero or more times. The
            # final pattern adds `^` and `$` to anchor the beginning and end
            # of the string so we can know the whole string is consumed.
            # Finally `(?s)` at the beginning makes dots match newlines so
            # we can handle a multi-line string.
            reg = r"(?s)^((?!b').)*$"
            with self.assertRaisesRegex(CredentialRetrievalError, reg):
                session.get_credentials()
Example #3
    def __init__(
        self,
        upstream_request: HTTPServerRequest,
        endpoint_resolver: EndpointResolver,
        session: Session,
    ):
        """
        :param upstream_request: The original upstream HTTP request from the client(browser) to Jupyter
        :param endpoint_resolver: The botocore endpoint_resolver instance
        """
        self.upstream_request = upstream_request
        self.endpoint_resolver = endpoint_resolver

        self.credentials = session.get_credentials()

        self.upstream_auth_info = self._build_upstream_auth_info()
        self.service_info = get_service_info(
            endpoint_resolver,
            self.upstream_auth_info.service_name,
            self.upstream_auth_info.region,
        )
        # If the environment variable is not set, os.getenv returns None and
        # no whitelist is in effect.
        whitelisted = os.getenv("AWS_JUPYTER_PROXY_WHITELISTED_SERVICES")
        self.whitelisted_services = (
            whitelisted.strip(",").split(",")
            if whitelisted is not None
            else None
        )
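Example #4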
def lambda_handler(event, context):
    logger.info(event)

    payload = event
    if 'bitstream_version' in payload:
        if not os.path.exists(bit_folder_path):
            os.mkdir(bit_folder_path)

        version = event['bitstream_version']
        session = Session()
        _ = session.get_credentials()

        bitstream_version = '{0}/{1}/{2}'.format(bucket_bitstream_path,
                                                 version, bitstream)
        parameters_version = '{0}/{1}/{2}'.format(bucket_bitstream_path,
                                                  version, parameters)

        # download bitstream
        logger.info('Downloading bitstream [{0}]'.format(bitstream_version))
        s3.meta.client.download_file(bucket, bitstream_version,
                                     os.path.join(bit_folder_path, bitstream))

        # download parameters
        logger.info('Downloading parameters [{0}]'.format(parameters_version))
        s3.meta.client.download_file(bucket, parameters_version,
                                     os.path.join(bit_folder_path, parameters))

        # All successful, now publish the update to the core shadow.
        payload = {'state': {'reported': {'bitstream_version': version}}}
        client.publish(topic=topic, payload=json.dumps(payload))
        return
    else:
        logger.info("Hit Accepted or Rejected payload")
Example #5
def greengrass_hello_world_run():
    # Create the Greengrass client so that we can send messages to the IoT console
    client = greengrasssdk.client('iot-data')
    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])

    # Stream configuration, name and retention
    # Note that the name will appear as deeplens-myStream
    stream_name = 'myStream'
    retention = 2  #hours

    # Amount of time to stream
    wait_time = 60 * 60 * 5  #seconds

    # Use the boto session API to grab credentials
    session = Session()
    creds = session.get_credentials()

    # Create producer and stream.
    producer = dkv.createProducer(creds.access_key, creds.secret_key,
                                  creds.token, "us-east-1")
    client.publish(topic=iot_topic, payload="Producer created")
    kvs_stream = producer.createStream(stream_name, retention)
    client.publish(topic=iot_topic,
                   payload="Stream {} created".format(stream_name))

    # Start putting data into the KVS stream
    kvs_stream.start()
    client.publish(topic=iot_topic, payload="Stream started")
    time.sleep(wait_time)
    # Stop putting data into the KVS stream
    kvs_stream.stop()
    client.publish(topic=iot_topic, payload="Stream stopped")
Example #6
    def getEnvironment(self, profile=None):
        """Return environment variables that should be set for the profile."""
        eventHooks = HierarchicalEmitter()
        session = Session(event_hooks=eventHooks)

        if profile:
            session.set_config_variable('profile', profile)

        awscli_initialize(eventHooks)
        session.emit('session-initialized', session=session)
        creds = session.get_credentials()

        env = {}

        def set(key, value):
            if value:
                env[key] = value

        set('AWS_ACCESS_KEY_ID', creds.access_key)
        set('AWS_SECRET_ACCESS_KEY', creds.secret_key)

        # AWS_SESSION_TOKEN is ostensibly the standard:
        # http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs
        # http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-environment
        set('AWS_SESSION_TOKEN', creds.token)

        # ...but boto expects AWS_SECURITY_TOKEN. Set both for compatibility.
        # https://github.com/boto/boto/blob/b016c07d834df5bce75141c4b9d2f3d30352e1b8/boto/connection.py#L438
        set('AWS_SECURITY_TOKEN', creds.token)

        set('AWS_DEFAULT_REGION', session.get_config_variable('region'))

        return env
Example #7
def main():
    args = parse_args()
    session = Session(profile=args.profile)
    credentials = session.get_credentials()
    credentials_map = build_credentials_map(session, credentials)
    render_template(args.template, credentials_map)
    return 0
Example #8
    def __init__(self) -> None:
        current_session = Session()
        region = current_session.get_config_variable('region')
        creds = current_session.get_credentials()
        self.signer = SigV4Auth(creds, 'execute-api', region)

        analysis_api_fqdn = os.environ.get('ANALYSIS_API_FQDN')
        analysis_api_path = os.environ.get('ANALYSIS_API_PATH')
        self.url = 'https://' + analysis_api_fqdn + '/' + analysis_api_path
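
For context, a signer like self.signer above is typically applied to a botocore AWSRequest before the request is sent; a minimal sketch (the URL is a placeholder, not part of the original snippet):

    from botocore.auth import SigV4Auth
    from botocore.awsrequest import AWSRequest
    from botocore.session import Session

    session = Session()
    signer = SigV4Auth(session.get_credentials(), 'execute-api',
                       session.get_config_variable('region'))
    request = AWSRequest(method='GET',
                         url='https://abc123.execute-api.us-east-1.amazonaws.com/prod/health')
    signer.add_auth(request)  # injects the Authorization and X-Amz-Date headers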
Example #9
def run():
    "Entry proint"

    obj = BotocoreClientHandler(service='ecr')
    parser = obj._parse_args()
    print(parser.profile)
    session = Session(profile=parser.profile)
    print(dir(session))
    print(session.get_config_variable('region'))
    print(session.get_credentials(), session.profile)
Example #10
def get_creds():
    global access_key, secret_key, token
    session = Session()
    creds = session.get_credentials()

    if creds is None:
        logger.info("no credentials found: " + str(creds))
        return None

    access_key = creds.access_key
    secret_key = creds.secret_key
    token = creds.token
Example #11
def _public_apis():
    session = Session()

    # Mimic the scenario that user does not have aws credentials setup
    session.get_credentials = mock.Mock(return_value=None)

    for service_name in PUBLIC_API_TESTS:
        client = session.create_client(service_name, REGIONS[service_name])
        for operation_name in PUBLIC_API_TESTS[service_name]:
            kwargs = PUBLIC_API_TESTS[service_name][operation_name]
            method = getattr(client, xform_name(operation_name))
            yield client, method, kwargs
Example #12
    def test_honors_aws_shared_credentials_file_env_var(self):
        with temporary_file('w') as f:
            f.write('[default]\n'
                    'aws_access_key_id=custom1\n'
                    'aws_secret_access_key=custom2\n')
            f.flush()
            os.environ['AWS_SHARED_CREDENTIALS_FILE'] = f.name
            s = Session()
            credentials = s.get_credentials()

            self.assertEqual(credentials.access_key, 'custom1')
            self.assertEqual(credentials.secret_key, 'custom2')
Example #14
def test_public_apis_will_not_be_signed():
    session = Session()

    # Mimic the scenario that user does not have aws credentials setup
    session.get_credentials = mock.Mock(return_value=None)

    for service_name in PUBLIC_API_TESTS:
        client = session.create_client(service_name, REGIONS[service_name])
        for operation_name in PUBLIC_API_TESTS[service_name]:
            kwargs = PUBLIC_API_TESTS[service_name][operation_name]
            method = getattr(client, xform_name(operation_name))
            yield _test_public_apis_will_not_be_signed, client, method, kwargs
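
Patching get_credentials with a Mock is one way to simulate a machine with no AWS configuration; if the goal is simply an unsigned client rather than a test fixture, botocore's UNSIGNED sentinel does the same job explicitly. A minimal sketch:

    from botocore import UNSIGNED
    from botocore.config import Config
    from botocore.session import Session

    session = Session()
    # No credential lookup happens because request signing is disabled.
    client = session.create_client('sts', region_name='us-east-1',
                                   config=Config(signature_version=UNSIGNED))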
Example #15
def assume_role(session: Session,
                role_arn: str,
                duration: int = 3600,
                session_name: str = None) -> Session:
    # noinspection PyTypeChecker
    fetcher = AssumeRoleCredentialFetcher(session.create_client,
                                          session.get_credentials(),
                                          role_arn,
                                          extra_args={
                                              'DurationSeconds': duration,
                                              'RoleSessionName': session_name
                                          })
    role_session = Session()
    role_session.register_component(
        'credential_provider',
        CredentialResolver([AssumeRoleProvider(fetcher)]))
    return role_session
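
A possible call site for this helper; the profile name and role ARN are hypothetical placeholders:

    base_session = Session(profile='dev')
    role_session = assume_role(base_session,
                               'arn:aws:iam::123456789012:role/ReadOnly',
                               session_name='audit')
    s3 = role_session.create_client('s3', region_name='us-east-1')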
Example #16
def get_session_credentials(profile_name):
    profile_session = Session(profile=profile_name)
    profile_session_credentials = profile_session.get_credentials()
    if isinstance(profile_session_credentials, RefreshableCredentials):
        # populate deferred credentials
        profile_session_credentials.get_frozen_credentials()
        return SessionCredentials(
            access_key=profile_session_credentials.access_key,
            secret_key=profile_session_credentials.secret_key,
            token=profile_session_credentials.token,
            expiry_time=profile_session_credentials._expiry_time.astimezone(),
        )
    else:
        session_credentials = get_session_token(profile_session)["Credentials"]
        return SessionCredentials(
            access_key=session_credentials["AccessKeyId"],
            secret_key=session_credentials["SecretAccessKey"],
            token=session_credentials["SessionToken"],
            expiry_time=session_credentials["Expiration"].astimezone(),
        )
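Example #17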
    def get_spark_context(self, env_context):
        if env_context == 'local':
            session = Session()
            credentials = session.get_credentials()
            current_credentials = credentials.get_frozen_credentials()
            config = (SparkConf().setMaster(env_context).setAppName("Myapp"))
        else:
            config = (SparkConf().setAppName("Myapp"))

        sc = SparkContext(conf=config)

        if env_context == 'local':
            sc._jsc.hadoopConfiguration().set("fs.s3a.access.key",
                                              current_credentials.access_key)
            sc._jsc.hadoopConfiguration().set("fs.s3a.secret.key",
                                              current_credentials.secret_key)
        else:
            pass
        sql_context = SQLContext(sc)
        sc.setLogLevel("ERROR")
        return sc, sql_context
Example #18
def setup_aws_client(config):
    role_arn = "arn:aws:iam::{}:role/{}".format(
        config['account_id'].replace('-', ''), config['role_name'])
    session = Session()
    fetcher = AssumeRoleCredentialFetcher(
        session.create_client,
        session.get_credentials(),
        role_arn,
        extra_args={
            'DurationSeconds': 3600,
            'RoleSessionName': 'TapS3CSV',
            'ExternalId': config['external_id'],
        },
        cache=JSONFileCache())

    refreshable_session = Session()
    refreshable_session.register_component(
        'credential_provider',
        CredentialResolver([AssumeRoleProvider(fetcher)]))

    LOGGER.info("Attempting to assume_role on RoleArn: %s", role_arn)
    boto3.setup_default_session(botocore_session=refreshable_session)
Example #19
    def __init__(
        self,
        *,
        max_concurrent_requests: int = _DEFAULT_MAX_CONCURRENT_REQUESTS,
        max_attempts: int = _DEFAULT_MAX_ATTEMPTS,
        timeout: aiohttp.ClientTimeout = _DEFAULT_TIMEOUT,
        session: aiohttp.ClientSession = None,
    ):
        self._max_concurrent_requests = max_concurrent_requests
        self._max_attempts = max_attempts

        # Fetch the credentials and default region from botocore's session.
        # This will automatically find configuration in the user's .aws folder,
        # or in instance metadata.
        boto_session = Session()
        self._credentials = boto_session.get_credentials()
        self._region = boto_session.get_config_variable("region")

        if session is None:
            self._session = aiohttp.ClientSession(raise_for_status=True, timeout=timeout)
        else:
            self._session = session
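Example #20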
    def _get_boto3_session(region: str,
                           role_arn: str = None,
                           assume_duration: int = 3600) -> Session:
        """Creates a boto3 session, optionally assuming a role.

        Args:
            region: The AWS region for the session.
            role_arn: The ARN to assume for the session.
            assume_duration: The duration (in seconds) to assume the role.

        Returns:
            object: A boto3 Session.
        """

        # By default return a basic session
        if not role_arn:
            return Session(region_name=region)

        # The following assume role example was taken from
        # https://github.com/boto/botocore/issues/761#issuecomment-426037853

        # Create a session used to assume role
        assume_session = BotocoreSession()
        fetcher = AssumeRoleCredentialFetcher(
            assume_session.create_client,
            assume_session.get_credentials(),
            role_arn,
            extra_args={
                "DurationSeconds": assume_duration,
            },
            cache=JSONFileCache(),
        )
        role_session = BotocoreSession()
        role_session.register_component(
            "credential_provider",
            CredentialResolver([Boto3Manager.AssumeRoleProvider(fetcher)]),
        )
        return Session(region_name=region, botocore_session=role_session)
Example #21
def setup_aws_client(config):
    role_arn = "arn:aws:iam::{}:role/{}".format(
        config["account_id"].replace("-", ""), config["role_name"])
    session = Session()
    fetcher = AssumeRoleCredentialFetcher(
        session.create_client,
        session.get_credentials(),
        role_arn,
        extra_args={
            "DurationSeconds": 3600,
            "RoleSessionName": "TapS3CSV",
            "ExternalId": config["external_id"],
        },
        cache=JSONFileCache(),
    )

    refreshable_session = Session()
    refreshable_session.register_component(
        "credential_provider",
        CredentialResolver([AssumeRoleProvider(fetcher)]))

    LOGGER.info("Attempting to assume_role on RoleArn: %s", role_arn)
    boto3.setup_default_session(botocore_session=refreshable_session)
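
After setup_aws_client runs, ordinary boto3 clients resolve the refreshable assumed-role credentials through the default session; a usage sketch with placeholder config values:

    setup_aws_client({
        "account_id": "123456789012",
        "role_name": "TapS3CSVRole",
        "external_id": "my-external-id",
    })
    s3_client = boto3.client("s3")  # signed with the assumed-role credentials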
Example #22
def setup_aws_client(config):
    if 'role_name' in config:
        role_arn = "arn:aws:iam::{}:role/{}".format(
            config['account_id'].replace('-', ''), config['role_name'])

        session = Session()
        fetcher = AssumeRoleCredentialFetcher(
            session.create_client,
            session.get_credentials(),
            role_arn,
            extra_args={
                'DurationSeconds': 3600,
                'RoleSessionName': 'TapDynamodDB',
                'ExternalId': config['external_id'],
            },
            cache=JSONFileCache())

        refreshable_session = Session()
        refreshable_session.register_component(
            'credential_provider',
            CredentialResolver([AssumeRoleProvider(fetcher)]))

        LOGGER.info("Attempting to assume_role on RoleArn: %s", role_arn)
        boto3.setup_default_session(botocore_session=refreshable_session)

    elif 'aws_access_key_id' in config and 'aws_secret_access_key' in config:
        LOGGER.info(
            "Attempting to pass AWS credentials from 'aws_access_key_id' and 'aws_secret_access_key' config values"
        )
        boto3.setup_default_session(
            aws_access_key_id=config['aws_access_key_id'],
            aws_secret_access_key=config['aws_secret_access_key'],
            aws_session_token=config.get('aws_session_token', None))
        session = Session()
Example #23
    def getEnvironment(self, profile=None):
        """Return environment variables that should be set for the profile."""
        eventHooks = HierarchicalEmitter()
        session = Session(event_hooks=eventHooks)

        if profile:
            session.set_config_variable('profile', profile)

        eventHooks.register('session-initialized',
                            inject_assume_role_provider_cache,
                            unique_id='inject_assume_role_cred_provider_cache')

        session.emit('session-initialized', session=session)
        creds = session.get_credentials()

        env = {}

        def set(key, value):
            if value:
                env[key] = value

        set('AWS_ACCESS_KEY_ID', creds.access_key)
        set('AWS_SECRET_ACCESS_KEY', creds.secret_key)

        # AWS_SESSION_TOKEN is ostensibly the standard:
        # http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs
        # http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-environment
        set('AWS_SESSION_TOKEN', creds.token)

        # ...but boto expects AWS_SECURITY_TOKEN. Set both for compatibility.
        # https://github.com/boto/boto/blob/b016c07d834df5bce75141c4b9d2f3d30352e1b8/boto/connection.py#L438
        set('AWS_SECURITY_TOKEN', creds.token)

        set('AWS_DEFAULT_REGION', session.get_config_variable('region'))

        return env
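Example #24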
def copy_to_s3(file):
    session = Session()
    _ = session.get_credentials()
    s3.meta.client.upload_file(os.path.join(sync_folder_path, file), bucket,
                               os.path.join('portal/images', file))
    return
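Example #25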
    def __init__(self) -> None:
        botocore_session = Session()
        self.credentials = botocore_session.get_credentials()
        self.region_name = botocore_session.get_config_variable('region')
Example #26
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)

    parser.add_argument('--profile', help='The AWS config profile to use')

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--json', action='store_const', const='json', dest='format', help="Print credential_process-compatible JSON to stdout (default)")
    group.add_argument('--env', action='store_const', const='env', dest='format', help="Print as env vars")
    group.add_argument('--env-export', action='store_const', const='env-export', dest='format', help="Print as env vars prefixed by 'export ' for shell sourcing")
    group.add_argument('--exec', nargs=argparse.REMAINDER, help="Exec remaining input w/ creds injected as env vars")
    group.add_argument('--credentials-file-profile', '-c', metavar='PROFILE_NAME', help="Write to a profile in AWS credentials file")

    parser.add_argument('--pretty', action='store_true', help='For --json, pretty-print')

    parser.add_argument('--version', action='store_true')
    parser.add_argument('--debug', action='store_true')

    cache_group = parser.add_argument_group('Caching')
    cache_group.add_argument('--cache-file')
    buffer_type = lambda v: timedelta(minutes=int(v))
    buffer_default = timedelta(minutes=10)
    cache_group.add_argument('--cache-expiration-buffer', type=buffer_type, default=buffer_default, metavar='MINUTES', help='Expiration buffer in minutes, defaults to 10 minutes')
    cache_group.add_argument('--refresh', action='store_true', help='Refresh the cache')

    args = parser.parse_args()

    if args.version:
        print(__version__)
        parser.exit()

    if not any([args.format, args.exec, args.credentials_file_profile]):
        args.format = 'json'
        args.pretty = True

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)

    for key in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN']:
        os.environ.pop(key, None)

    # if args.profile:
    #     for key in ['AWS_PROFILE', 'AWS_DEFAULT_PROFILE']:
    #         os.environ.pop(key, None)

    credentials = None

    if args.cache_file and not args.refresh:
        credentials = load_cache(args.cache_file, args.cache_expiration_buffer)

    if credentials and args.credentials_file_profile:
        session = Session(profile=args.profile)
    elif not credentials:
        try:
            session = Session(profile=args.profile)
            session_credentials = session.get_credentials()
            if not session_credentials:
                print('Unable to locate credentials.', file=sys.stderr)
                sys.exit(2)
            read_only_credentials = session_credentials.get_frozen_credentials()
            expiration = None
            if hasattr(session_credentials, '_expiry_time') and session_credentials._expiry_time:
                if isinstance(session_credentials._expiry_time, datetime):
                    expiration = session_credentials._expiry_time
                else:
                    LOGGER.debug("Expiration in session credentials is of type {}, not datetime".format(type(expiration)))
            credentials = convert_creds(read_only_credentials, expiration)

            if args.cache_file:
                save_cache(args.cache_file, credentials)
        except Exception as e:
            if args.debug:
                traceback.print_exc()
            print(str(e), file=sys.stderr)
            sys.exit(3)

    if args.exec:
        os.environ.update({
            'AWS_ACCESS_KEY_ID': credentials.AccessKeyId,
            'AWS_SECRET_ACCESS_KEY': credentials.SecretAccessKey,
        })
        if credentials.SessionToken:
            os.environ['AWS_SESSION_TOKEN'] = credentials.SessionToken
        if credentials.Expiration:
            os.environ['AWS_CREDENTIALS_EXPIRATION'] = credentials.Expiration.strftime(TIME_FORMAT)
        command = ' '.join(shlex.quote(arg) for arg in args.exec)
        os.system(command)
    elif args.format == 'json':
        data = {
            'Version': 1,
            'AccessKeyId': credentials.AccessKeyId,
            'SecretAccessKey': credentials.SecretAccessKey,
        }
        if credentials.SessionToken:
            data['SessionToken'] = credentials.SessionToken
        if credentials.Expiration:
            data['Expiration'] = credentials.Expiration.strftime(TIME_FORMAT)

        if args.pretty:
            json_kwargs = {'indent': 2}
        else:
            json_kwargs = {'separators': (',', ':')}

        print(json.dumps(data, **json_kwargs))
    elif args.format in ['env', 'env-export']:
        if args.format == 'env-export':
            prefix = 'export '
        else:
            prefix = ''
        lines = [
            '{}AWS_ACCESS_KEY_ID={}'.format(prefix, credentials.AccessKeyId),
            '{}AWS_SECRET_ACCESS_KEY={}'.format(prefix, credentials.SecretAccessKey),
        ]
        if credentials.SessionToken:
            lines.append('{}AWS_SESSION_TOKEN={}'.format(prefix, credentials.SessionToken))
        if credentials.Expiration:
            lines.append('{}AWS_CREDENTIALS_EXPIRATION={}'.format(prefix, credentials.Expiration.strftime(TIME_FORMAT)))
        print('\n'.join(lines))
    elif args.credentials_file_profile:
        values = {
            'aws_access_key_id': credentials.AccessKeyId,
            'aws_secret_access_key': credentials.SecretAccessKey,
        }
        if credentials.SessionToken:
            values['aws_session_token'] = credentials.SessionToken
        if credentials.Expiration:
            values['aws_credentials_expiration'] = credentials.Expiration.strftime(TIME_FORMAT)

        write_values(session, args.credentials_file_profile, values)
    else:
        print("ERROR: no option set (this should never happen)", file=sys.stderr)
        sys.exit(1)
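Example #27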
def main():

    parser = argparse.ArgumentParser(description=DESCRIPTION)

    parser.add_argument('--profile', help='The AWS config profile to use')

    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        '--json',
        action='store_const',
        const='json',
        dest='format',
        help="Print credential_process-compatible JSON to stdout (default)")
    group.add_argument('--env',
                       action='store_const',
                       const='env',
                       dest='format',
                       help="Print as env vars")
    group.add_argument(
        '--env-export',
        action='store_const',
        const='env-export',
        dest='format',
        help="Print as env vars prefixed by 'export ' for shell sourcing")
    group.add_argument(
        '--exec',
        nargs=argparse.REMAINDER,
        help="Exec remaining input w/ creds injected as env vars")
    group.add_argument('--credentials-file-profile',
                       '-c',
                       metavar='PROFILE_NAME',
                       help="Write to a profile in AWS credentials file")

    parser.add_argument('--pretty',
                        action='store_true',
                        help='For --json, pretty-print')

    parser.add_argument('--version', action='store_true')

    args = parser.parse_args()

    if args.version:
        print(__version__)
        parser.exit()

    if not any([args.format, args.exec, args.credentials_file_profile]):
        args.format = 'json'
        args.pretty = True

    for key in [
            'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN'
    ]:
        os.environ.pop(key, None)

    session = Session(profile=args.profile)

    credentials = session.get_credentials().get_frozen_credentials()

    if args.exec:
        os.environ.update({
            'AWS_ACCESS_KEY_ID': credentials.access_key,
            'AWS_SECRET_ACCESS_KEY': credentials.secret_key,
        })
        if credentials.token:
            os.environ['AWS_SESSION_TOKEN'] = credentials.token
        command = ' '.join(shlex.quote(arg) for arg in args.exec)
        os.system(command)
    elif args.format == 'json':
        data = {
            'Version': 1,
            'AccessKeyId': credentials.access_key,
            'SecretAccessKey': credentials.secret_key,
        }
        if credentials.token:
            data['SessionToken'] = credentials.token

        if args.pretty:
            json_kwargs = {'indent': 2}
        else:
            json_kwargs = {'separators': (',', ':')}

        print(json.dumps(data, **json_kwargs))
    elif args.format in ['env', 'env-export']:
        if args.format == 'env-export':
            prefix = 'export '
        else:
            prefix = ''
        lines = [
            f'{prefix}AWS_ACCESS_KEY_ID={credentials.access_key}',
            f'{prefix}AWS_SECRET_ACCESS_KEY={credentials.secret_key}',
        ]
        if credentials.token:
            lines.append(f'{prefix}AWS_SESSION_TOKEN={credentials.token}')
        print('\n'.join(lines))
    elif args.credentials_file_profile:
        values = {
            'aws_access_key_id': credentials.access_key,
            'aws_secret_access_key': credentials.secret_key,
        }
        if credentials.token:
            values['aws_session_token'] = credentials.token

        write_values(session, args.credentials_file_profile, values)
    else:
        print("ERROR: no option set (this should never happen)",
              file=sys.stderr)
        sys.exit(1)
Example #28
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This face detection model is implemented as a single shot detector (ssd).
        model_type = 'ssd'
        output_map = {1: 'face'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(
            os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading face detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Face detection model loaded')
        # Set the threshold for detection
        detection_threshold = 0.25
        # The height and width of the training set images
        input_height = 300
        input_width = 300

        #code edited by Ali Rizvi
        # Use the boto session API to grab credentials
        session = Session()
        creds = session.get_credentials()
        # Stream name and retention
        stream_name = 'rekognition'
        retention = 2  #hours
        region = "us-east-1"
        # Create producer and stream.
        producer = dkv.createProducer(creds.access_key, creds.secret_key,
                                      creds.token, region)
        client.publish(topic=iot_topic, payload="Producer created")
        kvs_stream = producer.createStream(stream_name, retention)
        client.publish(topic=iot_topic,
                       payload="Stream {} created".format(stream_name))
        # Create variable to track whether or not we are streaming to KVS
        streaming = False
        # Amount of time to stream - in seconds
        wait_time = 90  #seconds

        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is an ssd model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize))
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Track whether a face was seen in this frame; without this default,
            # the `if face_present` check below raises a NameError on frames
            # with no detections.
            face_present = False
            # Get the detected faces and probabilities
            for obj in parsed_inference_results[model_type]:
                if obj['prob'] > detection_threshold:
                    #edited next 2 lines:
                    #Create variable that represents a face is detected
                    face_present = True
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin']) \
                           + int((obj['xmin'] - input_width/2) + input_width/2)
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width/2) + input_width/2)
                    ymax = int(yscale * obj['ymax'])
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (255, 165, 20), 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness
                    cv2.putText(frame, '{:.2f}%'.format(obj['prob'] * 100),
                                (xmin, ymin - text_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20),
                                6)
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']
            #Place data into KVS stream if face is detected by the model
            if face_present and not streaming:
                client.publish(topic=iot_topic,
                               payload=json.dumps(cloud_output))
                client.publish(topic=iot_topic,
                               payload='Start streaming video at {}'.format(
                                   time.strftime("%Y-%m-%d %H:%M:%S")))
                kvs_stream.start()
                #Streaming 90 seconds of Video
                time.sleep(wait_time)
                streaming = True
            elif streaming:
                kvs_stream.stop()
                client.publish(topic=iot_topic,
                               payload='Stop streaming video at {}'.format(
                                   time.strftime("%Y-%m-%d %H:%M:%S")))
                streaming = False

            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        client.publish(topic=iot_topic,
                       payload='Error in face detection lambda: {}'.format(ex))
Example #29
def greengrass_infinite_infer_run():
    try:
        modelPath = "/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml"
        modelType = "ssd"
        input_width = 300
        input_height = 300
        prob_thresh = 0.15
        results_thread = FIFO_Thread()
        results_thread.start()
        client.publish(topic=iotTopic, payload="Face detection starts now")
        mcfg = {"GPU": 1}
        model = awscam.Model(modelPath, mcfg)
        client.publish(topic=iotTopic, payload="Model loaded")
        ret, frame = awscam.getLastFrame()
        if not ret:
            raise Exception("Failed to get frame from the stream")

        yscale = float(frame.shape[0]) / float(input_height)
        xscale = float(frame.shape[1]) / float(input_width)
        session = Session()  
        creds = session.get_credentials()  
        stream_name = thingName
        retention = 2
        region = "us-east-1"   
        producer = dkv.createProducer(creds.access_key, creds.secret_key, creds.token, region)  
        client.publish(topic=iotTopic, payload="Producer created")  
        my_stream = producer.createStream(stream_name, retention)  
        client.publish(topic=iotTopic, payload="Stream {} created".format(stream_name))  
        stopStreamingTime = datetime.datetime.now() - datetime.timedelta(minutes = 1)
        doInfer = True
        while doInfer:
            if stopStreamingTime <= datetime.datetime.now():
                my_stream.stop()
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception("Failed to get frame from the stream")
            frameResize = cv2.resize(frame, (input_width, input_height))
            inferOutput = model.doInference(frameResize)
            parsed_results = model.parseResult(modelType, inferOutput)['ssd']
            label = '{'
            for i, obj in enumerate(parsed_results):
                if obj['prob'] < prob_thresh:
                    break
                xmin = int(xscale * obj['xmin']) + int((obj['xmin'] - input_width/2) + input_width/2)
                ymin = int(yscale * obj['ymin'])
                xmax = int(xscale * obj['xmax']) + int((obj['xmax'] - input_width/2) + input_width/2)
                ymax = int(yscale * obj['ymax'])
                if stopStreamingTime <= datetime.datetime.now():
                    my_stream.start()
                client.publish(topic=iotTopic, payload="About to save face to S3")
                crop_img = frame[ymin:ymax, xmin:xmax]
                push_to_s3(crop_img, i)
                frameKey = iso_format(datetime.datetime.utcnow())
                index_faces(thingName, frameKey)
                stopStreamingTime = datetime.datetime.now() + datetime.timedelta(seconds = 10)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
                label += '"{}": {:.2f},'.format(str(obj['label']), obj['prob'] )
                label_show = '{}: {:.2f}'.format(str(obj['label']), obj['prob'] )
                cv2.putText(frame, label_show, (xmin, ymin-15),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 20), 4)
            label += '"null": 0.0'
            label += '}'  
            global jpeg
            ret,jpeg = cv2.imencode('.jpg', frame)
    except Exception as e:
        msg = "Test failed: " + str(e)
        client.publish(topic=iotTopic, payload=msg)
        
    Timer(15, greengrass_infinite_infer_run).start()
Example #30
def infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This object detection model is implemented as a single shot detector
        # (ssd); since the number of labels is small, we create a dictionary that
        # will help us convert the machine labels to human readable labels.
        model_type = 'ssd'
        output_map = {
            1: 'aeroplane',
            2: 'bicycle',
            3: 'bird',
            4: 'boat',
            5: 'bottle',
            6: 'bus',
            7: 'car',
            8: 'cat',
            9: 'chair',
            10: 'cow',
            11: 'dining table',
            12: 'dog',
            13: 'horse',
            14: 'motorbike',
            15: 'person',
            16: 'pottedplant',
            17: 'sheep',
            18: 'sofa',
            19: 'train',
            20: 'tvmonitor'
        }
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(
            os.environ['AWS_IOT_THING_NAME'])
        #iot_topic = '$aws/things/{}/infer'.format('DeepLens_MQ')
        #iot_metadata_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_resnet50_300_FP16_FUSED.xml'
        # Load the model onto the GPU.
        #client.publish(topic=iot_topic, payload='Loading object detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        #client.publish(topic=iot_topic, payload='Object detection model loaded')
        # Set the threshold for detection
        detection_threshold = 0.40
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Do inference until the lambda is killed.
        bucket_name = 'vishbucket2'
        images_path = "/tmp/"
        stream_name = 'threatStream'
        retention = 24  #hours
        image_count = 0
        s3 = boto3.client('s3')

        session = Session()
        creds = session.get_credentials()
        producer = dkv.createProducer(creds.access_key, creds.secret_key,
                                      creds.token, "us-east-1")
        #client.publish(topic=iot_topic, payload="Kinesis Producer created")
        kvs_stream = producer.createStream(stream_name, retention)
        #client.publish(topic=iot_topic, payload="Stream {} created".format(stream_name))

        # Start putting data into the KVS stream
        kvs_stream.start()
        #client.publish(topic=iot_topic, payload="Stream started")

        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is an ssd model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize))
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}

            # Get the detected objects and probabilities
            for obj in parsed_inference_results[model_type]:
                if obj['prob'] > detection_threshold:
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin'])
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax'])
                    ymax = int(yscale * obj['ymax'])
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (255, 165, 20), 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness
                    cv2.putText(
                        frame, "{}: {:.2f}%".format(output_map[obj['label']],
                                                    obj['prob'] * 100),
                        (xmin, ymin - text_offset), cv2.FONT_HERSHEY_SIMPLEX,
                        2.5, (255, 165, 20), 6)
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']
                    if obj['label'] == 5:
                        #client.publish(topic=iot_topic, payload='Bottle found in the image')
                        image_name = 'image' + str(uuid.uuid4()) + '.jpg'
                        image_full_name = images_path + image_name
                        cv2.imwrite(image_full_name, frame)
                        s3.upload_file(image_full_name,
                                       bucket_name,
                                       image_name,
                                       ExtraArgs={'ACL': 'public-read'})
                        data = {}
                        s3_path = 'https://vishbucket2.s3.amazonaws.com/' + image_name
                        data['Location'] = 'MLK Library Main entrance, San Jose, CA'
                        data['Image'] = s3_path
                        data['Date'] = datetime.today().strftime(
                            '%Y-%m-%d-%H:%M:%S')
                        data['Device'] = 'Camera001'
                        data['Certainty'] = obj['prob']
                        data['isThreat'] = True

                        json_data = json.dumps(data)
                        client.publish(topic=iot_topic, payload=json_data)
                        image_count += 1

            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud
            #client.publish(topic=iot_topic, payload=json.dumps(cloud_output))

    except Exception as ex:
        client.publish(
            topic=iot_topic,
            payload='Error in object detection lambda: {}'.format(ex))
Example #31
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This face detection model is implemented as a single shot detector (ssd).
        model_type = 'ssd'
        output_map = {1: 'face'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        iot_topic = '$aws/things/{}/infer'.format(
            os.environ['AWS_IOT_THING_NAME'])
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_path = '/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml'
        # Load the model onto the GPU.
        client.publish(topic=iot_topic, payload='Loading face detection model')
        model = awscam.Model(model_path, {'GPU': 1})
        client.publish(topic=iot_topic, payload='Face detection model loaded')
        # Set the threshold for detection
        detection_threshold = 0.25
        # The height and width of the training set images
        input_height = 300
        input_width = 300

        # Grab credentials and create an S3 client; candidate frames are
        # uploaded to S3 whenever a face is detected.
        session = Session()
        creds = session.get_credentials()

        s3 = session.create_client('s3', region_name='us-east-1')
        # Do inference until the lambda is killed.
        while True:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Keep an unannotated copy of the frame for the S3 upload below.
            original_frame = frame.copy()
            # Resize frame to the same size as the training set.
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results using
            # the parser API, note it is possible to get the output of doInference
            # and do the parsing manually, but since it is an ssd model,
            # a simple API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize))
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0]) / float(input_height)
            xscale = float(frame.shape[1]) / float(input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            cloud_output = {}
            # Get the detected faces and probabilities
            face_present = False
            for obj in parsed_inference_results[model_type]:
                if obj['prob'] > detection_threshold:

                    # set face to present
                    face_present = True

                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin']) \
                           + int((obj['xmin'] - input_width/2) + input_width/2)
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width/2) + input_width/2)
                    ymax = int(yscale * obj['ymax'])
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax),
                                  (255, 165, 20), 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 15
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.putText method.
                    # Method signature: image, text, origin, font face, font scale, color,
                    # and thickness
                    cv2.putText(frame, '{:.2f}%'.format(obj['prob'] * 100),
                                (xmin, ymin - text_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 165, 20),
                                6)
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']

            # Upload a candidate frame to S3 if at least one face is detected by the model
            if face_present:
                candidate = local_display.frame2jpeg(original_frame)
                key_name = str(time.time()).replace('.', '') + '.jpg'
                s3.put_object(Bucket='deepenforcement-candidates',
                              Body=candidate,
                              Key=key_name)
                client.publish(topic=iot_topic,
                               payload='uploading candidate file to s3')

            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            # Send results to the cloud
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))

    except Exception as ex:
        client.publish(topic=iot_topic,
                       payload='Error in face detection lambda: {}'.format(ex))
Example #32
import boto3
import greengrasssdk
from botocore.session import Session

import sys
import logging

# Setup logging to stdout
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)

client = greengrasssdk.client('iot-data')

logger.info('Hello from pinned lambda. Outside of handler.')

# Get creds from TES
# Note: must make sure that creds are not available within local folder
# Can get cred info from /greengrass/var/log/system/tes.log
session = Session()
creds = session.get_credentials()
formatted_creds = """
Access Key: {}\n
Secret Key: {}\n
Session Token: {}\n""".format(creds.access_key, creds.secret_key, creds.token)

logger.info(formatted_creds)


def lambda_handler(event, context):
    logger.debug("Hello from pinned lambda. Inside handler.")
    return