Example #1
class BaseLogEventsGeneratorTest(unittest.TestCase):
    def setUp(self):
        self.session = Session()
        self.client = self.session.create_client(
            'logs', region_name='us-west-2')
        self.stubber = Stubber(self.client)
        self.group_name = 'groupName'
        self.start = '1970-01-01T00:00:01.000000'
        self.expected_start_as_milli_epoch = 1000
        self.filter_pattern = 'mypattern'
        self.log_timestamp = 1000
        self.expected_log_timestamp_as_datetime = datetime(
            1970, 1, 1, 0, 0, 1, tzinfo=tz.tzutc())

    def get_event(self, event_id, event_message, timestamp=None):
        if timestamp is None:
            timestamp = self.log_timestamp
        return {
            'eventId': event_id,
            'message': event_message,
            'timestamp': timestamp,
            'ingestionTime': self.log_timestamp,
        }

    def get_expected_event(self, event_id, event_message, add_seconds=0):
        return {
            'eventId': event_id,
            'message': event_message,
            'timestamp': self.expected_log_timestamp_as_datetime + timedelta(seconds=add_seconds),
            'ingestionTime': self.expected_log_timestamp_as_datetime,
        }
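A minimal, self-contained sketch (not taken from the original test suite) of driving a stubbed CloudWatch Logs client like the one built in setUp above; the log group name and event values are illustrative placeholders.

from botocore.session import Session
from botocore.stub import Stubber

session = Session()
client = session.create_client(
    'logs', region_name='us-west-2',
    aws_access_key_id='foo', aws_secret_access_key='bar')
stubber = Stubber(client)
# Queue a canned filter_log_events response and the exact params we expect.
stubber.add_response(
    'filter_log_events',
    {'events': [{'eventId': '1', 'message': 'hello',
                 'timestamp': 1000, 'ingestionTime': 1000}]},
    {'logGroupName': 'groupName'})
with stubber:
    events = client.filter_log_events(logGroupName='groupName')['events']
    assert events[0]['message'] == 'hello'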
Example #2
def write_image_to_s3(img, output, time, file_name, devices, resized_img):
    # Create an IoT client for sending messages to the cloud.
    client = greengrasssdk.client('iot-data')
    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
    session = Session()
    s3 = session.create_client('s3')
    device = devices[os.environ['AWS_IOT_THING_NAME']]
    record = 'json/record_' + device + '_' + time + '.json'
    # path to a resized image
    resized_image = 'frames_resized/resized_' + device + '_' + time + '.jpg'
    # latest record uploaded to its own directory
    latest = 'latest/latest.json'
    # You can control the size and quality of the image
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    _, jpg_data = cv2.imencode('.jpg', img, encode_param)
    _, resized_data = cv2.imencode('.jpg', resized_img, encode_param)
    response = s3.put_object(Body=jpg_data.tostring(),
                             Bucket='YOUR-BUCKET-NAME',
                             Key=file_name)
    response2 = s3.put_object(Body=json.dumps(output),
                              Bucket='YOUR-BUCKET-NAME',
                              Key=record)
    response3 = s3.put_object(Body=json.dumps(output),
                              Bucket='YOUR-BUCKET-NAME',
                              Key=latest)
    response4 = s3.put_object(Body=resized_data.tostring(),
                              Bucket='YOUR-BUCKET-NAME',
                              Key=resized_image)

    #client.publish(topic=iot_topic, payload="Response: {}".format(response))
    client.publish(topic=iot_topic, payload="Response: {}".format(response2))
    client.publish(topic=iot_topic, payload="Data pushed to S3")

    image_url = 'https://s3.amazonaws.com/YOUR-BUCKET-NAME/' + file_name
    return image_url
Example #3
def call_lambda(function, data):
    s = Session()
    clientLambda = s.create_client("lambda",
                                   config=Config(retries={'max_attempts': 0}))
    clientLambda.invoke(FunctionName=function,
                        InvocationType="Event",
                        Payload=json.dumps(data))
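A hedged usage sketch for call_lambda above; the target function name and payload are placeholders, and the imports listed are the ones the helper relies on.

import json
from botocore.session import Session
from botocore.config import Config

# "Event" is an asynchronous invocation: Lambda queues the request and returns
# immediately, so there is no payload to read back.
call_lambda('my-target-function', {'orderId': 42})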
Example #4
def test_old_model_continues_to_work():
    # This test ensures that botocore can load the service models as they exist
    # today.  There's a directory in tests/functional/models that is a
    # snapshot of a service model.  This test ensures that we can continue
    # to stub an API call using this model.  That way if the models ever
    # change we have a mechanism to ensure that the existing models continue
    # to work with botocore.  The test should not change (with the exception
    # of potential changes to the ClientHTTPStubber), and the files in
    # tests/functional/models should not change!
    session = Session()
    loader = session.get_component('data_loader')
    # We're adding our path to the existing search paths so we don't have to
    # copy additional data files such as _retry.json to our TEST_MODELS_DIR.
    # We only care about the service model and endpoints file not changing.
    # This also prevents us from having to make any changes to this models dir
    # if we end up adding a new data file that's needed to create clients.
    # We're adding our TEST_MODELS_DIR as the first element in the list to
    # ensure we load the endpoints.json file from TEST_MODELS_DIR.  For the
    # service model we have an extra safety net where we can choose a custom
    # client name.
    loader.search_paths.insert(0, TEST_MODELS_DIR)

    # The model dir we copied was renamed to 'custom-acm'
    # to ensure we're loading our version of the model and not
    # the built in one.
    client = session.create_client(
        'custom-acm',
        region_name='us-west-2',
        aws_access_key_id='foo',
        aws_secret_access_key='bar',
    )
    with ClientHTTPStubber(client) as stubber:
        stubber.add_response(url='https://acm.us-west-2.amazonaws.com/',
                             headers={
                                 'x-amzn-RequestId': 'abcd',
                                 'Date': 'Fri, 26 Oct 2018 01:46:30 GMT',
                                 'Content-Length': '29',
                                 'Content-Type': 'application/x-amz-json-1.1'
                             },
                             body=b'{"CertificateSummaryList":[]}')
        response = client.list_certificates()
        assert_equal(
            response, {
                'CertificateSummaryList': [],
                'ResponseMetadata': {
                    'HTTPHeaders': {
                        'content-length': '29',
                        'content-type': 'application/x-amz-json-1.1',
                        'date': 'Fri, 26 Oct 2018 01:46:30 GMT',
                        'x-amzn-requestid': 'abcd'
                    },
                    'HTTPStatusCode': 200,
                    'RequestId': 'abcd',
                    'RetryAttempts': 0
                }
            })

    # Also verify we can use the paginators.
    assert_equal(client.can_paginate('list_certificates'), True)
    assert_equal(client.waiter_names, ['certificate_validated'])
Example #5
def call_lambda_sync(function, data):
    s = Session()
    clientLambda = s.create_client("lambda",
                                   config=Config(retries={'max_attempts': 0}))
    response = clientLambda.invoke(FunctionName=function,
                                   InvocationType="RequestResponse",
                                   Payload=json.dumps(data))
    body = json.loads(response['Payload'].read())
    return body
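A similarly hedged sketch for the synchronous variant; 'my-target-function' is a placeholder and the returned body depends entirely on what that Lambda sends back.

# "RequestResponse" blocks until the target function finishes, so the decoded
# JSON body is available to the caller.
result = call_lambda_sync('my-target-function', {'orderId': 42})
print(result)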
Example #6
def test_old_model_continues_to_work():
    # This test ensures that botocore can load the service models as they exist
    # today.  There's a directory in tests/functional/models that is a
    # snapshot of a service model.  This test ensures that we can continue
    # to stub an API call using this model.  That way if the models ever
    # change we have a mechanism to ensure that the existing models continue
    # to work with botocore.  The test should not change (with the exception
    # of potential changes to the ClientHTTPStubber), and the files in
    # tests/functional/models should not change!
    session = Session()
    loader = session.get_component('data_loader')
    # We're adding our path to the existing search paths so we don't have to
    # copy additional data files such as _retry.json to our FIXED_MODELS_DIR.
    # We only care about the service model and endpoints file not changing.
    # This also prevents us from having to make any changes to this models dir
    # if we end up adding a new data file that's needed to create clients.
    # We're adding our FIXED_MODELS_DIR as the first element in the list to
    # ensure we load the endpoints.json file from FIXED_MODELS_DIR.  For the
    # service model we have an extra safety net where we can choose a custom
    # client name.
    loader.search_paths.insert(0, FIXED_MODELS_DIR)

    # The model dir we copied was renamed to 'custom-acm'
    # to ensure we're loading our version of the model and not
    # the built in one.
    client = session.create_client(
        'custom-acm', region_name='us-west-2',
        aws_access_key_id='foo', aws_secret_access_key='bar',
    )
    with ClientHTTPStubber(client) as stubber:
        stubber.add_response(
            url='https://acm.us-west-2.amazonaws.com/',
            headers={'x-amzn-RequestId': 'abcd',
                     'Date': 'Fri, 26 Oct 2018 01:46:30 GMT',
                     'Content-Length': '29',
                     'Content-Type': 'application/x-amz-json-1.1'},
            body=b'{"CertificateSummaryList":[]}')
        response = client.list_certificates()
        assert_equal(
            response,
            {'CertificateSummaryList': [],
             'ResponseMetadata': {
                 'HTTPHeaders': {
                     'content-length': '29',
                     'content-type': 'application/x-amz-json-1.1',
                     'date': 'Fri, 26 Oct 2018 01:46:30 GMT',
                     'x-amzn-requestid': 'abcd'},
                 'HTTPStatusCode': 200,
                 'RequestId': 'abcd',
                 'RetryAttempts': 0}
             }
        )

    # Also verify we can use the paginators.
    assert_equal(client.can_paginate('list_certificates'), True)
    assert_equal(client.waiter_names, ['certificate_validated'])
Example #7
def moto_server():
    # Start moto server
    proc = Popen(["moto_server", "s3", "-p", "8082"], stderr=DEVNULL, stdout=DEVNULL)
    time.sleep(1)
    with mock_s3(), proc:
        # Make sure bucket exists
        session = Session()
        client = session.create_client("s3", endpoint_url=f"http://{s3_netloc}")
        yield lambda: s3_reset(client)
        proc.kill()
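The fixture above leans on a few module-level names (s3_netloc, s3_reset) that are not shown; a sketch of plausible definitions, with the port and the reset behaviour being assumptions.

import time
from subprocess import Popen, DEVNULL

from botocore.session import Session
from moto import mock_s3

s3_netloc = "127.0.0.1:8082"  # must match the port passed to moto_server


def s3_reset(client):
    # Drop every object and bucket so each test starts from a clean slate.
    for bucket in client.list_buckets().get("Buckets", []):
        name = bucket["Name"]
        for obj in client.list_objects_v2(Bucket=name).get("Contents", []):
            client.delete_object(Bucket=name, Key=obj["Key"])
        client.delete_bucket(Bucket=name)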
Example #8
    def __init__(self, collectionId):
        self.collectionId = collectionId
        self.faceId = None
        self.allThreadStarted = False
        self._finished = False
        self.threads = []
        self.resultsQ = Queue.Queue()
        config = Config(connect_timeout=1, read_timeout=2)
        session = Session()
        self.rekClient = session.create_client('rekognition', config=config)
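A hedged sketch of how a worker thread might use the rekClient configured above; search_faces is a real Rekognition operation, but the method name and thresholds here are assumptions.

    def _search_similar_faces(self, face_id):
        # Look up faces in the collection that match the given FaceId and push
        # the matches onto the results queue for the consumer thread.
        response = self.rekClient.search_faces(
            CollectionId=self.collectionId,
            FaceId=face_id,
            MaxFaces=10,
            FaceMatchThreshold=90.0)
        self.resultsQ.put(response.get('FaceMatches', []))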
Example #9
def s3(s3_base):
    from botocore.session import Session

    os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
    os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
    # NB: we use the sync botocore client for setup
    session = Session()
    client = session.create_client("s3", endpoint_url=endpoint_uri)
    client.create_bucket(Bucket=BUCKET_NAME)

    yield
Example #10
def write_image_to_s3(img):
    session = Session()
    s3 = session.create_client('s3')
    # File name matches file name in s3-image-processing
    file_name = 'DeepLens/face.jpg'
    # You can control the size and quality of the image
    encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]
    _, jpg_data = cv2.imencode('.jpg', img, encode_param)
    response = s3.put_object(ACL='public-read-write', Body=jpg_data.tostring(),Bucket='deeplens-fd-family',Key=file_name)
    image_url = 'https://s3.amazonaws.com/deeplens-fd-family/'+file_name
    return image_url
Example #11
def build_aws_import_job(db: Session, session: boto.Session,
                         confirm: ConfirmAcct,
                         external_id: Optional[int]) -> ImportJob:
    proxy = Proxy.build(session)
    sts = session.create_client('sts')
    identity = sts.get_caller_identity()
    provider = _get_or_create_provider(db, proxy, identity, confirm,
                                       external_id)
    desc = _build_import_job_desc(proxy, identity)
    org_id = desc['aws_org']['Id']
    return ImportJob.create(provider, desc, org_id)
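get_caller_identity takes no parameters and returns the account, ARN, and user id of the calling principal; a minimal standalone sketch, assuming credentials are already configured.

from botocore.session import Session

session = Session()
sts = session.create_client('sts', region_name='us-east-1')
identity = sts.get_caller_identity()
print(identity['Account'], identity['Arn'], identity['UserId'])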
Example #12
def _public_apis():
    session = Session()

    # Mimic the scenario that user does not have aws credentials setup
    session.get_credentials = mock.Mock(return_value=None)

    for service_name in PUBLIC_API_TESTS:
        client = session.create_client(service_name, REGIONS[service_name])
        for operation_name in PUBLIC_API_TESTS[service_name]:
            kwargs = PUBLIC_API_TESTS[service_name][operation_name]
            method = getattr(client, xform_name(operation_name))
            yield client, method, kwargs
Example #13
def test_public_apis_will_not_be_signed():
    session = Session()

    # Mimic the scenario that user does not have aws credentials setup
    session.get_credentials = mock.Mock(return_value=None)

    for service_name in PUBLIC_API_TESTS:
        client = session.create_client(service_name, REGIONS[service_name])
        for operation_name in PUBLIC_API_TESTS[service_name]:
            kwargs = PUBLIC_API_TESTS[service_name][operation_name]
            method = getattr(client, xform_name(operation_name))
            yield _test_public_apis_will_not_be_signed, client, method, kwargs
Example #14
def write_image_to_s3(img):
    print("writing image to s3")
    session = Session()
    s3 = session.create_client('s3')
    file_name = 'DeepLens/image-'+time.strftime("%Y%m%d-%H%M%S")+'.jpg'
    # You can control the size and quality of the image
    encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]
    _, jpg_data = cv2.imencode('.jpg', img, encode_param)
    response = s3.put_object(ACL='public-read', Body=jpg_data.tostring(),Bucket='rekog-face',Key=file_name)

    image_url = 'https://s3.amazonaws.com/<BUCKET_NAME>/'+file_name
    return image_url
Example #15
def write_image_to_s3(img):
    session = Session()
    s3 = session.create_client('s3')
    file_name = 'stranger.jpg'
    # You can control the size and quality of the image
    encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]
    _, jpg_data = cv2.imencode('.jpg', img, encode_param)

    mac = open('/sys/class/net/mlan0/address').readline() 
    response = s3.put_object(ACL='public-read', Body=mac,Bucket='graciafamilyphoto',Key='mac.txt')
    response = s3.put_object(ACL='public-read', Body=jpg_data.tostring(),Bucket='graciafamilyphoto',Key=file_name)
    
    image_url = 'https://s3-us-west-2.amazonaws.com/graciafamilyphoto/'+file_name
    return image_url
Example #16
class AWSSession(object):
    """
    AWS session wrapper.
    """
    def __init__(self, profile=None):
        self.profile = profile
        self.session = Session(profile=self.profile)

    @property
    def access_key_id(self):
        return None

    @property
    def secret_access_key(self):
        return None

    @property
    def region_name(self):
        return environ.get("AWS_REGION",
                           environ.get("AWS_DEFAULT_REGION", "us-west-2"))

    @property
    def session_token(self):
        return None

    def create_client(self,
                      service_name,
                      api_version=None,
                      use_ssl=True,
                      verify=None,
                      endpoint_url=None,
                      config=None):
        """
        Create a service from the wrapped session.

        Automatically populates the region name, access key, secret key, and session token.
        Allows other parameters to be passed.
        """
        return self.session.create_client(
            service_name=service_name,
            region_name=self.region_name,
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.secret_access_key,
            aws_session_token=self.session_token,
            api_version=api_version,
            use_ssl=use_ssl,
            verify=verify,
            endpoint_url=endpoint_url,
            config=config,
        )
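A hedged usage sketch for the AWSSession wrapper above; the profile name is a placeholder, and the region falls back to AWS_REGION / AWS_DEFAULT_REGION or us-west-2 exactly as coded.

aws = AWSSession(profile='dev')
s3 = aws.create_client('s3')
print(s3.list_buckets().get('Buckets', []))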
Example #17
def write_image_to_s3(img):
    session = Session()
    s3 = session.create_client('s3')
    file_name = 'DeepLens/image-' + time.strftime("%Y%m%d-%H%M%S") + '.jpg'
    # You can control the size and quality of the image
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 100]
    _, jpg_data = cv2.imencode('.jpg', img, encode_param)
    response = s3.put_object(
        ACL='public-read',
        Body=jpg_data.tostring(),
        Bucket='deeplens-sagemaker-f2ede581-36fb-41ce-b61c-881bb384ee61',
        Key=file_name)
    image_url = 'https://s3.amazonaws.com/deeplens-sagemaker-f2ede581-36fb-41ce-b61c-881bb384ee61/' + file_name
    return image_url
Example #18
def mock_s3_bucket(s3_base):
    bucket = "test_bucket"
    session = Session()
    client = session.create_client("s3", endpoint_url=endpoint_uri)
    client.create_bucket(Bucket=bucket, ACL="public-read")

    df = pd.DataFrame({"c1": [1, 2, 3], "c2": ["a", "b", "c"]})
    keys = [
        "data/for/you.csv",
        "data/for/me.csv",
    ]
    for key in keys:
        client.put_object(Bucket=bucket,
                          Body=df.to_csv(index=None).encode("utf-8"),
                          Key=key)
    yield bucket
Example #19
def s3(s3_base):
    from botocore.session import Session
    # NB: we use the sync botocore client for setup
    session = Session()
    client = session.create_client('s3', endpoint_url=endpoint_uri)
    client.create_bucket(Bucket=test_bucket_name, ACL='public-read')

    client.create_bucket(
        Bucket=versioned_bucket_name, ACL='public-read')
    client.put_bucket_versioning(
        Bucket=versioned_bucket_name,
        VersioningConfiguration={
            'Status': 'Enabled'
        }
    )

    # initialize secure bucket
    client.create_bucket(
        Bucket=secure_bucket_name, ACL='public-read')
    policy = json.dumps({
        "Version": "2012-10-17",
        "Id": "PutObjPolicy",
        "Statement": [
            {
                "Sid": "DenyUnEncryptedObjectUploads",
                "Effect": "Deny",
                "Principal": "*",
                "Action": "s3:PutObject",
                "Resource": "arn:aws:s3:::{bucket_name}/*".format(
                    bucket_name=secure_bucket_name),
                "Condition": {
                    "StringNotEquals": {
                        "s3:x-amz-server-side-encryption": "aws:kms"
                    }
                }
            }
        ]
    })
    client.put_bucket_policy(Bucket=secure_bucket_name, Policy=policy)
    for flist in [files, csv_files, text_files, glob_files]:
        for f, data in flist.items():
            client.put_object(Bucket=test_bucket_name, Key=f, Body=data)

    S3FileSystem.clear_instance_cache()
    s3 = S3FileSystem(anon=False, client_kwargs={'endpoint_url': endpoint_uri})
    s3.invalidate_cache()
    yield s3
Example #20
    def create_session(self, profile=None):
        session = Session(profile=profile)

        # We have to set bogus credentials here or otherwise we'll trigger
        # an early credential chain resolution.
        sts = session.create_client(
            'sts',
            aws_access_key_id='spam',
            aws_secret_access_key='eggs',
        )
        self.mock_client_creator.return_value = sts
        stubber = Stubber(sts)
        stubber.activate()
        assume_role_provider = AssumeRoleProvider(
            load_config=lambda: session.full_config,
            client_creator=self.mock_client_creator,
            cache={},
            profile_name=profile,
            credential_sourcer=CanonicalNameCredentialSourcer([
                self.env_provider, self.container_provider,
                self.metadata_provider
            ]),
            profile_provider_builder=ProfileProviderBuilder(session),
        )

        component_name = 'credential_provider'
        resolver = session.get_component(component_name)
        available_methods = [p.METHOD for p in resolver.providers]
        replacements = {
            'env': self.env_provider,
            'iam-role': self.metadata_provider,
            'container-role': self.container_provider,
            'assume-role': assume_role_provider
        }
        for name, provider in replacements.items():
            try:
                index = available_methods.index(name)
            except ValueError:
                # The provider isn't in the session
                continue

            resolver.providers[index] = provider

        session.register_component('credential_provider', resolver)
        return session, stubber
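A hedged sketch of how a test method on this class might prime the stubber returned by create_session before credentials are resolved; the credential values are placeholders and the datetime import is assumed.

        from datetime import datetime, timedelta

        session, stubber = self.create_session(profile='A')
        stubber.add_response('assume_role', {
            'Credentials': {
                'AccessKeyId': 'stub-access-key',      # placeholder
                'SecretAccessKey': 'stub-secret-key',  # placeholder
                'SessionToken': 'stub-token',          # placeholder
                'Expiration': datetime.utcnow() + timedelta(hours=1),
            },
        })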
Example #21
    def create_session(self, profile=None):
        session = Session(profile=profile)

        # We have to set bogus credentials here or otherwise we'll trigger
        # an early credential chain resolution.
        sts = session.create_client(
            'sts',
            aws_access_key_id='spam',
            aws_secret_access_key='eggs',
        )
        stubber = Stubber(sts)
        stubber.activate()
        assume_role_provider = AssumeRoleProvider(
            load_config=lambda: session.full_config,
            client_creator=lambda *args, **kwargs: sts,
            cache={},
            profile_name=profile,
            credential_sourcer=CanonicalNameCredentialSourcer([
                self.env_provider, self.container_provider,
                self.metadata_provider
            ])
        )

        component_name = 'credential_provider'
        resolver = session.get_component(component_name)
        available_methods = [p.METHOD for p in resolver.providers]
        replacements = {
            'env': self.env_provider,
            'iam-role': self.metadata_provider,
            'container-role': self.container_provider,
            'assume-role': assume_role_provider
        }
        for name, provider in replacements.items():
            try:
                index = available_methods.index(name)
            except ValueError:
                # The provider isn't in the session
                continue

            resolver.providers[index] = provider

        session.register_component(
            'credential_provider', resolver
        )
        return session, stubber
Example #22
def write_training_data(raw_file_name, raw_img, train_annotation):
    client = greengrasssdk.client('iot-data')
    iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])
    session = Session()
    s3 = session.create_client('s3')
    # path to raw image
    raw_image_path = 'raw_images/' + raw_file_name + '.jpg'
    # path to json annotation
    json_annotation_path = 'training_annotation/' + raw_file_name + '.json'

    # You can control the size and quality of the image
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    _, raw_data = cv2.imencode('.jpg', raw_img, encode_param)
    raw_image_response = s3.put_object(Body=raw_data.tostring(),
                                       Bucket='YOUR-BUCKET-NAME',
                                       Key=raw_image_path)
    annotation_response = s3.put_object(Body=json.dumps(train_annotation),
                                        Bucket='YOUR-BUCKET-NAME',
                                        Key=json_annotation_path)
Example #23
class Aws(object):
    def __init__(self):
        self._session = Session()
        self._cache = {}

    def can_access_bucket(self, bucket_name):
        try:
            response = self._client('s3').head_bucket(Bucket=bucket_name)
            status = response.get('ResponseMetadata', {}).get('HTTPStatusCode', 0)
            return status == 200
        except ClientError:
            return False

    def create_bucket(self, bucket_name):
        s3 = self._client('s3')
        waiter = s3.get_waiter('bucket_exists')
        self._client('s3').create_bucket(
            Bucket=bucket_name,
            CreateBucketConfiguration={'LocationConstraint': 'us-west-2'})
        try:
            waiter.wait(Bucket=bucket_name)
            return True
        except Exception as e:
            print(e)
            return False

    def destroy_bucket(self, bucket_name):
        s3 = self._client('s3')
        waiter = s3.get_waiter('bucket_not_exists')
        response = s3.list_objects_v2(Bucket=bucket_name)
        for obj in response.get('Contents', []):
            s3.delete_object(Bucket=bucket_name, Key=obj['Key'])
        s3.delete_bucket(Bucket=bucket_name)
        try:
            waiter.wait(Bucket=bucket_name)
            return True
        except Exception as e:
            print(e)
            return False

    def _client(self, name):
        if name not in self._cache:
            self._cache[name] = self._session.create_client(name)
        return self._cache[name]
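A hedged usage sketch for the Aws helper above; the bucket name is a placeholder, and the internal cache means all three calls reuse the same S3 client.

aws = Aws()
if aws.create_bucket('my-example-bucket'):
    print(aws.can_access_bucket('my-example-bucket'))
    aws.destroy_bucket('my-example-bucket')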
Example #24
def s3(s3_base, aws_credentials):
    ''' anonymous access local s3 bucket for testing '''
    from botocore.session import Session
    session = Session()
    client = session.create_client("s3", endpoint_url=endpoint_uri)
    client.create_bucket(Bucket=test_bucket_name, ACL="public-read")

    for file_name in [os.path.join(DIRECTORY, x) for x in test_files]:
        with open(file_name, 'rb') as f:
            data = f.read()
            key = os.path.basename(file_name)
            client.put_object(Bucket=test_bucket_name, Key=key, Body=data)

    # Make sure cache not being used
    s3fs.S3FileSystem.clear_instance_cache()
    s3 = s3fs.S3FileSystem(anon=True,
                           client_kwargs={"endpoint_url": endpoint_uri})
    s3.invalidate_cache()
    yield
Example #25
def main(profile: Profile, session: Session):
    is_parent = True

    if profile.force_refresh:
        try:
            profile.raise_if_logged_in()
        except AlreadyLoggedIn:
            send(profile.pidfile, SIGINT)
            return

        logger.warn("Logged out: ignoring --force-refresh.")

    try:
        client = session.create_client('sts')

        # Exit if already logged in
        profile.raise_if_logged_in()

        # Must know username to lookup cookies
        profile.get_username()

        try:
            saml, roles = refresh(
                profile.ecp_endpoint_url,
                profile.cookies,
            )
        except Exception:
            creds = profile.get_credentials()
            saml, roles = authenticate(profile.ecp_endpoint_url,
                                       profile.cookies, *creds)

        duration = profile.duration
        role = get_selection(roles, profile.role_arn)
        expires = save_sts_token(session, client, saml, role, duration)

        if os.name == 'posix' and not profile.disable_refresh:
            is_parent = daemonize(profile, session, client, role, expires)
    except Exception:
        raise
    finally:
        if not is_parent:
            logger.info('Exiting refresh process')
Example #26
    def test_assume_role_uses_correct_region(self):
        config = ('[profile A]\n'
                  'role_arn = arn:aws:iam::123456789:role/RoleA\n'
                  'source_profile = B\n\n'
                  '[profile B]\n'
                  'aws_access_key_id = abc123\n'
                  'aws_secret_access_key = def456\n')
        self.write_config(config)
        session = Session(profile='A')
        # Verify that when we configure the session with a specific region
        # that we use that region when creating the sts client.
        session.set_config_variable('region', 'cn-north-1')

        create_client, expected_creds = self.create_stubbed_sts_client(session)
        session.create_client = create_client

        resolver = create_credential_resolver(session)
        provider = resolver.get_provider('assume-role')
        creds = provider.load()
        self.assert_creds_equal(creds, expected_creds)
        self.assertEqual(self.actual_client_region, 'cn-north-1')
Example #27
    def test_assume_role_uses_correct_region(self):
        config = (
            '[profile A]\n'
            'role_arn = arn:aws:iam::123456789:role/RoleA\n'
            'source_profile = B\n\n'
            '[profile B]\n'
            'aws_access_key_id = abc123\n'
            'aws_secret_access_key = def456\n'
        )
        self.write_config(config)
        session = Session(profile='A')
        # Verify that when we configure the session with a specific region
        # that we use that region when creating the sts client.
        session.set_config_variable('region', 'cn-north-1')

        create_client, expected_creds = self.create_stubbed_sts_client(session)
        session.create_client = create_client

        resolver = create_credential_resolver(session)
        provider = resolver.get_provider('assume-role')
        creds = provider.load()
        self.assert_creds_equal(creds, expected_creds)
        self.assertEqual(self.actual_client_region, 'cn-north-1')
Example #28
import os
from threading import Timer
import time
import datetime
import awscam
import cv2
from botocore.session import Session
from threading import Thread

# Setup the S3 client
session = Session()
s3 = session.create_client('s3')  # the session manages state about a particular configuration
s3_bucket = 'doorman-faces'

# setup the camera and frame
ret, frame = awscam.getLastFrame()
ret, jpeg = cv2.imencode('.jpg', frame)
Write_To_FIFO = True


class FIFO_Thread(Thread):
    def __init__(self):
        ''' Constructor. '''
        Thread.__init__(self)

    def run(self):
        # write to tmp file for local debugging purpose
        fifo_path = "/tmp/results.mjpeg"
        if not os.path.exists(fifo_path):
            os.mkfifo(fifo_path)
Example #29
class TestAssumeRoleCredentials(unittest.TestCase):
    def setUp(self):
        super(TestAssumeRoleCredentials, self).setUp()
        self.environ = os.environ.copy()
        self.parent_session = Session()
        self.iam = self.parent_session.create_client('iam')
        self.sts = self.parent_session.create_client('sts')
        self.tempdir = tempfile.mkdtemp()
        self.config_file = os.path.join(self.tempdir, 'config')

        # A role trust policy that allows the current account to call assume
        # role on itself.
        account_id = self.sts.get_caller_identity()['Account']
        self.role_policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "AWS": "arn:aws:iam::%s:root" % account_id
                    },
                    "Action": "sts:AssumeRole"
                }
            ]
        }

    def tearDown(self):
        super(TestAssumeRoleCredentials, self).tearDown()
        shutil.rmtree(self.tempdir)

    def random_name(self):
        return 'clitest-' + random_chars(10)

    def create_role(self, policy_document, policy_arn=None):
        name = self.random_name()
        response = self.iam.create_role(
            RoleName=name,
            AssumeRolePolicyDocument=json.dumps(policy_document)
        )
        self.addCleanup(self.iam.delete_role, RoleName=name)
        if policy_arn:
            self.iam.attach_role_policy(RoleName=name, PolicyArn=policy_arn)
            self.addCleanup(
                self.iam.detach_role_policy, RoleName=name,
                PolicyArn=policy_arn
            )
        return response['Role']

    def create_user(self, policy_arns):
        name = self.random_name()
        user = self.iam.create_user(UserName=name)['User']
        self.addCleanup(self.iam.delete_user, UserName=name)

        for arn in policy_arns:
            self.iam.attach_user_policy(
                UserName=name,
                PolicyArn=arn
            )
            self.addCleanup(
                self.iam.detach_user_policy,
                UserName=name, PolicyArn=arn
            )

        return user

    def create_creds(self, user_name):
        creds = self.iam.create_access_key(UserName=user_name)['AccessKey']
        self.addCleanup(
            self.iam.delete_access_key,
            UserName=user_name, AccessKeyId=creds['AccessKeyId']
        )
        return creds

    def wait_for_assume_role(self, role_arn, access_key, secret_key,
                             token=None, attempts=30, delay=10):
        # "Why not use the policy simulator?" you might ask. The answer is
        # that the policy simulator will return success far before you can
        # actually make the calls.
        client = self.parent_session.create_client(
            'sts', aws_access_key_id=access_key,
            aws_secret_access_key=secret_key, aws_session_token=token
        )
        attempts_remaining = attempts
        role_session_name = random_chars(10)
        while attempts_remaining > 0:
            attempts_remaining -= 1
            try:
                result = client.assume_role(
                    RoleArn=role_arn, RoleSessionName=role_session_name)
                return result['Credentials']
            except ClientError as e:
                code = e.response.get('Error', {}).get('Code')
                if code in ["InvalidClientTokenId", "AccessDenied"]:
                    time.sleep(delay)
                else:
                    raise

        raise Exception("Unable to assume role %s" % role_arn)

    def create_assume_policy(self, role_arn):
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Resource": role_arn,
                    "Action": "sts:AssumeRole"
                }
            ]
        }
        name = self.random_name()
        response = self.iam.create_policy(
            PolicyName=name,
            PolicyDocument=json.dumps(policy_document)
        )
        self.addCleanup(
            self.iam.delete_policy, PolicyArn=response['Policy']['Arn']
        )
        return response['Policy']['Arn']

    def assert_s3_read_only_profile(self, profile_name):
        # Calls to S3 should succeed
        command = 's3api list-buckets --profile %s' % profile_name
        result = aws(command, env_vars=self.environ)
        self.assertEqual(result.rc, 0, result.stderr)

        # Calls to other services should not
        command = 'iam list-groups --profile %s' % profile_name
        result = aws(command, env_vars=self.environ)
        self.assertNotEqual(result.rc, 0, result.stdout)
        self.assertIn('AccessDenied', result.stderr)

    def test_recursive_assume_role(self):
        # Create the final role, the one that will actually have access to s3
        final_role = self.create_role(self.role_policy, S3_READ_POLICY_ARN)

        # Create the role that can assume the final role
        middle_policy_arn = self.create_assume_policy(final_role['Arn'])
        middle_role = self.create_role(self.role_policy, middle_policy_arn)

        # Create a user that can only assume the middle-man role, and then get
        # static credentials for it.
        user_policy_arn = self.create_assume_policy(middle_role['Arn'])
        user = self.create_user([user_policy_arn])
        user_creds = self.create_creds(user['UserName'])

        # Setup the config file with the profiles we'll be using. For
        # convenience static credentials are placed here instead of putting
        # them in the credentials file.
        config = (
            '[default]\n'
            'aws_access_key_id = %s\n'
            'aws_secret_access_key = %s\n'
            '[profile middle]\n'
            'source_profile = default\n'
            'role_arn = %s\n'
            '[profile final]\n'
            'source_profile = middle\n'
            'role_arn = %s\n'
        )
        config = config % (
            user_creds['AccessKeyId'], user_creds['SecretAccessKey'],
            middle_role['Arn'], final_role['Arn']
        )
        with open(self.config_file, 'w') as f:
            f.write(config)

        # Wait for IAM permissions to propagate
        middle_creds = self.wait_for_assume_role(
            role_arn=middle_role['Arn'],
            access_key=user_creds['AccessKeyId'],
            secret_key=user_creds['SecretAccessKey'],
        )
        self.wait_for_assume_role(
            role_arn=final_role['Arn'],
            access_key=middle_creds['AccessKeyId'],
            secret_key=middle_creds['SecretAccessKey'],
            token=middle_creds['SessionToken'],
        )

        # Configure our credentials file to be THE credentials file
        self.environ['AWS_CONFIG_FILE'] = self.config_file

        self.assert_s3_read_only_profile(profile_name='final')

    def test_assume_role_with_credential_source(self):
        # Create a role with read access to S3
        role = self.create_role(self.role_policy, S3_READ_POLICY_ARN)

        # Create a user that can assume the role and get static credentials
        # for it.
        user_policy_arn = self.create_assume_policy(role['Arn'])
        user = self.create_user([user_policy_arn])
        user_creds = self.create_creds(user['UserName'])

        # Setup the config file with the profile we'll be using.
        config = (
            '[profile assume]\n'
            'role_arn = %s\n'
            'credential_source = Environment\n'
        )
        config = config % role['Arn']
        with open(self.config_file, 'w') as f:
            f.write(config)

        # Wait for IAM permissions to propagate
        self.wait_for_assume_role(
            role_arn=role['Arn'],
            access_key=user_creds['AccessKeyId'],
            secret_key=user_creds['SecretAccessKey'],
        )

        # Setup the environment so that our new config file is THE config
        # file and add the expected credentials since we're using the
        # environment as our credential source.
        self.environ['AWS_CONFIG_FILE'] = self.config_file
        self.environ['AWS_SECRET_ACCESS_KEY'] = user_creds['SecretAccessKey']
        self.environ['AWS_ACCESS_KEY_ID'] = user_creds['AccessKeyId']

        self.assert_s3_read_only_profile(profile_name='assume')
Example #30
def greengrass_infinite_infer_run():
    """ Entry point of the lambda function"""
    try:
        # This face detection model is implemented as single shot detector (ssd).
        model_type = 'ssd'
        output_map = {1: 'face'}
        # Create an IoT client for sending messages to the cloud.
        client = greengrasssdk.client('iot-data')
        thing_name = os.environ['AWS_IOT_THING_NAME']
        iot_topic = '$aws/things/{}/infer'.format(thing_name)
        # Create a local display instance that will dump the image bytes to a FIFO
        # file so that the image can be rendered locally.
        local_display = LocalDisplay('480p')
        local_display.start()
        # Create an S3 background uploader
        session = Session()
        s3 = session.create_client('s3',
                                   region_name=os.getenv(
                                       'REGION_NAME', 'ap-southeast-2'))
        bucket = os.getenv('FRAMES_BUCKET',
                           'virtual-concierge-frames-ap-southeast-2')
        uploader = ImageUploader(s3, bucket, client, iot_topic)
        uploader.start()
        # The sample projects come with optimized artifacts, hence only the artifact
        # path is required.
        model_dir = '/opt/awscam/artifacts/'
        model_path = model_dir + 'mxnet_deploy_ssd_FP16_FUSED.xml'
        # Load the model onto the GPU.
        msg = 'Loading face detection model for {}'.format(thing_name)
        client.publish(topic=iot_topic, payload=msg)
        model_start = time.time()
        model = awscam.Model(model_path, {'GPU': 1})
        msg = 'Face detection model loaded in {}s'.format(time.time() -
                                                          model_start)
        client.publish(topic=iot_topic, payload=msg)
        # Attempt to load scorer library
        try:
            model_start = time.time()
            scorer = Scorer(model_dir)
            msg = 'Image classification model loaded {} in {}s'.format(
                scorer.vecs.shape[0],
                time.time() - model_start)
            client.publish(topic=iot_topic, payload=msg)
        except Exception as e:
            print('Failed to load scorer', e)
        # Set the threshold for detection
        detection_threshold = float(os.getenv('DETECT_THRESHOLD', '0.7'))
        # This is the similarity threshold
        sim_threshold = float(os.getenv('DETECT_THRESHOLD', '0.99'))
        # The height and width of the training set images
        input_height = 300
        input_width = 300
        # Do inference until the lambda is killed.
        while True:
            # get thing shadow state, to see if we should register
            cloud_output = {}
            # Get a frame from the video stream
            cloud_output["frame_start"] = time.time()
            ret, frame = awscam.getLastFrame()
            if not ret:
                raise Exception('Failed to get frame from the stream')
            # Future: integrate the shadow callback
            if False:
                cloud_output["shadow_start"] = time.time()
                shadow = client.get_thing_shadow(thingName=thing_name)
                jsonState = json.loads(shadow["payload"])
                register = jsonState['state']['desired'].get('register')
                cloud_output["shadow_register"] = register
                cloud_output["shadow_latency"] = time.time(
                ) - cloud_output["shadow_start"]
            # Resize frame to the same size as the training set.
            cloud_output["detect_start"] = time.time()
            frame_resize = cv2.resize(frame, (input_height, input_width))
            # Run the images through the inference engine and parse the results
            # using the parser API. Note it is possible to get the output of
            # doInference and do the parsing manually, but since this is an ssd
            # model, a simple parsing API is provided.
            parsed_inference_results = model.parseResult(
                model_type, model.doInference(frame_resize))
            cloud_output["detect_latency"] = time.time(
            ) - cloud_output["detect_start"]
            # Compute the scale in order to draw bounding boxes on the full resolution
            # image.
            yscale = float(frame.shape[0] / input_height)
            xscale = float(frame.shape[1] / input_width)
            # Dictionary to be filled with labels and probabilities for MQTT
            # Get the detected faces and probabilities
            for i, obj in enumerate(parsed_inference_results[model_type]):
                if obj['prob'] > detection_threshold:
                    # Add bounding boxes to full resolution frame
                    xmin = int(xscale * obj['xmin']) \
                           + int((obj['xmin'] - input_width/2) + input_width/2)
                    ymin = int(yscale * obj['ymin'])
                    xmax = int(xscale * obj['xmax']) \
                           + int((obj['xmax'] - input_width/2) + input_width/2)
                    ymax = int(yscale * obj['ymax'])
                    # Set the default title and color
                    title = '{:.2f}%'.format(obj['prob'] * 100)
                    color = (255, 0, 0)  # blue
                    upload = False
                    if scorer:
                        try:
                            # Attempt to find similar face
                            cloud_output['classify_start'] = time.time()
                            bbox = [xmin, ymin, xmax, ymax]
                            vec = scorer.vectorize(frame, bbox)
                            sim, z_score, prob, name = scorer.similar(vec)
                            if prob >= sim_threshold:
                                title = name
                                if round(prob, 3) < 1.0:
                                    title += ' ({:.2f}%)'.format(prob)
                                color = (0, 255, 0)  # green
                                upload = True
                            cloud_output['classify'] = {
                                'name': name,
                                'sim': float(sim),
                                'zscore': float(z_score),
                                'prob': float(prob)
                            }
                            cloud_output['classify_latency'] = (
                                time.time() - cloud_output['classify_start'])
                        except Exception as e:
                            msg = "Face similarity error: " + str(e)
                            client.publish(topic=iot_topic, payload=msg)
                    if upload:
                        try:
                            metadata = {
                                'ThingName': thing_name,
                                'FullName': title,
                                'Confidence': str(obj['prob']),
                                'Similarity': str(cloud_output['classify']['sim']),
                                'Probability': str(cloud_output['classify']['prob']),
                                'FaceHeight': str(xmax - xmin),
                                'FaceWidth': str(ymax - ymin),
                            }
                            crop_img = uploader.crop(frame, xmin, ymin, xmax,
                                                     ymax)
                            item = uploader.upload(crop_img,
                                                   i,
                                                   metadata=metadata)
                            if item:
                                cloud_output['upload_key'] = item['key']
                            else:
                                cloud_output['upload_skip'] = True
                        except Exception as e:
                            msg = "Upload error: " + str(e)
                            client.publish(topic=iot_topic, payload=msg)
                    # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html
                    # for more information about the cv2.rectangle method.
                    # Method signature: image, point1, point2, color, and thickness.
                    cloud_output["draw_start"] = time.time()
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 10)
                    # Amount to offset the label/probability text above the bounding box.
                    text_offset = 12
                    cv2.putText(frame, title, (xmin, ymin - text_offset),
                                cv2.FONT_HERSHEY_SIMPLEX, 2.5, color, 6)
                    # Store label and probability to send to cloud
                    cloud_output[output_map[obj['label']]] = obj['prob']
                    cloud_output["draw_latency"] = time.time(
                    ) - cloud_output["draw_start"]
            # Set the next frame in the local display stream.
            local_display.set_frame_data(frame)
            cloud_output["frame_end"] = time.time()
            cloud_output["frame_latency"] = cloud_output[
                "frame_end"] - cloud_output["frame_start"]
            client.publish(topic=iot_topic, payload=json.dumps(cloud_output))
    except Exception as ex:
        print('Error in face detection lambda: {}'.format(ex))
Example #31
def greengrass_infinite_infer_run():
    try:
        modelPath = "/opt/awscam/artifacts/mxnet_deploy_ssd_FP16_FUSED.xml"
        modelType = "ssd"
        input_width = 300
        input_height = 300
        prob_thresh = 0.1
        results_thread = FIFO_Thread()
        results_thread.start()

        # Send a starting message to IoT console
        client.publish(topic=iotTopic, payload="Face detection starts now")

        # Load model to GPU (use {"GPU": 0} for CPU)
        mcfg = {"GPU": 1}
        model = awscam.Model(modelPath, mcfg)
        client.publish(topic=iotTopic, payload="Model loaded")
        ret, frame = awscam.getLastFrame()
        if not ret:
            raise Exception("Failed to get frame from the stream")

        yscale = float(frame.shape[0] / input_height)
        xscale = float(frame.shape[1] / input_width)
        font = cv2.FONT_HERSHEY_SIMPLEX
        rgb_color = (255, 165, 20)
        #Timers for cooldown and countdown
        cooldown = datetime.datetime.now()
        countdown = datetime.datetime.now()
        doInfer = True
        onCountdown = False

        while doInfer:
            # Get a frame from the video stream
            ret, frame = awscam.getLastFrame()
            # Raise an exception if failing to get a frame
            if not ret:
                raise Exception("Failed to get frame from the stream")

            # Resize frame to fit model input requirement
            frameResize = cv2.resize(frame, (input_width, input_height))
            # Run model inference on the resized frame
            inferOutput = model.doInference(frameResize)
            # Output inference result to the fifo file so it can be viewed with mplayer
            parsed_results = model.parseResult(modelType, inferOutput)['ssd']

            label = '{'
            msg = 'false'

            time_now = datetime.datetime.now()

            for obj in parsed_results:
                if (obj['prob'] < prob_thresh):
                    break
                xmin = int(xscale * obj['xmin']) + int(
                    (obj['xmin'] - input_width / 2) + input_width / 2)
                ymin = int(yscale * obj['ymin'])
                xmax = int(xscale * obj['xmax']) + int(
                    (obj['xmax'] - input_width / 2) + input_width / 2)
                ymax = int(yscale * obj['ymax'])
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), rgb_color, 4)
                label += '"{}": {:.2f},'.format("prob", obj['prob'])
                label_show = '{}: {:.2f}'.format(str(obj['label']),
                                                 obj['prob'])
                cv2.putText(frame, label_show, (xmin, ymin - 15), font, 0.5,
                            rgb_color, 4)
                msg = "true"

                if (time_now >= cooldown) and obj['prob'] >= 0.60:
                    # Uploading to Amazon S3 if cooldown and countdown allow it
                    if onCountdown and time_now >= countdown:
                        message = "uploading to s3..."
                        client.publish(topic=iotTopic, payload=message)

                        key = 'images/frame-' + time.strftime(
                            "%Y%m%d-%H%M%S") + '.jpg'
                        session = Session()
                        s3 = session.create_client('s3')

                        _, jpg_data = cv2.imencode('.jpg', frame)
                        result = s3.put_object(Body=jpg_data.tostring(),
                                               Bucket=bucket_name,
                                               Key=key)

                        message = "uploaded to s3: " + key
                        client.publish(topic=iotTopic, payload=message)
                        cooldown = time_now + datetime.timedelta(seconds=10)
                        onCountdown = False
                    # Starting countdown
                    elif not onCountdown:
                        onCountdown = True
                        countdown = time_now + datetime.timedelta(seconds=4)

            if not onCountdown:
                cv2.putText(
                    frame, "Wait for picture: " +
                    str(max(0, int(
                        (cooldown - time_now).total_seconds()))) + " seconds",
                    (950, 100), font, 2, rgb_color, 4)
                if int((cooldown - time_now).total_seconds()) >= 5:
                    cv2.putText(frame, "Image Uploaded! ", (1150, 200), font,
                                2, rgb_color, 4)
                    cv2.putText(frame, "Please check the leaderboard",
                                (900, 300), font, 2, rgb_color, 4)
            else:
                if int((countdown - time_now).total_seconds()) >= -5:
                    cv2.putText(frame, "Say Cheese!", (1000, 1000), font, 3,
                                rgb_color, 4)
                    cv2.putText(
                        frame,
                        str(max(0, int(
                            (countdown - time_now).total_seconds()))) + "...",
                        (1200, 1100), font, 3, rgb_color, 4)
                else:
                    onCountdown = False

            label += '"face": "' + msg + '"'
            label += '}'
            client.publish(topic=iotTopic, payload=label)
            global jpeg
            ret, jpeg = cv2.imencode('.jpg', frame)

    except Exception as e:
        msg = "Test failed: " + str(e)
        client.publish(topic=iotTopic, payload=msg)

    # Asynchronously schedule this function to be run again in 15 seconds
    Timer(15, greengrass_infinite_infer_run).start()
Example #32
import boto3
import base64
import os
from botocore.session import Session
from botocore.config import Config

# get env vars
function_name = os.environ['LAMBDA_FUNCTION_NAME']
function_version = os.environ['LAMBDA_VERSION']
steps = os.environ['LAMBDA_STEPS']
interval = os.environ['LAMBDA_INTERVAL']

s = Session()
client = s.create_client('lambda',
                         config=Config(connect_timeout=5,
                                       read_timeout=600,
                                       retries={'max_attempts': 1}))

response = client.invoke(FunctionName='LambdaDeploy',
                         InvocationType='RequestResponse',
                         LogType='Tail',
                         Payload='{"function-name": "' + function_name +
                         '","alias-name": "prod","new-version":"' +
                         function_version + '","steps": ' + steps +
                         ',"interval" : ' + interval + ',"type": "linear"}')

if "FunctionError" in response:
    raise Exception(base64.b64decode(response['LogResult']).decode("utf-8"))
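The hand-concatenated JSON string above is easy to break with quoting; a hedged sketch of building the same payload with json.dumps (the keys mirror what the snippet already sends, and the int() conversions for steps/interval are assumptions).

import json

payload = json.dumps({
    'function-name': function_name,
    'alias-name': 'prod',
    'new-version': function_version,
    'steps': int(steps),
    'interval': int(interval),
    'type': 'linear',
})
response = client.invoke(FunctionName='LambdaDeploy',
                         InvocationType='RequestResponse',
                         LogType='Tail',
                         Payload=payload)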
Example #33
class TestConfigureSSOCommand(unittest.TestCase):
    def setUp(self):
        self.global_args = mock.Mock()
        self._session = Session()
        self.sso_client = self._session.create_client(
            'sso',
            region_name='us-west-2',
        )
        self.sso_stub = Stubber(self.sso_client)
        self.profile = 'a-profile'
        self.scoped_config = {}
        self.full_config = {
            'profiles': {
                self.profile: self.scoped_config
            }
        }
        self.mock_session = mock.Mock(spec=Session)
        self.mock_session.get_scoped_config.return_value = self.scoped_config
        self.mock_session.full_config = self.full_config
        self.mock_session.create_client.return_value = self.sso_client
        self.mock_session.profile = self.profile
        self.config_path = '/some/path'
        self.session_config = {
            'config_file': self.config_path,
        }
        self.mock_session.get_config_variable = self.session_config.get
        self.mock_session.get_available_regions.return_value = ['us-east-1']
        self.token_cache = {}
        self.writer = mock.Mock(spec=ConfigFileWriter)
        self.prompter = mock.Mock(spec=PTKPrompt)
        self.selector = mock.Mock(spec=select_menu)
        self.configure_sso = ConfigureSSOCommand(
            self.mock_session,
            prompter=self.prompter,
            selector=self.selector,
            config_writer=self.writer,
            sso_token_cache=self.token_cache,
        )
        self.region = 'us-west-2'
        self.output = 'json'
        self.sso_region = 'us-east-1'
        self.start_url = 'https://d-92671207e4.awsapps.com/start'
        self.account_id = '0123456789'
        self.role_name = 'roleA'
        self.cached_token_key = '13f9d35043871d073ab260e020f0ffde092cb14b'
        self.expires_at = datetime.now(tzlocal()) + timedelta(hours=24)
        self.access_token = {
            'accessToken': 'access.token.string',
            'expiresAt': self.expires_at,
        }
        self.token_cache[self.cached_token_key] = self.access_token

    def _add_list_accounts_response(self, accounts):
        params = {
            'accessToken': self.access_token['accessToken'],
        }
        response = {
            'accountList': accounts,
        }
        self.sso_stub.add_response('list_accounts', response, params)

    def _add_list_account_roles_response(self, roles):
        params = {
            'accountId': self.account_id,
            'accessToken': self.access_token['accessToken'],
        }
        response = {
            'roleList': roles,
        }
        self.sso_stub.add_response('list_account_roles', response, params)

    def _add_prompt_responses(self):
        self.prompter.get_value.side_effect = [
            self.start_url,
            self.sso_region,
            self.region,
            self.output,
        ]

    def _add_simple_single_item_responses(self):
        selected_account = {
            'accountId': self.account_id,
            'emailAddress': '*****@*****.**',
        }
        self._add_list_accounts_response([selected_account])
        self._add_list_account_roles_response([{'roleName': self.role_name}])

    def assert_config_updates(self, config=None):
        if config is None:
            config = {
                '__section__': 'profile %s' % self.profile,
                'sso_start_url': self.start_url,
                'sso_region': self.sso_region,
                'sso_account_id': self.account_id,
                'sso_role_name': self.role_name,
                'region': self.region,
                'output': self.output,
            }
        self.writer.update_config.assert_called_with(config, self.config_path)

    def test_basic_configure_sso_flow(self):
        self._add_prompt_responses()
        selected_account = {
            'accountId': self.account_id,
            'emailAddress': '*****@*****.**',
        }
        self.selector.side_effect = [
            selected_account,
            self.role_name,
        ]
        accounts = [
            selected_account,
            {'accountId': '1234567890', 'emailAddress': '*****@*****.**'},
        ]
        self._add_list_accounts_response(accounts)
        roles = [
            {'roleName': self.role_name},
            {'roleName': 'roleB'},
        ]
        self._add_list_account_roles_response(roles)
        with self.sso_stub:
            self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()
        self.assert_config_updates()

    def test_single_account_single_role_flow(self):
        self._add_prompt_responses()
        self._add_simple_single_item_responses()
        with self.sso_stub:
            self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()
        self.assert_config_updates()
        # Account / Role should be auto selected if only one is returned
        self.assertEqual(self.selector.call_count, 0)

    def test_no_accounts_flow_raises_error(self):
        self.prompter.get_value.side_effect = [self.start_url, self.sso_region]
        self._add_list_accounts_response([])
        with self.assertRaises(RuntimeError):
            with self.sso_stub:
                self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()

    def test_no_roles_flow_raises_error(self):
        self._add_prompt_responses()
        selected_account = {
            'accountId': self.account_id,
            'emailAddress': '*****@*****.**',
        }
        self._add_list_accounts_response([selected_account])
        self._add_list_account_roles_response([])
        with self.assertRaises(RuntimeError):
            with self.sso_stub:
                self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()

    def assert_default_prompt_args(self, defaults):
        calls = self.prompter.get_value.call_args_list
        self.assertEqual(len(calls), len(defaults))
        for call, default in zip(calls, defaults):
            # The default to the prompt call is the first positional param
            self.assertEqual(call[0][0], default)

    def assert_prompt_completions(self, completions):
        calls = self.prompter.get_value.call_args_list
        self.assertEqual(len(calls), len(completions))
        for call, expected_completions in zip(calls, completions):
            _, kwargs = call
            self.assertEqual(kwargs['completions'], expected_completions)

    def test_defaults_to_scoped_config(self):
        self.scoped_config['sso_start_url'] = 'default-url'
        self.scoped_config['sso_region'] = 'default-sso-region'
        self.scoped_config['region'] = 'default-region'
        self.scoped_config['output'] = 'default-output'
        self._add_prompt_responses()
        self._add_simple_single_item_responses()
        with self.sso_stub:
            self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()
        self.assert_config_updates()
        expected_defaults = [
            'default-url',
            'default-sso-region',
            'default-region',
            'default-output',
        ]
        self.assert_default_prompt_args(expected_defaults)

    def test_handles_no_profile(self):
        expected_profile = 'profile-a'
        self.profile = None
        self.mock_session.profile = None
        self.configure_sso = ConfigureSSOCommand(
            self.mock_session,
            prompter=self.prompter,
            selector=self.selector,
            config_writer=self.writer,
            sso_token_cache=self.token_cache,
        )
        # If there is no profile, it will be prompted for as the last value
        self.prompter.get_value.side_effect = [
            self.start_url,
            self.sso_region,
            self.region,
            self.output,
            expected_profile,
        ]
        self._add_simple_single_item_responses()
        with self.sso_stub:
            self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()
        self.profile = expected_profile
        self.assert_config_updates()

    def test_handles_non_existent_profile(self):
        not_found_exception = ProfileNotFound(profile=self.profile)
        self.mock_session.get_scoped_config.side_effect = not_found_exception
        self.configure_sso = ConfigureSSOCommand(
            self.mock_session,
            prompter=self.prompter,
            selector=self.selector,
            config_writer=self.writer,
            sso_token_cache=self.token_cache,
        )
        self._add_prompt_responses()
        self._add_simple_single_item_responses()
        with self.sso_stub:
            self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()
        self.assert_config_updates()

    def test_cli_config_is_none_not_written(self):
        self.prompter.get_value.side_effect = [
            self.start_url,
            self.sso_region,
            # The CLI region and output format shouldn't be written
            # to the config as they are None
            None,
            None
        ]
        self._add_simple_single_item_responses()
        with self.sso_stub:
            self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()
        expected_config = {
            '__section__': 'profile %s' % self.profile,
            'sso_start_url': self.start_url,
            'sso_region': self.sso_region,
            'sso_account_id': self.account_id,
            'sso_role_name': self.role_name,
        }
        self.assert_config_updates(config=expected_config)

    def test_prompts_suggest_values(self):
        self.full_config['profiles']['another_profile'] = {
            'sso_start_url': self.start_url,
        }
        self._add_prompt_responses()
        self._add_simple_single_item_responses()
        with self.sso_stub:
            self.configure_sso(args=[], parsed_globals=self.global_args)
        self.sso_stub.assert_no_pending_responses()
        expected_start_urls = [self.start_url]
        expected_sso_regions = ['us-east-1']
        expected_cli_regions = None
        expected_cli_outputs = list(CLI_OUTPUT_FORMATS.keys())
        expected_completions = [
            expected_start_urls,
            expected_sso_regions,
            expected_cli_regions,
            expected_cli_outputs,
        ]
        self.assert_prompt_completions(expected_completions)
Example #34
""" A sample lambda for object detection"""
import awscam
import cv2
import datetime
import greengrasssdk
import json
import numpy as np
import os

from botocore.session import Session
from threading import Thread, Event

# Setup the S3 client
session = Session()
s3 = session.create_client("s3")
storage_name = os.environ.get("STORAGE_NAME", "deeplens-doorman-demo")


class LocalDisplay(Thread):
    """ Class for facilitating the local display of inference results
        (as images). The class is designed to run on its own thread. In
        particular the class dumps the inference results into a FIFO
        located in the tmp directory (which lambda has access to). The
        results can be rendered using mplayer by typing:
        mplayer -demuxer lavf -lavfdopts format=mjpeg:probesize=32 /tmp/results.mjpeg
    """

    def __init__(self, resolution):
        """ resolution - Desired resolution of the project stream """
        # Initialize the base class, so that the object can run on its own
        # thread.
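# The original Example #34 is truncated at this point. As a hedged sketch only
# (not the original code; the class name and details are hypothetical), a
# minimal thread matching the behaviour the docstring describes -- encoding
# frames and writing them to the /tmp/results.mjpeg FIFO for mplayer -- could
# look like this. It assumes the FIFO already exists (e.g. via os.mkfifo).
class FifoDisplaySketch(Thread):
    def __init__(self):
        super().__init__()
        self.frame = None
        self.stop_request = Event()

    def set_frame_data(self, frame):
        # Encode the latest inference frame as JPEG bytes.
        ret, jpeg = cv2.imencode('.jpg', frame)
        if ret:
            self.frame = jpeg.tobytes()

    def run(self):
        # Opening the FIFO blocks until a reader (e.g. mplayer) attaches.
        with open('/tmp/results.mjpeg', 'wb') as fifo:
            while not self.stop_request.is_set():
                if self.frame is not None:
                    fifo.write(self.frame)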
Example #35
def populate_profiles(sso_start_url, sso_region, regions, dry_run,
                      config_default, existing_config_action,
                      profile_name_components, profile_name_separator,
                      profile_name_include_region, profile_name_region_style,
                      profile_name_trim_account_name_patterns,
                      profile_name_trim_role_name_patterns,
                      profile_name_process, credential_process, force_refresh,
                      verbose):
    """Configure profiles for all accounts and roles.

    Writes a profile to your AWS config file (~/.aws/config) for every account and role you have access to,
    for the regions you specify.
    """

    configure_logging(LOGGER, verbose)

    missing = []

    try:
        instance = get_instance(
            sso_start_url,
            sso_region,
            sso_start_url_vars=DEFAULT_START_URL_VARS,
            sso_region_vars=DEFAULT_SSO_REGION_VARS,
        )
    except GetInstanceError as e:
        LOGGER.fatal(str(e))
        sys.exit(1)

    if not regions:
        for var_name in DEFAULT_REGION_VARS:
            value = os.environ.get(var_name)
            if value:
                LOGGER.debug(f"Got default region {value} from {var_name}")
                regions = [value]
                break
    if not regions:
        missing.append("--region")

    if missing:
        raise click.UsageError("Missing arguments: {}".format(
            ", ".join(missing)))

    if config_default:
        config_default = dict(v.split("=", 1) for v in config_default)
    else:
        config_default = {}

    if not profile_name_separator:
        profile_name_separator = os.environ.get(
            "AWS_CONFIGURE_SSO_DEFAULT_PROFILE_NAME_SEPARATOR"
        ) or DEFAULT_SEPARATOR

    if profile_name_process:
        profile_name_formatter = get_process_formatter(profile_name_process)
    else:
        region_format, no_region_format = generate_profile_name_format(
            profile_name_components, profile_name_separator,
            profile_name_region_style)
        LOGGER.debug(
            "Profile name format (region):    {}".format(region_format))
        LOGGER.debug(
            "Profile name format (no region): {}".format(no_region_format))
        profile_name_formatter = get_formatter(profile_name_include_region,
                                               region_format, no_region_format)
        if profile_name_trim_account_name_patterns or profile_name_trim_role_name_patterns:
            profile_name_formatter = get_trim_formatter(
                profile_name_trim_account_name_patterns,
                profile_name_trim_role_name_patterns, profile_name_formatter)

    try:
        profile_name_formatter(0,
                               account_name="foo",
                               account_id="bar",
                               role_name="baz",
                               region="us-east-1")
    except Exception as e:
        raise click.UsageError("Invalid profile name format: {}".format(e))

    session = Session()

    token_fetcher = get_token_fetcher(
        session,
        instance.region,
        interactive=True,
    )

    LOGGER.info(f"Logging in to {instance.start_url}")
    token = token_fetcher.fetch_token(instance.start_url,
                                      force_refresh=force_refresh)

    LOGGER.debug("Token: {}".format(token))

    config = botocore.config.Config(
        region_name=instance.region,
        signature_version=botocore.UNSIGNED,
    )
    client = session.create_client("sso", config=config)

    LOGGER.info("Gathering accounts and roles")
    accounts = []
    list_accounts_args = {"accessToken": token["accessToken"]}
    while True:
        response = client.list_accounts(**list_accounts_args)

        accounts.extend(response["accountList"])

        next_token = response.get("nextToken")
        if not next_token:
            break
        else:
            list_accounts_args["nextToken"] = response["nextToken"]

    LOGGER.debug("Account list: {} {}".format(len(accounts), accounts))

    configs = []
    for account in accounts:
        LOGGER.debug("Getting roles for {}".format(account["accountId"]))
        list_role_args = {
            "accessToken": token["accessToken"],
            "accountId": account["accountId"],
        }

        while True:
            response = client.list_account_roles(**list_role_args)

            for role in response["roleList"]:
                for i, region in enumerate(regions):
                    profile_name = profile_name_formatter(
                        i,
                        account_name=account["accountName"],
                        account_id=account["accountId"],
                        role_name=role["roleName"],
                        region=region,
                    )
                    configs.append(
                        ConfigParams(profile_name, account["accountName"],
                                     account["accountId"], role["roleName"],
                                     region))

            next_token = response.get("nextToken")
            if not next_token:
                break
            else:
                list_role_args["nextToken"] = response["nextToken"]

    configs.sort(key=lambda v: v.profile_name)

    LOGGER.debug("Got configs: {}".format(configs))

    if not dry_run:
        LOGGER.info("Writing {} profiles to {}".format(
            len(configs), get_config_filename(session)))

        config_writer = ConfigFileWriter()

        def write_config(profile_name, config_values):
            # discard because we're already loading the existing values
            write_values(session,
                         profile_name,
                         config_values,
                         config_file_writer=config_writer,
                         existing_config_action="discard")
    else:
        LOGGER.info("Dry run for {} profiles".format(len(configs)))

        def write_config(profile_name, config_values):
            lines = ["[profile {}]".format(process_profile_name(profile_name))]
            for key, value in config_values.items():
                lines.append("{} = {}".format(key, value))
            lines.append("")
            print("\n".join(lines))

    for config in configs:
        LOGGER.debug("Processing config: {}".format(config))

        config_values = {}
        existing_profile = False
        existing_config = {}
        if existing_config_action != "discard":
            try:
                existing_config = Session(
                    profile=config.profile_name).get_scoped_config()
                config_values.update(existing_config)
                existing_profile = True
            except ProfileNotFound:
                pass

        config_values.update({
            "sso_start_url": instance.start_url,
            "sso_region": instance.region,
            "sso_account_name": config.account_name,
            "sso_account_id": config.account_id,
            "sso_role_name": config.role_name,
            "region": config.region,
        })

        for k, v in config_default.items():
            if k in existing_config and existing_config_action in ["keep"]:
                continue
            config_values[k] = v

        if credential_process is not None:
            set_credential_process = credential_process
        elif os.environ.get(DISABLE_CREDENTIAL_PROCESS_VAR,
                            "").lower() in ["1", "true"]:
            set_credential_process = False
        else:
            set_credential_process = SET_CREDENTIAL_PROCESS_DEFAULT

        if set_credential_process:
            credential_process_name = os.environ.get(
                CREDENTIAL_PROCESS_NAME_VAR
            ) or "aws-sso-util credential-process"
            config_values[
                "credential_process"] = f"{credential_process_name} --profile {config.profile_name}"
        elif set_credential_process is False:
            config_values.pop("credential_process", None)

        config_values["sso_auto_populated"] = "true"

        LOGGER.debug("Config values for profile {}: {}".format(
            config.profile_name, config_values))

        write_config(config.profile_name, config_values)
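
# A hedged alternative to the manual nextToken loops above: recent botocore
# releases ship paginators for the SSO ListAccounts and ListAccountRoles
# operations, so the account/role gathering can be written without explicit
# while loops. Sketch only; the helper name here is illustrative.
def iter_account_roles(client, access_token):
    account_paginator = client.get_paginator("list_accounts")
    role_paginator = client.get_paginator("list_account_roles")
    for page in account_paginator.paginate(accessToken=access_token):
        for account in page["accountList"]:
            role_pages = role_paginator.paginate(
                accessToken=access_token, accountId=account["accountId"])
            for role_page in role_pages:
                for role in role_page["roleList"]:
                    yield account, role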