Example #1
def create_clusters(provider, context, **kwargs):
    """Creates ECS clusters.

    Expects a "clusters" argument, which should contain a list of cluster
    names to create.

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance
        context (:class:`stacker.context.Context`): context instance

    Returns: dict of cluster information on success, or False if the
        required "clusters" argument is missing.

    """
    conn = get_session(provider.region).client('ecs')

    try:
        clusters = kwargs["clusters"]
    except KeyError:
        logger.error("setup_clusters hook missing \"clusters\" argument")
        return False

    if isinstance(clusters, basestring):
        clusters = [clusters]

    cluster_info = {}
    for cluster in clusters:
        logger.debug("Creating ECS cluster: %s", cluster)
        r = conn.create_cluster(clusterName=cluster)
        cluster_info[r["cluster"]["clusterName"]] = r
    return {"clusters": cluster_info}
Example #2
def handler(value, provider, **kwargs):  # pylint: disable=W0613
    """ Find the AWS recommended AMI for ECS instances
        stored in AWS managed SSM

    Need to specify the SSM key value for the lookup, e.g.
    /aws/service/ecs/optimized-ami/amazon-linux/recommended

    Region is obtained from the environment file

    For example:

    configuration file:
        ImageId: ${ecsinstanceami /aws/service/ecs/optimized-ami/amazon-linux/recommended}

    environment file:
        region: us-east-1
    """

    session = get_session(provider.region)
    ssm_client = session.client('ssm')
    get_parameters_output = ssm_client.get_parameters(Names=[value])
    parameter_value_dict = literal_eval(
        get_parameters_output['Parameters'][0]['Value'])
    image_id = parameter_value_dict['image_id']

    LOGGER.debug('found ECS image ID %s for region %s', image_id,
                 provider.region)

    return image_id
Example #3
    def handle(cls, value, **kwargs):
        """Decrypt the specified value with a master key in KMS.

        kmssimple field types should be in the following format:

            [<region>@]<base64 encrypted value>

        Note: The region is optional, and defaults to the environment's
        `AWS_DEFAULT_REGION` if not specified.

        For example:

            # We use the aws cli to get the encrypted value for the string
            # "PASSWORD" using the master key called "myStackerKey" in
            # us-east-1
            $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \
                    --plaintext "PASSWORD" --output text --query CiphertextBlob

            CiD6bC8t2Y<...encrypted blob...>

            # In stacker we would reference the encrypted value like:
            conf_key: ${kms us-east-1@CiD6bC8t2Y<...encrypted blob...>}

            You can optionally store the encrypted value in a file, ie:

            kms_value.txt
            us-east-1@CiD6bC8t2Y<...encrypted blob...>

            and reference it within stacker (NOTE: the path should be relative
            to the stacker config file):

            conf_key: ${kms file://kms_value.txt}

            # Both of the above would resolve to
            conf_key: PASSWORD

        """
        value = read_value_from_path(value)

        region = None
        if "@" in value:
            region, value = value.split("@", 1)

        kms = get_session(region).client('kms')

        # encode the str value as a UTF-8 bytestring for use with codecs.decode.
        value = value.encode('utf-8')

        # get raw but still encrypted value from base64 version.
        decoded = codecs.decode(value, 'base64')

        # determine whether we are running on Python 3 or later
        python3_or_later = sys.version_info[0] >= 3

        # decrypt and return the plain text raw value.
        if python3_or_later:
            return kms.decrypt(CiphertextBlob=decoded)["Plaintext"]\
                .decode('utf-8')
        else:
            return kms.decrypt(CiphertextBlob=decoded)["Plaintext"]
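Why codecs.decode is used here instead of the string base64 methods: the codecs route works unchanged on Python 2 and 3, whereas str.decode('base64') is Python 2 only. A standalone round-trip (illustrative, not from the source):

import codecs

blob = codecs.encode(b"PASSWORD", "base64")  # base64-encode raw bytes
assert codecs.decode(blob, "base64") == b"PASSWORD"  # and back again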
Example #4
    def test_stack_template_url(self):
        context = mock_context("mynamespace")
        blueprint = TestBlueprint(name="myblueprint", context=context)

        region = "us-east-1"
        endpoint = "https://example.com"
        session = get_session(region)
        provider = Provider(session)
        action = BaseAction(
            context=context,
            provider_builder=MockProviderBuilder(provider, region=region)
        )

        with mock.patch('stacker.actions.base.get_s3_endpoint', autospec=True,
                        return_value=endpoint):
            self.assertEqual(
                action.stack_template_url(blueprint),
                "%s/%s/stack_templates/%s/%s-%s.json" % (
                    endpoint,
                    "stacker-mynamespace",
                    "mynamespace-myblueprint",
                    "myblueprint",
                    MOCK_VERSION
                )
            )
Example #5
def handler(value, provider, **kwargs):  # pylint: disable=W0613
    """ Find the AWS recommended AMI for ECS instances
        stored in AWS managed SSM

    Need to specify the SSM key value to the lookup.
    (/aws/service/ecs/optimized-ami/amazon-linux/recommended)

    Region is obtained from the environment file

    For example:

    configuration file:
        ImageId: ${ecsinstanceami /aws/service/ecs/optimized-ami/amazon-linux/recommended}

    environment file:
        region: us-east-1
    """

    session = get_session(provider.region)
    ssm_client = session.client('ssm')
    get_parameters_output = ssm_client.get_parameters(Names=[value])
    parameter_value_dict = literal_eval(
        get_parameters_output['Parameters'][0]['Value'])
    image_id = parameter_value_dict['image_id']

    LOGGER.debug('found ECS image ID: %s for region: %s', image_id,
                 provider.region)

    return image_id
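The SSM parameter's Value field holds a dict rendered as a string, which is why the lookup parses it with ast.literal_eval. A minimal illustration, using an abbreviated, made-up sample value:

from ast import literal_eval

raw_value = '{"image_id": "ami-0123456789abcdef0", "os": "Amazon Linux 2"}'
assert literal_eval(raw_value)['image_id'] == 'ami-0123456789abcdef0'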
Example #6
 def test_ensure_cfn_bucket_doesnt_exist_us_west(self):
     session = get_session("us-west-1")
     provider = Provider(session)
     action = BaseAction(
         context=mock_context("mynamespace"),
         provider_builder=MockProviderBuilder(provider, region="us-west-1")
     )
     stubber = Stubber(action.s3_conn)
     stubber.add_client_error(
         "head_bucket",
         service_error_code="NoSuchBucket",
         service_message="Not Found",
         http_status_code=404,
     )
     stubber.add_response(
         "create_bucket",
         service_response={},
         expected_params={
             "Bucket": ANY,
             "CreateBucketConfiguration": {
                 "LocationConstraint": "us-west-1",
             }
         }
     )
     with stubber:
         action.ensure_cfn_bucket()
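The botocore Stubber pattern used throughout these tests, reduced to a self-contained sketch (the client and bucket name here are illustrative): queue an expected call with its canned response, then make the call inside the stubber context.

import boto3
from botocore.stub import Stubber

client = boto3.client("s3", region_name="us-east-1")
stubber = Stubber(client)
stubber.add_response("head_bucket", service_response={},
                     expected_params={"Bucket": "example-bucket"})
with stubber:
    client.head_bucket(Bucket="example-bucket")  # answered by the stub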
Example #7
    def s3_conn(self):
        """The boto s3 connection object used for communication with S3."""
        if not hasattr(self, "_s3_conn"):
            session = get_session(self.provider.region)
            self._s3_conn = session.client('s3')

        return self._s3_conn
Example #8
def ensure_server_cert_exists(provider, context, **kwargs):
    client = get_session(provider.region).client('iam')
    cert_name = kwargs["cert_name"]
    status = "unknown"
    try:
        response = client.get_server_certificate(
            ServerCertificateName=cert_name)
        cert_arn = _get_cert_arn_from_response(response)
        status = "exists"
        logger.info("certificate exists: %s (%s)", cert_name, cert_arn)
    except ClientError:
        if kwargs.get("prompt", True):
            upload = raw_input(
                "Certificate '%s' wasn't found. Upload it now? (yes/no) " %
                (cert_name, ))
            if upload != "yes":
                return False

        parameters = get_cert_contents(kwargs)
        if not parameters:
            return False
        response = client.upload_server_certificate(**parameters)
        cert_arn = _get_cert_arn_from_response(response)
        status = "uploaded"
        logger.info(
            "uploaded certificate: %s (%s)",
            cert_name,
            cert_arn,
        )

    return {
        "status": status,
        "cert_name": cert_name,
        "cert_arn": cert_arn,
    }
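Note that raw_input is Python 2 only; Example #40 below is the same hook using Python 3's input. A common portability shim, shown as an aside rather than as part of the source:

try:
    prompt = raw_input  # Python 2
except NameError:
    prompt = input  # Python 3 renamed raw_input to input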
Example #9
def handler(value, provider, **kwargs):  # pylint: disable=W0613
    """ Lookup a EC2 Instance's attribute by it's 'Name' tag value.

    Need to specify the name tag value and attribute name (same as with
    the `aws ec2 describe-instances` command.

    Region is obtained from the environment file

    [in the environment file]:
      region: us-east-1

    For example:

    [in the stacker yaml (configuration) file]:

      lookups:
        EC2AttrByNameTag: lookups.instance-attribute-by-name-tag-lookup.handler

      variables:
        InstanceId: ${EC2AttrByNameTag ${instance_name_tag}::InstanceId}
        ImageId: ${EC2AttrByNameTag ${instance_name_tag}::ImageId}
    """

    name_tag_val = value.split('::')[0]
    inst_attr = value.split('::')[1]

    session = get_session(provider.region)
    ec2_client = session.client('ec2')
    describe_instances_output = ec2_client.describe_instances(
        Filters=[{
            'Name': 'instance-state-name',
            'Values': ['running']
        }, {
            'Name': 'tag:Name',
            'Values': [name_tag_val]
        }])
    reservations = describe_instances_output['Reservations']
    if reservations:
        number_found = len(reservations)
        LOGGER.debug('found %s instances', number_found)
        if number_found == 1:
            instance = reservations[0]['Instances'][0]
            if inst_attr in [
                    'ImageId', 'InstanceId', 'InstanceType', 'KeyName',
                    'LaunchTime', 'Platform', 'PrivateIpAddress',
                    'PublicIpAddress', 'VpcId'
            ]:
                inst_attr_val = instance[inst_attr]
            else:
                return ('error: unsupported attribute lookup'
                        ' type ({})'.format(inst_attr))
        else:
            return 'error: too many matching instances'
    else:
        LOGGER.debug('did not find any matching instances')
        return 'error: no matching instances'

    LOGGER.debug('found EC2 instance attribute %s (%s)'
                 ' with name tag (%s)', inst_attr, inst_attr_val, name_tag_val)
    return inst_attr_val
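The '<name-tag>::<attribute>' value is split twice above; a slightly more defensive parse, offered as a sketch rather than the source's implementation, fails fast when the separator is missing:

def parse_lookup_value(value):
    try:
        name_tag_val, inst_attr = value.split('::', 1)
    except ValueError:
        raise ValueError(
            "expected '<name-tag>::<attribute>', got %r" % value)
    return name_tag_val, inst_attr

assert parse_lookup_value('web01::ImageId') == ('web01', 'ImageId')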
Example #10
 def setUp(self):
     region = "us-east-1"
     self.session = get_session(region=region)
     self.provider = Provider(self.session,
                              interactive=True,
                              recreate_failed=True)
     self.stubber = Stubber(self.provider.cloudformation)
Example #11
    def s3_conn(self):
        """The boto s3 connection object used for communication with S3."""
        if not hasattr(self, "_s3_conn"):
            # Always use the global client for s3
            session = get_session(self.bucket_region)
            self._s3_conn = session.client('s3')

        return self._s3_conn
Example #12
 def __init__(self, context, provider_builder=None, cancel=None):
     self.context = context
     self.provider_builder = provider_builder
     self.bucket_name = context.bucket_name
     self.cancel = cancel or threading.Event()
     self.bucket_region = context.config.stacker_bucket_region
     if not self.bucket_region and provider_builder:
         self.bucket_region = provider_builder.region
     self.s3_conn = get_session(self.bucket_region).client('s3')
Example #13
 def __init__(self, context, provider_builder=None, cancel=None):
     self.context = context
     self.provider_builder = provider_builder
     self.bucket_name = context.bucket_name
     self.cancel = cancel or threading.Event()
     self.bucket_region = context.config.stacker_bucket_region
     if not self.bucket_region and provider_builder:
         self.bucket_region = provider_builder.region
     self.s3_conn = get_session(self.bucket_region).client('s3')
Example #14
    def cloudformation(self):
        # deals w/ multiprocessing issues w/ sharing ssl conns
        # see https://github.com/remind101/stacker/issues/196
        pid = os.getpid()
        if pid != self._pid or not self._cloudformation:
            session = get_session(self.region)
            self._cloudformation = session.client('cloudformation')

        return self._cloudformation
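The pid check above guards against forked workers sharing one SSL connection (see the linked issue). The same idea in a minimal, standalone form (a demo class, not from the source):

import os

class ClientCache(object):
    """Rebuild a cached client whenever the process id changes."""

    def __init__(self):
        self._pid = os.getpid()
        self._client = None

    def get(self, factory):
        # A forked child sees a different os.getpid(), so it builds its
        # own client instead of reusing the parent's connection.
        if os.getpid() != self._pid or self._client is None:
            self._pid = os.getpid()
            self._client = factory()
        return self._client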
Example #15
def set_parameter(provider, context, **kwargs):  # pylint: disable=W0613
    """Ensure a SSM parameter is set.

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance
        context (:class:`stacker.context.Context`): context instance

    Returns: boolean for whether or not the hook succeeded.

    """
    parameter_name = kwargs.get('parameter')
    parameter_type = kwargs.get('type', 'String')
    parameter_key_id = kwargs.get('key_id', False)
    parameter_overwrite = kwargs.get('overwrite', False)

    session = get_session(provider.region)
    client = session.client('ssm')

    if parameter_overwrite is False:
        response = client.describe_parameters(
            ParameterFilters=[{
                'Key': 'Name',
                'Values': [parameter_name]
            }])

        if len(response['Parameters']) == 1:
            LOGGER.info('SSM parameter %s already present on AWS; skipping...',
                        parameter_name)
            return True

    if kwargs.get('value', False):
        parameter_value = kwargs['value']
    elif kwargs.get('value_output', False):
        parameter_value = output_handler(kwargs.get('value_output'),
                                         provider=provider,
                                         context=context)
    elif kwargs.get('random', False):
        chars = string.ascii_letters + string.digits
        parameter_value = ''.join(random.choice(chars) for _ in range(25))
    else:
        LOGGER.info('')  # line break to better visually separate next request
        LOGGER.info('Please enter value for SSM parameter %s : ',
                    parameter_name)
        parameter_value = raw_input()
    if parameter_key_id is not False:
        client.put_parameter(Name=parameter_name,
                             Value=parameter_value,
                             Type=parameter_type,
                             KeyId=parameter_key_id,
                             Overwrite=parameter_overwrite)
    else:
        client.put_parameter(Name=parameter_name,
                             Value=parameter_value,
                             Type=parameter_type,
                             Overwrite=parameter_overwrite)
    return True
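One aside on the random branch above: random.choice is not cryptographically secure. If the generated parameter is a secret, Python 3.6+'s secrets module is a drop-in alternative (a suggestion, not part of the source):

import secrets
import string

chars = string.ascii_letters + string.digits
parameter_value = ''.join(secrets.choice(chars) for _ in range(25))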
Example #16
def get_principal_arn(provider):
    """Return ARN of current session principle."""
    # looking up caller identity
    session = get_session(provider.region)
    sts_client = session.client('sts')
    caller_identity_arn = sts_client.get_caller_identity()['Arn']
    if caller_identity_arn.split(':')[2] == 'iam' and (
            caller_identity_arn.split(':')[5].startswith('user/')):
        return caller_identity_arn  # user arn
    return assumed_role_to_principle(caller_identity_arn)
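The split(':') indexing above relies on the fixed layout of an ARN. For reference, with a made-up account id:

arn = "arn:aws:iam::123456789012:user/alice"  # hypothetical IAM user ARN
parts = arn.split(':')
assert parts[2] == 'iam'             # the service field
assert parts[5].startswith('user/')  # the resource field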
Example #17
def sync(context, provider, **kwargs):
    """Sync static website to S3 bucket."""
    if context.hook_data['staticsite']['deploy_is_current']:
        LOGGER.info('staticsite: skipping upload; latest version already '
                    'deployed')
        return True

    bucket_name = output_handler(kwargs.get('bucket_output_lookup'),
                                 provider=provider,
                                 context=context)
    distribution_id = output_handler(
        kwargs.get('distributionid_output_lookup'),
        provider=provider,
        context=context)
    distribution_domain = output_handler(
        kwargs.get('distributiondomain_output_lookup'),
        provider=provider,
        context=context)

    # Using the awscli for s3 syncing is incredibly suboptimal, but on balance
    # it's probably the most stable/efficient option for syncing the files
    # until https://github.com/boto/boto3/issues/358 is resolved
    aws_cli([
        's3', 'sync', context.hook_data['staticsite']['app_directory'],
        "s3://%s/" % bucket_name, '--delete'
    ])

    session = get_session(provider.region)
    cf_client = session.client('cloudfront')
    cf_client.create_invalidation(DistributionId=distribution_id,
                                  InvalidationBatch={
                                      'Paths': {
                                          'Quantity': 1,
                                          'Items': ['/*']
                                      },
                                      'CallerReference': str(time.time())
                                  })
    LOGGER.info(
        "staticsite: sync & CF invalidation of %s (domain %s) "
        "complete", distribution_id, distribution_domain)

    if not context.hook_data['staticsite'].get('hash_tracking_disabled'):
        LOGGER.info(
            "staticsite: updating environment SSM parameter %s with "
            "hash %s",
            context.hook_data['staticsite']['hash_tracking_parameter'],
            context.hook_data['staticsite']['hash'])
        ssm_client = session.client('ssm')
        ssm_client.put_parameter(
            Name=context.hook_data['staticsite']['hash_tracking_parameter'],
            Description='Hash of currently deployed static website source',
            Value=context.hook_data['staticsite']['hash'],
            Type='String',
            Overwrite=True)
    return True
Example #18
    def cloudformation(self):
        # deals w/ multiprocessing issues w/ sharing ssl conns
        # see https://github.com/remind101/stacker/issues/196
        pid = os.getpid()
        if pid != self._pid or not self._cloudformation:
            config = Config(retries=dict(max_attempts=MAX_ATTEMPTS))
            session = get_session(self.region)
            self._cloudformation = session.client('cloudformation',
                                                  config=config)

        return self._cloudformation
Example #19
    def handle(cls, value, **kwargs):
        """Decrypt the specified value with a master key in KMS.

        kmssimple field types should be in the following format:

            [<region>@]<base64 encrypted value>

        Note: The region is optional, and defaults to the environment's
        `AWS_DEFAULT_REGION` if not specified.

        For example:

            # We use the aws cli to get the encrypted value for the string
            # "PASSWORD" using the master key called "myStackerKey" in
            # us-east-1
            $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \
                    --plaintext "PASSWORD" --output text --query CiphertextBlob

            CiD6bC8t2Y<...encrypted blob...>

            # In stacker we would reference the encrypted value like:
            conf_key: ${kms us-east-1@CiD6bC8t2Y<...encrypted blob...>}

            You can optionally store the encrypted value in a file, ie:

            kms_value.txt
            us-east-1@CiD6bC8t2Y<...encrypted blob...>

            and reference it within stacker (NOTE: the path should be relative
            to the stacker config file):

            conf_key: ${kms file://kms_value.txt}

            # Both of the above would resolve to
            conf_key: PASSWORD

        """
        value = read_value_from_path(value)

        region = None
        if "@" in value:
            region, value = value.split("@", 1)

        kms = get_session(region).client('kms')

        # encode the str value as a UTF-8 bytestring for use with codecs.decode.
        value = value.encode('utf-8')

        # get raw but still encrypted value from base64 version.
        decoded = codecs.decode(value, 'base64')

        # decrypt and return the plain text raw value.
        return kms.decrypt(CiphertextBlob=decoded)["Plaintext"]
Example #20
    def setUp(self):
        self.context = self._get_context()
        self.session = get_session(region=None)
        self.provider = Provider(self.session,
                                 interactive=False,
                                 recreate_failed=False)
        provider_builder = MockProviderBuilder(self.provider)
        self.build_action = build.Action(self.context,
                                         provider_builder=provider_builder,
                                         cancel=MockThreadingEvent())

        self.stack = mock.MagicMock()
        self.stack.region = None
        self.stack.name = 'vpc'
        self.stack.fqn = 'vpc'
        self.stack.blueprint.rendered = '{}'
        self.stack.locked = False
        self.stack_status = None

        plan = self.build_action._generate_plan()
        self.step = plan.steps[0]
        self.step.stack = self.stack

        def patch_object(*args, **kwargs):
            m = mock.patch.object(*args, **kwargs)
            self.addCleanup(m.stop)
            m.start()

        def get_stack(name, *args, **kwargs):
            if name != self.stack.name or not self.stack_status:
                raise StackDoesNotExist(name)

            return {
                'StackName': self.stack.name,
                'StackStatus': self.stack_status,
                'Outputs': [],
                'Tags': []
            }

        def get_events(name, *args, **kwargs):
            return [{
                'ResourceStatus': 'ROLLBACK_IN_PROGRESS',
                'ResourceStatusReason': 'CFN fail'
            }]

        patch_object(self.provider, 'get_stack', side_effect=get_stack)
        patch_object(self.provider, 'update_stack')
        patch_object(self.provider, 'create_stack')
        patch_object(self.provider, 'destroy_stack')
        patch_object(self.provider, 'get_events', side_effect=get_events)

        patch_object(self.build_action, "s3_stack_push")
Example #21
 def test_ensure_cfn_bucket_exists(self):
     session = get_session("us-east-1")
     provider = Provider(session)
     action = BaseAction(context=mock_context("mynamespace"),
                         provider_builder=MockProviderBuilder(provider))
     stubber = Stubber(action.s3_conn)
     stubber.add_response("head_bucket",
                          service_response={},
                          expected_params={
                              "Bucket": ANY,
                          })
     with stubber:
         action.ensure_cfn_bucket()
Example #22
def purge_bucket(context, provider, **kwargs):
    """Delete objects in bucket."""
    session = get_session(provider.region)

    if kwargs.get('bucket_name'):
        bucket_name = kwargs['bucket_name']
    else:
        if kwargs.get('bucket_output_lookup'):
            value = kwargs['bucket_output_lookup']
            handler = OutputLookup.handle
        elif kwargs.get('bucket_rxref_lookup'):
            value = kwargs['bucket_rxref_lookup']
            handler = RxrefLookup.handle
        elif kwargs.get('bucket_xref_lookup'):
            value = kwargs['bucket_xref_lookup']
            handler = XrefLookup.handle
        else:
            LOGGER.fatal('No bucket name/source provided.')
            return False

        try:  # Exit early if the bucket's stack is already deleted
            session.client('cloudformation').describe_stacks(
                StackName=context.get_fqn(value.split('::')[0])
            )
        except ClientError as exc:
            if 'does not exist' in exc.response['Error']['Message']:
                LOGGER.info('S3 bucket stack appears to have already been '
                            'deleted...')
                return True
            raise

        bucket_name = handler(
            value,
            provider=provider,
            context=context
        )

    s3_resource = session.resource('s3')
    try:
        s3_resource.meta.client.head_bucket(Bucket=bucket_name)
    except ClientError as exc:
        if exc.response['Error']['Code'] == '404':
            LOGGER.info("%s S3 bucket appears to have already been deleted...",
                        bucket_name)
            return True
        raise

    bucket = s3_resource.Bucket(bucket_name)
    bucket.object_versions.delete()
    return True
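The final step above works because boto3's object_versions collection issues batched deletes covering every object version and delete marker, which a versioned bucket requires before it can itself be removed. Standalone, with a hypothetical bucket name:

import boto3

bucket = boto3.resource('s3').Bucket('example-bucket')  # hypothetical name
bucket.object_versions.delete()  # removes all versions and delete markers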
Example #23
 def test_ensure_cfn_forbidden(self):
     session = get_session("us-west-1")
     provider = Provider(session)
     action = BaseAction(context=mock_context("mynamespace"),
                         provider_builder=MockProviderBuilder(provider))
     stubber = Stubber(action.s3_conn)
     stubber.add_client_error(
         "head_bucket",
         service_error_code="AccessDenied",
         service_message="Forbidden",
         http_status_code=403,
     )
     with stubber:
         with self.assertRaises(botocore.exceptions.ClientError):
             action.ensure_cfn_bucket()
Example #24
def delete_param(context, provider, **kwargs):  # noqa pylint: disable=unused-argument
    """Delete SSM parameter."""
    parameter_name = kwargs.get('parameter_name')
    if not parameter_name:
        raise ValueError('Must specify `parameter_name` for delete_param '
                         'hook.')

    session = get_session(provider.region)
    ssm_client = session.client('ssm')

    try:
        ssm_client.delete_parameter(Name=parameter_name)
    except ssm_client.exceptions.ParameterNotFound:
        LOGGER.info("%s parameter appears to have already been deleted...",
                    parameter_name)
    return True
Example #25
    def setUp(self):
        self.context = self._get_context()
        self.session = get_session(region=None)
        self.provider = self._make_provider()
        provider_builder = MockProviderBuilder(self.provider)
        self.build_action = build.Action(self.context,
                                         provider_builder=provider_builder,
                                         cancel=MockThreadingEvent())
        self._patch_object(self.build_action, "s3_stack_push")

        self.stack = TestStack("vpc", self.context)
        self.stack_status = None

        plan = self.build_action._generate_plan()
        self.step = plan.steps[0]
        self.step.stack = self.stack
Example #26
    def setUp(self):
        self.context = self._get_context()
        self.session = get_session(region=None)
        self.provider = Provider(self.session, interactive=False,
                                 recreate_failed=False)
        provider_builder = MockProviderBuilder(self.provider)
        self.build_action = build.Action(self.context,
                                         provider_builder=provider_builder,
                                         cancel=MockThreadingEvent())

        self.stack = mock.MagicMock()
        self.stack.region = None
        self.stack.name = 'vpc'
        self.stack.fqn = 'vpc'
        self.stack.blueprint.rendered = '{}'
        self.stack.locked = False
        self.stack_status = None

        plan = self.build_action._generate_plan()
        self.step = plan.steps[0]
        self.step.stack = self.stack

        def patch_object(*args, **kwargs):
            m = mock.patch.object(*args, **kwargs)
            self.addCleanup(m.stop)
            m.start()

        def get_stack(name, *args, **kwargs):
            if name != self.stack.name or not self.stack_status:
                raise StackDoesNotExist(name)

            return {'StackName': self.stack.name,
                    'StackStatus': self.stack_status,
                    'Outputs': [],
                    'Tags': []}

        def get_events(name, *args, **kwargs):
            return [{'ResourceStatus': 'ROLLBACK_IN_PROGRESS',
                    'ResourceStatusReason': 'CFN fail'}]

        patch_object(self.provider, 'get_stack', side_effect=get_stack)
        patch_object(self.provider, 'update_stack')
        patch_object(self.provider, 'create_stack')
        patch_object(self.provider, 'destroy_stack')
        patch_object(self.provider, 'get_events', side_effect=get_events)

        patch_object(self.build_action, "s3_stack_push")
Example #27
    def handle(cls, value, **kwargs):
        """Retrieve (and decrypt if applicable) a parameter from
        AWS SSM Parameter Store.

        ssmstore field types should be in the following format:

            [<region>@]ssmkey

        Note: The region is optional, and defaults to us-east-1 if not given.

        For example:

            # In stacker we would reference the encrypted value like:
            conf_key: ${ssmstore us-east-1@ssmkey}

            You can optionally store the value in a file, ie:

            ssmstore_value.txt
            us-east-1@ssmkey

            and reference it within stacker (NOTE: the path should be relative
            to the stacker config file):

            conf_key: ${ssmstore file://ssmstore_value.txt}

            # Both of the above would resolve to
            conf_key: PASSWORD

        """
        value = read_value_from_path(value)

        region = "us-east-1"
        if "@" in value:
            region, value = value.split("@", 1)

        client = get_session(region).client("ssm")
        response = client.get_parameters(
            Names=[
                value,
            ],
            WithDecryption=True
        )
        if response['Parameters']:
            return str(response['Parameters'][0]['Value'])

        raise ValueError('SSMKey "{}" does not exist in region {}'.format(
            value, region))
Example #28
def handler(value, **kwargs):
    """Retrieve (and decrypt if applicable) a parameter from
    AWS SSM Parameter Store.

    ssmstore field types should be in the following format:

        [<region>@]ssmkey

    Note: The region is optional, and defaults to us-east-1 if not given.

    For example:

        # In stacker we would reference the encrypted value like:
        conf_key: ${ssmstore us-east-1@ssmkey}

        You can optionally store the value in a file, ie:

        ssmstore_value.txt
        us-east-1@ssmkey

        and reference it within stacker (NOTE: the path should be relative to
        the stacker config file):

        conf_key: ${ssmstore file://ssmstore_value.txt}

        # Both of the above would resolve to
        conf_key: PASSWORD

    """
    value = read_value_from_path(value)

    region = "us-east-1"
    if "@" in value:
        region, value = value.split("@", 1)

    client = get_session(region).client("ssm")
    response = client.get_parameters(
        Names=[
            value,
        ],
        WithDecryption=True
    )
    if response['Parameters']:
        return str(response['Parameters'][0]['Value'])

    raise ValueError('SSMKey "{}" does not exist in region {}'.format(value,
                                                                      region))
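Several lookups share this "[<region>@]<value>" convention. Factored into a standalone helper (a sketch, not how the source organizes it):

def split_region(value, default_region="us-east-1"):
    if "@" in value:
        region, value = value.split("@", 1)
    else:
        region = default_region
    return region, value

assert split_region("us-west-2@/app/key") == ("us-west-2", "/app/key")
assert split_region("/app/key") == ("us-east-1", "/app/key")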
Example #29
def handler(value, provider, context, **kwargs):  # pylint: disable=W0613
    """Cross account SSM Parameter Store look up handler."""
    """Format of value:

        <role_arn>@<ssm_parameter_name>

    For example:

        AppAMI: ${crssacctssm arn:aws:iam::5555555555:role/ssm-role@/infra/ami/windows/latest}  # noqa

    This lookup will assume an IAM role and use it to retrieve a SSM Parameter.
    The return value will be the parameter value as a string.
    """

    # Split value for the Role and Parameter Name
    try:
        role_arn, param_name = value.split('@', 1)
    except ValueError:
        raise ValueError('Invalid value for crssacctssm: {}. Must be in '
                         '<role_arn>@<ssm_parameter_name> format'.format(
                            value))

    # Use role_arn for sts assume role
    session = get_session(provider.region)
    sts_client = session.client('sts')
    LOGGER.info('Assuming Role: {}'.format(role_arn))
    response = sts_client.assume_role(
        RoleArn=role_arn,
        RoleSessionName='runway-ssm-get-param',
        DurationSeconds=900,
    )

    # Use tokens from assume role to create ssm GetParameter
    ssm_client = session.client(
        'ssm',
        aws_access_key_id=response['Credentials']['AccessKeyId'],
        aws_secret_access_key=response['Credentials']['SecretAccessKey'],
        aws_session_token=response['Credentials']['SessionToken'],
    )
    LOGGER.info('Looking up Parameter: {}'.format(param_name))
    param_resp = ssm_client.get_parameter(Name=param_name)

    # Return the value from the parameter
    LOGGER.debug(param_resp.get('Parameter', 'Error getting SSM Parameter'))
    param_value = param_resp['Parameter'].get('Value')
    return param_value
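An equivalent way to consume the assume_role credentials, sketched under the assumption that response is the assume_role output above: build a whole boto3 Session from them, so every client created from it acts as the role.

import boto3

creds = response['Credentials']
assumed_session = boto3.Session(
    aws_access_key_id=creds['AccessKeyId'],
    aws_secret_access_key=creds['SecretAccessKey'],
    aws_session_token=creds['SessionToken'],
)
ssm_client = assumed_session.client('ssm')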
Example #30
 def test_ensure_cfn_forbidden(self):
     session = get_session("us-west-1")
     provider = Provider(session)
     action = BaseAction(
         context=mock_context("mynamespace"),
         provider_builder=MockProviderBuilder(provider)
     )
     stubber = Stubber(action.s3_conn)
     stubber.add_client_error(
         "head_bucket",
         service_error_code="AccessDenied",
         service_message="Forbidden",
         http_status_code=403,
     )
     with stubber:
         with self.assertRaises(botocore.exceptions.ClientError):
             action.ensure_cfn_bucket()
Example #31
 def test_ensure_cfn_bucket_exists(self):
     session = get_session("us-east-1")
     provider = Provider(session)
     action = BaseAction(
         context=mock_context("mynamespace"),
         provider_builder=MockProviderBuilder(provider)
     )
     stubber = Stubber(action.s3_conn)
     stubber.add_response(
         "head_bucket",
         service_response={},
         expected_params={
             "Bucket": ANY,
         }
     )
     with stubber:
         action.ensure_cfn_bucket()
Example #32
def handler(value, **kwargs):
    """Decrypt the specified value with a master key in KMS.

    kmssimple field types should be in the following format:

        [<region>@]<base64 encrypted value>

    Note: The region is optional, and defaults to the environment's
    `AWS_DEFAULT_REGION` if not specified.

    For example:

        # We use the aws cli to get the encrypted value for the string
        # "PASSWORD" using the master key called "myStackerKey" in us-east-1
        $ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \
                --plaintext "PASSWORD" --output text --query CiphertextBlob

        CiD6bC8t2Y<...encrypted blob...>

        # In stacker we would reference the encrypted value like:
        conf_key: ${kms us-east-1@CiD6bC8t2Y<...encrypted blob...>}

        You can optionally store the encrypted value in a file, ie:

        kms_value.txt
        us-east-1@CiD6bC8t2Y<...encrypted blob...>

        and reference it within stacker (NOTE: the path should be relative to
        the stacker config file):

        conf_key: ${kms file://kms_value.txt}

        # Both of the above would resolve to
        conf_key: PASSWORD

    """
    value = read_value_from_path(value)

    region = None
    if "@" in value:
        region, value = value.split("@", 1)

    kms = get_session(region).client('kms')
    # Python 2 only: str.decode('base64') was removed in Python 3, where
    # codecs.decode(value.encode('utf-8'), 'base64') works instead (see the
    # earlier kms examples).
    decoded = value.decode("base64")
    return kms.decrypt(CiphertextBlob=decoded)["Plaintext"]
Example #33
def create_ecs_service_role(provider, context, **kwargs):
    """Used to create the ecsServieRole, which has to be named exactly that
    currently, so cannot be created via CloudFormation. See:

    http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance
        context (:class:`stacker.context.Context`): context instance

    Returns: boolean for whether or not the hook succeeded.

    """
    role_name = kwargs.get("role_name", "ecsServiceRole")
    client = get_session(provider.region).client('iam')

    try:
        client.create_role(
            RoleName=role_name,
            AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json()
        )
    except ClientError as e:
        if "already exists" in str(e):
            pass
        else:
            raise

    policy = Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Resource=["*"],
                Action=[ecs.CreateCluster, ecs.DeregisterContainerInstance,
                        ecs.DiscoverPollEndpoint, ecs.Poll,
                        ecs.Action("Submit*")]
            )
        ])
    client.put_role_policy(
        RoleName=role_name,
        PolicyName="AmazonEC2ContainerServiceRolePolicy",
        PolicyDocument=policy.to_json()
    )
    return True
Example #34
def create_domain(provider, context, **kwargs):
    """Create a domain within route53.

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance
        context (:class:`stacker.context.Context`): context instance

    Returns: dict with the domain and zone_id on success, or False if no
        domain was provided.

    """
    session = get_session(provider.region)
    client = session.client("route53")
    domain = kwargs.get("domain")
    if not domain:
        logger.error("domain argument or BaseDomain variable not provided.")
        return False
    zone_id = create_route53_zone(client, domain)
    return {"domain": domain, "zone_id": zone_id}
Example #35
 def test_ensure_cfn_bucket_doesnt_exist_us_east(self):
     session = get_session("us-east-1")
     provider = Provider(session)
     action = BaseAction(context=mock_context("mynamespace"),
                         provider_builder=MockProviderBuilder(provider))
     stubber = Stubber(action.s3_conn)
     stubber.add_client_error(
         "head_bucket",
         service_error_code="NoSuchBucket",
         service_message="Not Found",
         http_status_code=404,
     )
     stubber.add_response("create_bucket",
                          service_response={},
                          expected_params={
                              "Bucket": ANY,
                          })
     with stubber:
         action.ensure_cfn_bucket()
Example #36
def create_ecs_service_role(provider, context, **kwargs):
    """Used to create the ecsServieRole, which has to be named exactly that
    currently, so cannot be created via CloudFormation. See:

    http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role

    Args:
        provider (:class:`stacker.providers.base.BaseProvider`): provider
            instance
        context (:class:`stacker.context.Context`): context instance

    Returns: boolean for whether or not the hook succeeded.

    """
    role_name = kwargs.get("role_name", "ecsServiceRole")
    client = get_session(provider.region).client('iam')

    try:
        client.create_role(
            RoleName=role_name,
            AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json())
    except ClientError as e:
        if "already exists" in str(e):
            pass
        else:
            raise

    policy = Policy(Version='2012-10-17',
                    Statement=[
                        Statement(Effect=Allow,
                                  Resource=["*"],
                                  Action=[
                                      ecs.CreateCluster,
                                      ecs.DeregisterContainerInstance,
                                      ecs.DiscoverPollEndpoint, ecs.Poll,
                                      ecs.Action("Submit*")
                                  ])
                    ])
    client.put_role_policy(RoleName=role_name,
                           PolicyName="AmazonEC2ContainerServiceRolePolicy",
                           PolicyDocument=policy.to_json())
    return True
Example #37
def upload(provider, context, **kwargs):  # pylint: disable=W0613
    s3_file_key = kwargs.get('s3_file_key', 'common')
    local_path = kwargs.get('local_path', 'undefined')
    local_file = kwargs.get('local_file', 'undefined')

    bucket = xref_handler(
        kwargs.get('bucket_xref'),
        provider=provider,
        context=context,
    )

    session = get_session(provider.region)
    client = session.client('s3')

    print("Uploading to s3 bucket: %s" % bucket)
    with open(os.path.join(local_path, local_file), 'rb') as body:
        client.put_object(Body=body,
                          Bucket=bucket,
                          Key=s3_file_key,
                          ContentType='text/html')
    return True
Example #38
    def test_stack_template_url(self):
        context = mock_context("mynamespace")
        blueprint = TestBlueprint(name="myblueprint", context=context)

        region = "us-east-1"
        endpoint = "https://example.com"
        session = get_session(region)
        provider = Provider(session)
        action = BaseAction(context=context,
                            provider_builder=MockProviderBuilder(
                                provider, region=region))

        with mock.patch('stacker.actions.base.get_s3_endpoint',
                        autospec=True,
                        return_value=endpoint):
            self.assertEqual(
                action.stack_template_url(blueprint),
                "%s/%s/stack_templates/%s/%s-%s.json" %
                (endpoint, "stacker-mynamespace", "mynamespace-myblueprint",
                 "myblueprint", MOCK_VERSION))
Example #39
def handler(value, provider, **kwargs):  # pylint: disable=W0613
    """ Lookup a Cognito User Pool App Client secret by UserPoolId::AppClientId.

    Need to specify the Cognito User Pool ID and App Client ID

    Region is obtained from the environment file

    [in the environment file]:
      region: us-west-2

    For example:

    [in the stacker yaml (configuration) file]:

      lookups:
        CognitoUserPoolAppClientSecret: lookups.instance-attribute-by-name-tag-lookup.handler

      stacks:
        variables:
          AppClientSecret: ${CognitoUserPoolAppClientSecret ${user-pool-id}::${app-client-id}}
    """

    user_pool_id = value.split('::')[0]
    app_client_id = value.split('::')[1]

    session = get_session(provider.region)
    cognito_client = session.client('cognito-idp')
    try:
        desc_user_pool_client_output = cognito_client.describe_user_pool_client(
            ClientId=app_client_id, UserPoolId=user_pool_id)
    except Exception as e:
        LOGGER.error('could not describe user pool client: %s', e)
        return 'error: could not describe user pool client'

    secret = desc_user_pool_client_output['UserPoolClient'].get('ClientSecret')
    if secret:
        LOGGER.debug('found user pool app client secret')
        return secret
    else:
        LOGGER.debug('did not find user pool app client secret')
        return 'not found'
Example #40
def ensure_server_cert_exists(provider, context, **kwargs):
    client = get_session(provider.region).client('iam')
    cert_name = kwargs["cert_name"]
    status = "unknown"
    try:
        response = client.get_server_certificate(
            ServerCertificateName=cert_name
        )
        cert_arn = _get_cert_arn_from_response(response)
        status = "exists"
        logger.info("certificate exists: %s (%s)", cert_name, cert_arn)
    except ClientError:
        if kwargs.get("prompt", True):
            upload = input(
                "Certificate '%s' wasn't found. Upload it now? (yes/no) " % (
                    cert_name,
                )
            )
            if upload != "yes":
                return False

        parameters = get_cert_contents(kwargs)
        if not parameters:
            return False
        response = client.upload_server_certificate(**parameters)
        cert_arn = _get_cert_arn_from_response(response)
        status = "uploaded"
        logger.info(
            "uploaded certificate: %s (%s)",
            cert_name,
            cert_arn,
        )

    return {
        "status": status,
        "cert_name": cert_name,
        "cert_arn": cert_arn,
    }
Example #41
    def build(self, region=None, profile=None):
        """Get or create the provider for the given region and profile."""

        with self.lock:
            # memoization lookup key derived from region + profile.
            key = "{}-{}".format(profile, region)
            try:
                # assume provider is in provider dictionary.
                provider = self.providers[key]
            except KeyError:
                msg = "Missed memoized lookup ({}), creating new AWS Provider."
                logger.debug(msg.format(key))
                if not region:
                    region = self.region
                # memoize the result for later.
                self.providers[key] = Provider(
                    get_session(region=region, profile=profile),
                    region=region,
                    **self.kwargs
                )
                provider = self.providers[key]

        return provider
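Usage sketch for the memoization (the constructor call here is hypothetical): repeated builds with the same profile/region return the identical Provider instance.

builder = ProviderBuilder(region="us-east-1")  # hypothetical constructor
first = builder.build(profile="dev")
second = builder.build(profile="dev")
assert first is second  # same memoized Provider for the "dev-us-east-1" key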
Example #42
def upload_lambda_functions(context, provider, **kwargs):
    """Builds Lambda payloads from user configuration and uploads them to S3.

    Constructs ZIP archives containing files matching specified patterns for
    each function, uploads the result to Amazon S3, then stores objects (of
    type :class:`troposphere.awslambda.Code`) in the context's hook data,
    ready to be referenced in blueprints.

    Configuration consists of some global options and a dictionary of
    function specifications. In the specifications, each key indicates the
    name of the function (used for generating names for artifacts), and the
    value determines what files to include in the ZIP (see more details
    below).

    Payloads are uploaded to either a custom bucket or stacker's default
    bucket, with the key containing its checksum, to allow repeated uploads
    to be skipped in subsequent runs.

    The configuration settings are documented as keyword arguments below.

    Keyword Arguments:
        bucket (str, optional): Custom bucket to upload functions to.
            Omitting it will cause the default stacker bucket to be used.
        bucket_region (str, optional): The region in which the bucket should
            exist. If not given, the region will either be that of the
            global `stacker_bucket_region` setting, or else the region in
            use by the provider.
        prefix (str, optional): S3 key prefix to prepend to the uploaded
            zip name.
        follow_symlinks (bool, optional): Will determine if symlinks should
            be followed and included with the zip artifact. Default: False
        payload_acl (str, optional): The canned S3 object ACL to be applied to
            the uploaded payload. Default: private
        functions (dict):
            Configurations of desired payloads to build. Keys correspond to
            function names, used to derive key names for the payload. Each
            value should itself be a dictionary, with the following data:

                * path (str):

                    Base directory of the Lambda function payload content.
                    If it is not an absolute path, it will be considered relative
                    to the directory containing the stacker configuration file
                    in use.

                    Files in this directory will be added to the payload ZIP,
                    according to the include and exclude patterns. If no
                    patterns are provided, all files in this directory
                    (respecting default exclusions) will be used.

                    Files are stored in the archive with path names relative to
                    this directory. So, for example, all the files contained
                    directly under this directory will be added to the root of
                    the ZIP file.

                * include (str or list[str], optional):

                    Pattern or list of patterns of files to include in the
                    payload. If provided, only files that match these
                    patterns will be included in the payload.

                    Omitting it is equivalent to accepting all files that are
                    not otherwise excluded.

                * exclude (str or list[str], optional):
                    Pattern or list of patterns of files to exclude from the
                    payload. If provided, any files that match will be ignored,
                    regardless of whether they match an inclusion pattern.

                    Commonly ignored files are already excluded by default,
                    such as ``.git``, ``.svn``, ``__pycache__``, ``*.pyc``,
                    ``.gitignore``, etc.

    Examples:
        .. Hook configuration.
        .. code-block:: yaml

            pre_build:
              - path: stacker.hooks.aws_lambda.upload_lambda_functions
                required: true
                enabled: true
                data_key: lambda
                args:
                  bucket: custom-bucket
                  follow_symlinks: true
                  prefix: cloudformation-custom-resources/
                  payload_acl: authenticated-read
                  functions:
                    MyFunction:
                      path: ./lambda_functions
                      include:
                        - '*.py'
                        - '*.txt'
                      exclude:
                        - '*.pyc'
                        - test/

        .. Blueprint usage
        .. code-block:: python

            from troposphere.awslambda import Function
            from stacker.blueprints.base import Blueprint

            class LambdaBlueprint(Blueprint):
                def create_template(self):
                    code = self.context.hook_data['lambda']['MyFunction']

                    self.template.add_resource(
                        Function(
                            'MyFunction',
                            Code=code,
                            Handler='my_function.handler',
                            Role='...',
                            Runtime='python2.7'
                        )
                    )
    """
    custom_bucket = kwargs.get('bucket')
    if not custom_bucket:
        bucket_name = context.bucket_name
        logger.info("lambda: using default bucket from stacker: %s",
                    bucket_name)
    else:
        bucket_name = custom_bucket
        logger.info("lambda: using custom bucket: %s", bucket_name)

    custom_bucket_region = kwargs.get("bucket_region")
    if not custom_bucket and custom_bucket_region:
        raise ValueError("Cannot specify `bucket_region` without specifying "
                         "`bucket`.")

    bucket_region = select_bucket_region(
        custom_bucket,
        custom_bucket_region,
        context.config.stacker_bucket_region,
        provider.region
    )

    # Check if we should walk / follow symlinks
    follow_symlinks = kwargs.get('follow_symlinks', False)
    if not isinstance(follow_symlinks, bool):
        raise ValueError('follow_symlinks option must be a boolean')

    # Check for S3 object acl. Valid values from:
    # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
    payload_acl = kwargs.get('payload_acl', 'private')

    # Always use the global client for s3
    session = get_session(bucket_region)
    s3_client = session.client('s3')

    ensure_s3_bucket(s3_client, bucket_name, bucket_region)

    prefix = kwargs.get('prefix', '')

    results = {}
    for name, options in kwargs['functions'].items():
        results[name] = _upload_function(s3_client, bucket_name, prefix, name,
                                         options, follow_symlinks, payload_acl)

    return results
Example #43
 def setUp(self):
     region = "us-east-1"
     self.session = get_session(region=region)
     self.provider = Provider(
         self.session, interactive=True, recreate_failed=True)
     self.stubber = Stubber(self.provider.cloudformation)
Example #44
    def fetch_s3_package(self, config):
        """Make a remote S3 archive available for local use.

        Args:
            config (dict): s3 config dictionary

        """
        extractor_map = {'.tar.gz': TarGzipExtractor,
                         '.tar': TarExtractor,
                         '.zip': ZipExtractor}
        extractor = None
        for suffix, klass in extractor_map.items():
            if config['key'].endswith(suffix):
                extractor = klass()
                logger.debug("Using extractor %s for S3 object \"%s\" in "
                             "bucket %s.",
                             klass.__name__,
                             config['key'],
                             config['bucket'])
                dir_name = self.sanitize_uri_path(
                    "s3-%s-%s" % (config['bucket'],
                                  config['key'][:-len(suffix)])
                )
                break

        if extractor is None:
            raise ValueError(
                "Archive type could not be determined for S3 object \"%s\" "
                "in bucket %s." % (config['key'], config['bucket'])
            )

        session = get_session(region=None)
        extra_s3_args = {}
        if config.get('requester_pays', False):
            extra_s3_args['RequestPayer'] = 'requester'

        # We can skip downloading the archive if it's already been cached
        if config.get('use_latest', True):
            try:
                # LastModified should always be returned in UTC, but it doesn't
                # hurt to explicitly convert it to UTC again just in case
                modified_date = session.client('s3').head_object(
                    Bucket=config['bucket'],
                    Key=config['key'],
                    **extra_s3_args
                )['LastModified'].astimezone(dateutil.tz.tzutc())
            except botocore.exceptions.ClientError as client_error:
                logger.error("Error checking modified date of "
                             "s3://%s/%s : %s",
                             config['bucket'],
                             config['key'],
                             client_error)
                sys.exit(1)
            dir_name += "-%s" % modified_date.strftime(self.ISO8601_FORMAT)
        cached_dir_path = os.path.join(self.package_cache_dir, dir_name)
        if not os.path.isdir(cached_dir_path):
            logger.debug("Remote package s3://%s/%s does not appear to have "
                         "been previously downloaded - starting download and "
                         "extraction to %s",
                         config['bucket'],
                         config['key'],
                         cached_dir_path)
            tmp_dir = tempfile.mkdtemp(prefix='stacker')
            tmp_package_path = os.path.join(tmp_dir, dir_name)
            try:
                extractor.set_archive(os.path.join(tmp_dir, dir_name))
                logger.debug("Starting remote package download from S3 to %s "
                             "with extra S3 options \"%s\"",
                             extractor.archive,
                             str(extra_s3_args))
                session.resource('s3').Bucket(config['bucket']).download_file(
                    config['key'],
                    extractor.archive,
                    ExtraArgs=extra_s3_args
                )
                logger.debug("Download complete; extracting downloaded "
                             "package to %s",
                             tmp_package_path)
                extractor.extract(tmp_package_path)
                logger.debug("Moving extracted package directory %s to the "
                             "Stacker cache at %s",
                             dir_name,
                             self.package_cache_dir)
                shutil.move(tmp_package_path, self.package_cache_dir)
            finally:
                shutil.rmtree(tmp_dir)
        else:
            logger.debug("Remote package s3://%s/%s appears to have "
                         "been previously downloaded to %s -- bypassing "
                         "download",
                         config['bucket'],
                         config['key'],
                         cached_dir_path)

        # Update sys.path & merge in remote configs (if necessary)
        self.update_paths_and_config(config=config,
                                     pkg_dir_name=dir_name)
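The suffix-to-extractor dispatch at the top of fetch_s3_package, isolated with made-up values:

extractor_map = {'.tar.gz': 'TarGzipExtractor',
                 '.tar': 'TarExtractor',
                 '.zip': 'ZipExtractor'}
key = 'pkgs/app-1.2.3.tar.gz'  # hypothetical S3 key
suffix = next(s for s in extractor_map if key.endswith(s))
assert suffix == '.tar.gz'
dir_name = 's3-mybucket-%s' % key[:-len(suffix)]  # cache directory stem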