def package_template(session, args):
    print('\nPackaging...')
    client = session.create_client('s3')
    config = utils.read_json(args.config)
    s3_prefix = args.s3_prefix or conventions.generate_stack_name(
        config['Parameters'])

    try:
        s3_uploader = S3Uploader(client, args.s3_bucket,
                                 aws.get_region(session), s3_prefix,
                                 args.kms_key_id, False)
        template = Template(args.template, os.getcwd(), s3_uploader)
        exported_template = template.export()
        exported_template_yaml = yaml_dump(exported_template)
    except exceptions.ExportFailedError as ex:
        if template_has_resources_to_upload_to_s3(
                template) and not args.s3_bucket:
            logging.error(
                'The template contains resources to upload, please provide an S3 Bucket (--s3-bucket).'
            )
        else:
            logging.error(ex)
        sys.exit(1)

    logging.info(exported_template_yaml)
    print('Done.')
    return exported_template_yaml
Example #2
    def _run_main(self, parsed_args, parsed_globals):
        s3_client = self._session.create_client(
            "s3",
            config=Config(signature_version='s3v4'),
            region_name=parsed_globals.region,
            verify=parsed_globals.verify_ssl)

        template_path = parsed_args.template_file
        if not os.path.isfile(template_path):
            raise exceptions.InvalidTemplatePathError(
                template_path=template_path)

        bucket = parsed_args.s3_bucket

        self.s3_uploader = S3Uploader(s3_client, bucket, parsed_args.s3_prefix,
                                      parsed_args.kms_key_id,
                                      parsed_args.force_upload)
        # attach the given metadata to the artifacts to be uploaded
        self.s3_uploader.artifact_metadata = parsed_args.metadata

        output_file = parsed_args.output_template_file
        use_json = parsed_args.use_json
        exported_str = self._export(template_path, use_json)

        sys.stdout.write("\n")
        self.write_output(output_file, exported_str)

        if output_file:
            msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format(
                output_file_name=output_file,
                output_file_path=os.path.abspath(output_file))
            sys.stdout.write(msg)

        sys.stdout.flush()
        return 0
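
write_output is not shown in this snippet; a minimal sketch of what it plausibly does, assuming the convention that a missing output file means "print the packaged template to stdout" (that behavior is an assumption, not confirmed by the snippet):

import sys

def write_output(output_file, data):
    # With no output file, the packaged template goes to stdout;
    # otherwise it is written to the named file.
    if output_file is None:
        sys.stdout.write(data)
        return
    with open(output_file, "w") as fp:
        fp.write(data)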
Example #3
    def test_artifact_metadata_invalid_type(self):
        prefix = "SomePrefix"
        s3uploader = S3Uploader(self.s3client, self.bucket_name, prefix, None,
                                False, self.transfer_manager_mock)
        invalid_metadata = ["key", "val"]
        with self.assertRaises(TypeError):
            s3uploader.artifact_metadata = invalid_metadata
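
The TypeError this test expects implies that artifact_metadata is a validating property. A minimal sketch of such a setter, assuming only dict-like metadata is accepted; the class name and error message are illustrative:

class UploaderMetadataSketch(object):
    def __init__(self):
        self._artifact_metadata = None

    @property
    def artifact_metadata(self):
        return self._artifact_metadata

    @artifact_metadata.setter
    def artifact_metadata(self, val):
        # Reject anything that is not dict-like, which is exactly what
        # test_artifact_metadata_invalid_type relies on.
        if val is not None and not isinstance(val, dict):
            raise TypeError("Artifact metadata should be a dict")
        self._artifact_metadata = val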
Example #4
    def test_upload_successful_odict(self, progress_percentage_mock,
                                     get_size_patch):
        file_name = "filename"
        remote_path = "remotepath"
        prefix = "SomePrefix"
        remote_path_with_prefix = "{0}/{1}".format(prefix, remote_path)
        s3uploader = S3Uploader(self.s3client, self.bucket_name, prefix, None,
                                False, self.transfer_manager_mock)
        expected_upload_url = "s3://{0}/{1}/{2}".format(
            self.bucket_name, prefix, remote_path)

        # Setup mock to fake that file does not exist
        s3uploader.file_exists = Mock()
        s3uploader.file_exists.return_value = False
        # set the metadata used by the uploader when uploading
        artifact_metadata = OrderedDict({"key": "val"})
        s3uploader.artifact_metadata = artifact_metadata

        upload_url = s3uploader.upload(file_name, remote_path)
        self.assertEqual(expected_upload_url, upload_url)

        expected_extra_args = {
            # expected encryption args
            "ServerSideEncryption": "AES256",
            # expected metadata
            "Metadata": artifact_metadata
        }
        self.transfer_manager_mock.upload.assert_called_once_with(
            file_name, self.bucket_name, remote_path_with_prefix,
            expected_extra_args, mock.ANY)
        s3uploader.file_exists.assert_called_once_with(remote_path_with_prefix)
Example #5
    def test_upload_successful_custom_kms_key(self, progress_percentage_mock,
                                              get_size_patch):
        file_name = "filename"
        remote_path = "remotepath"
        kms_key_id = "kms_id"
        expected_upload_url = "s3://{0}/{1}".format(self.bucket_name,
                                                    remote_path)
        # Set KMS Key Id
        self.s3uploader = S3Uploader(self.s3client, self.bucket_name,
                                     self.prefix, kms_key_id, False,
                                     self.transfer_manager_mock)

        # Setup mock to fake that file does not exist
        self.s3uploader.file_exists = Mock()
        self.s3uploader.file_exists.return_value = False

        upload_url = self.s3uploader.upload(file_name, remote_path)
        self.assertEqual(expected_upload_url, upload_url)

        expected_encryption_args = {
            "ServerSideEncryption": "aws:kms",
            "SSEKMSKeyId": kms_key_id
        }
        self.transfer_manager_mock.upload.assert_called_once_with(
            file_name, self.bucket_name, remote_path, expected_encryption_args,
            mock.ANY)
        self.s3uploader.file_exists.assert_called_once_with(remote_path)
Example #6
    def test_file_exists(self):
        key = "some/path"
        expected_params = {"Bucket": self.bucket_name, "Key": key}
        response = {
            "AcceptRanges": "bytes",
            "ContentType": "text/html",
            "LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
            "ContentLength": 77,
            "VersionId": "null",
            "ETag": "\"30a6ec7e1a9ad79c203d05a589c8b400\"",
            "Metadata": {}
        }

        # Let's pretend file exists
        self.s3client_stub.add_response("head_object", response,
                                        expected_params)

        with self.s3client_stub:
            self.assertTrue(self.s3uploader.file_exists(key))

        # Let's pretend file does not exist
        self.s3client_stub.add_client_error('head_object', "ClientError",
                                            "some error")
        with self.s3client_stub:
            self.assertFalse(self.s3uploader.file_exists(key))

        # Let's pretend some other unknown exception happened
        s3mock = Mock()
        uploader = S3Uploader(s3mock, self.bucket_name)
        s3mock.head_object = Mock()
        s3mock.head_object.side_effect = RuntimeError()

        with self.assertRaises(RuntimeError):
            uploader.file_exists(key)
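
Taken together, the three stubbed scenarios pin down a file_exists contract: HeadObject success means the key exists, a ClientError means it does not, and anything else propagates. A minimal sketch of that contract, assuming a botocore S3 client; the standalone function form is illustrative:

from botocore.exceptions import ClientError

def file_exists(s3_client, bucket_name, key):
    # Non-ClientError exceptions (the RuntimeError case above) are not
    # caught here and propagate to the caller unchanged.
    try:
        s3_client.head_object(Bucket=bucket_name, Key=key)
        return True
    except ClientError:
        # Any client error (404, 403, ...) is treated as "does not exist".
        return False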
Example #7
    def test_upload_force_upload(self, progress_percentage_mock,
                                 get_size_patch):
        file_name = "filename"
        remote_path = "remotepath"
        expected_upload_url = "s3://{0}/{1}".format(self.bucket_name,
                                                    remote_path)

        # Set ForceUpload = True
        self.s3uploader = S3Uploader(self.s3client, self.bucket_name,
                                     self.prefix, None, True,
                                     self.transfer_manager_mock)

        # Pretend file already exists
        self.s3uploader.file_exists = Mock()
        self.s3uploader.file_exists.return_value = True

        # Because we forced the upload, this should re-upload even if the file exists
        upload_url = self.s3uploader.upload(file_name, remote_path)
        self.assertEqual(expected_upload_url, upload_url)

        expected_encryption_args = {"ServerSideEncryption": "AES256"}
        self.transfer_manager_mock.upload.assert_called_once_with(
            file_name, self.bucket_name, remote_path, expected_encryption_args,
            mock.ANY)

        # Since ForceUpload=True, we should NEVER do the file-exists check
        self.s3uploader.file_exists.assert_not_called()
Example #8
    def test_to_path_style_s3_url_other_regions(self):
        key = "path/to/file"
        version = "someversion"
        region = "us-west-2"

        s3uploader = S3Uploader(self.s3client, self.bucket_name, region)
        result = s3uploader.to_path_style_s3_url(key, version)
        self.assertEqual(
            result,
            "https://s3-{0}.amazonaws.com/{1}/{2}?versionId={3}".format(
                region, self.bucket_name, key, version))

        # Without versionId, that query parameter should be omitted
        s3uploader = S3Uploader(self.s3client, self.bucket_name, region)
        result = s3uploader.to_path_style_s3_url(key)
        self.assertEqual(
            result, "https://s3-{0}.amazonaws.com/{1}/{2}".format(
                region, self.bucket_name, key))
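
This test and its us-east-1 counterpart below describe a single construction rule for path-style URLs. A minimal sketch under the legacy s3-<region> endpoint naming these tests assume; the standalone signature is illustrative:

def to_path_style_s3_url(bucket_name, key, region=None, version=None):
    # us-east-1 (or an unset region) uses the global endpoint; every
    # other region uses the legacy region-qualified host name.
    if region is None or region == "us-east-1":
        base = "https://s3.amazonaws.com"
    else:
        base = "https://s3-{0}.amazonaws.com".format(region)
    url = "{0}/{1}/{2}".format(base, bucket_name, key)
    # The versionId query parameter is appended only when a version is given.
    if version:
        url = "{0}?versionId={1}".format(url, version)
    return url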
Example #9
    def _run_main(self, parsed_args, parsed_globals):
        cloudformation_client = \
            self._session.create_client(
                    'cloudformation', region_name=parsed_globals.region,
                    endpoint_url=parsed_globals.endpoint_url,
                    verify=parsed_globals.verify_ssl)

        template_path = parsed_args.template_file
        if not os.path.isfile(template_path):
            raise exceptions.InvalidTemplatePathError(
                    template_path=template_path)

        # Parse parameters
        with open(template_path, "r") as handle:
            template_str = handle.read()

        stack_name = parsed_args.stack_name
        parameter_overrides = self.parse_key_value_arg(
                parsed_args.parameter_overrides,
                self.PARAMETER_OVERRIDE_CMD)

        tags_dict = self.parse_key_value_arg(parsed_args.tags, self.TAGS_CMD)
        tags = [{"Key": key, "Value": value}
                for key, value in tags_dict.items()]

        template_dict = yaml_parse(template_str)

        parameters = self.merge_parameters(template_dict, parameter_overrides)

        template_size = os.path.getsize(parsed_args.template_file)
        # CloudFormation limits inline template bodies to 51,200 bytes;
        # larger templates must be uploaded to S3 first
        if template_size > 51200 and not parsed_args.s3_bucket:
            raise exceptions.DeployBucketRequiredError()

        bucket = parsed_args.s3_bucket
        if bucket:
            s3_client = self._session.create_client(
                "s3",
                config=Config(signature_version='s3v4'),
                region_name=parsed_globals.region,
                verify=parsed_globals.verify_ssl)

            s3_uploader = S3Uploader(s3_client,
                                     bucket,
                                     parsed_args.s3_prefix,
                                     parsed_args.kms_key_id,
                                     parsed_args.force_upload)
        else:
            s3_uploader = None

        deployer = Deployer(cloudformation_client)
        return self.deploy(deployer, stack_name, template_str,
                           parameters, parsed_args.capabilities,
                           parsed_args.execute_changeset, parsed_args.role_arn,
                           parsed_args.notification_arns, s3_uploader,
                           tags,
                           parsed_args.fail_on_empty_changeset)
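
merge_parameters is not shown here; a plausible sketch of what it returns, assuming CloudFormation's standard parameter shape and that parameters without an override reuse their previous value (both assumptions, not confirmed by this snippet):

def merge_parameters(template_dict, parameter_overrides):
    parameters = []
    for key in template_dict.get("Parameters", {}):
        if key in parameter_overrides:
            # Command-line overrides win
            parameters.append({"ParameterKey": key,
                               "ParameterValue": parameter_overrides[key]})
        else:
            # Fall back to the value already set on the stack
            parameters.append({"ParameterKey": key,
                               "UsePreviousValue": True})
    return parameters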
Example #10
    def test_to_path_style_s3_url_us_east_1(self):
        key = "path/to/file"
        version = "someversion"
        region = "us-east-1"
        self._construct_uploader(region)

        s3uploader = S3Uploader(self.s3client, self.bucket_name, region)
        result = s3uploader.to_path_style_s3_url(key, version)
        self.assertEqual(
            result, "https://s3.amazonaws.com/{0}/{1}?versionId={2}".format(
                self.bucket_name, key, version))

        # Without versionId, that query parameter should be omitted
        s3uploader = S3Uploader(self.s3client, self.bucket_name, region)
        result = s3uploader.to_path_style_s3_url(key)
        self.assertEqual(
            result,
            "https://s3.amazonaws.com/{0}/{1}".format(self.bucket_name, key))
Example #11
    def _construct_uploader(self, region):
        self.s3client = botocore.session.get_session().create_client(
            's3', region_name=region)
        self.s3client_stub = Stubber(self.s3client)
        self.transfer_manager_mock = Mock(spec=S3Transfer)
        self.transfer_manager_mock.upload = Mock()
        self.bucket_name = "bucketname"
        self.prefix = None

        self.s3uploader = S3Uploader(self.s3client, self.bucket_name,
                                     self.prefix, None, False,
                                     self.transfer_manager_mock)
Example #12
    def setUp(self):
        self.s3client = botocore.session.get_session().create_client(
            's3', region_name="us-east-1")
        self.s3client_stub = Stubber(self.s3client)
        self.transfer_manager_mock = Mock(spec=S3Transfer)
        self.transfer_manager_mock.upload = Mock()
        self.bucket_name = "bucketname"
        self.prefix = None
        self.region = "us-east-1"

        self.s3uploader = S3Uploader(self.s3client, self.bucket_name,
                                     self.region, self.prefix, None, False,
                                     self.transfer_manager_mock)
Example #13
    def _run_main(self, parsed_args, parsed_globals):
        self.region = self.get_and_validate_region(parsed_globals)
        self.s3_client = self._session.create_client(
            's3',
            region_name=self.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl
        )
        self.s3_uploader = S3Uploader(self.s3_client,
                                      parsed_args.bucket_name,
                                      self.region,
                                      force_upload=True)
        self.s3_uploader.upload(parsed_args.file_path,
                                get_s3_path(parsed_args.file_path))
Example #14
    def _run_main(self, parsed_args, parsed_globals):
        self.region = self.get_and_validate_region(parsed_globals)
        self.s3_client = self._session.create_client(
            's3',
            region_name=self.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl)
        self.s3_uploader = S3Uploader(self.s3_client,
                                      parsed_args.bucket_name,
                                      force_upload=True)
        try:
            self.s3_uploader.upload(parsed_args.file_path,
                                    get_s3_path(parsed_args.file_path))
        except OSError as ex:
            # Chain the original error so the root cause is preserved
            raise RuntimeError(
                "%s cannot be found" % parsed_args.file_path) from ex
Example #15
    def test_upload_successful(self, progress_percentage_mock, get_size_patch):
        file_name = "filename"
        remote_path = "remotepath"
        prefix = "SomePrefix"
        remote_path_with_prefix = "{0}/{1}".format(prefix, remote_path)
        s3uploader = S3Uploader(self.s3client, self.bucket_name, self.region,
                                prefix, None, False,
                                self.transfer_manager_mock)
        expected_upload_url = "s3://{0}/{1}/{2}".format(
            self.bucket_name, prefix, remote_path)

        # Setup mock to fake that file does not exist
        s3uploader.file_exists = Mock()
        s3uploader.file_exists.return_value = False

        upload_url = s3uploader.upload(file_name, remote_path)
        self.assertEqual(expected_upload_url, upload_url)

        expected_encryption_args = {"ServerSideEncryption": "AES256"}
        self.transfer_manager_mock.upload.assert_called_once_with(
            file_name, self.bucket_name, remote_path_with_prefix,
            expected_encryption_args, mock.ANY)
        s3uploader.file_exists.assert_called_once_with(remote_path_with_prefix)
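
The upload tests above (prefix joining, AES256 vs. aws:kms encryption, force_upload, artifact metadata) jointly describe the upload flow. A condensed sketch of that flow, written as a free function over an uploader object; the attribute names are illustrative:

def upload(uploader, file_name, remote_path):
    if uploader.prefix:
        remote_path = "{0}/{1}".format(uploader.prefix, remote_path)
    # The existence check is skipped entirely when force_upload is set,
    # which is what assert_not_called() verifies above.
    if not uploader.force_upload and uploader.file_exists(remote_path):
        return "s3://{0}/{1}".format(uploader.bucket_name, remote_path)
    if uploader.kms_key_id:
        extra_args = {"ServerSideEncryption": "aws:kms",
                      "SSEKMSKeyId": uploader.kms_key_id}
    else:
        extra_args = {"ServerSideEncryption": "AES256"}
    if uploader.artifact_metadata:
        extra_args["Metadata"] = uploader.artifact_metadata
    # The final positional argument is the progress-subscriber list the
    # tests match with mock.ANY; elided here.
    uploader.transfer_manager.upload(file_name, uploader.bucket_name,
                                     remote_path, extra_args, None)
    return "s3://{0}/{1}".format(uploader.bucket_name, remote_path)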
Example #16
def package_template(ppt,
                     session,
                     template_path,
                     bucket_region,
                     bucket_name=None,
                     prefix=None,
                     kms_key_id=None):
    # validate template path
    if not os.path.isfile(template_path):
        raise ConfigError('Invalid Template Path "%s"' % template_path)

    # if bucket name is not provided, create a default bucket with name
    # awscfncli-{AWS::AccountId}-{AWS::Region}
    if bucket_name is None:
        sts = session.client('sts')
        account_id = sts.get_caller_identity()["Account"]
        bucket_name = 'awscfncli-%s-%s' % (account_id, bucket_region)
        ppt.secho('Using default artifact bucket s3://{}'.format(bucket_name))
    else:
        ppt.secho(
            'Using specified artifact bucket s3://{}'.format(bucket_name))

    s3_client = session.client('s3')

    # create bucket if not exists
    try:
        s3_client.head_bucket(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] == '404':
            if bucket_region != 'us-east-1':
                s3_client.create_bucket(Bucket=bucket_name,
                                        CreateBucketConfiguration={
                                            'LocationConstraint': bucket_region
                                        })
            else:
                s3_client.create_bucket(Bucket=bucket_name)
            ppt.secho('Created artifact bucket {}'.format(bucket_name))
        else:
            raise e

    try:
        s3_uploader = S3Uploader(s3_client,
                                 bucket_name,
                                 bucket_region,
                                 prefix,
                                 kms_key_id,
                                 force_upload=False)
    except TypeError:
        # HACK: since awscli 1.16.145+ the bucket region parameter is removed
        s3_uploader = S3Uploader(s3_client,
                                 bucket_name,
                                 prefix,
                                 kms_key_id,
                                 force_upload=False)

    template = Template(template_path,
                        os.getcwd(),
                        s3_uploader,
                        resources_to_export=EXPORTS)

    exported_template = template.export()

    ppt.secho(
        'Successfully packaged artifacts and '
        'uploaded to s3://{bucket_name}.'.format(bucket_name=bucket_name),
        fg='green')

    template_body = yaml_dump(exported_template)

    # measure the size in bytes; utf-8 avoids a UnicodeEncodeError when the
    # template contains non-ASCII characters
    template_data = template_body.encode('utf-8')
    if len(template_data) <= TEMPLATE_BODY_SIZE_LIMIT:
        template_url = None
    else:
        ppt.secho('Template body is too large, uploading as artifact.',
                  fg='red')
        with tempfile.NamedTemporaryFile(mode='wb') as fp:
            # write template body to local temp file
            fp.write(template_data)
            fp.flush()
            # upload to s3
            template_location = s3_uploader.upload_with_dedup(
                fp.name, extension='template.json')
            ppt.secho('Template uploaded to %s' % template_location)

        # extract the S3 object key; upload_with_dedup() returns s3://bucket/key
        template_key = template_location.replace('s3://%s/' % bucket_name, '')
        # generate a pre-signed url for CloudFormation as the object in S3
        # is private by default
        template_url = s3_client.generate_presigned_url(
            ClientMethod='get_object',
            Params={
                'Bucket': bucket_name,
                'Key': template_key
            },
            ExpiresIn=3600)

    return template_body, template_url
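
A hedged usage sketch for the function above: EchoPrinter stands in for whatever ppt object callers pass (only a secho() method is assumed, matching the calls in the function body), and the session is a boto3 session.

import boto3

class EchoPrinter(object):
    # Minimal stand-in for the ppt argument; real callers likely pass a
    # click-based printer exposing the same secho() interface.
    def secho(self, message, fg=None):
        print(message)

session = boto3.session.Session()
template_body, template_url = package_template(
    EchoPrinter(), session, 'template.yaml', bucket_region='us-east-1')

# CloudFormation accepts either TemplateBody or TemplateURL, so the caller
# uses whichever of the two return values is not None.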