Example #1
def rollback(options):
    """Rollback the current production AWS Lambda version by 1

    Notes:
        Ignores functions whose production version is $LATEST
        Only rolls back if the published version is greater than 1
    """
    clusters = CONFIG.clusters()
    if options.processor == 'all':
        lambda_functions = {'rule_processor', 'alert_processor'}
    else:
        lambda_functions = {'{}_processor'.format(options.processor)}

    for cluster in clusters:
        for lambda_function in lambda_functions:
            stream_alert_key = CONFIG['clusters'][cluster]['modules']['stream_alert']
            current_vers = stream_alert_key[lambda_function]['current_version']
            if current_vers != '$LATEST':
                current_vers = int(current_vers)
                if current_vers > 1:
                    new_vers = current_vers - 1
                    stream_alert_key[lambda_function]['current_version'] = new_vers
                    CONFIG.write()

    targets = ['module.stream_alert_{}'.format(x)
               for x in CONFIG.clusters()]

    terraform_generate(config=CONFIG)
    tf_runner(targets=targets)
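A minimal usage sketch for the rollback function above. The namedtuple is a hypothetical stand-in for the argparse options object (the same pattern the terraform handler examples below use for deploy); the real CLI passes the parsed Namespace.

from collections import namedtuple

# Hypothetical stand-in for the argparse Namespace; rollback() only reads
# the 'processor' attribute
RollbackOptions = namedtuple('RollbackOptions', ['processor'])

# Roll back both processors in every cluster
rollback(RollbackOptions(processor='all'))

# Roll back only the alert processor
rollback(RollbackOptions(processor='alert'))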
Example #2
def lambda_handler(options):
    """Handle all Lambda CLI operations"""

    if options.subcommand == 'deploy':
        # Make sure the Terraform code is up to date
        terraform_generate(config=CONFIG)
        deploy(options)

    elif options.subcommand == 'rollback':
        # Make sure the Terraform code is up to date
        terraform_generate(config=CONFIG)
        rollback(options)

    elif options.subcommand == 'test':
        stream_alert_test(options)
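A sketch of how the options object consumed by lambda_handler might be built with argparse; this parser layout is an assumption for illustration, not the project's actual CLI wiring.

import argparse

# Hypothetical parser; attribute names mirror what lambda_handler reads
parser = argparse.ArgumentParser(prog='stream_alert_cli lambda')
subparsers = parser.add_subparsers(dest='subcommand')
for name in ('deploy', 'rollback', 'test'):
    sub = subparsers.add_parser(name)
    sub.add_argument('--processor', choices=['rule', 'alert', 'all'],
                     default='all')

options = parser.parse_args(['deploy', '--processor', 'rule'])
lambda_handler(options)  # regenerates Terraform, then deploys the rule processor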
Example #3
def deploy(options):
    """Deploy new versions of both Lambda functions

    Steps:
    - build lambda deployment package
    - upload to S3
    - update variables.json with uploaded package hash/key
    - publish latest version
    - update variables.json with latest published version
    - terraform apply
    """
    processor = options.processor
    # terraform apply only to the module which contains our lambda functions
    targets = ['module.stream_alert_{}'.format(x) for x in CONFIG.clusters()]
    packages = []

    def publish_version(packages):
        """Publish Lambda versions"""
        for package in packages:
            LambdaVersion(config=CONFIG, package=package).publish_function()

    def deploy_rule_processor():
        """Create Rule Processor package and publish versions"""
        rule_package = RuleProcessorPackage(config=CONFIG,
                                            version=rule_processor_version)
        rule_package.create_and_upload()
        return rule_package

    def deploy_alert_processor():
        """Create Alert Processor package and publish versions"""
        alert_package = AlertProcessorPackage(config=CONFIG,
                                              version=alert_processor_version)
        alert_package.create_and_upload()
        return alert_package

    if processor == 'rule':
        packages.append(deploy_rule_processor())

    elif processor == 'alert':
        packages.append(deploy_alert_processor())

    elif processor == 'all':
        packages.append(deploy_rule_processor())
        packages.append(deploy_alert_processor())

    # update the source code in $LATEST
    if not tf_runner(targets=targets):
        sys.exit(1)

    # TODO(jack) write integration test to verify newly updated function

    # create production version by running a second time
    publish_version(packages)
    # after the version is published and the config is written, generate the files
    # to ensure the alias is properly updated
    if not terraform_generate(config=CONFIG):
        return
    # apply the changes from publishing
    tf_runner(targets=targets)
Example #4
def terraform_handler(options):
    """Handle all Terraform CLI operations"""
    # Verify terraform is installed
    if not terraform_check():
        return
    # Use a named tuple to match the 'processor' attribute in the argparse options
    deploy_opts = namedtuple('DeployOptions', ['processor'])

    # Plan and Apply our streamalert infrastructure
    if options.subcommand == 'build':
        # Generate Terraform files
        if not terraform_generate(config=CONFIG):
            return
        # Target is for terraforming a specific streamalert module.
        # This value is passed as a list
        if options.target:
            targets = ['module.{}_{}'.format(target, cluster)
                       for cluster in CONFIG.clusters()
                       for target in options.target]
            tf_runner(targets=targets)
        else:
            tf_runner()

    # generate terraform files
    elif options.subcommand == 'generate':
        if not terraform_generate(config=CONFIG):
            return

    elif options.subcommand == 'init-backend':
        run_command(['terraform', 'init'])

    # initialize streamalert infrastructure from a blank state
    elif options.subcommand == 'init':
        LOGGER_CLI.info('Initializing StreamAlert')

        # generate init Terraform files
        if not terraform_generate(config=CONFIG, init=True):
            return

        LOGGER_CLI.info('Initializing Terraform')
        if not run_command(['terraform', 'init']):
            sys.exit(1)

        # build init infrastructure
        LOGGER_CLI.info('Building Initial Infrastructure')
        init_targets = [
            'aws_s3_bucket.lambda_source',
            'aws_s3_bucket.logging_bucket',
            'aws_s3_bucket.stream_alert_secrets',
            'aws_s3_bucket.terraform_remote_state',
            'aws_s3_bucket.streamalerts',
            'aws_kms_key.stream_alert_secrets',
            'aws_kms_alias.stream_alert_secrets'
        ]
        if not tf_runner(targets=init_targets):
            LOGGER_CLI.error('An error occurred while running StreamAlert init')
            sys.exit(1)

        # generate the main.tf with remote state enabled
        LOGGER_CLI.info('Configuring Terraform Remote State')
        if not terraform_generate(config=CONFIG):
            return

        if not run_command(['terraform', 'init']):
            return

        LOGGER_CLI.info('Deploying Lambda Functions')
        # deploy both lambda functions
        deploy(deploy_opts('all'))
        # create all remainder infrastructure

        LOGGER_CLI.info('Building Remainder Infrastructure')
        tf_runner()

    elif options.subcommand == 'clean':
        terraform_clean()

    elif options.subcommand == 'destroy':
        if options.target:
            target = options.target
            targets = ['module.{}_{}'.format(target, cluster)
                       for cluster in CONFIG.clusters()]
            tf_runner(targets=targets, action='destroy')
            return

        # Migrate back to local state so Terraform can successfully
        # destroy the S3 bucket used by the backend.
        if not terraform_generate(config=CONFIG, init=True):
            return

        if not run_command(['terraform', 'init']):
            return

        # Destroy all of the infrastructure
        if not tf_runner(action='destroy'):
            return

        # Remove old Terraform files
        terraform_clean()

    # get a quick status on our declared infrastructure
    elif options.subcommand == 'status':
        status()
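In this revision options.target is treated as a list (the nested comprehension iterates over it), so one invocation can target several modules. A hedged invocation sketch, with a hypothetical namedtuple in place of the argparse Namespace and purely illustrative module names:

from collections import namedtuple

# Hypothetical container mirroring the attributes terraform_handler reads;
# the module names below are illustrative, not an exhaustive list
TerraformOptions = namedtuple('TerraformOptions', ['subcommand', 'target'])

# Apply only the named modules for every cluster
terraform_handler(TerraformOptions(subcommand='build',
                                   target=['kinesis', 'monitoring']))

# Bootstrap all infrastructure from a blank state (target is ignored here)
terraform_handler(TerraformOptions(subcommand='init', target=None))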
Example #5
def deploy(options):
    """Deploy new versions of all Lambda functions

    Steps:
    - Build AWS Lambda deployment package
    - Upload to S3
    - Update lambda.json with uploaded package checksum and S3 key
    - Publish new version
    - Update each cluster's Lambda configuration with latest published version
    - Run Terraform Apply
    """
    processor = options.processor
    # Terraform apply only to the module which contains our lambda functions
    targets = []
    packages = []

    def _publish_version(packages):
        """Publish Lambda versions"""
        for package in packages:
            if package.package_name == 'athena_partition_refresh':
                published = LambdaVersion(
                    config=CONFIG, package=package,
                    clustered_deploy=False).publish_function()
            else:
                published = LambdaVersion(config=CONFIG,
                                          package=package).publish_function()
            if not published:
                return False

        return True

    def _deploy_rule_processor():
        """Create Rule Processor package and publish versions"""
        rule_package = RuleProcessorPackage(config=CONFIG,
                                            version=rule_processor_version)
        rule_package.create_and_upload()
        return rule_package

    def _deploy_alert_processor():
        """Create Alert Processor package and publish versions"""
        alert_package = AlertProcessorPackage(config=CONFIG,
                                              version=alert_processor_version)
        alert_package.create_and_upload()
        return alert_package

    def _deploy_athena_partition_refresh():
        """Create Athena Partition Refresh package and publish"""
        athena_package = AthenaPackage(config=CONFIG,
                                       version=alert_processor_version)
        athena_package.create_and_upload()
        return athena_package

    if 'rule' in processor:
        targets.extend(
            ['module.stream_alert_{}'.format(x) for x in CONFIG.clusters()])

        packages.append(_deploy_rule_processor())

    if 'alert' in processor:
        targets.extend(
            ['module.stream_alert_{}'.format(x) for x in CONFIG.clusters()])

        packages.append(_deploy_alert_processor())

    if 'athena' in processor:
        targets.append('module.stream_alert_athena')

        packages.append(_deploy_athena_partition_refresh())

    if 'all' in processor:
        targets.extend(
            ['module.stream_alert_{}'.format(x) for x in CONFIG.clusters()])
        targets.append('module.stream_alert_athena')

        packages.append(_deploy_rule_processor())
        packages.append(_deploy_alert_processor())
        packages.append(_deploy_athena_partition_refresh())

    # Regenerate the Terraform configuration with the new S3 keys
    if not terraform_generate(config=CONFIG):
        return

    # Run Terraform: Update the Lambda source code in $LATEST
    if not tf_runner(targets=targets):
        sys.exit(1)

    # TODO(jack) write integration test to verify newly updated function

    # Publish a new production Lambda version
    if not _publish_version(packages):
        return

    # Regenerate the Terraform configuration with the new Lambda versions
    if not terraform_generate(config=CONFIG):
        return

    # Apply the changes to the Lambda aliases
    tf_runner(targets=targets)
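Unlike the earlier deploy in Example #3, the membership checks here ('rule' in processor) allow processor to be a collection of names, so several functions can be deployed in one run. A usage sketch with a hypothetical namedtuple standing in for the parsed options:

from collections import namedtuple

# Hypothetical stand-in for the argparse Namespace
DeployOptions = namedtuple('DeployOptions', ['processor'])

# Deploy the rule and alert processors, skipping the Athena function
deploy(DeployOptions(processor=['rule', 'alert']))

# Deploy everything, including the Athena partition refresh function
deploy(DeployOptions(processor=['all']))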
Example #6
def terraform_handler(options):
    """Handle all Terraform CLI operations"""
    # verify terraform is installed
    terraform_check()
    # use a named tuple to match the 'processor' attribute in the argparse options
    deploy_opts = namedtuple('DeployOptions', ['processor'])

    # plan/apply our streamalert infrastructure
    if options.subcommand == 'build':
        # Make sure the Terraform code is completely up to date
        terraform_generate(config=CONFIG)
        # --target is for terraforming a specific streamalert module
        if options.target:
            target = options.target
            targets = ['module.{}_{}'.format(target, cluster)
                       for cluster in CONFIG.clusters()]
            tf_runner(targets=targets)
        else:
            tf_runner()

    # generate terraform files
    elif options.subcommand == 'generate':
        terraform_generate(config=CONFIG)

    elif options.subcommand == 'init-backend':
        run_command(['terraform', 'init'])

    # initialize streamalert infrastructure from a blank state
    elif options.subcommand == 'init':
        LOGGER_CLI.info('Initializing StreamAlert')

        # generate init Terraform files
        if not terraform_generate(config=CONFIG, init=True):
            LOGGER_CLI.error('An error occurred while generating Terraform files')
            sys.exit(1)

        LOGGER_CLI.info('Initializing Terraform')
        if not run_command(['terraform', 'init']):
            sys.exit(1)

        # build init infrastructure
        LOGGER_CLI.info('Building Initial Infrastructure')
        init_targets = [
            'aws_s3_bucket.lambda_source',
            'aws_s3_bucket.integration_testing',
            'aws_s3_bucket.terraform_state',
            'aws_s3_bucket.stream_alert_secrets',
            'aws_s3_bucket.logging_bucket',
            'aws_kms_key.stream_alert_secrets',
            'aws_kms_alias.stream_alert_secrets'
        ]
        if not tf_runner(targets=init_targets):
            LOGGER_CLI.error('An error occurred while running StreamAlert init')
            sys.exit(1)

        # generate the main.tf with remote state enabled
        LOGGER_CLI.info('Configuring Terraform Remote State')
        terraform_generate(config=CONFIG)
        if not run_command(['terraform', 'init']):
            sys.exit(1)

        LOGGER_CLI.info('Deploying Lambda Functions')
        # deploy both lambda functions
        deploy(deploy_opts('all'))
        # create all remainder infrastructure

        LOGGER_CLI.info('Building Remainder Infrastructure')
        tf_runner()

    elif options.subcommand == 'destroy':
        if options.target:
            target = options.target
            targets = ['module.{}_{}'.format(target, cluster)
                       for cluster in CONFIG.clusters()]
            tf_runner(targets=targets, action='destroy')
            return

        # Migrate back to local state so Terraform can successfully
        # destroy the S3 bucket used by the backend.
        terraform_generate(config=CONFIG, init=True)
        if not run_command(['terraform', 'init']):
            sys.exit(1)

        # Destroy all of the infrastructure
        if not tf_runner(action='destroy'):
            sys.exit(1)

        # Remove old Terraform files
        LOGGER_CLI.info('Removing old Terraform files')
        cleanup_files = ['{}.tf'.format(cluster) for cluster in CONFIG.clusters()]
        cleanup_files.extend([
            'main.tf',
            'terraform.tfstate',
            'terraform.tfstate.backup'
        ])
        for tf_file in cleanup_files:
            file_to_remove = 'terraform/{}'.format(tf_file)
            if not os.path.isfile(file_to_remove):
                continue
            os.remove(file_to_remove)
        # Finally, delete the Terraform directory
        shutil.rmtree('terraform/.terraform/')

    # get a quick status on our declared infrastructure
    elif options.subcommand == 'status':
        status()