Example 1
def update_migrate(bosslet_config, config):
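    """Apply any migrations recorded as pending in the config's migration
    progress file, removing the progress file once all have been applied"""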
    migration_progress = constants.repo_path("cloud_formation", "configs", "migrations", config, "progress")

    if not os.path.exists(migration_progress):
        console.info("No migrations to apply")
        return

    with open(migration_progress, "r") as fh:
        cur_ver = int(fh.read())

    new_ver = CloudFormationConfiguration(config, bosslet_config).existing_version()

    migrations = MigrationManager(config, cur_ver, new_ver)
    if not migrations.has_migrations:
        console.info("No migrations to apply")
        os.remove(migration_progress)
        return

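    # Post-migration callback: checkpoint the last applied migration so an
    # interrupted run can resume from this point on the next invocation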
    def callback(migration_file):
        with open(migration_progress, 'w') as fh:
            fh.write(str(migration_file.stop))

    migrations.add_callback(post=callback)

    migrations.post_update(bosslet_config)

    os.remove(migration_progress)
Example 2
    def role_remove(self, resource):
        """Remove the referenced IAM Role

        Args:
            resource (str): Name of the IAM Role to remove
        """
        role = self.resource.Role(resource)
        try:
            role.load()
        except self.client.exceptions.NoSuchEntityException:
            console.info("Role {} doesn't exist".format(resource))
            return

        # Attached resources
        for policy in role.attached_policies.all():
            self.client.detach_role_policy(RoleName=resource,
                                           PolicyArn=policy.arn)

        for profile in role.instance_profiles.all():
            self.client.remove_role_from_instance_profile(
                InstanceProfileName=profile.name, RoleName=resource)

            self.client.delete_instance_profile(
                InstanceProfileName=profile.name)

        # TODO ??? Inline policies?

        # The role itself
        self.client.delete_role(RoleName=resource)
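
A minimal sketch of how the inline-policy TODO above could be handled, placed just before the delete_role call and using the documented boto3 IAM operations list_role_policies / delete_role_policy (pagination omitted):

        # Inline policies must be deleted before the role itself can be
        for name in self.client.list_role_policies(RoleName=resource)['PolicyNames']:
            self.client.delete_role_policy(RoleName=resource, PolicyName=name)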
Example 3
    def policy_remove(self, resource):
        """Remove the referenced IAM Policy

        Args:
            resource (str): Name of the IAM Policy to remove
        """
        arn = self.policy_lookup(resource)
        if arn is None:
            console.info("Policy {} doesn't exist".format(resource))
            return

        policy = self.resource.Policy(arn)

        # Attached resources
        for group in policy.attached_groups.all():
            self.client.detach_group_policy(GroupName=group.name,
                                            PolicyArn=arn)

        for role in policy.attached_roles.all():
            self.client.detach_role_policy(RoleName=role.name, PolicyArn=arn)

        for user in policy.attached_users.all():
            self.client.detach_user_policy(UserName=user.name, PolicyArn=arn)

        # Non-default versions
        resp = self.client.list_policy_versions(PolicyArn=arn)
        for version in resp['Versions']:
            if not version['IsDefaultVersion']:
                self.client.delete_policy_version(
                    PolicyArn=arn, VersionId=version['VersionId'])

        # The policy itself
        self.client.delete_policy(PolicyArn=arn)
Example 4
def freshen_lambda(bosslet_config, lambda_name):
    """
    Tell a lambda to reload its code from S3.

    Useful during development when small changes need to be made to a lambda
    function, but a full rebuild of the entire zip file isn't required.
    """
    lambda_dir = lambda_dirs(bosslet_config)[lambda_name]
    lambda_config = load_lambda_config(lambda_dir)

    zip_name = code_zip(bosslet_config, lambda_config)

    client = bosslet_config.session.client('lambda')
    resp = client.update_function_code(
        FunctionName=lambda_name,
        S3Bucket=bosslet_config.LAMBDA_BUCKET,
        S3Key=zip_name,
        Publish=True)
    console.info("Updated {} function code".format(lambda_name))

    if lambda_config.get('layers'):
        layer_arns = get_layer_arns(bosslet_config, lambda_config['layers'])
        resp = client.update_function_configuration(FunctionName=lambda_name,
                                                    Layers=layer_arns)
        console.info("Updated {} layer references".format(lambda_name))
Example 5
    def create(self):
        console.info("Creating {} SNS topic".format(self.topic))
        session = self.bosslet_config.session
        arn = aws.sns_create_topic(session, self.topic)
        if arn is None:
            console.fail("Could not create {} SNS topic".format(self.topic))
            return False
        return True
Example 6
    def get_thresholds(self):
        try:
            thresholds = eval(self.bosslet_config.BILLING_THRESHOLDS)
            console.info("Creating {} billing alarms".format(len(thresholds)))
        except AttributeError: # Assume BILLING_THRESHOLDS is not provided
            console.error("Bosslet value 'BILLING_THRESHOLDS' needs to be defined before creating alarms")
            thresholds = None

        return thresholds
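
BILLING_THRESHOLDS is evidently a Python expression stored as a string in the bosslet configuration (for example "[1000, 5000, 10000]"; the exact format is an assumption here). If the configured value is always a plain literal, ast.literal_eval is a safer substitute for eval, since it parses literals without executing arbitrary code; a minimal sketch of the same method under that assumption (with import ast at module scope):

    def get_thresholds(self):
        try:
            # literal_eval accepts Python literals only; anything else
            # raises instead of executing, unlike eval
            thresholds = ast.literal_eval(self.bosslet_config.BILLING_THRESHOLDS)
            console.info("Creating {} billing alarms".format(len(thresholds)))
        except AttributeError: # Assume BILLING_THRESHOLDS is not provided
            console.error("Bosslet value 'BILLING_THRESHOLDS' needs to be defined before creating alarms")
            thresholds = None

        return thresholds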
Example 7
    def update_aws(self, resource_type, current, desired):
        """Compare loaded data against the current data in IAM and create or
        update IAM to reflect the loaded data

        Args:
            resource_type (str): One of - groups, roles, policies
            current (list[object]): List of objects loaded from IAM
            desired (list[object]): List of objects loaded from disk
        """
        key = {
            'groups': 'GroupName',
            'roles': 'RoleName',
            'policies': 'PolicyName',
        }[resource_type]

        lookup = {resource[key]: resource for resource in current}

        for resource in desired:
            resource_ = lookup.get(resource[key])
            if resource_ is None:  # Doesn't exist currently, create
                console.info("Creating {} {}".format(key[:-4], resource[key]))

                try:
                    if resource_type == 'groups':
                        self.group_create(resource)
                    elif resource_type == 'roles':
                        self.role_create(resource)
                    elif resource_type == 'policies':
                        self.policy_create(resource)
                except ClientError as ex:
                    if ex.response['Error']['Code'] == 'EntityAlreadyExists':
                        console.error(
                            "{} {} already exists, cannot load again.".format(
                                key[:-4], resource[key]))
                    else:
                        console.error("Problem creating {}: {}".format(
                            resource_type, resource[key]))
                        console.error("\tDetails: {}".format(str(ex)))
            else:  # Currently exists, compare and update
                console.info("Updating {} {}".format(key[:-4], resource[key]))

                if resource['Path'] != resource_['Path']:
                    console.warning(
                        "Paths differ for {} {}: '{}' != '{}'".format(
                            key[:-4], resource[key], resource['Path'],
                            resource_['Path']))
                    console.info(
                        "You will need to manually delete the old resource for the Path to be changed"
                    )
                    continue

                if resource_type == 'groups':
                    self.group_update(resource, resource_)
                elif resource_type == 'roles':
                    self.role_update(resource, resource_)
                elif resource_type == 'policies':
                    self.policy_update(resource, resource_)
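
The dispatch above is a standard reconciliation pass: index the current state by its name key, walk the desired state, create what is missing, and update what already exists. A stripped-down sketch of the same pattern, where create and update are hypothetical stand-ins for the group_/role_/policy_ methods:

    def reconcile(key, current, desired, create, update):
        lookup = {item[key]: item for item in current}
        for item in desired:
            existing = lookup.get(item[key])
            if existing is None:
                create(item)             # not present in AWS yet
            else:
                update(item, existing)   # present, diff and patch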
Example 8
    def group_remove(self, resource):
        """Remove the referenced IAM Group

        Args:
            resource (str): Name of the IAM Group to remove
        """
        group = self.resource.Group(resource)
        try:
            group.load()
        except self.client.exceptions.NoSuchEntityException:
            console.info("Group {} doesn't exist".format(resource))
            return

        # Attached resources
        for policy in group.attached_policies.all():
            self.client.detach_group_policy(GroupName=resource,
                                            PolicyArn=policy.arn)

        for policy in group.policies.all():
            self.client.delete_group_policy(GroupName=resource,
                                            PolicyName=policy.name)

        # The group itself
        self.client.delete_group(GroupName=resource)
Example 9
def build_dependency_graph(action, bosslet_config, modules):
    """
    Given the list of bossDB modules (CloudFormation Stacks) to be installed and
    the action to perform (ex: create, update, delete), build a dependency graph
    and order the modules so that dependencies are handled first (the order is
    reversed for deletes).

    Args:
        action (str): The action that will be performed (ex: create, update, delete)
        bosslet_config (BossConfiguration): bosslet_config based on a configuration file like hiderrt.boss or bossdb.boss
        modules (list[(str, module)]): List of tuples (cloudformation stack, module)

    Returns:
        list[(str, module)]: The input modules, reordered for the given action
    """
    class Node(object):
        """Directed Dependency Graph Node"""
        def __init__(self, name):
            self.name = name
            self.edges = []

        def __repr__(self):
            return "<Node: {}>".format(self.name)

        def depends_on(self, node):
            self.edges.append(node)

    def resolve(node, resolved, seen=None):
        """From a root node, add all sub elements and then the root"""
        if seen is None:
            seen = []
        seen.append(node)
        for edge in node.edges:
            if edge not in resolved:
                if edge in seen:
                    raise exceptions.CircularDependencyError(node.name, edge.name)
                resolve(edge, resolved, seen)
        resolved.append(node)

    nums = {} # Mapping of config to index in modules list
    nodes = {} # Mapping of config to Node
    no_deps = [] # List of configs that are not the target of a dependency
                 # meaning that they are the root of a dependency tree
    # Populate variables
    for i, (config, _) in enumerate(modules):
        nums[config] = i
        nodes[config] = Node(config)
        no_deps.append(config)

    # Look up the existing stacks so we can verify that all dependencies will
    # be satisfied (by either existing or being launched)
    existing = { k : v['StackStatus']
                 for k, v in aws.get_existing_stacks(bosslet_config).items() }

    stop = False
    for name, status in existing.items():
        if status.endswith('_IN_PROGRESS'):
            console.warning("Config '{}' is in progress".format(name))
            stop = True
        elif status.endswith('_FAILED'):
            if name not in nodes:
                console.fail("Config '{}' is failed and should be acted upon".format(name))
                stop = True
            else:
                if action == 'delete':
                    console.info("Config '{}' is failed, deleting".format(name))
                elif status == 'UPDATE_ROLLBACK_FAILED':
                    console.fail("Config '{}' needs to be manually resolved in the AWS console".format(name))
                    stop = True
                else: # CREATE, DELETE, or ROLLBACK FAILED
                    console.fail("Config '{}' is failed, needs to be deleted".format(name))
                    stop = True
    if stop:
        raise exceptions.BossManageError('Problems with existing stacks')

    # Create dependency graph and locate root nodes
    for config, module in modules:
        deps = module.__dict__.get('DEPENDENCIES')
        if deps is None:
            continue
        if isinstance(deps, str):
            deps = [deps]

        for dep in deps:
            if dep not in nodes:
                # dependency not part of configs to launch
                if dep not in existing and action == "create":
                    raise exceptions.MissingDependencyError(config, dep)
            else:
                # If action is update, post-init, or pre-init, verify that
                # the dependency already exists
                if action not in ('create', 'delete') and dep not in existing:
                    raise exceptions.MissingDependencyError(config, dep)

                nodes[config].depends_on(nodes[dep])

                try:
                    no_deps.remove(dep)
                except ValueError:
                    pass # dep was not in the no_deps list

    # Resolve dependency graph
    resolved = []
    for no_dep in no_deps: # Don't have any dependencies
        resolve(nodes[no_dep], resolved)

    # Reorder input
    reordered = [ modules[nums[node.name]] for node in resolved ]

    # Extra check
    if len(reordered) != len(modules):
        raise exceptions.CircularDependencyError()

    # Remove configs that don't need to be created
    if action == "create":
        for config, module in reordered[:]:
            if config in existing:
                # If the config already exists, don't try to create it again
                console.debug('Not executing create on {} as it already exists'.format(config))
                reordered.remove((config, module))

    # Remove configs that don't need to be deleted / updated / etc
    else:
        for config, module in reordered[:]:
            if config not in existing:
                # If the config doesn't exist, don't try to delete or update it
                console.debug("Not executing {} on {} as it doesn't exist".format(action, config))
                reordered.remove((config, module))

        if action == "delete": # Delete in reverse order
            reordered.reverse()

    # Make sure that the target configs are not currently being processed
    for config, module in reordered:
        if config in existing and existing[config].endswith("_IN_PROGRESS"):
            raise exceptions.DependencyInProgressError(config)

    return reordered
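
resolve() is a depth-first topological sort with cycle detection: a node is appended to resolved only after all of its dependencies, and revisiting a node already on the current seen path raises CircularDependencyError. A toy run of the same pattern, assuming Node and resolve were lifted to module scope:

    a, b, c = Node('a'), Node('b'), Node('c')
    a.depends_on(b)  # a requires b
    b.depends_on(c)  # b requires c

    order = []
    resolve(a, order)
    print([n.name for n in order])  # ['c', 'b', 'a'], dependencies first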
Example 10
    def role_update(self, resource, resource_):
        """Compare and potentially update the referenced IAM Role

        Args:
            resource (object): Desired IAM Role definition
            resource_ (object): Current IAM Role definition
        """
        policy = json_dumps(resource['AssumeRolePolicyDocument'])
        policy_ = json_dumps(resource_['AssumeRolePolicyDocument'])
        if policy != policy_:
            console.warning('Role policy document differs')
            self.iw.update_assume_role_policy(
                resource['RoleName'], resource['AssumeRolePolicyDocument'])

        lookup = {
            policy['PolicyName']: policy['PolicyDocument']
            for policy in resource_['RolePolicyList']
        }
        for policy in resource['RolePolicyList']:
            policy_ = lookup.get(policy['PolicyName'])
            if policy_ is None:
                self.iw.put_role_policy(resource['RoleName'],
                                        policy['PolicyName'],
                                        policy['PolicyDocument'])
            else:
                del lookup[policy['PolicyName']]
                document = json_dumps(policy['PolicyDocument'])
                document_ = json_dumps(policy_)
                if document != document_:
                    self.iw.put_role_policy(resource['RoleName'],
                                            policy['PolicyName'],
                                            policy['PolicyDocument'])

        for policy in lookup.keys():
            # AWS has a policy that is not in the desired version, it should be deleted
            self.iw.delete_role_policy(resource['RoleName'], policy)

        for arn in resource['AttachedManagedPolicies']:
            if arn not in resource_['AttachedManagedPolicies']:
                self.iw.attach_role_policy(resource["RoleName"], arn)

        for arn in resource_['AttachedManagedPolicies']:
            if arn not in resource['AttachedManagedPolicies']:
                # AWS has a managed policy that is not in the desired version, it should be deleted.
                self.iw.detach_role_policy(resource["RoleName"], arn)

        lookup = {
            profile['InstanceProfileName']: profile
            for profile in resource_['InstanceProfileList']
        }
        for profile in resource['InstanceProfileList']:
            profile_ = lookup.get(profile['InstanceProfileName'])
            if profile_ is None:
                self.iw.create_instance_profile(profile['InstanceProfileName'],
                                                profile['Path'])
                self.iw.add_role_to_instance_profile(
                    resource['RoleName'], profile['InstanceProfileName'])
            else:
                del lookup[profile['InstanceProfileName']]
                if profile['Path'] != profile_['Path']:
                    console.warning(
                        "Paths differ for {} Instance Profile {}: '{}' != '{}'"
                        .format(resource['RoleName'],
                                profile['InstanceProfileName'],
                                profile['Path'], profile_['Path']))
                    console.info(
                        'You will need to manually delete the old instance profile for the Path to be changed'
                    )

        for profile in lookup.keys():
            # AWS has an instance profile that is not in the desired version, it should be deleted
            self.iw.remove_role_from_instance_profile(resource['RoleName'],
                                                      profile)
            self.iw.delete_instance_profile(profile)
Example 11
def load_lambdas_on_s3(bosslet_config, lambda_name=None, lambda_dir=None):
    """Package up the lambda files and send them through the lambda build process
    where the lambda code zip is produced and uploaded to S3

    NOTE: This function is also used to build lambda layer code zips, the only requirement
          for a layer is that the files in the resulting zip should be in the correct
          subdirectory (`python/` for Python libraries) so that when a lambda uses the
          layer the libraries included in the layer can be correctly loaded

    NOTE: If lambda_name and lambda_dir are both None then lambda_dir is set to
          'multi_lambda' for backwards compatibility

    Args:
        bosslet_config (BossConfiguration): Configuration object of the stack the
                                            lambda will be deployed into
        lambda_name (str): Name of the lambda, which will be mapped to the name of the
                           lambda directory that contains the lambda's code
        lambda_dir (str): Name of the directory in `cloud_formation/lambda/` that
                          contains the `lambda.yml` configuration file for the lambda

    Raises:
        BossManageError: If there was a problem with building the lambda code zip or
                         uploading it to the given S3 bucket
    """
    # For backwards compatibility build the multi_lambda code zip
    if lambda_name is None and lambda_dir is None:
        lambda_dir = 'multi_lambda'

    # Map from lambda_name to lambda_dir if needed
    if lambda_dir is None:
        try:
            lambda_dir = lambda_dirs(bosslet_config)[lambda_name]
        except KeyError:
            console.error("Cannot build a lambda that doesn't use a code zip file")
            return None

    # To prevent rebuilding a lambda code zip multiple times during a single execution, memoize what has been built
    if lambda_dir in BUILT_ZIPS:
        console.debug('Lambda code {} has already been built recently, skipping...'.format(lambda_dir))
        return
    BUILT_ZIPS.append(lambda_dir)

    lambda_dir = pathlib.Path(const.repo_path('cloud_formation', 'lambda', lambda_dir))
    lambda_config = lambda_dir / 'lambda.yml'
    with lambda_config.open() as fh:
        lambda_config = yaml.full_load(fh.read())

    if lambda_config.get('layers'):
        for layer in lambda_config['layers']:
            # Layer names should end with `layer`
            if not layer.endswith('layer'):
                console.warning("Layer '{}' doesn't conform to naming conventions".format(layer))

            load_lambdas_on_s3(bosslet_config, lambda_dir=layer)

    console.debug("Building {} lambda code zip".format(lambda_dir))

    domain = bosslet_config.INTERNAL_DOMAIN
    tempname = tempfile.NamedTemporaryFile(delete=True)
    zipname = pathlib.Path(tempname.name + '.zip')
    tempname.close()
    console.debug('Using temp zip file: {}'.format(zipname))

    cwd = os.getcwd()

    # Copy the lambda files into the zip
    for filename in lambda_dir.glob('*'):
        zip.write_to_zip(str(filename), zipname, arcname=filename.name)

    # Copy the other files that should be included
    if lambda_config.get('include'):
        for src in lambda_config['include']:
            dst = lambda_config['include'][src]
            src_path, src_file = src.rsplit('/', 1)

            os.chdir(const.repo_path(src_path))

            # Generate dynamic configuration files, as needed
            if src_file == 'ndingest.git':
                with open(NDINGEST_SETTINGS_TEMPLATE, 'r') as tmpl:
                    # Generate settings.ini file for ndingest.
                    create_ndingest_settings(bosslet_config, tmpl)

            zip.write_to_zip(src_file, zipname, arcname=dst)
            os.chdir(cwd)

    # Currently any Docker CLI compatible container setup can be used (like podman)
    CONTAINER_CMD = '{EXECUTABLE} run --rm -it --env AWS_* --volume {HOST_DIR}:/var/task/ lambci/lambda:build-{RUNTIME} {CMD}'

    BUILD_CMD = 'python3 {PREFIX}/build_lambda.py {DOMAIN} {BUCKET}'
    BUILD_ARGS = {
        'DOMAIN': domain,
        'BUCKET': bosslet_config.LAMBDA_BUCKET,
    }

    # DP NOTE: not sure if this should be in the bosslet_config, as it is more about the local dev
    #          environment instead of the stack's environment. Different maintainers may have different
    #          container commands installed.
    container_executable = os.environ.get('LAMBDA_BUILD_CONTAINER')
    lambda_build_server = bosslet_config.LAMBDA_SERVER
    if lambda_build_server is None:
        staging_target = pathlib.Path(const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files', 'staging'))
        if not staging_target.exists():
            staging_target.mkdir()

        console.debug("Copying build zip to {}".format(staging_target))
        staging_zip = staging_target / (domain + '.zip')
        try:
            zipname.rename(staging_zip)
        except OSError:
            # rename only works within the same filesystem
            # Using the shell version, as using copy + chmod doesn't always work depending on the filesystem
            utils.run('mv {} {}'.format(zipname, staging_zip), shell=True)

        # Provide the AWS Region and Credentials (for S3 upload) via environmental variables
        env_extras = { 'AWS_REGION': bosslet_config.REGION,
                       'AWS_DEFAULT_REGION': bosslet_config.REGION }

        if container_executable is None:
            BUILD_ARGS['PREFIX'] = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files')
            CMD = BUILD_CMD.format(**BUILD_ARGS)

            if bosslet_config.PROFILE is not None:
                env_extras['AWS_PROFILE'] = bosslet_config.PROFILE

            console.info("calling build lambda on localhost")
        else:
            BUILD_ARGS['PREFIX'] = '/var/task'
            CMD = BUILD_CMD.format(**BUILD_ARGS)
            CMD = CONTAINER_CMD.format(EXECUTABLE = container_executable,
                                       HOST_DIR = const.repo_path('salt_stack', 'salt', 'lambda-dev', 'files'),
                                       RUNTIME = lambda_config['runtime'],
                                       CMD = CMD)

            if bosslet_config.PROFILE is not None:
                # Cannot set the profile as the container will not have the credentials file
                # So extract the underlying keys and provide those instead
                creds = bosslet_config.session.get_credentials()
                env_extras['AWS_ACCESS_KEY_ID'] = creds.access_key
                env_extras['AWS_SECRET_ACCESS_KEY'] = creds.secret_key

            console.info("calling build lambda in {}".format(container_executable))

        try:
            utils.run(CMD, env_extras=env_extras)
        except Exception as ex:
            raise BossManageError("Problem building {} lambda code zip: {}".format(lambda_dir, ex))
        finally:
            os.remove(staging_zip)

    else:
        BUILD_ARGS['PREFIX'] = '~'
        CMD = BUILD_CMD.format(**BUILD_ARGS)

        lambda_build_server_key = bosslet_config.LAMBDA_SERVER_KEY
        lambda_build_server_key = utils.keypair_to_file(lambda_build_server_key)
        ssh_target = SSHTarget(lambda_build_server_key, lambda_build_server, 22, 'ec2-user')
        bastions = [bosslet_config.outbound_bastion] if bosslet_config.outbound_bastion else []
        ssh = SSHConnection(ssh_target, bastions)

        console.debug("Copying build zip to lambda-build-server")
        target_file = '~/staging/{}.zip'.format(domain)
        ret = ssh.scp(zipname, target_file, upload=True)
        console.debug("scp return code: " + str(ret))

        os.remove(zipname)

        console.info("calling build lambda on lambda-build-server")
        ret = ssh.cmd(CMD)
        if ret != 0:
            raise BossManageError("Problem building {} lambda code zip: Return code: {}".format(lambda_dir, ret))
Example 12
    def list(self):
        console.info("Subscriptions for {}".format(self.topic))
        subs = self.client.list_subscriptions_by_topic(TopicArn=self.arn)['Subscriptions']
        for sub in subs:
            console.info("    {}".format(sub['Endpoint']))
Example 13
            for res in resp['Reservations']:
                for inst in res['Instances']:
                    name = get_name(inst.get('Tags', []))
                    state = inst['State']['Name']
                    if name is None:
                        unlabled.append(inst)
                    elif is_excluded(name):
                        pass
                    elif state == 'stopped':
                        stopped[name] = inst
                    elif state == 'running':
                        running[name] = inst
            kwargs['NextToken'] = resp.get('NextToken')

        if len(running) == 0:
            console.info("No unexpected running EC2 instances exist")

        if not args.quiet:
            for inst in unlabled:
                console.warning('Instance {} is not labeled'.format(
                    inst['InstanceId']))
            for name, inst in stopped.items():
                console.warning('Instance {} ({}) is stopped'.format(
                    inst['InstanceId'], name))
        if args.delete:
            for name, inst in running.items():
                console.debug('Deleting EC2 instance {} ({})'.format(
                    inst['InstanceId'], name))
            if console.confirm('Are you sure', default=False):
                for inst in running.values():
                    client.terminate_instances(
                        InstanceIds=[inst['InstanceId']])