def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(state=dict(required=True, choices=['present', 'absent']),
             arn=dict(required=False, type='str'),
             family=dict(required=False, type='str'),
             revision=dict(required=False, type='int'),
             force_create=dict(required=False, default=False, type='bool'),
             containers=dict(required=False, type='list'),
             network_mode=dict(required=False,
                               default='bridge',
                               choices=['bridge', 'host', 'none', 'awsvpc'],
                               type='str'),
             task_role_arn=dict(required=False, default='', type='str'),
             execution_role_arn=dict(required=False, default='', type='str'),
             volumes=dict(required=False, type='list'),
             launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
             cpu=dict(),
             memory=dict(required=False, type='str')))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE',
                                            ['cpu', 'memory'])])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    task_to_describe = None
    task_mgr = EcsTaskManager(module)
    results = dict(changed=False)

    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.8.4 or higher to use launch_type'
            )

    if module.params['execution_role_arn']:
        if not module.botocore_at_least('1.10.44'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.10.44 or higher to use execution_role_arn'
            )

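    # The ECS API expects environment variable values as strings, so coerce any
    # ints/bools supplied in the task parameters to text before registering.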
    if module.params['containers']:
        for container in module.params['containers']:
            for environment in container.get('environment', []):
                environment['value'] = to_text(environment['value'])

    if module.params['state'] == 'present':
        if 'containers' not in module.params or not module.params['containers']:
            module.fail_json(
                msg=
                "To use task definitions, a list of containers must be specified"
            )

        if 'family' not in module.params or not module.params['family']:
            module.fail_json(
                msg="To use task definitions, a family must be specified")

        network_mode = module.params['network_mode']
        launch_type = module.params['launch_type']
        if launch_type == 'FARGATE' and network_mode != 'awsvpc':
            module.fail_json(
                msg="To use FARGATE launch type, network_mode must be awsvpc")

        family = module.params['family']
        existing_definitions_in_family = task_mgr.describe_task_definitions(
            module.params['family'])

        if 'revision' in module.params and module.params['revision']:
            # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
            revision = int(module.params['revision'])

            # A revision has been explicitly specified. Attempt to locate a matching revision
            tasks_defs_for_revision = [
                td for td in existing_definitions_in_family
                if td['revision'] == revision
            ]
            existing = tasks_defs_for_revision[0] if tasks_defs_for_revision else None

            if existing and existing['status'] != "ACTIVE":
                # We cannot reactivate an inactive revision
                module.fail_json(
                    msg=
                    "A task in family '%s' already exists for revision %d, but it is inactive"
                    % (family, revision))
            elif not existing:
                if not existing_definitions_in_family and revision != 1:
                    module.fail_json(
                        msg=
                        "You have specified a revision of %d but a created revision would be 1"
                        % revision)
                elif existing_definitions_in_family and existing_definitions_in_family[
                        -1]['revision'] + 1 != revision:
                    module.fail_json(
                        msg=
                        "You have specified a revision of %d but a created revision would be %d"
                        % (revision,
                           existing_definitions_in_family[-1]['revision'] + 1))
        else:
            existing = None

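            # Helper: returns True when every key set in `left` (the requested
            # definition) is matched in `right` (the existing definition), ignoring
            # list ordering, and `right` carries no extra non-empty keys.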
            def _right_has_values_of_left(left, right):
                # Make sure the values are equivalent for everything left has
                for k, v in left.items():
                    if not ((not v and (k not in right or not right[k])) or
                            (k in right and v == right[k])):
                        # We don't care about list ordering because ECS can change things
                        if isinstance(v, list) and k in right:
                            left_list = v
                            right_list = right[k] or []

                            if len(left_list) != len(right_list):
                                return False

                            for list_val in left_list:
                                if list_val not in right_list:
                                    return False
                        else:
                            return False

                # Make sure right doesn't have anything that left doesn't
                for k, v in right.items():
                    if v and k not in left:
                        return False

                return True

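            # Helper: returns the existing task definition when it is ACTIVE and its
            # task role, volumes and container definitions all match the request;
            # otherwise returns None.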
            def _task_definition_matches(requested_volumes,
                                         requested_containers,
                                         requested_task_role_arn,
                                         existing_task_definition):
                if existing_task_definition['status'] != "ACTIVE":
                    return None

                if requested_task_role_arn != existing_task_definition.get('taskRoleArn', ""):
                    return None

                existing_volumes = existing_task_definition.get('volumes', []) or []

                if len(requested_volumes) != len(existing_volumes):
                    # Nope.
                    return None

                if len(requested_volumes) > 0:
                    for requested_vol in requested_volumes:
                        found = False

                        for actual_vol in existing_volumes:
                            if _right_has_values_of_left(
                                    requested_vol, actual_vol):
                                found = True
                                break

                        if not found:
                            return None

                existing_containers = existing_task_definition.get('containerDefinitions', []) or []

                if len(requested_containers) != len(existing_containers):
                    # Nope.
                    return None

                for requested_container in requested_containers:
                    found = False

                    for actual_container in existing_containers:
                        if _right_has_values_of_left(requested_container,
                                                     actual_container):
                            found = True
                            break

                    if not found:
                        return None

                return existing_task_definition

            # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
            for td in existing_definitions_in_family:
                requested_volumes = module.params['volumes'] or []
                requested_containers = module.params['containers'] or []
                requested_task_role_arn = module.params['task_role_arn']
                existing = _task_definition_matches(requested_volumes,
                                                    requested_containers,
                                                    requested_task_role_arn,
                                                    td)

                if existing:
                    break

        if existing and not module.params.get('force_create'):
            # Awesome. Have an existing one. Nothing to do.
            results['taskdefinition'] = existing
        else:
            if not module.check_mode:
                # Doesn't exist. create it.
                volumes = module.params.get('volumes', []) or []
                results['taskdefinition'] = task_mgr.register_task(
                    module.params['family'], module.params['task_role_arn'],
                    module.params['execution_role_arn'],
                    module.params['network_mode'], module.params['containers'],
                    volumes, module.params['launch_type'],
                    module.params['cpu'], module.params['memory'])
            results['changed'] = True

    elif module.params['state'] == 'absent':
        # When de-registering a task definition, we can specify the ARN OR the family and revision.
        if module.params['arn'] is not None:
            task_to_describe = module.params['arn']
        elif module.params['family'] is not None and module.params['revision'] is not None:
            task_to_describe = module.params['family'] + ":" + str(
                module.params['revision'])
        else:
            module.fail_json(
                msg=
                "To deregister a task definition, an arn or family and revision must be specified"
            )

        existing = task_mgr.describe_task(task_to_describe)

        if existing:
            # It exists, so we should delete it and mark changed. Return info about the task definition deleted
            results['taskdefinition'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    task_mgr.deregister_task(task_to_describe)
                results['changed'] = True

    module.exit_json(**results)
def main():
    argument_spec = dict(state=dict(required=True,
                                    choices=['present', 'absent', 'deleting']),
                         name=dict(required=True, type='str'),
                         cluster=dict(required=False, type='str'),
                         task_definition=dict(required=False, type='str'),
                         load_balancers=dict(required=False,
                                             default=[],
                                             type='list'),
                         desired_count=dict(required=False, type='int'),
                         client_token=dict(required=False,
                                           default='',
                                           type='str'),
                         role=dict(required=False, default='', type='str'),
                         delay=dict(required=False, type='int', default=10),
                         repeat=dict(required=False, type='int', default=10),
                         force_new_deployment=dict(required=False,
                                                   default=False,
                                                   type='bool'),
                         deployment_configuration=dict(required=False,
                                                       default={},
                                                       type='dict'),
                         placement_constraints=dict(required=False,
                                                    default=[],
                                                    type='list'),
                         placement_strategy=dict(required=False,
                                                 default=[],
                                                 type='list'),
                         health_check_grace_period_seconds=dict(required=False,
                                                                type='int'),
                         network_configuration=dict(
                             required=False,
                             type='dict',
                             options=dict(subnets=dict(type='list'),
                                          security_groups=dict(type='list'),
                                          assign_public_ip=dict(type='bool'))),
                         launch_type=dict(required=False,
                                          choices=['EC2', 'FARGATE']),
                         service_registries=dict(required=False,
                                                 type='list',
                                                 default=[]),
                         scheduling_strategy=dict(
                             required=False, choices=['DAEMON', 'REPLICA']))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('state', 'present',
                                            ['task_definition']),
                                           ('launch_type', 'FARGATE',
                                            ['network_configuration'])],
                              required_together=[['load_balancers', 'role']])

    if module.params['state'] == 'present' and module.params[
            'scheduling_strategy'] == 'REPLICA':
        if module.params['desired_count'] is None:
            module.fail_json(
                msg=
                'state is present, scheduling_strategy is REPLICA; missing desired_count'
            )

    service_mgr = EcsServiceManager(module)
    if module.params['network_configuration']:
        if not service_mgr.ecs_api_handles_network_configuration():
            module.fail_json(
                msg=
                'botocore needs to be version 1.7.44 or higher to use network configuration'
            )
        network_configuration = service_mgr.format_network_configuration(
            module.params['network_configuration'])
    else:
        network_configuration = None

    deployment_configuration = map_complex_type(
        module.params['deployment_configuration'],
        DEPLOYMENT_CONFIGURATION_TYPE_MAP)

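    # boto3 expects camelCase keys for the ECS API, so convert the snake_case
    # module parameters before passing them on.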
    deploymentConfiguration = snake_dict_to_camel_dict(
        deployment_configuration)
    serviceRegistries = list(
        map(snake_dict_to_camel_dict, module.params['service_registries']))

    try:
        existing = service_mgr.describe_service(module.params['cluster'],
                                                module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '" +
                         module.params['name'] + "' in cluster '" +
                         module.params['cluster'] + "': " + str(e))

    results = dict(changed=False)

    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.8.4 or higher to use launch_type'
            )
    if module.params['force_new_deployment']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.8.4 or higher to use force_new_deployment'
            )
    if module.params['health_check_grace_period_seconds']:
        if not module.botocore_at_least('1.8.20'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds'
            )

    if module.params['state'] == 'present':

        matching = False
        update = False

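        # An ACTIVE service that already matches the request is left untouched;
        # force_new_deployment or any drift from the requested parameters triggers
        # an update, and a missing/inactive service triggers a create.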
        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if module.params['force_new_deployment']:
                update = True
            elif service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = existing
            else:
                update = True

        if not matching:
            if not module.check_mode:

                role = module.params['role']
                clientToken = module.params['client_token']

                loadBalancers = []
                for loadBalancer in module.params['load_balancers']:
                    if 'containerPort' in loadBalancer:
                        loadBalancer['containerPort'] = int(
                            loadBalancer['containerPort'])
                    loadBalancers.append(loadBalancer)

                if update:
                    # check various parameters against the botocore version and give a helpful error if botocore is not new enough for the feature

                    if module.params['scheduling_strategy']:
                        if not module.botocore_at_least('1.10.37'):
                            module.fail_json(
                                msg=
                                'botocore needs to be version 1.10.37 or higher to use scheduling_strategy'
                            )
                        elif (existing['schedulingStrategy']
                              ) != module.params['scheduling_strategy']:
                            module.fail_json(
                                msg=
                                "It is not possible to update the scheduling strategy of an existing service"
                            )

                    if module.params['service_registries']:
                        if not module.botocore_at_least('1.9.15'):
                            module.fail_json(
                                msg=
                                'botocore needs to be version 1.9.15 or higher to use service_registries'
                            )
                        elif (existing['serviceRegistries']
                              or []) != serviceRegistries:
                            module.fail_json(
                                msg=
                                "It is not possible to update the service registries of an existing service"
                            )

                    if (existing['loadBalancers'] or []) != loadBalancers:
                        module.fail_json(
                            msg=
                            "It is not possible to update the load balancers of an existing service"
                        )

                    # update required
                    response = service_mgr.update_service(
                        module.params['name'], module.params['cluster'],
                        module.params['task_definition'],
                        module.params['desired_count'],
                        deploymentConfiguration, network_configuration,
                        module.params['health_check_grace_period_seconds'],
                        module.params['force_new_deployment'])

                else:
                    try:
                        response = service_mgr.create_service(
                            module.params['name'], module.params['cluster'],
                            module.params['task_definition'], loadBalancers,
                            module.params['desired_count'], clientToken, role,
                            deploymentConfiguration,
                            module.params['placement_constraints'],
                            module.params['placement_strategy'],
                            module.params['health_check_grace_period_seconds'],
                            network_configuration, serviceRegistries,
                            module.params['launch_type'],
                            module.params['scheduling_strategy'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't create service")

                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if existing:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(module.params['name'],
                                                   module.params['cluster'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't delete service")
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            module.fail_json(msg="Service '" + module.params['name'] +
                             "' not found.")
            return
        # it exists, so we should delete it and mark changed.
        # return info about the cluster deleted
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'],
                                                    module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
        else:
            module.fail_json(msg="Service still not deleted after " +
                             str(repeat) + " tries of " + str(delay) +
                             " seconds each.")
            return

    module.exit_json(**results)
def main():
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list'),
        accounts=dict(type='list'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True
    )
    if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')):
        module.fail_json(msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26")

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    cfn = module.client('cloudformation', retry_decorator=AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30))
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg="Can't create a stack set without choosing at least one account. "
                "To get the ID of the current account, use the aws_caller_info module."
        )

    if module.params['accounts']:
        module.params['accounts'] = [to_native(a) for a in module.params['accounts']]

    stack_params['StackSetName'] = module.params['name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                    "`template_body`, or `template_url`".format(module.params['name'])
            )

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})

    if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params['execution_role_name']

    result = {}

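    # In check mode, report what would change without calling any mutating
    # CloudFormation APIs.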
    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True, msg='New stack set would be created', meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
            elif unspecified_stacks and module.params.get('purge_stacks'):
                module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
            else:
                # TODO: need to check the template and other settings for correct check mode
                module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params['OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in new_stack_instances)),
                Regions=list(set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in existing_stack_instances)),
                Regions=list(set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        for op in operation_ids:
            await_stack_set_operation(
                module, cfn, operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
        # Retention of the underlying stacks is handled below via
        # RetainStacks=(not purge_stacks) when instances must be deleted first.
        try:
            cfn.delete_stack_set(
                StackSetName=module.params['name'],
            )
            module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg='Cannot delete stack set {0} while there is an operation in progress'.format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op
            )
            await_stack_set_operation(
                module, cfn, operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(
                    StackSetName=module.params['name'],
                )
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(
                    StackSetName=module.params['name'],
                )
                stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries'])
                module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
            module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))

    result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
    if result.get('operations') and any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute", **result)
    module.exit_json(changed=changed, **result)
def main():
    argument_spec = dict(
        bucket=dict(required=True),
        dest=dict(default=None, type='path'),
        encrypt=dict(default=True, type='bool'),
        encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
        expiry=dict(default=600, type='int', aliases=['expiration']),
        headers=dict(type='dict'),
        marker=dict(default=""),
        max_keys=dict(default=1000, type='int'),
        metadata=dict(type='dict'),
        mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
        object=dict(),
        permission=dict(type='list', default=['private']),
        version=dict(default=None),
        overwrite=dict(aliases=['force'], default='always'),
        prefix=dict(default=""),
        retries=dict(aliases=['retry'], type='int', default=0),
        s3_url=dict(aliases=['S3_URL']),
        dualstack=dict(default='no', type='bool'),
        rgw=dict(default='no', type='bool'),
        src=dict(),
        ignore_nonexistent_bucket=dict(default=False, type='bool'),
        encryption_kms_key_id=dict()
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[['mode', 'put', ['src', 'object']],
                     ['mode', 'get', ['dest', 'object']],
                     ['mode', 'getstr', ['object']],
                     ['mode', 'geturl', ['object']]],
    )

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = module.params.get('expiry')
    dest = module.params.get('dest', '')
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    dualstack = module.params.get('dualstack')
    rgw = module.params.get('rgw')
    src = module.params.get('src')
    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')

    object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
    bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    if overwrite == 'different' and not HAS_MD5:
        module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = module.params['object']
        # If there is a top level object, do nothing - if the object starts with /
        # remove the leading character to maintain compatibility with Ansible versions < 2.4
        if obj.startswith('/'):
            obj = obj[1:]

    # Bucket deletion does not require obj.  Prevents ambiguity with delobj.
    if obj and mode == "delete":
        module.fail_json(msg='Parameter obj cannot be used with mode=delete')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
        module.fail_json(msg='dualstack only applies to AWS S3')

    if dualstack and not module.botocore_at_least('1.4.45'):
        module.fail_json(msg='dualstack requires botocore >= 1.4.45')

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)

    validate = not ignore_nonexistent_bucket

    # separate types of ACLs
    bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
    object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
    error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
    if error_acl:
        module.fail_json(msg='Unknown permission specified: %s' % error_acl)

    # First, we check to see if the bucket exists, we get "bucket" returned.
    bucketrtn = bucket_check(module, s3, bucket, validate=validate)

    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
        module.fail_json(msg="Source bucket cannot be found.")

    if mode == 'get':
        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn is False:
            if version:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

        if path_check(dest) and overwrite != 'always':
            if overwrite == 'never':
                module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
            if etag_compare(module, dest, s3, bucket, obj, version=version):
                module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)

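        # Some objects (e.g. KMS-encrypted ones) can only be fetched with
        # Signature Version 4, so retry with sig_4 when the first attempt
        # raises Sigv4Required.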
        try:
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)
        except Sigv4Required:
            s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

    if mode == 'put':

        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
        # these were separated into the variables bucket_acl and object_acl above

        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist")

        if bucketrtn:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        else:
            # If the bucket doesn't exist we should create it.
            # only use valid bucket acls for create_bucket function
            module.params['permission'] = bucket_acl
            create_bucket(module, s3, bucket, location)
            # A freshly created bucket cannot contain the key yet.
            keyrtn = False

        if keyrtn and overwrite != 'always':
            if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
                # Return the download URL for the existing object
                get_download_url(module, s3, bucket, obj, expiry, changed=False)

        # only use valid object acls for the upload_s3file function
        module.params['permission'] = object_acl
        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required")
        if bucket:
            deletertn = delete_key(module, s3, bucket, obj)
            if deletertn is True:
                module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            deletertn = delete_bucket(module, s3, bucket)
            if deletertn is True:
                module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Support for listing a set of keys
    if mode == 'list':
        exists = bucket_check(module, s3, bucket)

        # If the bucket does not exist then bail out
        if not exists:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)

        list_keys(module, s3, bucket, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':

        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
        # these were separated above into the variables bucket_acl and object_acl

        if bucket and not obj:
            if bucketrtn:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                # only use valid bucket acls when creating the bucket
                module.params['permission'] = bucket_acl
                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
        if bucket and obj:
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn:
                if key_check(module, s3, bucket, dirobj):
                    module.exit_json(msg="Bucket %s and key %s already exist." % (bucket, obj), changed=False)
                else:
                    # setting valid object acls for the create_dirkey function
                    module.params['permission'] = object_acl
                    create_dirkey(module, s3, bucket, dirobj, encrypt)
            else:
                # only use valid bucket acls for the create_bucket function
                module.params['permission'] = bucket_acl
                created = create_bucket(module, s3, bucket, location)
                # only use valid object acls for the create_dirkey function
                module.params['permission'] = object_acl
                create_dirkey(module, s3, bucket, dirobj, encrypt)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if not bucket or not obj:
            module.fail_json(msg="Bucket and Object parameters must be set")

        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn:
            get_download_url(module, s3, bucket, obj, expiry)
        else:
            module.fail_json(msg="Key %s does not exist." % obj)

    if mode == 'getstr':
        if bucket and obj:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
            if keyrtn:
                try:
                    download_s3str(module, s3, bucket, obj, version=version)
                except Sigv4Required:
                    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
                    download_s3str(module, s3, bucket, obj, version=version)
            elif version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

    module.exit_json(failed=False)
def main():

    argument_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default="/"),
        assume_role_policy_document=dict(type='json'),
        managed_policy=dict(type='list', aliases=['managed_policies']),
        max_session_duration=dict(type='int'),
        state=dict(type='str',
                   choices=['present', 'absent'],
                   default='present'),
        description=dict(type='str'),
        boundary=dict(type='str', aliases=['boundary_policy_arn']),
        create_instance_profile=dict(type='bool', default=True),
        delete_instance_profile=dict(type='bool', default=False),
        purge_policies=dict(type='bool', default=True),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[('state', 'present',
                                            ['assume_role_policy_document'])],
                              supports_check_mode=True)

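    # Permissions boundaries come with extra constraints: the boundary must be a
    # policy ARN, instance profile management is not supported alongside it, and
    # botocore must be new enough to pass the parameter through.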
    if module.params.get('boundary'):
        if module.params.get('create_instance_profile'):
            module.fail_json(
                msg=
                "When using a boundary policy, `create_instance_profile` must be set to `false`."
            )
        if not module.params.get('boundary').startswith('arn:aws:iam'):
            module.fail_json(msg="Boundary policy must be an ARN")
    if module.params.get(
            'tags') is not None and not module.botocore_at_least('1.12.46'):
        module.fail_json(
            msg="When managing tags botocore must be at least v1.12.46. "
            "Current versions: boto3-{boto3_version} botocore-{botocore_version}"
            .format(**module._gather_versions()))
    if module.params.get(
            'boundary'
    ) is not None and not module.botocore_at_least('1.10.57'):
        module.fail_json(
            msg=
            "When using a boundary policy, botocore must be at least v1.10.57. "
            "Current versions: boto3-{boto3_version} botocore-{botocore_version}"
            .format(**module._gather_versions()))
    if module.params.get('max_session_duration'):
        max_session_duration = module.params.get('max_session_duration')
        if max_session_duration < 3600 or max_session_duration > 43200:
            module.fail_json(
                msg=
                "max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)"
            )
    if module.params.get('path'):
        path = module.params.get('path')
        if not path.endswith('/') or not path.startswith('/'):
            module.fail_json(msg="path must begin and end with /")

    connection = module.client('iam')

    state = module.params.get("state")

    if state == 'present':
        create_or_update_role(connection, module)
    else:
        destroy_role(connection, module)
def main():
    argument_spec = dict(
        stack_set_name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        parameters=dict(type='dict', default={}),
        permission_model=dict(type='str',
                              choices=['SERVICE_MANAGED', 'SELF_MANAGED']),
        auto_deployment=dict(
            type='dict',
            default={},
            options=dict(enabled=dict(type='bool'),
                         retain_stacks_on_account_removal=dict(type='bool'))),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list',
                          elements='str',
                          choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        administration_role_arn=dict(
            aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(
            aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)
    if not (module.boto3_at_least('1.14.0')
            and module.botocore_at_least('1.17.7')):
        module.fail_json(
            msg=
            "Boto3 or botocore version is too low. This module requires at least boto3 1.14.0 and botocore 1.17.7"
        )

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    jittered_backoff_decorator = AWSRetry.jittered_backoff(
        retries=10,
        delay=3,
        max_delay=30,
        catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation',
                        retry_decorator=jittered_backoff_decorator)
    existing_stack_set = stack_set_facts(cfn, module.params['stack_set_name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    stack_params['StackSetName'] = module.params['stack_set_name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg=
                "The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                "`template_body`, or `template_url`".format(
                    module.params['stack_set_name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if module.params.get('tags') and isinstance(module.params.get('tags'),
                                                dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(
            module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params[
            'administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params[
            'execution_role_name']
    if module.params.get('permission_model'):
        stack_params['PermissionModel'] = module.params.get('permission_model')
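    # AutoDeployment is only meaningful for SERVICE_MANAGED stack sets, where
    # CloudFormation deploys instances to accounts in the targeted organizational
    # units automatically.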
    if module.params.get('auto_deployment'):
        param_auto_deployment = {}
        auto_deployment = module.params.get('auto_deployment')
        if 'enabled' in auto_deployment.keys():
            param_auto_deployment['Enabled'] = auto_deployment['enabled']
        if 'retain_stacks_on_account_removal' in auto_deployment.keys():
            param_auto_deployment[
                'RetainStacksOnAccountRemoval'] = auto_deployment[
                    'retain_stacks_on_account_removal']
        stack_params['AutoDeployment'] = param_auto_deployment

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True,
                             msg='Stack set would be deleted',
                             meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False,
                             msg='Stack set doesn\'t exist',
                             meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True,
                             msg='New stack set would be created',
                             meta=[])
        elif state == 'present' and existing_stack_set:
            module.exit_json(changed=True,
                             msg='Existing stack set would be updated',
                             meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params[
                'ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(
                    operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(
                operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            changed |= update_stack_set(module, stack_params, cfn)

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(
                module.params['stack_set_name']))
        try:
            cfn.delete_stack_set(
                StackSetName=module.params['stack_set_name'], )
            module.exit_json(msg='Stack set {0} deleted'.format(
                module.params['stack_set_name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e,
                msg=
                'Cannot delete stack set {0} while there is an operation in progress'
                .format(module.params['stack_set_name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            try:
                cfn.delete_stack_set(
                    StackSetName=module.params['stack_set_name'], )
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                module.fail_json_aws(
                    exc,
                    msg=
                    'Could not purge stacks, or not all accounts/regions were chosen for deletion'
                )
            module.exit_json(changed=True,
                             msg='Stack set {0} deleted'.format(
                                 module.params['stack_set_name']))

    result.update(**describe_stack_tree(
        module, stack_params['StackSetName'], operation_ids=operation_ids))
    if 'operations' in result.keys():
        if any(o['status'] == 'FAILED' for o in result['operations']):
            module.fail_json(msg="One or more operations failed to execute",
                             **result)
    module.exit_json(changed=changed, **result)