Example 1
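# Likely the community.aws ecs_tag module (assumption): adds or removes tags on an ECS
# cluster, task, service, task definition, or container, honoring check mode and purge_tags.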
def main():
    argument_spec = dict(cluster_name=dict(required=True),
                         resource=dict(required=False),
                         tags=dict(type='dict'),
                         purge_tags=dict(type='bool', default=False),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         resource_type=dict(default='cluster',
                                            choices=[
                                                'cluster', 'task', 'service',
                                                'task_definition', 'container'
                                            ]))
    required_if = [('state', 'present', ['tags']),
                   ('state', 'absent', ['tags'])]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=required_if,
                              supports_check_mode=True)

    resource_type = module.params['resource_type']
    cluster_name = module.params['cluster_name']
    if resource_type == 'cluster':
        resource = cluster_name
    else:
        resource = module.params['resource']
    tags = module.params['tags']
    state = module.params['state']
    purge_tags = module.params['purge_tags']

    result = {'changed': False}

    ecs = module.client('ecs')

    resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource)

    current_tags = get_tags(ecs, module, resource_arn)

    add_tags, remove = compare_aws_tags(current_tags,
                                        tags,
                                        purge_tags=purge_tags)

    remove_tags = {}
    if state == 'absent':
        for key in tags:
            if key in current_tags and (tags[key] is None
                                        or current_tags[key] == tags[key]):
                remove_tags[key] = current_tags[key]

    for key in remove:
        remove_tags[key] = current_tags[key]

    if remove_tags:
        result['changed'] = True
        result['removed_tags'] = remove_tags
        if not module.check_mode:
            try:
                ecs.untag_resource(resourceArn=resource_arn,
                                   tagKeys=list(remove_tags.keys()))
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(
                    e,
                    msg='Failed to remove tags {0} from resource {1}'.format(
                        remove_tags, resource))

    if state == 'present' and add_tags:
        result['changed'] = True
        result['added_tags'] = add_tags
        current_tags.update(add_tags)
        if not module.check_mode:
            try:
                tags = ansible_dict_to_boto3_tag_list(
                    add_tags,
                    tag_name_key_name='key',
                    tag_value_key_name='value')
                ecs.tag_resource(resourceArn=resource_arn, tags=tags)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(
                    e,
                    msg='Failed to set tags {0} on resource {1}'.format(
                        add_tags, resource))

    result['tags'] = get_tags(ecs, module, resource_arn)
    module.exit_json(**result)
Example 2
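# Likely the community.aws elb_application_lb module (assumption): creates, updates, or deletes
# an Application Load Balancer after validating that HTTPS listeners define SslPolicy and Certificates.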
def main():

    argument_spec = dict(access_logs_enabled=dict(type='bool'),
                         access_logs_s3_bucket=dict(type='str'),
                         access_logs_s3_prefix=dict(type='str'),
                         deletion_protection=dict(type='bool'),
                         http2=dict(type='bool'),
                         idle_timeout=dict(type='int'),
                         listeners=dict(
                             type='list',
                             elements='dict',
                             options=dict(Protocol=dict(type='str',
                                                        required=True),
                                          Port=dict(type='int', required=True),
                                          SslPolicy=dict(type='str'),
                                          Certificates=dict(type='list',
                                                            elements='dict'),
                                          DefaultActions=dict(type='list',
                                                              required=True,
                                                              elements='dict'),
                                          Rules=dict(type='list',
                                                     elements='dict'))),
                         name=dict(required=True, type='str'),
                         purge_listeners=dict(default=True, type='bool'),
                         purge_tags=dict(default=True, type='bool'),
                         subnets=dict(type='list', elements='str'),
                         security_groups=dict(type='list', elements='str'),
                         scheme=dict(default='internet-facing',
                                     choices=['internet-facing', 'internal']),
                         state=dict(choices=['present', 'absent'],
                                    default='present'),
                         tags=dict(type='dict'),
                         wait_timeout=dict(type='int'),
                         wait=dict(default=False, type='bool'),
                         purge_rules=dict(default=True, type='bool'))

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[('state', 'present', ['subnets', 'security_groups'])],
        required_together=[['access_logs_enabled', 'access_logs_s3_bucket']])

    # Quick check of listeners parameters
    listeners = module.params.get("listeners")
    if listeners is not None:
        for listener in listeners:
            if listener.get('Protocol') == 'HTTPS':
                if listener.get('SslPolicy') is None:
                    module.fail_json(
                        msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS")

                if listener.get('Certificates') is None:
                    module.fail_json(
                        msg="'Certificates' is a required listener dict key when Protocol = HTTPS")

    connection = module.client('elbv2')
    connection_ec2 = module.client('ec2')

    state = module.params.get("state")

    elb = ApplicationLoadBalancer(connection, connection_ec2, module)

    if state == 'present':
        create_or_update_elb(elb)
    else:
        delete_elb(elb)
Example 3
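# Likely the legacy iam_cert / iam_server_certificate module (boto2-based, assumption):
# uploads, renames, moves, or deletes an IAM server certificate.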
def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(required=True),
        cert=dict(),
        key=dict(no_log=True),
        cert_chain=dict(),
        new_name=dict(),
        path=dict(default='/'),
        new_path=dict(),
        dup_ok=dict(type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['new_path', 'key'],
            ['new_path', 'cert'],
            ['new_path', 'cert_chain'],
            ['new_name', 'key'],
            ['new_name', 'cert'],
            ['new_name', 'cert_chain'],
        ],
        check_boto3=False,
    )

    if not HAS_BOTO:
        module.fail_json(msg="Boto is required for this module")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    name = module.params.get('name')
    path = module.params.get('path')
    new_name = module.params.get('new_name')
    new_path = module.params.get('new_path')
    dup_ok = module.params.get('dup_ok')
    if state == 'present' and not new_name and not new_path:
        cert, key, cert_chain = load_data(
            cert=module.params.get('cert'),
            key=module.params.get('key'),
            cert_chain=module.params.get('cert_chain'))
    else:
        cert = key = cert_chain = None

    orig_cert_names = [
        ctb['server_certificate_name'] for ctb in iam.get_all_server_certs().
        list_server_certificates_result.server_certificate_metadata_list
    ]
    orig_cert_bodies = [
        iam.get_server_certificate(
            thing).get_server_certificate_result.certificate_body
        for thing in orig_cert_names
    ]
    if new_name == name:
        new_name = None
    if new_path == path:
        new_path = None

    changed = False
    try:
        cert_action(module, iam, name, path, new_name, new_path, state, cert,
                    key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok)
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err), debug=[cert, key])
Example 4
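# Likely the community.aws sns module (assumption): publishes a message to an SNS topic,
# building per-protocol message bodies when message_structure is 'json'.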
def main():
    protocols = [
        'http',
        'https',
        'email',
        'email_json',
        'sms',
        'sqs',
        'application',
        'lambda',
    ]

    argument_spec = dict(
        msg=dict(required=True, aliases=['default']),
        subject=dict(),
        topic=dict(required=True),
        message_attributes=dict(type='dict'),
        message_structure=dict(choices=['json', 'string'], default='json'),
    )

    for p in protocols:
        argument_spec[p] = dict()

    module = AnsibleAWSModule(argument_spec=argument_spec)

    sns_kwargs = dict(
        Message=module.params['msg'],
        Subject=module.params['subject'],
        MessageStructure=module.params['message_structure'],
    )

    if module.params['message_attributes']:
        if module.params['message_structure'] != 'string':
            module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
        sns_kwargs['MessageAttributes'] = module.params['message_attributes']

    dict_msg = {
        'default': sns_kwargs['Message']
    }

    for p in protocols:
        if module.params[p]:
            if sns_kwargs['MessageStructure'] != 'json':
                module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
            dict_msg[p.replace('_', '-')] = module.params[p]

    client = module.client('sns')

    topic = module.params['topic']
    if ':' in topic:
        # Short names can't contain ':' so we'll assume this is the full ARN
        sns_kwargs['TopicArn'] = topic
    else:
        sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)

    if not sns_kwargs['TopicArn']:
        module.fail_json(msg='Could not find topic: {0}'.format(topic))

    if sns_kwargs['MessageStructure'] == 'json':
        sns_kwargs['Message'] = json.dumps(dict_msg)

    try:
        result = client.publish(**sns_kwargs)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to publish message')

    module.exit_json(msg='OK', message_id=result['MessageId'])
Example 5
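# Likely the community.aws aws_kms module (assumption): creates, updates, or deletes a KMS key,
# managing tags, grants, rotation, and the deprecated policy_mode/policy_grant_types options.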
def main():
    argument_spec = dict(alias=dict(aliases=['key_alias']),
                         policy_mode=dict(aliases=['mode'],
                                          choices=['grant', 'deny'],
                                          default='grant'),
                         policy_role_name=dict(aliases=['role_name']),
                         policy_role_arn=dict(aliases=['role_arn']),
                         policy_grant_types=dict(aliases=['grant_types'],
                                                 type='list'),
                         policy_clean_invalid_entries=dict(
                             aliases=['clean_invalid_entries'],
                             type='bool',
                             default=True),
                         key_id=dict(aliases=['key_arn']),
                         description=dict(),
                         enabled=dict(type='bool', default=True),
                         tags=dict(type='dict', default={}),
                         purge_tags=dict(type='bool', default=False),
                         grants=dict(type='list', default=[]),
                         policy=dict(type='json'),
                         purge_grants=dict(type='bool', default=False),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         enable_key_rotation=(dict(type='bool')))

    module = AnsibleAWSModule(
        supports_check_mode=True,
        argument_spec=argument_spec,
        required_one_of=[['alias', 'key_id']],
    )

    mode = module.params['policy_mode']

    kms = module.client('kms')

    key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'),
                                      module.params.get('alias'))
    # We can't create keys with a specific ID, if we can't access the key we'll have to fail
    if module.params.get('state') == 'present' and module.params.get(
            'key_id') and not key_metadata:
        module.fail_json(msg="Could not find key with id %s to update")

    if module.params.get('policy_grant_types') or mode == 'deny':
        module.deprecate(
            'Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile'
            ' and has been deprecated in favour of the policy option.',
            date='2021-12-01',
            collection_name='community.aws')
        result = update_policy_grants(kms, module, key_metadata, mode)
        module.exit_json(**result)

    if module.params.get('state') == 'absent':
        if key_metadata is None:
            module.exit_json(changed=False)
        result = delete_key(kms, module, key_metadata)
        module.exit_json(**result)

    if key_metadata:
        key_details = get_key_details(kms, module, key_metadata['Arn'])
        result = update_key(kms, module, key_details)
        module.exit_json(**result)

    result = create_key(kms, module)
    module.exit_json(**result)
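Example 6
# Likely the community.aws cloudformation_stack_set module (assumption): creates, updates, or
# deletes a CloudFormation stack set and its stack instances across accounts and regions.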
def main():
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list',
                          elements='str',
                          choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list', elements='str'),
        accounts=dict(type='list', elements='str'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(
            aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(
            aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    jittered_backoff_decorator = AWSRetry.jittered_backoff(
        retries=10,
        delay=3,
        max_delay=30,
        catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation',
                        retry_decorator=jittered_backoff_decorator)
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg=
            "Can't create a stack set without choosing at least one account. "
            "To get the ID of the current account, use the aws_caller_info module."
        )

    if module.params['accounts']:
        module.params['accounts'] = [
            to_native(a) for a in module.params['accounts']
        ]

    stack_params['StackSetName'] = module.params['name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg=
                "The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                "`template_body`, or `template_url`".format(
                    module.params['name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if module.params.get('tags') and isinstance(module.params.get('tags'),
                                                dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(
            module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params[
            'administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params[
            'execution_role_name']

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True,
                             msg='Stack set would be deleted',
                             meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False,
                             msg='Stack set doesn\'t exist',
                             meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True,
                             msg='New stack set would be created',
                             meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True,
                                 msg='New stack instance(s) would be created',
                                 meta=[])
            elif unspecified_stacks and module.params.get('purge_stacks'):
                module.exit_json(changed=True,
                                 msg='Old stack instance(s) would be deleted',
                                 meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params[
                'ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(
                    operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(
                operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params[
                    'OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append(
                'Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct
                                  for acct, region in new_stack_instances)),
                Regions=list(
                    set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append(
                'Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(
                    set(acct for acct, region in existing_stack_instances)),
                Regions=list(
                    set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        for op in operation_ids:
            await_stack_set_operation(
                module,
                cfn,
                operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(
                module.params['name']))
        if module.params.get('purge_stacks') is False:
            pass
        try:
            cfn.delete_stack_set(StackSetName=module.params['name'], )
            module.exit_json(
                msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e,
                msg=
                'Cannot delete stack {0} while there is an operation in progress'
                .format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(
                operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op)
            await_stack_set_operation(
                module,
                cfn,
                operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(StackSetName=module.params['name'], )
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(
                    StackSetName=module.params['name'], )
                stack_states = ', '.join(
                    '(account={Account}, region={Region}, state={Status})'.
                    format(**i) for i in instances['Summaries'])
                module.fail_json_aws(
                    exc,
                    msg=
                    'Could not purge all stacks, or not all accounts/regions were chosen for deletion: '
                    + stack_states)
            module.exit_json(changed=True,
                             msg='Stack set {0} deleted'.format(
                                 module.params['name']))

    result.update(**describe_stack_tree(
        module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute",
                         **result)
    module.exit_json(changed=changed, **result)
Example 7
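# Likely the community.aws ec2_eip module (assumption): allocates, associates, disassociates,
# or releases an Elastic IP for an instance or ENI, with optional tagging.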
def main():
    argument_spec = dict(
        device_id=dict(required=False,
                       aliases=['instance_id'],
                       deprecated_aliases=[
                           dict(name='instance_id',
                                date='2022-12-01',
                                collection_name='community.aws')
                       ]),
        public_ip=dict(required=False, aliases=['ip']),
        state=dict(required=False,
                   default='present',
                   choices=['present', 'absent']),
        in_vpc=dict(required=False, type='bool', default=False),
        reuse_existing_ip_allowed=dict(required=False,
                                       type='bool',
                                       default=False),
        release_on_disassociation=dict(required=False,
                                       type='bool',
                                       default=False),
        allow_reassociation=dict(type='bool', default=False),
        wait_timeout=dict(type='int',
                          removed_at_date='2022-06-01',
                          removed_from_collection='community.aws'),
        private_ip_address=dict(),
        tags=dict(required=False, type='dict'),
        purge_tags=dict(required=False, type='bool', default=True),
        tag_name=dict(),
        tag_value=dict(),
        public_ipv4_pool=dict())

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_by={
            'private_ip_address': ['device_id'],
        },
    )

    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())

    device_id = module.params.get('device_id')
    instance_id = module.params.get('instance_id')
    public_ip = module.params.get('public_ip')
    private_ip_address = module.params.get('private_ip_address')
    state = module.params.get('state')
    in_vpc = module.params.get('in_vpc')
    domain = 'vpc' if in_vpc else None
    reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
    release_on_disassociation = module.params.get('release_on_disassociation')
    allow_reassociation = module.params.get('allow_reassociation')
    tag_name = module.params.get('tag_name')
    tag_value = module.params.get('tag_value')
    public_ipv4_pool = module.params.get('public_ipv4_pool')
    tags = module.params.get('tags')
    purge_tags = module.params.get('purge_tags')

    if instance_id:
        is_instance = True
        device_id = instance_id
    else:
        if device_id and device_id.startswith('i-'):
            is_instance = True
        elif device_id:
            if device_id.startswith('eni-') and not in_vpc:
                module.fail_json(
                    msg="If you are specifying an ENI, in_vpc must be true")
            is_instance = False

    # Tags for *searching* for an EIP.
    tag_dict = generate_tag_dict(module, tag_name, tag_value)

    try:
        if device_id:
            address = find_address(ec2,
                                   module,
                                   public_ip,
                                   device_id,
                                   is_instance=is_instance)
        else:
            address = find_address(ec2, module, public_ip, None)

        if state == 'present':
            if device_id:
                result = ensure_present(ec2,
                                        module,
                                        domain,
                                        address,
                                        private_ip_address,
                                        device_id,
                                        reuse_existing_ip_allowed,
                                        allow_reassociation,
                                        module.check_mode,
                                        is_instance=is_instance)
                if 'allocation_id' not in result:
                    # Don't check tags on check_mode here - no EIP to pass through
                    module.exit_json(**result)
            else:
                if address:
                    result = {
                        'changed': False,
                        'public_ip': address['PublicIp'],
                        'allocation_id': address['AllocationId']
                    }
                else:
                    address, changed = allocate_address(
                        ec2, module, domain, reuse_existing_ip_allowed,
                        module.check_mode, tag_dict, public_ipv4_pool)
                    if address:
                        result = {
                            'changed': changed,
                            'public_ip': address['PublicIp'],
                            'allocation_id': address['AllocationId']
                        }
                    else:
                        # Don't check tags on check_mode here - no EIP to pass through
                        result = {'changed': changed}
                        module.exit_json(**result)

            result['changed'] |= ensure_ec2_tags(ec2,
                                                 module,
                                                 result['allocation_id'],
                                                 resource_type='elastic-ip',
                                                 tags=tags,
                                                 purge_tags=purge_tags)
        else:
            if device_id:
                disassociated = ensure_absent(ec2,
                                              module,
                                              address,
                                              device_id,
                                              module.check_mode,
                                              is_instance=is_instance)

                if release_on_disassociation and disassociated['changed']:
                    released = release_address(ec2, module, address,
                                               module.check_mode)
                    result = {
                        'changed': True,
                        'disassociated': disassociated['changed'],
                        'released': released['changed']
                    }
                else:
                    result = {
                        'changed': disassociated['changed'],
                        'disassociated': disassociated['changed'],
                        'released': False
                    }
            else:
                released = release_address(ec2, module, address,
                                           module.check_mode)
                result = {
                    'changed': released['changed'],
                    'disassociated': False,
                    'released': released['changed']
                }

    except (botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e)

    module.exit_json(**result)
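Example 8
# An info-style module for AWS CodeBuild: lists or describes projects, report groups,
# reports, and builds depending on which list_*/describe_* flag is set.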
def main():
    argument_spec = dict(
        project_names=dict(required=False, type='list', elements='str'),
        project_name=dict(required=False),
        report_group_arn=dict(required=False),
        sort_by=dict(required=False),
        sort_order=dict(required=False,
                        choices=['ASCENDING', 'DESCENDING'],
                        default='ASCENDING'),
        report_filter_status=dict(required=False,
                                  choices=[
                                      'GENERATING', 'SUCCEEDED', 'FAILED',
                                      'INCOMPLETE', 'DELETING'
                                  ],
                                  default='SUCCEEDED'),
        list_projects=dict(required=False, type='bool'),
        list_shared_projects=dict(required=False, type='bool'),
        list_report_groups=dict(required=False, type='bool'),
        list_shared_report_groups=dict(required=False, type='bool'),
        list_reports=dict(required=False, type='bool'),
        list_reports_for_report_group=dict(required=False, type='bool'),
        list_builds=dict(required=False, type='bool'),
        list_builds_for_project=dict(required=False, type='bool'),
        describe_projects=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=(
            ('list_projects', True, ['sort_by']),
            ('list_shared_projects', True, ['sort_by']),
            ('list_report_groups', True, ['sort_by']),
            ('list_shared_report_groups', True, ['sort_by']),
            ('list_reports_for_report_group', True, ['report_group_arn']),
            ('list_builds_for_project', True, ['project_name']),
            ('describe_projects', True, ['project_names']),
        ),
        mutually_exclusive=[(
            'list_projects',
            'list_shared_projects',
            'list_report_groups',
            'list_shared_report_groups',
            'list_reports',
            'list_reports_for_report_group',
            'list_builds',
            'list_builds_for_project',
            'describe_projects',
        )],
    )

    client = module.client('codebuild',
                           retry_decorator=AWSRetry.exponential_backoff())
    _it, paginate = _codebuild(client, module)

    if module.params['list_projects'] or module.params['list_shared_projects']:
        module.exit_json(
            project_ids=aws_response_list_parser(paginate, _it, 'projects'))
    elif module.params['list_report_groups'] or module.params[
            'list_shared_report_groups']:
        module.exit_json(report_group_ids=aws_response_list_parser(
            paginate, _it, 'reportGroups'))
    elif module.params['list_reports'] or module.params[
            'list_reports_for_report_group']:
        module.exit_json(
            report_arns=aws_response_list_parser(paginate, _it, 'reports'))
    elif module.params['list_builds'] or module.params[
            'list_builds_for_project']:
        module.exit_json(
            build_ids=aws_response_list_parser(paginate, _it, 'ids'))
    elif module.params['describe_projects']:
        module.exit_json(
            projects=aws_response_list_parser(paginate, _it, 'projects'))
    else:
        module.fail_json("unknown options are passed")
Example 9
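# Likely the community.aws iam_policy module (assumption): attaches or removes an inline IAM
# policy document on a user, group, or role.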
def main():
    argument_spec = dict(iam_type=dict(required=True,
                                       choices=['user', 'group', 'role']),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         iam_name=dict(required=True),
                         policy_name=dict(required=True),
                         policy_document=dict(default=None, required=False),
                         policy_json=dict(type='json',
                                          default=None,
                                          required=False),
                         skip_duplicates=dict(type='bool',
                                              default=None,
                                              required=False))
    mutually_exclusive = [['policy_document', 'policy_json']]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              mutually_exclusive=mutually_exclusive,
                              supports_check_mode=True)

    skip_duplicates = module.params.get('skip_duplicates')

    if (skip_duplicates is None):
        module.deprecate(
            'The skip_duplicates behaviour has caused confusion and'
            ' will be disabled by default in Ansible 2.14',
            date='2022-06-01',
            collection_name='community.aws')
        skip_duplicates = True

    if module.params.get('policy_document'):
        module.deprecate(
            'The policy_document option has been deprecated and'
            ' will be removed in Ansible 2.14',
            date='2022-06-01',
            collection_name='community.aws')

    args = dict(
        client=module.client('iam',
                             retry_decorator=AWSRetry.jittered_backoff()),
        name=module.params.get('iam_name'),
        policy_name=module.params.get('policy_name'),
        policy_document=module.params.get('policy_document'),
        policy_json=module.params.get('policy_json'),
        skip_duplicates=skip_duplicates,
        state=module.params.get('state'),
        check_mode=module.check_mode,
    )
    iam_type = module.params.get('iam_type')

    try:
        if iam_type == 'user':
            policy = UserPolicy(**args)
        elif iam_type == 'role':
            policy = RolePolicy(**args)
        elif iam_type == 'group':
            policy = GroupPolicy(**args)

        module.exit_json(**(policy.run()))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e)
    except PolicyError as e:
        module.fail_json(msg=str(e))
Example 10
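# Likely the community.aws elb_instance module (assumption): registers or deregisters an EC2
# instance with one or more classic Elastic Load Balancers.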
def main():
    argument_spec = dict(
        state={
            'required': True,
            'choices': ['present', 'absent']
        },
        instance_id={'required': True},
        ec2_elbs={
            'default': None,
            'required': False,
            'type': 'list',
            'elements': 'str'
        },
        enable_availability_zone={
            'default': True,
            'required': False,
            'type': 'bool'
        },
        wait={
            'required': False,
            'default': True,
            'type': 'bool'
        },
        wait_timeout={
            'required': False,
            'default': 0,
            'type': 'int'
        },
    )
    required_if = [
        ('state', 'present', ['ec2_elbs']),
    ]

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
    )

    ec2_elbs = module.params['ec2_elbs']
    wait = module.params['wait']
    enable_availability_zone = module.params['enable_availability_zone']
    timeout = module.params['wait_timeout']
    instance_id = module.params['instance_id']

    elb_man = ElbManager(module, instance_id, ec2_elbs)

    if ec2_elbs is not None:
        for elb in ec2_elbs:
            if not elb_man.exists(elb):
                module.fail_json(msg="ELB {0} does not exist".format(elb))

    if module.params['state'] == 'present':
        elb_man.register(wait, enable_availability_zone, timeout)
    elif module.params['state'] == 'absent':
        elb_man.deregister(wait, timeout)

    # XXX We're not a _facts module; we shouldn't be returning a fact and polluting
    # the namespace
    ansible_facts = {
        'ec2_elbs': [lb['LoadBalancerName'] for lb in elb_man.lbs]
    }

    module.exit_json(
        changed=elb_man.changed,
        ansible_facts=ansible_facts,
        updated_elbs=list(elb_man.updated_elbs),
    )
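Example 11
# Likely the community.aws aws_s3_bucket_info module, formerly aws_s3_bucket_facts (assumption):
# lists S3 buckets and optionally gathers per-bucket details such as ACLs, policies, and tags.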
def main():
    """
    Get list of S3 buckets
    :return:
    """
    argument_spec = dict(
        name=dict(type='str', default=""),
        name_filter=dict(type='str', default=""),
        bucket_facts=dict(type='dict', options=dict(
            bucket_accelerate_configuration=dict(type='bool', default=False),
            bucket_acl=dict(type='bool', default=False),
            bucket_cors=dict(type='bool', default=False),
            bucket_encryption=dict(type='bool', default=False),
            bucket_lifecycle_configuration=dict(type='bool', default=False),
            bucket_location=dict(type='bool', default=False),
            bucket_logging=dict(type='bool', default=False),
            bucket_notification_configuration=dict(type='bool', default=False),
            bucket_ownership_controls=dict(type='bool', default=False),
            bucket_policy=dict(type='bool', default=False),
            bucket_policy_status=dict(type='bool', default=False),
            bucket_replication=dict(type='bool', default=False),
            bucket_request_payment=dict(type='bool', default=False),
            bucket_tagging=dict(type='bool', default=False),
            bucket_website=dict(type='bool', default=False),
            public_access_block=dict(type='bool', default=False),
        )),
        transform_location=dict(type='bool', default=False)
    )

    # Ensure we have an empty dict
    result = {}

    # Define mutually exclusive options
    mutually_exclusive = [
        ['name', 'name_filter']
    ]

    # Including ec2 argument spec
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
    is_old_facts = module._name == 'aws_s3_bucket_facts'
    if is_old_facts:
        module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', "
                         "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')

    # Get parameters
    name = module.params.get("name")
    name_filter = module.params.get("name_filter")
    requested_facts = module.params.get("bucket_facts")
    transform_location = module.params.get("bucket_facts")

    # Set up connection
    try:
        connection = module.client('s3')
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
        module.fail_json_aws(err_code, msg='Failed to connect to AWS')

    # Get basic bucket list (name + creation date)
    bucket_list = get_bucket_list(module, connection, name, name_filter)

    # Add information about name/name_filter to result
    if name:
        result['bucket_name'] = name
    elif name_filter:
        result['bucket_name_filter'] = name_filter

    # Gather detailed information about buckets if requested
    bucket_facts = module.params.get("bucket_facts")
    if bucket_facts:
        result['buckets'] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location)
    else:
        result['buckets'] = bucket_list

    # Send exit
    if is_old_facts:
        module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result)
    else:
        module.exit_json(msg="Retrieved s3 info.", **result)
Example 12
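# Likely the community.aws ecs_service module (assumption): creates, updates, or deletes an ECS
# service, including load balancers, placement options, and network configuration.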
def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False,
                            default=[],
                            type='list',
                            elements='dict'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, default='', type='str',
                          no_log=False),
        role=dict(required=False, default='', type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10),
        force_new_deployment=dict(required=False, default=False, type='bool'),
        force_deletion=dict(required=False, default=False, type='bool'),
        deployment_configuration=dict(required=False, default={}, type='dict'),
        placement_constraints=dict(required=False,
                                   default=[],
                                   type='list',
                                   elements='dict',
                                   options=dict(type=dict(type='str'),
                                                expression=dict(type='str'))),
        placement_strategy=dict(required=False,
                                default=[],
                                type='list',
                                elements='dict',
                                options=dict(
                                    type=dict(type='str'),
                                    field=dict(type='str'),
                                )),
        health_check_grace_period_seconds=dict(required=False, type='int'),
        network_configuration=dict(required=False,
                                   type='dict',
                                   options=dict(
                                       subnets=dict(type='list',
                                                    elements='str'),
                                       security_groups=dict(type='list',
                                                            elements='str'),
                                       assign_public_ip=dict(type='bool'))),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        platform_version=dict(required=False, type='str'),
        service_registries=dict(required=False,
                                type='list',
                                default=[],
                                elements='dict'),
        scheduling_strategy=dict(required=False, choices=['DAEMON',
                                                          'REPLICA']))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('state', 'present',
                                            ['task_definition']),
                                           ('launch_type', 'FARGATE',
                                            ['network_configuration'])],
                              required_together=[['load_balancers', 'role']])

    if module.params['state'] == 'present' and module.params[
            'scheduling_strategy'] == 'REPLICA':
        if module.params['desired_count'] is None:
            module.fail_json(
                msg=
                'state is present, scheduling_strategy is REPLICA; missing desired_count'
            )

    service_mgr = EcsServiceManager(module)
    if module.params['network_configuration']:
        network_configuration = service_mgr.format_network_configuration(
            module.params['network_configuration'])
    else:
        network_configuration = None

    deployment_configuration = map_complex_type(
        module.params['deployment_configuration'],
        DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    deploymentConfiguration = snake_dict_to_camel_dict(
        deployment_configuration)
    serviceRegistries = list(
        map(snake_dict_to_camel_dict, module.params['service_registries']))

    try:
        existing = service_mgr.describe_service(module.params['cluster'],
                                                module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '" +
                         module.params['name'] + "' in cluster '" +
                         module.params['cluster'] + "': " + str(e))

    results = dict(changed=False)

    if module.params['state'] == 'present':

        matching = False
        update = False

        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if module.params['force_new_deployment']:
                update = True
            elif service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = existing
            else:
                update = True

        if not matching:
            if not module.check_mode:

                role = module.params['role']
                clientToken = module.params['client_token']

                loadBalancers = []
                for loadBalancer in module.params['load_balancers']:
                    if 'containerPort' in loadBalancer:
                        loadBalancer['containerPort'] = int(
                            loadBalancer['containerPort'])
                    loadBalancers.append(loadBalancer)

                if update:
                    # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature

                    if module.params['scheduling_strategy']:
                        if (existing['schedulingStrategy']
                            ) != module.params['scheduling_strategy']:
                            module.fail_json(
                                msg=
                                "It is not possible to update the scheduling strategy of an existing service"
                            )

                    if module.params['service_registries']:
                        if (existing['serviceRegistries']
                                or []) != serviceRegistries:
                            module.fail_json(
                                msg=
                                "It is not possible to update the service registries of an existing service"
                            )

                    if (existing['loadBalancers'] or []) != loadBalancers:
                        module.fail_json(
                            msg=
                            "It is not possible to update the load balancers of an existing service"
                        )

                    # update required
                    response = service_mgr.update_service(
                        module.params['name'], module.params['cluster'],
                        module.params['task_definition'],
                        module.params['desired_count'],
                        deploymentConfiguration, network_configuration,
                        module.params['health_check_grace_period_seconds'],
                        module.params['force_new_deployment'])

                else:
                    try:
                        response = service_mgr.create_service(
                            module.params['name'], module.params['cluster'],
                            module.params['task_definition'], loadBalancers,
                            module.params['desired_count'], clientToken, role,
                            deploymentConfiguration,
                            module.params['placement_constraints'],
                            module.params['placement_strategy'],
                            module.params['health_check_grace_period_seconds'],
                            network_configuration, serviceRegistries,
                            module.params['launch_type'],
                            module.params['platform_version'],
                            module.params['scheduling_strategy'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't create service")

                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(
                            module.params['name'],
                            module.params['cluster'],
                            module.params['force_deletion'],
                        )
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't delete service")
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            module.fail_json(msg="Service '" + module.params['name'] +
                             " not found.")
            return
        # it exists, so we should delete it and mark changed.
        # return info about the cluster deleted
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'],
                                                    module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
        if i == repeat - 1:
            module.fail_json(msg="Service still not deleted after " +
                             str(repeat) + " tries of " + str(delay) +
                             " seconds each.")
            return

    module.exit_json(**results)
Example 13
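# Likely the community.aws redshift module (assumption): creates, modifies, describes ('facts'),
# or deletes an Amazon Redshift cluster.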
def main():
    argument_spec = dict(
        command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
        identifier=dict(required=True),
        node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
                                'ds2.8xlarge', 'dc1.large', 'dc2.large',
                                'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
                                'dw2.large', 'dw2.8xlarge'], required=False),
        username=dict(required=False),
        password=dict(no_log=True, required=False),
        db_name=dict(required=False),
        cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
        cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'),
        vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'),
        skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
                                         type='bool', default=False),
        final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
        cluster_subnet_group_name=dict(aliases=['subnet']),
        availability_zone=dict(aliases=['aws_zone', 'zone']),
        preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
        cluster_parameter_group_name=dict(aliases=['param_group_name']),
        automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
        port=dict(type='int'),
        cluster_version=dict(aliases=['version'], choices=['1.0']),
        allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
        number_of_nodes=dict(type='int'),
        publicly_accessible=dict(type='bool', default=False),
        encrypted=dict(type='bool', default=False),
        elastic_ip=dict(required=False),
        new_cluster_identifier=dict(aliases=['new_identifier']),
        enhanced_vpc_routing=dict(type='bool', default=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
        tags=dict(type='dict', aliases=['resource_tags']),
        purge_tags=dict(type='bool', default=True)
    )

    required_if = [
        ('command', 'delete', ['skip_final_cluster_snapshot']),
        ('command', 'create', ['node_type',
                               'username',
                               'password'])
    ]

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=required_if
    )

    command = module.params.get('command')
    skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
    final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
    # required_if can't express "required only when skip_final_cluster_snapshot is False", so check it manually
    if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
        module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False")

    conn = module.client('redshift')

    changed = True
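    # Dispatch to the handler for the requested command; each helper returns a
    # (changed, cluster) tuple.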
    if command == 'create':
        (changed, cluster) = create_cluster(module, conn)

    elif command == 'facts':
        (changed, cluster) = describe_cluster(module, conn)

    elif command == 'delete':
        (changed, cluster) = delete_cluster(module, conn)

    elif command == 'modify':
        (changed, cluster) = modify_cluster(module, conn)

    module.exit_json(changed=changed, cluster=cluster)
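
The create_cluster/describe_cluster/delete_cluster/modify_cluster helpers called above are defined elsewhere in the module. Purely as an illustration, a minimal sketch of what the delete handler might look like, assuming the documented boto3 Redshift client API (the helper name, return shape and waiter timing here are assumptions, not the module's actual code):

def delete_cluster_sketch(module, redshift):
    # Hypothetical stand-in for the module's delete_cluster() helper.
    identifier = module.params['identifier']
    params = dict(ClusterIdentifier=identifier,
                  SkipFinalClusterSnapshot=module.params['skip_final_cluster_snapshot'])
    if not module.params['skip_final_cluster_snapshot']:
        params['FinalClusterSnapshotIdentifier'] = module.params['final_cluster_snapshot_identifier']
    try:
        redshift.delete_cluster(**params)
    except redshift.exceptions.ClusterNotFoundFault:
        return False, {}
    if module.params['wait']:
        # The cluster_deleted waiter polls every 60 seconds by default.
        waiter = redshift.get_waiter('cluster_deleted')
        waiter.wait(ClusterIdentifier=identifier,
                    WaiterConfig={'MaxAttempts': max(module.params['wait_timeout'] // 60, 1)})
    return True, {}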
Esempio n. 14
0
def main():
    argument_spec = dict(
        query=dict(choices=[
            'change',
            'checker_ip_range',
            'health_check',
            'hosted_zone',
            'record_sets',
            'reusable_delegation_set',
        ],
                   required=True),
        change_id=dict(),
        hosted_zone_id=dict(),
        max_items=dict(),
        next_marker=dict(),
        delegation_set_id=dict(),
        start_record_name=dict(),
        type=dict(choices=[
            'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS'
        ]),
        dns_name=dict(),
        resource_id=dict(type='list', aliases=['resource_ids'],
                         elements='str'),
        health_check_id=dict(),
        hosted_zone_method=dict(
            choices=['details', 'list', 'list_by_name', 'count', 'tags'],
            default='list'),
        health_check_method=dict(choices=[
            'list',
            'details',
            'status',
            'failure_reason',
            'count',
            'tags',
        ],
                                 default='list'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['hosted_zone_method', 'health_check_method'],
        ],
        check_boto3=False,
    )
    if module._name == 'route53_facts':
        module.deprecate(
            "The 'route53_facts' module has been renamed to 'route53_info'",
            date='2021-12-01',
            collection_name='community.aws')

    try:
        route53 = module.client('route53')
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

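    # Map each supported query type to the function that gathers its results.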
    invocations = {
        'change': change_details,
        'checker_ip_range': checker_ip_range_details,
        'health_check': health_check_details,
        'hosted_zone': hosted_zone_details,
        'record_sets': record_sets_details,
        'reusable_delegation_set': reusable_delegation_set_details,
    }

    results = dict(changed=False)
    try:
        results = invocations[module.params.get('query')](route53, module)
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json(msg=to_native(e))

    module.exit_json(**results)
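
Each value in the invocations table is a callable taking (client, module) and returning a dict that is passed straight to exit_json. The real handlers are defined elsewhere in the module; purely as a sketch, the simplest of them could look like the following (the _sketch name is made up, but get_checker_ip_ranges() is a real Route53 API call):

def checker_ip_range_details_sketch(route53, module):
    # GetCheckerIpRanges takes no parameters and is read-only,
    # so it is safe in check mode.
    ip_ranges = route53.get_checker_ip_ranges()['CheckerIpRanges']
    return dict(changed=False, checker_ip_ranges=ip_ranges)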
Esempio n. 15
0
def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        arn=dict(required=False, type='str'),
        family=dict(required=False, type='str'),
        revision=dict(required=False, type='int'),
        force_create=dict(required=False, default=False, type='bool'),
        containers=dict(required=True, type='list', elements='dict'),
        network_mode=dict(
            required=False,
            default='bridge',
            choices=['default', 'bridge', 'host', 'none', 'awsvpc'],
            type='str'),
        task_role_arn=dict(required=False, default='', type='str'),
        execution_role_arn=dict(required=False, default='', type='str'),
        volumes=dict(required=False, type='list', elements='dict'),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        cpu=dict(),
        memory=dict(required=False, type='str'),
        placement_constraints=dict(required=False,
                                   type='list',
                                   elements='dict',
                                   options=dict(type=dict(type='str'),
                                                expression=dict(type='str'))),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE',
                                            ['cpu', 'memory'])])

    task_to_describe = None
    task_mgr = EcsTaskManager(module)
    results = dict(changed=False)

    if module.params['state'] == 'present':
        if 'containers' not in module.params or not module.params['containers']:
            module.fail_json(
                msg=
                "To use task definitions, a list of containers must be specified"
            )

        if 'family' not in module.params or not module.params['family']:
            module.fail_json(
                msg="To use task definitions, a family must be specified")

        network_mode = module.params['network_mode']
        launch_type = module.params['launch_type']
        placement_constraints = module.params['placement_constraints']
        if launch_type == 'FARGATE':
            if network_mode != 'awsvpc':
                module.fail_json(
                    msg=
                    "To use FARGATE launch type, network_mode must be awsvpc")
            if placement_constraints:
                module.fail_json(
                    msg=
                    "Task placement constraints are not supported for tasks run on Fargate"
                )

        for container in module.params['containers']:
            if container.get('links') and network_mode == 'awsvpc':
                module.fail_json(
                    msg=
                    'links parameter is not supported if network mode is awsvpc.'
                )

            for environment in container.get('environment', []):
                # the ECS API expects environment values to be strings
                environment['value'] = to_text(environment['value'])

            for environment_file in container.get('environmentFiles', []):
                if environment_file['type'] != 's3':
                    module.fail_json(
                        msg=
                        'The only supported value for environmentFiles is s3.')

            for linux_param in container.get('linuxParameters', {}):
                if linux_param == 'devices' and launch_type == 'FARGATE':
                    module.fail_json(
                        msg=
                        'devices parameter is not supported with the FARGATE launch type.'
                    )

                if linux_param == 'maxSwap' and launch_type == 'FARGATE':
                    module.fail_json(
                        msg=
                        'maxSwap parameter is not supported with the FARGATE launch type.'
                    )
                elif linux_param == 'maxSwap' and int(
                        container['linuxParameters']['maxSwap']) < 0:
                    module.fail_json(
                        msg=
                        'Accepted values for maxSwap are 0 or any positive integer.'
                    )

                if (linux_param == 'swappiness' and
                    (int(container['linuxParameters']['swappiness']) < 0 or
                     int(container['linuxParameters']['swappiness']) > 100)):
                    module.fail_json(
                        msg=
                        'Accepted values for swappiness are whole numbers between 0 and 100.'
                    )

                if linux_param == 'sharedMemorySize' and launch_type == 'FARGATE':
                    module.fail_json(
                        msg=
                        'sharedMemorySize parameter is not supported with the FARGATE launch type.'
                    )

                if linux_param == 'tmpfs' and launch_type == 'FARGATE':
                    module.fail_json(
                        msg=
                        'tmpfs parameter is not supported with the FARGATE launch type.'
                    )

            if container.get('hostname') and network_mode == 'awsvpc':
                module.fail_json(
                    msg=
                    'hostname parameter is not supported when the awsvpc network mode is used.'
                )

            if container.get('extraHosts') and network_mode == 'awsvpc':
                module.fail_json(
                    msg=
                    'extraHosts parameter is not supported when the awsvpc network mode is used.'
                )

        family = module.params['family']
        existing_definitions_in_family = task_mgr.describe_task_definitions(
            module.params['family'])

        if 'revision' in module.params and module.params['revision']:
            # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
            revision = int(module.params['revision'])

            # A revision has been explicitly specified. Attempt to locate a matching revision
            tasks_defs_for_revision = [
                td for td in existing_definitions_in_family
                if td['revision'] == revision
            ]
            existing = tasks_defs_for_revision[0] if len(
                tasks_defs_for_revision) > 0 else None

            if existing and existing['status'] != "ACTIVE":
                # We cannot reactivate an inactive revision
                module.fail_json(
                    msg=
                    "A task in family '%s' already exists for revision %d, but it is inactive"
                    % (family, revision))
            elif not existing:
                if not existing_definitions_in_family and revision != 1:
                    module.fail_json(
                        msg=
                        "You have specified a revision of %d but a created revision would be 1"
                        % revision)
                elif existing_definitions_in_family and existing_definitions_in_family[
                        -1]['revision'] + 1 != revision:
                    module.fail_json(
                        msg=
                        "You have specified a revision of %d but a created revision would be %d"
                        % (revision,
                           existing_definitions_in_family[-1]['revision'] + 1))
        else:
            existing = None

            def _right_has_values_of_left(left, right):
                # Make sure the values are equivalent for everything left has
                for k, v in left.items():
                    if not ((not v and (k not in right or not right[k])) or
                            (k in right and v == right[k])):
                        # We don't care about list ordering because ECS can change things
                        if isinstance(v, list) and k in right:
                            left_list = v
                            right_list = right[k] or []

                            if len(left_list) != len(right_list):
                                return False

                            for list_val in left_list:
                                if list_val not in right_list:
                                    # if list_val is the port mapping, the key 'protocol' may be absent (but defaults to 'tcp')
                                    # fill in that default if absent and see if it is in right_list then
                                    if isinstance(
                                            list_val, dict
                                    ) and not list_val.get('protocol'):
                                        modified_list_val = dict(list_val)
                                        modified_list_val.update(
                                            protocol='tcp')
                                        if modified_list_val in right_list:
                                            continue
                        else:
                            return False

                # Make sure right doesn't have anything that left doesn't
                for k, v in right.items():
                    if v and k not in left:
                        # 'essential' defaults to True when not specified
                        if k == 'essential' and v is True:
                            pass
                        else:
                            return False

                return True

            def _task_definition_matches(requested_volumes,
                                         requested_containers,
                                         requested_task_role_arn,
                                         requested_launch_type,
                                         existing_task_definition):
                # The checks below use `td`; bind it explicitly to the parameter
                # instead of relying on the loop variable from the enclosing scope.
                td = existing_task_definition

                if td['status'] != "ACTIVE":
                    return None

                if requested_task_role_arn != td.get('taskRoleArn', ""):
                    return None

                if requested_launch_type is not None and requested_launch_type not in td.get(
                        'compatibilities', []):
                    return None

                existing_volumes = td.get('volumes', []) or []

                if len(requested_volumes) != len(existing_volumes):
                    # Nope.
                    return None

                if len(requested_volumes) > 0:
                    for requested_vol in requested_volumes:
                        found = False

                        for actual_vol in existing_volumes:
                            if _right_has_values_of_left(
                                    requested_vol, actual_vol):
                                found = True
                                break

                        if not found:
                            return None

                existing_containers = td.get('containerDefinitions', []) or []

                if len(requested_containers) != len(existing_containers):
                    # Nope.
                    return None

                for requested_container in requested_containers:
                    found = False

                    for actual_container in existing_containers:
                        if _right_has_values_of_left(requested_container,
                                                     actual_container):
                            found = True
                            break

                    if not found:
                        return None

                return existing_task_definition

            # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
            for td in existing_definitions_in_family:
                requested_volumes = module.params['volumes'] or []
                requested_containers = module.params['containers'] or []
                requested_task_role_arn = module.params['task_role_arn']
                requested_launch_type = module.params['launch_type']
                existing = _task_definition_matches(requested_volumes,
                                                    requested_containers,
                                                    requested_task_role_arn,
                                                    requested_launch_type, td)

                if existing:
                    break

        if existing and not module.params.get('force_create'):
            # Awesome. Have an existing one. Nothing to do.
            results['taskdefinition'] = existing
        else:
            if not module.check_mode:
                # Doesn't exist. create it.
                volumes = module.params.get('volumes', []) or []
                results['taskdefinition'] = task_mgr.register_task(
                    module.params['family'],
                    module.params['task_role_arn'],
                    module.params['execution_role_arn'],
                    module.params['network_mode'],
                    module.params['containers'],
                    volumes,
                    module.params['launch_type'],
                    module.params['cpu'],
                    module.params['memory'],
                    module.params['placement_constraints'],
                )
            results['changed'] = True

    elif module.params['state'] == 'absent':
        # When de-registering a task definition, we can specify the ARN OR the family and revision.
        if module.params['arn'] is not None:
            task_to_describe = module.params['arn']
        elif module.params['family'] is not None and module.params['revision'] is not None:
            task_to_describe = module.params['family'] + ":" + str(
                module.params['revision'])
        else:
            module.fail_json(
                msg=
                "To use task definitions, an arn or family and revision must be specified"
            )

        existing = task_mgr.describe_task(task_to_describe)

        if not existing:
            pass
        else:
            # It exists, so we should delete it and mark changed. Return info about the task definition deleted
            results['taskdefinition'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    task_mgr.deregister_task(task_to_describe)
                results['changed'] = True

    module.exit_json(**results)
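
To make the port-mapping comparison in _right_has_values_of_left() above concrete: a requested mapping that omits 'protocol' is given the default 'tcp' before being compared against the registered definition, so the two dicts below (illustrative values) are treated as equivalent.

requested_mapping = {'containerPort': 8080, 'hostPort': 8080}   # 'protocol' omitted
registered_mapping = {'containerPort': 8080, 'hostPort': 8080, 'protocol': 'tcp'}

with_default = dict(requested_mapping)
with_default.update(protocol='tcp')
assert with_default == registered_mapping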
Esempio n. 16
0
def main():
    argument_spec = dict(
        state=dict(type='str',
                   required=True,
                   choices=['absent', 'create', 'delete', 'get', 'present'],
                   aliases=['command']),
        zone=dict(type='str'),
        hosted_zone_id=dict(type='str'),
        record=dict(type='str', required=True),
        ttl=dict(type='int', default=3600),
        type=dict(type='str',
                  required=True,
                  choices=[
                      'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA',
                      'SPF', 'SRV', 'TXT'
                  ]),
        alias=dict(type='bool'),
        alias_hosted_zone_id=dict(type='str'),
        alias_evaluate_target_health=dict(type='bool', default=False),
        value=dict(type='list', elements='str'),
        overwrite=dict(type='bool'),
        retry_interval=dict(type='int', default=500),
        private_zone=dict(type='bool', default=False),
        identifier=dict(type='str'),
        weight=dict(type='int'),
        region=dict(type='str'),
        health_check=dict(type='str'),
        failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']),
        vpc_id=dict(type='str'),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['zone', 'hosted_zone_id']],
        # If alias is True then you must specify alias_hosted_zone_id as well
        required_together=[['alias', 'alias_hosted_zone_id']],
        # state=present, absent, create, delete THEN value is required
        required_if=(
            ('state', 'present', ['value']),
            ('state', 'create', ['value']),
            ('state', 'absent', ['value']),
            ('state', 'delete', ['value']),
        ),
        # failover, region and weight are mutually exclusive
        mutually_exclusive=[('failover', 'region', 'weight')],
        # failover, region and weight require identifier
        required_by=dict(
            failover=('identifier', ),
            region=('identifier', ),
            weight=('identifier', ),
        ),
    )

    if module.params['state'] in ('present', 'create'):
        command_in = 'create'
    elif module.params['state'] in ('absent', 'delete'):
        command_in = 'delete'
    elif module.params['state'] == 'get':
        command_in = 'get'

    zone_in = (module.params.get('zone') or '').lower()
    hosted_zone_id_in = module.params.get('hosted_zone_id')
    ttl_in = module.params.get('ttl')
    record_in = module.params.get('record').lower()
    type_in = module.params.get('type')
    value_in = module.params.get('value') or []
    alias_in = module.params.get('alias')
    alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
    alias_evaluate_target_health_in = module.params.get(
        'alias_evaluate_target_health')
    retry_interval_in = module.params.get('retry_interval')

    if module.params['vpc_id'] is not None:
        private_zone_in = True
    else:
        private_zone_in = module.params.get('private_zone')

    identifier_in = module.params.get('identifier')
    weight_in = module.params.get('weight')
    region_in = module.params.get('region')
    health_check_in = module.params.get('health_check')
    failover_in = module.params.get('failover')
    vpc_id_in = module.params.get('vpc_id')
    wait_in = module.params.get('wait')
    wait_timeout_in = module.params.get('wait_timeout')

    if zone_in[-1:] != '.':
        zone_in += "."

    if record_in[-1:] != '.':
        record_in += "."

    if command_in == 'create' or command_in == 'delete':
        if alias_in and len(value_in) != 1:
            module.fail_json(
                msg=
                "parameter 'value' must contain a single dns name for alias records"
            )
        if (weight_in is None and region_in is None
                and failover_in is None) and identifier_in is not None:
            module.fail_json(
                msg=
                "You have specified identifier which makes sense only if you specify one of: weight, region or failover."
            )

    # connect to the route53 endpoint
    try:
        route53 = module.client('route53',
                                retry_decorator=AWSRetry.jittered_backoff(
                                    retries=MAX_AWS_RETRIES,
                                    delay=retry_interval_in))
    except botocore.exceptions.HTTPClientError as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    # Find the named zone ID
    zone_id = hosted_zone_id_in or get_zone_id_by_name(
        route53, module, zone_in, private_zone_in, vpc_id_in)

    # Verify that the requested zone is already defined in Route53
    if zone_id is None:
        errmsg = "Zone %s does not exist in Route53" % (zone_in
                                                        or hosted_zone_id_in)
        module.fail_json(msg=errmsg)

    aws_record = get_record(route53, zone_id, record_in, type_in,
                            identifier_in)

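    # Build the ResourceRecordSet payload; scrub_none_parameters() drops keys whose
    # value is None, so unset optional settings are omitted from the API call.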
    resource_record_set = scrub_none_parameters({
        'Name':
        record_in,
        'Type':
        type_in,
        'Weight':
        weight_in,
        'Region':
        region_in,
        'Failover':
        failover_in,
        'TTL':
        ttl_in,
        'ResourceRecords': [dict(Value=value) for value in value_in],
        'HealthCheckId':
        health_check_in,
    })

    if alias_in:
        resource_record_set['AliasTarget'] = dict(
            HostedZoneId=alias_hosted_zone_id_in,
            DNSName=value_in[0],
            EvaluateTargetHealth=alias_evaluate_target_health_in)

    # On CAA records order doesn't matter
    if type_in == 'CAA':
        resource_record_set['ResourceRecords'] = sorted(
            resource_record_set['ResourceRecords'], key=itemgetter('Value'))

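    # A create request whose record already matches what is in Route53 is a no-op.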
    if command_in == 'create' and aws_record == resource_record_set:
        module.exit_json(changed=False)

    if command_in == 'get':
        if type_in == 'NS':
            ns = aws_record.get('values', [])
        else:
            # Retrieve name servers associated to the zone.
            ns = route53.get_hosted_zone(
                aws_retry=True, Id=zone_id)['DelegationSet']['NameServers']

        module.exit_json(changed=False, set=aws_record, nameservers=ns)

    if command_in == 'delete' and not aws_record:
        module.exit_json(changed=False)

    if command_in == 'create' or command_in == 'delete':
        if command_in == 'create' and aws_record:
            if not module.params['overwrite']:
                module.fail_json(
                    msg=
                    "Record already exists with different value. Set 'overwrite' to replace it"
                )
            command = 'UPSERT'
        else:
            command = command_in.upper()

    if not module.check_mode:
        try:
            change_resource_record_sets = route53.change_resource_record_sets(
                aws_retry=True,
                HostedZoneId=zone_id,
                ChangeBatch=dict(Changes=[
                    dict(Action=command, ResourceRecordSet=resource_record_set)
                ]))

            if wait_in:
                waiter = route53.get_waiter('resource_record_sets_changed')
                waiter.wait(Id=change_resource_record_sets['ChangeInfo']['Id'],
                            WaiterConfig=dict(
                                Delay=WAIT_RETRY,
                                MaxAttempts=wait_timeout_in // WAIT_RETRY,
                            ))
        except is_boto3_error_message('but it already exists'):
            module.exit_json(changed=False)
        except botocore.exceptions.WaiterError as e:
            module.fail_json_aws(
                e,
                msg='Timeout waiting for resource records changes to be applied'
            )
        except (botocore.exceptions.BotoCoreError,
                botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg='Failed to update records')
        except Exception as e:
            module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))

    module.exit_json(
        changed=True,
        diff=dict(
            before=aws_record,
            after=resource_record_set if command != 'delete' else {},
        ),
    )
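
For reference, the ChangeBatch built above ends up with the following shape for a simple UPSERT of an A record (all values illustrative):

example_change_batch = {
    'Changes': [{
        'Action': 'UPSERT',
        'ResourceRecordSet': {
            'Name': 'www.example.com.',
            'Type': 'A',
            'TTL': 3600,
            'ResourceRecords': [{'Value': '203.0.113.10'}],
        },
    }],
}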
Esempio n. 17
0
def main():
    argument_spec = dict(
        arn=dict(required=False),
        arns=dict(required=False, type='list', elements='str'),
        names=dict(required=False, type='list', elements='str'),
        container_instance_status=dict(required=False,
                                       choices=[
                                           'ACTIVE', 'DRAINING', 'REGISTERING',
                                           'DEREGISTERING',
                                           'REGISTRATION_FAILED'
                                       ],
                                       default='ACTIVE'),
        launch_type=dict(required=False,
                         choices=['EC2', 'FARGATE'],
                         default='EC2'),
        task_definition_status=dict(required=False,
                                    choices=['ACTIVE', 'INACTIVE'],
                                    default='ACTIVE'),
        task_desired_status=dict(required=False,
                                 choices=['RUNNING', 'PENDING', 'STOPPED'],
                                 default='RUNNING'),
        list_container_instances=dict(required=False, type='bool'),
        list_services=dict(required=False, type='bool'),
        list_task_definitions=dict(required=False, type='bool'),
        list_tasks=dict(required=False, type='bool'),
        describe_clusters=dict(required=False, type='bool'),
        describe_container_instances=dict(required=False, type='bool'),
        describe_services=dict(required=False, type='bool'),
        describe_task_definition=dict(required=False, type='bool'),
        describe_tasks=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=(
            ('list_container_instances', True, ['arn']),
            ('list_services', True, ['arn']),
            ('list_tasks', True, ['arn']),
            ('describe_clusters', True, ['arns']),
            ('describe_container_instances', True, ['arns']),
            ('describe_services', True, ['arn', 'names']),
            ('describe_task_definition', True, ['arn']),
            ('describe_tasks', True, ['arn', 'arns']),
        ),
        mutually_exclusive=[(
            'list_container_instances',
            'list_services',
            'list_task_definitions',
            'list_tasks',
            'describe_clusters',
            'describe_container_instances',
            'describe_services',
            'describe_task_definition',
            'describe_tasks',
        )],
    )

    client = module.client('ecs',
                           retry_decorator=AWSRetry.exponential_backoff())
    it, paginate = _ecs(client, module)

    if module.params['list_container_instances']:
        module.exit_json(container_instance_arns=aws_response_list_parser(
            paginate, it, 'containerInstanceArns'))
    elif module.params['list_services']:
        module.exit_json(
            service_arns=aws_response_list_parser(paginate, it, 'serviceArns'))
    elif module.params['list_task_definitions']:
        module.exit_json(task_definition_arns=aws_response_list_parser(
            paginate, it, 'taskDefinitionArns'))
    elif module.params['list_tasks']:
        module.exit_json(
            task_arns=aws_response_list_parser(paginate, it, 'taskArns'))
    elif module.params['describe_clusters']:
        module.exit_json(
            clusters=aws_response_list_parser(paginate, it, 'clusters'))
    elif module.params['describe_container_instances']:
        module.exit_json(container_instances=aws_response_list_parser(
            paginate, it, 'containerInstances'))
    elif module.params['describe_services']:
        module.exit_json(
            services=aws_response_list_parser(paginate, it, 'services'))
    elif module.params['describe_task_definition']:
        module.exit_json(
            task_definition=camel_dict_to_snake_dict(it['taskDefinition']))
    elif module.params['describe_tasks']:
        module.exit_json(tasks=aws_response_list_parser(paginate, it, 'tasks'))
    else:
        module.exit_json(
            cluster_arns=aws_response_list_parser(paginate, it, 'clusterArns'))
Esempio n. 18
0
def main():
    argument_spec = dict(
        id=dict(required=False),
        transfer_type=dict(required=False,
                           choices=['OUTGOING', 'INCOMING'],
                           default='INCOMING'),
        list_channels=dict(required=False, type='bool'),
        list_input_device_transfers=dict(required=False, type='bool'),
        list_input_devices=dict(required=False, type='bool'),
        list_input_security_groups=dict(required=False, type='bool'),
        list_inputs=dict(required=False, type='bool'),
        list_multiplex_programs=dict(required=False, type='bool'),
        list_multiplexes=dict(required=False, type='bool'),
        list_offerings=dict(required=False, type='bool'),
        list_reservations=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=(('list_multiplex_programs', True, ['id']), ),
        mutually_exclusive=[(
            'list_channels',
            'list_input_device_transfers',
            'list_input_devices',
            'list_input_security_groups',
            'list_inputs',
            'list_multiplex_programs',
            'list_multiplexes',
            'list_offerings',
            'list_reservations',
        )],
    )

    client = module.client('medialive',
                           retry_decorator=AWSRetry.exponential_backoff())
    it, paginate = _medialive(client, module)

    if module.params['list_channels']:
        module.exit_json(
            channels=aws_response_list_parser(paginate, it, 'Channels'))
    elif module.params['list_input_device_transfers']:
        module.exit_json(input_device_transfers=aws_response_list_parser(
            paginate, it, 'InputDeviceTransfers'))
    elif module.params['list_input_devices']:
        module.exit_json(input_devices=aws_response_list_parser(
            paginate, it, 'InputDevices'))
    elif module.params['list_input_security_groups']:
        module.exit_json(input_security_groups=aws_response_list_parser(
            paginate, it, 'InputSecurityGroups'))
    elif module.params['list_inputs']:
        module.exit_json(
            inputs=aws_response_list_parser(paginate, it, 'Inputs'))
    elif module.params['list_multiplex_programs']:
        module.exit_json(multiplex_programs=aws_response_list_parser(
            paginate, it, 'MultiplexPrograms'))
    elif module.params['list_multiplexes']:
        module.exit_json(
            multiplexes=aws_response_list_parser(paginate, it, 'Multiplexes'))
    elif module.params['list_offerings']:
        module.exit_json(
            offerings=aws_response_list_parser(paginate, it, 'Offerings'))
    elif module.params['list_reservations']:
        module.exit_json(reservations=aws_response_list_parser(
            paginate, it, 'Reservations'))
    else:
        module.fail_json("unknown options are passed")
Esempio n. 19
0
def main():
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(required=True),
            'state': dict(choices=['present', 'absent'], default='present'),
            'description': dict(default=""),
            'kms_key_id': dict(),
            'secret_type': dict(choices=['binary', 'string'],
                                default="string"),
            'secret': dict(default="", no_log=True),
            'tags': dict(type='dict', default={}),
            'rotation_lambda': dict(),
            'rotation_interval': dict(type='int', default=30),
            'recovery_window': dict(type='int', default=30),
        },
        supports_check_mode=True,
    )

    changed = False
    state = module.params.get('state')
    secrets_mgr = SecretsManagerInterface(module)
    recovery_window = module.params.get('recovery_window')
    secret = Secret(module.params.get('name'),
                    module.params.get('secret_type'),
                    module.params.get('secret'),
                    description=module.params.get('description'),
                    kms_key_id=module.params.get('kms_key_id'),
                    tags=module.params.get('tags'),
                    lambda_arn=module.params.get('rotation_lambda'),
                    rotation_interval=module.params.get('rotation_interval'))

    current_secret = secrets_mgr.get_secret(secret.name)

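    # Deleting schedules the secret for removal after `recovery_window` days;
    # a recovery_window of 0 forces an immediate, permanent delete even when a
    # deletion has already been scheduled.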
    if state == 'absent':
        if current_secret:
            if not current_secret.get("DeletedDate"):
                result = camel_dict_to_snake_dict(
                    secrets_mgr.delete_secret(secret.name,
                                              recovery_window=recovery_window))
                changed = True
            elif current_secret.get("DeletedDate") and recovery_window == 0:
                result = camel_dict_to_snake_dict(
                    secrets_mgr.delete_secret(secret.name,
                                              recovery_window=recovery_window))
                changed = True
            else:
                result = "secret already scheduled for deletion"
        else:
            result = "secret does not exist"
    if state == 'present':
        if current_secret is None:
            result = secrets_mgr.create_secret(secret)
            changed = True
        else:
            if current_secret.get("DeletedDate"):
                secrets_mgr.restore_secret(secret.name)
                changed = True
            if not secrets_mgr.secrets_match(secret, current_secret):
                result = secrets_mgr.update_secret(secret)
                changed = True
            if not rotation_match(secret, current_secret):
                result = secrets_mgr.update_rotation(secret)
                changed = True
            current_tags = boto3_tag_list_to_ansible_dict(
                current_secret.get('Tags', []))
            tags_to_add, tags_to_remove = compare_aws_tags(
                current_tags, secret.tags)
            if tags_to_add:
                secrets_mgr.tag_secret(
                    secret.name, ansible_dict_to_boto3_tag_list(tags_to_add))
                changed = True
            if tags_to_remove:
                secrets_mgr.untag_secret(secret.name, tags_to_remove)
                changed = True
        result = camel_dict_to_snake_dict(secrets_mgr.get_secret(secret.name))
        result.pop("response_metadata")

    module.exit_json(changed=changed, secret=result)
Esempio n. 20
0
def main():
    argument_spec = dict(
        policy_name=dict(required=True),
        policy_description=dict(default=''),
        policy=dict(type='json'),
        make_default=dict(type='bool', default=True),
        only_version=dict(type='bool', default=False),
        fail_on_delete=dict(type='bool',
                            removed_at_date='2022-06-01',
                            removed_from_collection='community.aws'),
        state=dict(default='present', choices=['present', 'absent']),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[['state', 'present', ['policy']]],
    )

    name = module.params.get('policy_name')
    description = module.params.get('policy_description')
    state = module.params.get('state')
    default = module.params.get('make_default')
    only = module.params.get('only_version')

    policy = None

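    # Round-trip the supplied policy document through json.loads()/json.dumps()
    # so that later comparisons are not affected by whitespace differences.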
    if module.params.get('policy') is not None:
        policy = json.dumps(json.loads(module.params.get('policy')))

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(
            module, boto3=True)
        iam = boto3_conn(module,
                         conn_type='client',
                         resource='iam',
                         region=region,
                         endpoint=ec2_url,
                         **aws_connect_kwargs)
    except (botocore.exceptions.NoCredentialsError,
            botocore.exceptions.ProfileNotFound) as e:
        module.fail_json(
            msg=
            "Can't authorize connection. Check your credentials and profile.",
            exception=traceback.format_exc(),
            **camel_dict_to_snake_dict(e.response))

    p = get_policy_by_name(module, iam, name)
    if state == 'present':
        if p is None:
            # No Policy so just create one
            try:
                rvalue = iam.create_policy(PolicyName=name,
                                           Path='/',
                                           PolicyDocument=policy,
                                           Description=description)
            except Exception as e:
                module.fail_json(msg="Couldn't create policy %s: %s" %
                                 (name, to_native(e)),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))

            module.exit_json(changed=True,
                             policy=camel_dict_to_snake_dict(rvalue['Policy']))
        else:
            policy_version, changed = get_or_create_policy_version(
                module, iam, p, policy)
            changed = set_if_default(module, iam, p, policy_version,
                                     default) or changed
            changed = set_if_only(module, iam, p, policy_version,
                                  only) or changed
            # If anything has changed we need to refresh the policy
            if changed:
                try:
                    p = iam.get_policy(PolicyArn=p['Arn'])['Policy']
                except Exception as e:
                    module.fail_json(msg="Couldn't get policy: %s" %
                                     to_native(e),
                                     exception=traceback.format_exc(),
                                     **camel_dict_to_snake_dict(e.response))

            module.exit_json(changed=changed,
                             policy=camel_dict_to_snake_dict(p))
    else:
        # Check for existing policy
        if p:
            # Detach policy
            detach_all_entities(module, iam, p)
            # Delete Versions
            try:
                versions = iam.list_policy_versions(
                    PolicyArn=p['Arn'])['Versions']
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Couldn't list policy versions: %s" %
                                 to_native(e),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            for v in versions:
                if not v['IsDefaultVersion']:
                    try:
                        iam.delete_policy_version(PolicyArn=p['Arn'],
                                                  VersionId=v['VersionId'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(
                            msg="Couldn't delete policy version %s: %s" %
                            (v['VersionId'], to_native(e)),
                            exception=traceback.format_exc(),
                            **camel_dict_to_snake_dict(e.response))
            # Delete policy
            try:
                iam.delete_policy(PolicyArn=p['Arn'])
            except Exception as e:
                module.fail_json(msg="Couldn't delete policy %s: %s" %
                                 (p['PolicyName'], to_native(e)),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            # This is the one case where we will return the old policy
            module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p))
        else:
            module.exit_json(changed=False, policy=None)
Esempio n. 21
0
def main():
    """ elasticache ansible module """
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent', 'rebooted']),
        name=dict(required=True),
        engine=dict(default='memcached'),
        cache_engine_version=dict(default=""),
        node_type=dict(default='cache.t2.small'),
        num_nodes=dict(default=1, type='int'),
        # alias for compat with the original PR 1950
        cache_parameter_group=dict(default="", aliases=['parameter_group']),
        cache_port=dict(type='int'),
        cache_subnet_group=dict(default=""),
        cache_security_groups=dict(default=[], type='list', elements='str'),
        security_group_ids=dict(default=[], type='list', elements='str'),
        zone=dict(),
        wait=dict(default=True, type='bool'),
        hard_modify=dict(type='bool'),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    name = module.params['name']
    state = module.params['state']
    engine = module.params['engine']
    cache_engine_version = module.params['cache_engine_version']
    node_type = module.params['node_type']
    num_nodes = module.params['num_nodes']
    cache_port = module.params['cache_port']
    cache_subnet_group = module.params['cache_subnet_group']
    cache_security_groups = module.params['cache_security_groups']
    security_group_ids = module.params['security_group_ids']
    zone = module.params['zone']
    wait = module.params['wait']
    hard_modify = module.params['hard_modify']
    cache_parameter_group = module.params['cache_parameter_group']

    if cache_subnet_group and cache_security_groups:
        module.fail_json(
            msg=
            "Can't specify both cache_subnet_group and cache_security_groups")

    if state == 'present' and not num_nodes:
        module.fail_json(
            msg=
            "'num_nodes' is a required parameter. Please specify num_nodes > 0"
        )

    elasticache_manager = ElastiCacheManager(
        module, name, engine, cache_engine_version, node_type, num_nodes,
        cache_port, cache_parameter_group, cache_subnet_group,
        cache_security_groups, security_group_ids, zone, wait, hard_modify,
        region, **aws_connect_kwargs)

    if state == 'present':
        elasticache_manager.ensure_present()
    elif state == 'absent':
        elasticache_manager.ensure_absent()
    elif state == 'rebooted':
        elasticache_manager.ensure_rebooted()

    facts_result = dict(changed=elasticache_manager.changed,
                        elasticache=elasticache_manager.get_info())

    module.exit_json(**facts_result)
Esempio n. 22
0
def main():
    """
     Module action handler
    """
    argument_spec = dict(
        encrypt=dict(required=False, type="bool", default=False),
        state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
        kms_key_id=dict(required=False, type='str', default=None),
        purge_tags=dict(default=True, type='bool'),
        id=dict(required=False, type='str', default=None),
        name=dict(required=False, type='str', default=None),
        tags=dict(required=False, type="dict", default={}),
        targets=dict(required=False, type="list", default=[]),
        performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
        throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None),
        provisioned_throughput_in_mibps=dict(required=False, type='float'),
        wait=dict(required=False, type="bool", default=False),
        wait_timeout=dict(required=False, type="int", default=0)
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)

    connection = EFSConnection(module)

    name = module.params.get('name')
    fs_id = module.params.get('id')
    tags = module.params.get('tags')
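    # Map the snake_case keys accepted in `targets` to the CamelCase keys the
    # EFS API expects for mount targets.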
    target_translations = {
        'ip_address': 'IpAddress',
        'security_groups': 'SecurityGroups',
        'subnet_id': 'SubnetId'
    }
    targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
    performance_mode_translations = {
        'general_purpose': 'generalPurpose',
        'max_io': 'maxIO'
    }
    encrypt = module.params.get('encrypt')
    kms_key_id = module.params.get('kms_key_id')
    performance_mode = performance_mode_translations[module.params.get('performance_mode')]
    purge_tags = module.params.get('purge_tags')
    throughput_mode = module.params.get('throughput_mode')
    provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps')
    state = str(module.params.get('state')).lower()
    changed = False

    if state == 'present':
        if not name:
            module.fail_json(msg='Name parameter is required for create')

        changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps)
        if connection.supports_provisioned_mode():
            changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed
        changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets,
                                                  throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed
        result = first_or_default(connection.get_file_systems(CreationToken=name))

    elif state == 'absent':
        if not name and not fs_id:
            module.fail_json(msg='Either name or id parameter is required for delete')

        changed = connection.delete_file_system(name, fs_id)
        result = None
    if result:
        result = camel_dict_to_snake_dict(result)
    module.exit_json(changed=changed, efs=result)
Esempio n. 23
0
def main():
    argument_spec = dict(
        distribution_id=dict(required=False, type='str'),
        invalidation_id=dict(required=False, type='str'),
        origin_access_identity_id=dict(required=False, type='str'),
        domain_name_alias=dict(required=False, type='str'),
        web_acl_id=dict(required=False, type='str'),  # needed by list_distributions_by_web_acl_id below
        all_lists=dict(required=False, default=False, type='bool'),
        distribution=dict(required=False, default=False, type='bool'),
        distribution_config=dict(required=False, default=False, type='bool'),
        origin_access_identity=dict(required=False, default=False, type='bool'),
        origin_access_identity_config=dict(required=False, default=False, type='bool'),
        invalidation=dict(required=False, default=False, type='bool'),
        streaming_distribution=dict(required=False, default=False, type='bool'),
        streaming_distribution_config=dict(required=False, default=False, type='bool'),
        list_origin_access_identities=dict(required=False, default=False, type='bool'),
        list_distributions=dict(required=False, default=False, type='bool'),
        list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
        list_invalidations=dict(required=False, default=False, type='bool'),
        list_streaming_distributions=dict(required=False, default=False, type='bool'),
        summary=dict(required=False, default=False, type='bool'),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
    is_old_facts = module._name == 'cloudfront_facts'
    if is_old_facts:
        module.deprecate("The 'cloudfront_facts' module has been renamed to 'cloudfront_info', "
                         "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')

    service_mgr = CloudFrontServiceManager(module)

    distribution_id = module.params.get('distribution_id')
    invalidation_id = module.params.get('invalidation_id')
    origin_access_identity_id = module.params.get('origin_access_identity_id')
    web_acl_id = module.params.get('web_acl_id')
    domain_name_alias = module.params.get('domain_name_alias')
    all_lists = module.params.get('all_lists')
    distribution = module.params.get('distribution')
    distribution_config = module.params.get('distribution_config')
    origin_access_identity = module.params.get('origin_access_identity')
    origin_access_identity_config = module.params.get('origin_access_identity_config')
    invalidation = module.params.get('invalidation')
    streaming_distribution = module.params.get('streaming_distribution')
    streaming_distribution_config = module.params.get('streaming_distribution_config')
    list_origin_access_identities = module.params.get('list_origin_access_identities')
    list_distributions = module.params.get('list_distributions')
    list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
    list_invalidations = module.params.get('list_invalidations')
    list_streaming_distributions = module.params.get('list_streaming_distributions')
    summary = module.params.get('summary')

    aliases = []
    result = {'cloudfront': {}}
    facts = {}

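    # These options all act on a single distribution, so they need either a
    # distribution_id or a domain_name_alias from which one can be looked up.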
    require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
                               streaming_distribution_config or list_invalidations)

    # set default to summary if no option specified
    summary = summary or not (distribution or distribution_config or origin_access_identity or
                              origin_access_identity_config or invalidation or streaming_distribution or streaming_distribution_config or
                              list_origin_access_identities or list_distributions_by_web_acl_id or list_invalidations or
                              list_streaming_distributions or list_distributions)

    # validations
    if require_distribution_id and distribution_id is None and domain_name_alias is None:
        module.fail_json(msg='Error distribution_id or domain_name_alias have not been specified.')
    if (invalidation and invalidation_id is None):
        module.fail_json(msg='Error invalidation_id has not been specified.')
    if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
        module.fail_json(msg='Error origin_access_identity_id has not been specified.')
    if list_distributions_by_web_acl_id and web_acl_id is None:
        module.fail_json(msg='Error web_acl_id has not been specified.')

    # get distribution id from domain name alias
    if require_distribution_id and distribution_id is None:
        distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
        if not distribution_id:
            module.fail_json(msg='Error unable to source a distribution id from domain_name_alias')

    # set appropriate cloudfront id
    if distribution_id and not list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
        if invalidation_id:
            facts.update({invalidation_id: {}})
    elif distribution_id and list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
    elif origin_access_identity_id:
        facts = {origin_access_identity_id: {}}
    elif web_acl_id:
        facts = {web_acl_id: {}}

    # get details based on options
    if distribution:
        facts_to_set = service_mgr.get_distribution(distribution_id)
    if distribution_config:
        facts_to_set = service_mgr.get_distribution_config(distribution_id)
    if origin_access_identity:
        facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
    if origin_access_identity_config:
        facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
    if invalidation:
        facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
        facts[invalidation_id].update(facts_to_set)
    if streaming_distribution:
        facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
    if streaming_distribution_config:
        facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
    if list_invalidations:
        facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
    if 'facts_to_set' in vars():
        facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)

    # get list based on options
    if all_lists or list_origin_access_identities:
        facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
    if all_lists or list_distributions:
        facts['distributions'] = service_mgr.list_distributions()
    if all_lists or list_streaming_distributions:
        facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
    if list_distributions_by_web_acl_id:
        facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
    if list_invalidations:
        facts['invalidations'] = service_mgr.list_invalidations(distribution_id)

    # default summary option
    if summary:
        facts['summary'] = service_mgr.summary()

    result['changed'] = False
    result['cloudfront'].update(facts)
    if is_old_facts:
        module.exit_json(msg="Retrieved CloudFront facts.", ansible_facts=result)
    else:
        module.exit_json(msg="Retrieved CloudFront info.", **result)
def main():
    argument_spec = dict(
        id=dict(required=False, aliases=['distribution_id']),
        type=dict(required=False, choices=['managed', 'custom']),
        list_cache_policies=dict(required=False, type='bool'),
        list_cloud_front_origin_access_identities=dict(required=False, type='bool'),
        list_field_level_encryption_configs=dict(required=False, type='bool'),
        list_field_level_encryption_profiles=dict(required=False, type='bool'),
        list_invalidations=dict(required=False, type='bool'),
        list_key_groups=dict(required=False, type='bool'),
        list_origin_request_policies=dict(required=False, type='bool'),
        list_public_keys=dict(required=False, type='bool'),
        list_streaming_distributions=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[
            ('list_cache_policies', True, ['type']),
            ('list_invalidations', True, ['id']),
            ('list_origin_request_policies', True, ['type']),
        ],
        mutually_exclusive=[
            ('list_cache_policies',
             'list_cloud_front_origin_access_identities',
             'list_field_level_encryption_configs',
             'list_field_level_encryption_profiles', 'list_invalidations',
             'list_key_groups', 'list_origin_request_policies',
             'list_public_keys', 'list_streaming_distributions')
        ],
    )

    client = module.client('cloudfront',
                           retry_decorator=AWSRetry.exponential_backoff())
    _it, paginate = _cloudfront(client, module)

    if module.params['list_cache_policies']:
        module.exit_json(cache_policy_list=aws_cloudfornt_parser(
            paginate, _it, 'CachePolicyList', 'Items'))
    elif module.params['list_cloud_front_origin_access_identities']:
        module.exit_json(
            cloud_front_origin_access_identity_list=aws_cloudfornt_parser(
                paginate, _it, 'CloudFrontOriginAccessIdentityList', 'Items'))
    elif module.params['list_field_level_encryption_configs']:
        module.exit_json(field_level_encryption_list=aws_cloudfornt_parser(
            paginate, _it, 'FieldLevelEncryptionList', 'Items'))
    elif module.params['list_field_level_encryption_profiles']:
        module.exit_json(
            field_level_encryption_profile_list=aws_cloudfornt_parser(
                paginate, _it, 'FieldLevelEncryptionProfileList', 'Items'))
    elif module.params['list_invalidations']:
        module.exit_json(invalidation_list=aws_cloudfornt_parser(
            paginate, _it, 'InvalidationList', 'Items'))
    elif module.params['list_key_groups']:
        module.exit_json(key_group_list=aws_cloudfornt_parser(
            paginate, _it, 'KeyGroupList', 'Items'))
    elif module.params['list_origin_request_policies']:
        module.exit_json(origin_request_policy_list=aws_cloudfornt_parser(
            paginate, _it, 'OriginRequestPolicyList', 'Items'))
    elif module.params['list_public_keys']:
        module.exit_json(public_key_list=aws_cloudfornt_parser(
            paginate, _it, 'PublicKeyList', 'Items'))
    elif module.params['list_streaming_distributions']:
        module.exit_json(streaming_distribution_list=aws_cloudfornt_parser(
            paginate, _it, 'StreamingDistributionList', 'Items'))
    else:
        module.exit_json(distribution_list=aws_cloudfornt_parser(
            paginate, _it, 'DistributionList', 'Items'))
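The helpers _cloudfront and aws_cloudfornt_parser (note the transposed letters in the original identifier) are defined elsewhere and not shown in this excerpt, so their internals are unknown. As a rough, hypothetical illustration of the pattern their call sites suggest (drain every page of a CloudFront list call and return the nested Items), the distribution list can be collected directly with boto3, which does provide a paginator for list_distributions:

import boto3

def list_all_distribution_items(client):
    # Collect the flattened Items list across every page of ListDistributions.
    items = []
    for page in client.get_paginator('list_distributions').paginate():
        items.extend(page.get('DistributionList', {}).get('Items', []))
    return items

# usage: list_all_distribution_items(boto3.client('cloudfront'))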
Example n. 25
def main():
    template_options = dict(
        block_device_mappings=dict(
            type='list',
            elements='dict',
            options=dict(
                device_name=dict(),
                ebs=dict(
                    type='dict',
                    options=dict(
                        delete_on_termination=dict(type='bool'),
                        encrypted=dict(type='bool'),
                        iops=dict(type='int'),
                        kms_key_id=dict(),
                        snapshot_id=dict(),
                        volume_size=dict(type='int'),
                        volume_type=dict(),
                    ),
                ),
                no_device=dict(),
                virtual_name=dict(),
            ),
        ),
        cpu_options=dict(
            type='dict',
            options=dict(
                core_count=dict(type='int'),
                threads_per_core=dict(type='int'),
            ),
        ),
        credit_specification=dict(
            type='dict',
            options=dict(cpu_credits=dict()),
        ),
        disable_api_termination=dict(type='bool'),
        ebs_optimized=dict(type='bool'),
        elastic_gpu_specifications=dict(
            options=dict(type=dict()),
            type='list',
            elements='dict',
        ),
        iam_instance_profile=dict(),
        image_id=dict(),
        instance_initiated_shutdown_behavior=dict(
            choices=['stop', 'terminate']),
        instance_market_options=dict(
            type='dict',
            options=dict(
                market_type=dict(),
                spot_options=dict(
                    type='dict',
                    options=dict(
                        block_duration_minutes=dict(type='int'),
                        instance_interruption_behavior=dict(
                            choices=['hibernate', 'stop', 'terminate']),
                        max_price=dict(),
                        spot_instance_type=dict(
                            choices=['one-time', 'persistent']),
                    ),
                ),
            ),
        ),
        instance_type=dict(),
        kernel_id=dict(),
        key_name=dict(),
        monitoring=dict(
            type='dict',
            options=dict(enabled=dict(type='bool')),
        ),
        metadata_options=dict(
            type='dict',
            options=dict(http_endpoint=dict(choices=['enabled', 'disabled'],
                                            default='enabled'),
                         http_put_response_hop_limit=dict(type='int',
                                                          default=1),
                         http_tokens=dict(choices=['optional', 'required'],
                                          default='optional'))),
        network_interfaces=dict(
            type='list',
            elements='dict',
            options=dict(
                associate_public_ip_address=dict(type='bool'),
                delete_on_termination=dict(type='bool'),
                description=dict(),
                device_index=dict(type='int'),
                groups=dict(type='list', elements='str'),
                ipv6_address_count=dict(type='int'),
                ipv6_addresses=dict(type='list', elements='str'),
                network_interface_id=dict(),
                private_ip_address=dict(),
                subnet_id=dict(),
            ),
        ),
        placement=dict(
            options=dict(
                affinity=dict(),
                availability_zone=dict(),
                group_name=dict(),
                host_id=dict(),
                tenancy=dict(),
            ),
            type='dict',
        ),
        ram_disk_id=dict(),
        security_group_ids=dict(type='list', elements='str'),
        security_groups=dict(type='list', elements='str'),
        tags=dict(type='dict'),
        user_data=dict(),
    )

    arg_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        template_name=dict(aliases=['name']),
        template_id=dict(aliases=['id']),
        default_version=dict(default='latest'),
    )

    arg_spec.update(template_options)

    module = AnsibleAWSModule(argument_spec=arg_spec,
                              required_one_of=[('template_name', 'template_id')
                                               ],
                              supports_check_mode=True)

    if not module.boto3_at_least('1.6.0'):
        module.fail_json(msg="ec2_launch_template requires boto3 >= 1.6.0")

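    # The EC2 LaunchTemplate API expects IPv6 addresses as a list of single-key
    # dicts rather than plain strings, so reshape the user-supplied list here.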
    for interface in (module.params.get('network_interfaces') or []):
        if interface.get('ipv6_addresses'):
            interface['ipv6_addresses'] = [{
                'ipv6_address': x
            } for x in interface['ipv6_addresses']]

    if module.params.get('state') == 'present':
        out = create_or_update(module, template_options)
        out.update(format_module_output(module))
    elif module.params.get('state') == 'absent':
        out = delete_template(module)
    else:
        module.fail_json(
            msg='Unsupported value "{0}" for `state` parameter'.format(
                module.params.get('state')))

    module.exit_json(**out)
def main():
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'account_sources': dict(type='list', required=True, elements='dict'),
            'organization_source': dict(type='dict', required=True),
        },
        supports_check_mode=False,
    )

    result = {'changed': False}

    name = module.params.get('name')
    state = module.params.get('state')

    params = {}
    if name:
        params['ConfigurationAggregatorName'] = name
    if module.params.get('account_sources'):
        params['AccountAggregationSources'] = []
        for i in module.params.get('account_sources'):
            tmp_dict = {}
            if i.get('account_ids'):
                tmp_dict['AccountIds'] = i.get('account_ids')
            if i.get('aws_regions'):
                tmp_dict['AwsRegions'] = i.get('aws_regions')
            if i.get('all_aws_regions') is not None:
                tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
            params['AccountAggregationSources'].append(tmp_dict)
    if module.params.get('organization_source'):
        organization_source = module.params.get('organization_source')
        params['OrganizationAggregationSource'] = {}
        if organization_source.get('role_arn'):
            params['OrganizationAggregationSource'].update({
                'RoleArn': organization_source.get('role_arn')
            })
        if organization_source.get('aws_regions'):
            params['OrganizationAggregationSource'].update({
                'AwsRegions': organization_source.get('aws_regions')
            })
        if organization_source.get('all_aws_regions') is not None:
            params['OrganizationAggregationSource'].update({
                'AllAwsRegions': organization_source.get('all_aws_regions')
            })

    client = module.client('config',
                           retry_decorator=AWSRetry.jittered_backoff())

    resource_status = resource_exists(client, module, params)

    if state == 'present':
        if not resource_status:
            create_resource(client, module, params, result)
        else:
            update_resource(client, module, params, result)

    if state == 'absent':
        if resource_status:
            delete_resource(client, module, params, result)

    module.exit_json(changed=result['changed'])
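resource_exists, create_resource, update_resource and delete_resource are not part of this excerpt. A minimal sketch of what resource_exists plausibly does, assuming it wraps the real describe_configuration_aggregators API call and treats a missing aggregator as "does not exist"; this is an assumption, not the module's verbatim helper:

import botocore

def resource_exists(client, module, params):
    # Return the existing aggregator description, or None if it does not exist yet.
    try:
        response = client.describe_configuration_aggregators(
            ConfigurationAggregatorNames=[params['ConfigurationAggregatorName']]
        )
        return response['ConfigurationAggregators'][0]
    except client.exceptions.NoSuchConfigurationAggregatorException:
        return None
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg='Failed to describe configuration aggregator')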
Example n. 27
def main():
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
        name=dict(default='default'),
        enable_logging=dict(default=True, type='bool'),
        s3_bucket_name=dict(),
        s3_key_prefix=dict(),
        sns_topic_name=dict(),
        is_multi_region_trail=dict(default=False, type='bool'),
        enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
        include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
        cloudwatch_logs_role_arn=dict(),
        cloudwatch_logs_log_group_arn=dict(),
        kms_key_id=dict(),
        tags=dict(default={}, type='dict'),
    )

    required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
    required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if)

    # collect parameters
    if module.params['state'] in ('present', 'enabled'):
        state = 'present'
    elif module.params['state'] in ('absent', 'disabled'):
        state = 'absent'
    tags = module.params['tags']
    enable_logging = module.params['enable_logging']
    ct_params = dict(
        Name=module.params['name'],
        S3BucketName=module.params['s3_bucket_name'],
        IncludeGlobalServiceEvents=module.params['include_global_events'],
        IsMultiRegionTrail=module.params['is_multi_region_trail'],
    )

    if module.params['s3_key_prefix']:
        ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')

    if module.params['sns_topic_name']:
        ct_params['SnsTopicName'] = module.params['sns_topic_name']

    if module.params['cloudwatch_logs_role_arn']:
        ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']

    if module.params['cloudwatch_logs_log_group_arn']:
        ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']

    if module.params['enable_log_file_validation'] is not None:
        ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']

    if module.params['kms_key_id']:
        ct_params['KmsKeyId'] = module.params['kms_key_id']

    client = module.client('cloudtrail')
    region = module.region

    results = dict(
        changed=False,
        exists=False
    )

    # Get existing trail facts
    trail = get_trail_facts(module, client, ct_params['Name'])

    # If the trail exists set the result exists variable
    if trail is not None:
        results['exists'] = True

    if state == 'absent' and results['exists']:
        # If Trail exists go ahead and delete
        results['changed'] = True
        results['exists'] = False
        results['trail'] = dict()
        if not module.check_mode:
            delete_trail(module, client, trail['TrailARN'])

    elif state == 'present' and results['exists']:
        # If Trail exists see if we need to update it
        do_update = False
        for key in ct_params:
            tkey = str(key)
            # boto3 has inconsistent parameter naming so we handle it here
            if key == 'EnableLogFileValidation':
                tkey = 'LogFileValidationEnabled'
            # We need to make an empty string equal None
            if ct_params.get(key) == '':
                val = None
            else:
                val = ct_params.get(key)
            if val != trail.get(tkey):
                do_update = True
                results['changed'] = True
                # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
                if module.check_mode:
                    trail.update({tkey: ct_params.get(key)})

        if not module.check_mode and do_update:
            update_trail(module, client, ct_params)
            trail = get_trail_facts(module, client, ct_params['Name'])

        # Check if we need to start/stop logging
        if enable_logging and not trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = True
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='start')
        if not enable_logging and trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = False
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='stop')

        # Check if we need to update tags on resource
        tag_dry_run = False
        if module.check_mode:
            tag_dry_run = True
        tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], dry_run=tag_dry_run)
        if tags_changed:
            results['changed'] = True
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)

    elif state == 'present' and not results['exists']:
        # Trail doesn't exist just go create it
        results['changed'] = True
        if not module.check_mode:
            # If we aren't in check_mode then actually create it
            created_trail = create_trail(module, client, ct_params)
            # Apply tags
            tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
            # Get the trail status
            try:
                status_resp = client.get_trail_status(Name=created_trail['Name'])
            except (BotoCoreError, ClientError) as err:
                module.fail_json_aws(err, msg="Failed to fetch Trail status")
            # Set the logging state for the trail to desired value
            if enable_logging and not status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='start')
            if not enable_logging and status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='stop')
            # Get facts for newly created Trail
            trail = get_trail_facts(module, client, ct_params['Name'])

        # If we are in check mode create a fake return structure for the newly minted trail
        if module.check_mode:
            acct_id = '123456789012'
            try:
                sts_client = module.client('sts')
                acct_id = sts_client.get_caller_identity()['Account']
            except (BotoCoreError, ClientError):
                pass
            trail = dict()
            trail.update(ct_params)
            if 'EnableLogFileValidation' not in ct_params:
                ct_params['EnableLogFileValidation'] = False
            # the trail facts use the boto3 response key, not the request parameter name
            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
            trail.pop('EnableLogFileValidation', None)
            fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
            trail['HasCustomEventSelectors'] = False
            trail['HomeRegion'] = region
            trail['TrailARN'] = fake_arn
            trail['IsLogging'] = enable_logging
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)

    module.exit_json(**results)
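set_logging (along with get_trail_facts, create_trail, update_trail, delete_trail and tag_trail) is not shown above. A minimal sketch of what set_logging presumably looks like, built on the real start_logging/stop_logging CloudTrail API calls; treat the body as an assumption rather than the module's exact code:

from botocore.exceptions import BotoCoreError, ClientError

def set_logging(module, client, name, action):
    # Start or stop log delivery for the named trail.
    try:
        if action == 'start':
            client.start_logging(Name=name)
        elif action == 'stop':
            client.stop_logging(Name=name)
        else:
            module.fail_json(msg="Unsupported logging action '%s'" % action)
    except (BotoCoreError, ClientError) as err:
        module.fail_json_aws(err, msg="Failed to %s logging on trail %s" % (action, name))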
Example n. 28
def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(required=True),
        description=dict(required=False),
        subnets=dict(required=False, type='list', elements='str'),
    )
    required_if = [('state', 'present', ['description', 'subnets'])]
    module = AnsibleAWSModule(
        argument_spec=argument_spec, required_if=required_if)
    state = module.params.get('state')
    group_name = module.params.get('name').lower()
    group_description = module.params.get('description')
    group_subnets = module.params.get('subnets') or []

    try:
        conn = module.client('rds')
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, 'Failed to instantiate AWS connection')
    # Default.
    result = create_result(False)

    try:
        matching_groups = conn.describe_db_subnet_groups(
            DBSubnetGroupName=group_name, MaxRecords=100).get('DBSubnetGroups')
    except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
        # No existing subnet, create it if needed, else we can just exit.
        if state == 'present':
            try:
                new_group = conn.create_db_subnet_group(
                    DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets)
                result = create_result(True, new_group.get('DBSubnetGroup'))
            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
                module.fail_json_aws(e, 'Failed to create a new subnet group')
        module.exit_json(**result)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, 'Failed to get subnet groups description')

    # We have one or more subnet groups at this point.
    if state == 'absent':
        try:
            conn.delete_db_subnet_group(DBSubnetGroupName=group_name)
            result = create_result(True)
            module.exit_json(**result)
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json_aws(e, 'Failed to delete a subnet group')

    # Sort the subnet groups before we compare them
    existing_subnets = create_subnet_list(matching_groups[0].get('Subnets'))
    existing_subnets.sort()
    group_subnets.sort()
    # See if anything changed.
    if (matching_groups[0].get('DBSubnetGroupName') == group_name and
        matching_groups[0].get('DBSubnetGroupDescription') == group_description and
            existing_subnets == group_subnets):
        result = create_result(False, matching_groups[0])
        module.exit_json(**result)
    # Modify existing group.
    try:
        changed_group = conn.modify_db_subnet_group(
            DBSubnetGroupName=group_name, DBSubnetGroupDescription=group_description, SubnetIds=group_subnets)
        result = create_result(True, changed_group.get('DBSubnetGroup'))
        module.exit_json(**result)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, 'Failed to update a subnet group')
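create_result and create_subnet_list are defined outside this excerpt. Judging by how they are used above (a result dict carrying a changed flag plus optional group details, and a sortable list of subnet ids extracted from the API's Subnets structure), a rough sketch could look like the following; the exact field handling is an assumption:

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

def create_result(changed, subnet_group=None):
    # Build the module result; include the (snake_cased) group details when available.
    if subnet_group is None:
        return dict(changed=changed)
    return dict(changed=changed, subnet_group=camel_dict_to_snake_dict(subnet_group))

def create_subnet_list(subnets):
    # Reduce the API's list of subnet structures to the bare subnet ids.
    return [subnet.get('SubnetIdentifier') for subnet in subnets or []]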
Example n. 29
def main():
    argument_spec = dict(
        name=dict(),
        function_arn=dict(),
        wait=dict(default=True, type='bool'),
        tail_log=dict(default=False, type='bool'),
        dry_run=dict(default=False, type='bool'),
        version_qualifier=dict(),
        payload=dict(default={}, type='dict'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=[
                                  ['name', 'function_arn'],
                              ])

    name = module.params.get('name')
    function_arn = module.params.get('function_arn')
    await_return = module.params.get('wait')
    dry_run = module.params.get('dry_run')
    tail_log = module.params.get('tail_log')
    version_qualifier = module.params.get('version_qualifier')
    payload = module.params.get('payload')

    if not (name or function_arn):
        module.fail_json(
            msg="Must provide either a function_arn or a name to invoke.")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module,
                                                                  boto3=True)
    if not region:
        module.fail_json(msg="The AWS region must be specified as an "
                         "environment variable or in the AWS credentials "
                         "profile.")

    try:
        client = boto3_conn(module,
                            conn_type='client',
                            resource='lambda',
                            region=region,
                            endpoint=ec2_url,
                            **aws_connect_kwargs)
    except (botocore.exceptions.ClientError,
            botocore.exceptions.ValidationError) as e:
        module.fail_json(msg="Failure connecting boto3 to AWS: %s" %
                         to_native(e),
                         exception=traceback.format_exc())

    invoke_params = {}

    if await_return:
        # await response
        invoke_params['InvocationType'] = 'RequestResponse'
    else:
        # fire and forget
        invoke_params['InvocationType'] = 'Event'
    if dry_run or module.check_mode:
        # dry_run overrides invocation type
        invoke_params['InvocationType'] = 'DryRun'

    if tail_log and await_return:
        invoke_params['LogType'] = 'Tail'
    elif tail_log and not await_return:
        module.fail_json(msg="The `tail_log` parameter is only available if "
                         "the invocation waits for the function to complete. "
                         "Set `wait` to true or turn off `tail_log`.")
    else:
        invoke_params['LogType'] = 'None'

    if version_qualifier:
        invoke_params['Qualifier'] = version_qualifier

    if payload:
        invoke_params['Payload'] = json.dumps(payload)

    if function_arn:
        invoke_params['FunctionName'] = function_arn
    elif name:
        invoke_params['FunctionName'] = name

    try:
        response = client.invoke(**invoke_params)
    except botocore.exceptions.ClientError as ce:
        if ce.response['Error']['Code'] == 'ResourceNotFoundException':
            module.fail_json(msg="Could not find Lambda to execute. Make sure "
                             "the ARN is correct and your profile has "
                             "permissions to execute this function.",
                             exception=traceback.format_exc())
        module.fail_json(
            msg=
            "Client-side error when invoking Lambda, check inputs and specific error",
            exception=traceback.format_exc())
    except botocore.exceptions.ParamValidationError as ve:
        module.fail_json(msg="Parameters to `invoke` failed to validate",
                         exception=traceback.format_exc())
    except Exception as e:
        module.fail_json(
            msg="Unexpected failure while invoking Lambda function",
            exception=traceback.format_exc())

    results = {
        'logs': '',
        'status': response['StatusCode'],
        'output': '',
    }

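    # 'LogResult' is only returned when LogType='Tail' was requested; it carries
    # the last ~4 KB of the execution log, base64-encoded.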
    if response.get('LogResult'):
        try:
            # logs are base64 encoded in the API response
            results['logs'] = base64.b64decode(response.get('LogResult', ''))
        except Exception as e:
            module.fail_json(msg="Failed while decoding logs",
                             exception=traceback.format_exc())

    if invoke_params['InvocationType'] == 'RequestResponse':
        try:
            results['output'] = json.loads(
                response['Payload'].read().decode('utf8'))
        except Exception as e:
            module.fail_json(msg="Failed while decoding function return value",
                             exception=traceback.format_exc())

        if isinstance(results.get('output'), dict) and any([
                results['output'].get('stackTrace'),
                results['output'].get('errorMessage')
        ]):
            # AWS sends back stack traces and error messages when a function failed
            # in a RequestResponse (synchronous) context.
            template = (
                "Function executed, but there was an error in the Lambda function. "
                "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
            error_data = {
                # format the stacktrace sent back as an array into a multiline string
                'trace': '\n'.join(
                    ' '.join(str(x) for x in line)  # cast line numbers to strings
                    for line in results.get('output', {}).get('stackTrace', [])
                ),
                'errmsg': results['output'].get('errorMessage'),
                'type': results['output'].get('errorType'),
            }
            module.fail_json(msg=template.format(**error_data), result=results)

    module.exit_json(changed=True, result=results)
Example n. 30
def main():
    module = AnsibleAWSModule(
        argument_spec=dict(filters=dict(type='dict', default={})),
        supports_check_mode=True)

    module.exit_json(changed=False, addresses=get_eips_details(module))
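get_eips_details is not part of this excerpt. A minimal sketch of what it presumably does, given the module's filters option and the addresses result key, using the real describe_addresses EC2 API call; the filter conversion is inlined here instead of the usual ansible_dict_to_boto3_filter_list helper, and the whole body is an assumption:

from botocore.exceptions import BotoCoreError, ClientError
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

def get_eips_details(module):
    # Describe Elastic IPs matching the user-supplied filters and return them snake_cased.
    connection = module.client('ec2')
    filters = module.params.get('filters') or {}
    boto3_filters = [{'Name': key, 'Values': value if isinstance(value, list) else [value]}
                     for key, value in filters.items()]
    try:
        response = connection.describe_addresses(Filters=boto3_filters)
    except (BotoCoreError, ClientError) as error:
        module.fail_json_aws(error, msg='Error retrieving EIPs')
    return [camel_dict_to_snake_dict(address) for address in response['Addresses']]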