def main():
    argument_spec = dict(
        name=dict(required=True),
        schedule_expression=dict(),
        event_pattern=dict(),
        state=dict(choices=['present', 'disabled', 'absent'],
                   default='present'),
        description=dict(),
        role_arn=dict(),
        targets=dict(type='list', default=[]),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec)

    rule_data = dict(
        [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
    )
    targets = module.params.get('targets')
    state = module.params.get('state')
    client = module.client('events')

    cwe_rule = CloudWatchEventRule(module, client=client, **rule_data)
    cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)

    if state == 'present':
        cwe_rule_manager.ensure_present()
    elif state == 'disabled':
        cwe_rule_manager.ensure_disabled()
    elif state == 'absent':
        cwe_rule_manager.ensure_absent()
    else:
        module.fail_json(msg="Invalid state '{0}' provided".format(state))

    module.exit_json(**cwe_rule_manager.fetch_aws_state())
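
# RULE_FIELDS is defined on CloudWatchEventRuleManager elsewhere in the module and
# is not shown above. Assuming it is simply the list of rule attributes taken
# straight from the module parameters, the rule_data dict above is built like this
# (illustrative values; the constant below is an assumption, not the upstream code):
RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']

example_params = {'name': 'demo-rule', 'schedule_expression': 'rate(5 minutes)',
                  'event_pattern': None, 'description': None, 'role_arn': None,
                  'state': 'present', 'targets': []}
example_rule_data = dict((rf, example_params.get(rf)) for rf in RULE_FIELDS)
# example_rule_data == {'name': 'demo-rule', 'event_pattern': None,
#                       'schedule_expression': 'rate(5 minutes)',
#                       'description': None, 'role_arn': None}
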
Example #2
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(role_arn=dict(required=True),
             role_session_name=dict(required=True),
             duration_seconds=dict(required=False, default=None, type='int'),
             external_id=dict(required=False, default=None),
             policy=dict(required=False, default=None),
             mfa_serial_number=dict(required=False, default=None),
             mfa_token=dict(required=False, default=None)))

    module = AnsibleAWSModule(argument_spec=argument_spec)

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module,
                                                                  boto3=True)

    if region:
        connection = boto3_conn(module,
                                conn_type='client',
                                resource='sts',
                                region=region,
                                endpoint=ec2_url,
                                **aws_connect_kwargs)

    else:
        module.fail_json(msg="region must be specified")

    assume_role_policy(connection, module)
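
# assume_role_policy() is not shown above. A minimal sketch of what it likely does
# with the STS client: build the call kwargs from the module parameters, drop the
# unset optional ones, call assume_role and return the credentials. This is an
# assumption about the helper, not the upstream implementation.
from botocore.exceptions import BotoCoreError, ClientError

from ansible.module_utils.ec2 import camel_dict_to_snake_dict


def assume_role_policy(connection, module):
    params = {
        'RoleArn': module.params.get('role_arn'),
        'RoleSessionName': module.params.get('role_session_name'),
        'DurationSeconds': module.params.get('duration_seconds'),
        'ExternalId': module.params.get('external_id'),
        'Policy': module.params.get('policy'),
        'SerialNumber': module.params.get('mfa_serial_number'),
        'TokenCode': module.params.get('mfa_token'),
    }
    # boto3 rejects None values, so only pass the parameters that were supplied
    params = dict((k, v) for k, v in params.items() if v is not None)
    try:
        response = connection.assume_role(**params)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to assume role")
    module.exit_json(changed=True,
                     sts_creds=response['Credentials'],
                     sts_user=camel_dict_to_snake_dict(response['AssumedRoleUser']))
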
def main():
    argument_spec = dict(
        name=dict(required=True),
        version=dict(),
        role_arn=dict(),
        subnets=dict(type='list'),
        security_groups=dict(type='list'),
        state=dict(choices=['absent', 'present'], default='present'),
        wait=dict(default=False, type='bool'),
        wait_timeout=dict(default=1200, type='int')
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[['state', 'present', ['role_arn', 'subnets', 'security_groups']]],
        supports_check_mode=True,
    )

    if not module.botocore_at_least("1.10.32"):
        module.fail_json(msg='aws_eks_cluster module requires botocore >= 1.10.32')

    if (not module.botocore_at_least("1.12.38") and
            module.params.get('state') == 'absent' and
            module.params.get('wait')):
        module.fail_json(msg='aws_eks_cluster: wait=yes when state=absent requires botocore >= 1.12.38')

    client = module.client('eks')

    if module.params.get('state') == 'present':
        ensure_present(client, module)
    else:
        ensure_absent(client, module)
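
# ensure_present()/ensure_absent() are not shown above. A simplified sketch of what
# ensure_present() might do with the EKS client; the real module also handles
# waiters, check-mode reporting and updates of an existing cluster, so treat this
# as an assumption rather than the upstream code.
from botocore.exceptions import BotoCoreError, ClientError

from ansible.module_utils.ec2 import camel_dict_to_snake_dict


def ensure_present(client, module):
    name = module.params.get('name')
    try:
        cluster = client.describe_cluster(name=name)['cluster']
        # the cluster already exists; a fuller implementation would compare and update
        module.exit_json(changed=False, **camel_dict_to_snake_dict(cluster))
    except client.exceptions.ResourceNotFoundException:
        pass
    if module.check_mode:
        module.exit_json(changed=True)
    params = dict(
        name=name,
        roleArn=module.params['role_arn'],
        resourcesVpcConfig=dict(
            subnetIds=module.params['subnets'],
            securityGroupIds=module.params['security_groups'],
        ),
    )
    if module.params.get('version'):
        params['version'] = module.params['version']
    try:
        cluster = client.create_cluster(**params)['cluster']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't create cluster %s" % name)
    module.exit_json(changed=True, **camel_dict_to_snake_dict(cluster))
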
Example #4
def main():
    argument_spec = dict(iam_type=dict(required=True,
                                       choices=['user', 'group', 'role']),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         iam_name=dict(required=True),
                         policy_name=dict(required=True),
                         policy_document=dict(default=None, required=False),
                         policy_json=dict(type='json',
                                          default=None,
                                          required=False),
                         skip_duplicates=dict(type='bool',
                                              default=None,
                                              required=False))
    mutually_exclusive = [['policy_document', 'policy_json']]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              mutually_exclusive=mutually_exclusive,
                              supports_check_mode=True)

    skip_duplicates = module.params.get('skip_duplicates')

    if skip_duplicates is None:
        module.deprecate(
            'The skip_duplicates behaviour has caused confusion and'
            ' will be disabled by default in Ansible 2.14',
            version='2.14')
        skip_duplicates = True

    if module.params.get('policy_document'):
        module.deprecate(
            'The policy_document option has been deprecated and'
            ' will be removed in Ansible 2.14',
            version='2.14')

    args = dict(
        client=module.client('iam'),
        name=module.params.get('iam_name'),
        policy_name=module.params.get('policy_name'),
        policy_document=module.params.get('policy_document'),
        policy_json=module.params.get('policy_json'),
        skip_duplicates=skip_duplicates,
        state=module.params.get('state'),
        check_mode=module.check_mode,
    )
    iam_type = module.params.get('iam_type')

    try:
        if iam_type == 'user':
            policy = UserPolicy(**args)
        elif iam_type == 'role':
            policy = RolePolicy(**args)
        elif iam_type == 'group':
            policy = GroupPolicy(**args)

        module.exit_json(**(policy.run()))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e)
    except PolicyError as e:
        module.fail_json(msg=str(e))
Example #5
def main():
    argument_spec = dict(
        iam_type=dict(required=True, choices=['user', 'group', 'role']),
        iam_name=dict(required=True),
        policy_name=dict(default=None, required=False),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    args = dict(
        client=module.client('iam'),
        name=module.params.get('iam_name'),
        policy_name=module.params.get('policy_name'),
    )
    iam_type = module.params.get('iam_type')

    try:
        if iam_type == 'user':
            policy = UserPolicy(**args)
        elif iam_type == 'role':
            policy = RolePolicy(**args)
        elif iam_type == 'group':
            policy = GroupPolicy(**args)

        module.exit_json(**(policy.run()))
    except (BotoCoreError, ClientError) as e:
        # BotoCoreError has no .response attribute, so only inspect the error code
        # when we actually caught a ClientError
        if isinstance(e, ClientError) and e.response['Error']['Code'] == 'NoSuchEntity':
            module.exit_json(changed=False, msg=e.response['Error']['Message'])
        module.fail_json_aws(e)
    except PolicyError as e:
        module.fail_json(msg=str(e))
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(az=dict(default=None, required=False),
             cidr=dict(required=True),
             ipv6_cidr=dict(default='', required=False),
             state=dict(default='present', choices=['present', 'absent']),
             tags=dict(default={},
                       required=False,
                       type='dict',
                       aliases=['resource_tags']),
             vpc_id=dict(required=True),
             map_public=dict(default=False, required=False, type='bool'),
             assign_instances_ipv6=dict(default=False,
                                        required=False,
                                        type='bool'),
             wait=dict(type='bool', default=True),
             wait_timeout=dict(type='int', default=300, required=False),
             purge_tags=dict(default=True, type='bool')))

    required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=required_if)

    if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
        module.fail_json(
            msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")

    if not module.botocore_at_least("1.7.0"):
        module.warn(
            "botocore >= 1.7.0 is required to use wait_timeout for custom wait times"
        )

    region, ec2_url, aws_connect_params = get_aws_connection_info(module,
                                                                  boto3=True)
    connection = boto3_conn(module,
                            conn_type='client',
                            resource='ec2',
                            region=region,
                            endpoint=ec2_url,
                            **aws_connect_params)

    state = module.params.get('state')

    try:
        if state == 'present':
            result = ensure_subnet_present(connection, module)
        elif state == 'absent':
            result = ensure_subnet_absent(connection, module)
    except botocore.exceptions.ClientError as e:
        module.fail_json_aws(e)

    module.exit_json(**result)
Example #7
def main():
    event_types = [
        's3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post',
        's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload',
        's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete',
        's3:ObjectRemoved:DeleteMarkerCreated', 's3:ObjectRestore:Post',
        's3:ObjectRestore:Completed', 's3:ReducedRedundancyLostObject'
    ]
    argument_spec = dict(
        state=dict(default='present', choices=['present', 'absent']),
        event_name=dict(required=True),
        lambda_function_arn=dict(aliases=['function_arn']),
        bucket_name=dict(required=True),
        events=dict(type='list', default=[], choices=event_types),
        prefix=dict(default=''),
        suffix=dict(default=''),
        lambda_alias=dict(),
        lambda_version=dict(type='int', default=0),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['lambda_alias', 'lambda_version']],
        required_if=[['state', 'present', ['events']]])

    bucket = AmazonBucket(module.client('s3'), module.params['bucket_name'])
    current = bucket.current_config(module.params['event_name'])
    desired = Config.from_params(**module.params)
    notification_configuration = [cfg.raw for cfg in bucket.full_config()]

    state = module.params['state']
    try:
        if (state == 'present' and current == desired) or (state == 'absent'
                                                           and not current):
            changed = False
        elif module.check_mode:
            changed = True
        elif state == 'present':
            changed = True
            notification_configuration = bucket.apply_config(desired)
        elif state == 'absent':
            changed = True
            notification_configuration = bucket.delete_config(desired)
    except (ClientError, BotoCoreError) as e:
        module.fail_json(msg='{0}'.format(e))

    module.exit_json(**dict(changed=changed,
                            notification_configuration=[
                                camel_dict_to_snake_dict(cfg)
                                for cfg in notification_configuration
                            ]))
def run_module():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            cluster_name=dict(type='str', required=True, aliases=['cluster']),
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            region=dict(type='str', required=True, aliases=['source']),
            destination_region=dict(type='str', required=True, aliases=['destination']),
            snapshot_copy_grant=dict(type='str', aliases=['copy_grant']),
            snapshot_retention_period=dict(type='int', required=True, aliases=['retention_period']),
        )
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    result = dict(
        changed=False,
        message=''
    )
    connection = module.client('redshift')

    snapshot_controller = SnapshotController(client=connection,
                                             cluster_name=module.params.get('cluster_name'))

    current_config = snapshot_controller.get_cluster_snapshot_copy_status()
    if current_config is not None:
        if module.params.get('state') == 'present':
            if requesting_unsupported_modifications(current_config, module.params):
                message = 'Cannot modify destination_region or grant_name. ' \
                          'Please disable cross-region snapshots, and re-run.'
                module.fail_json(msg=message, **result)
            if needs_update(current_config, module.params):
                result['changed'] = True
                if not module.check_mode:
                    snapshot_controller.modify_snapshot_copy_retention_period(
                        module.params.get('snapshot_retention_period')
                    )
        else:
            result['changed'] = True
            if not module.check_mode:
                snapshot_controller.disable_snapshot_copy()
    else:
        if module.params.get('state') == 'present':
            result['changed'] = True
            if not module.check_mode:
                snapshot_controller.enable_snapshot_copy(module.params.get('destination_region'),
                                                         module.params.get('snapshot_copy_grant'),
                                                         module.params.get('snapshot_retention_period'))
    module.exit_json(**result)
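
# needs_update() and requesting_unsupported_modifications() are not shown above.
# Assuming get_cluster_snapshot_copy_status() returns the ClusterSnapshotCopyStatus
# structure from describe_clusters, they are probably simple comparisons along these
# lines (a sketch, not the upstream code):
def requesting_unsupported_modifications(actual, requested):
    # the destination region and the copy grant cannot be changed in place
    return (actual.get('SnapshotCopyGrantName') != requested.get('snapshot_copy_grant') or
            actual.get('DestinationRegion') != requested.get('destination_region'))


def needs_update(actual, requested):
    # only the retention period can be modified while copying stays enabled
    return actual.get('RetentionPeriod') != requested.get('snapshot_retention_period')
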
Example #9
def main():
    argument_spec = dict(instance_id=dict(),
                         image_id=dict(),
                         architecture=dict(default='x86_64'),
                         kernel_id=dict(),
                         virtualization_type=dict(default='hvm'),
                         root_device_name=dict(),
                         delete_snapshot=dict(default=False, type='bool'),
                         name=dict(),
                         wait=dict(type='bool', default=False),
                         wait_timeout=dict(default=900, type='int'),
                         description=dict(default=''),
                         no_reboot=dict(default=False, type='bool'),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         device_mapping=dict(type='list'),
                         tags=dict(type='dict'),
                         launch_permissions=dict(type='dict'),
                         image_location=dict(),
                         enhanced_networking=dict(type='bool'),
                         billing_products=dict(type='list'),
                         ramdisk_id=dict(),
                         sriov_net_support=dict(),
                         purge_tags=dict(type='bool', default=False))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[
                                  ['state', 'absent', ['image_id']],
                              ])

    # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by
    # the required_if for state=absent, so check manually instead
    if not any([module.params['image_id'], module.params['name']]):
        module.fail_json(
            msg="one of the following is required: name, image_id")

    connection = module.client('ec2')

    if module.params.get('state') == 'absent':
        deregister_image(module, connection)
    elif module.params.get('state') == 'present':
        if module.params.get('image_id'):
            update_image(module, connection, module.params.get('image_id'))
        if not module.params.get('instance_id') and not module.params.get('device_mapping'):
            module.fail_json(
                msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.")
        create_image(module, connection)
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(name=dict(required=True, type='str'),
             state=dict(default='present', choices=['present', 'absent']),
             strategy=dict(default='cluster', choices=['cluster', 'spread'])))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    region, ec2_url, aws_connect_params = get_aws_connection_info(module,
                                                                  boto3=True)

    connection = boto3_conn(module,
                            resource='ec2',
                            conn_type='client',
                            region=region,
                            endpoint=ec2_url,
                            **aws_connect_params)

    state = module.params.get("state")

    if state == 'present':
        placement_group = get_placement_group_details(connection, module)
        if placement_group is None:
            create_placement_group(connection, module)
        else:
            strategy = module.params.get("strategy")
            if placement_group['strategy'] == strategy:
                module.exit_json(changed=False,
                                 placement_group=placement_group)
            else:
                name = module.params.get("name")
                module.fail_json(
                    msg=("Placement group '{}' exists, can't change strategy" +
                         " from '{}' to '{}'"
                         ).format(name, placement_group['strategy'], strategy))

    elif state == 'absent':
        placement_group = get_placement_group_details(connection, module)
        if placement_group is None:
            module.exit_json(changed=False)
        else:
            delete_placement_group(connection, module)
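
# get_placement_group_details() is not shown above; a sketch of how it might query
# EC2 and normalise the result. The returned key names are assumptions based on the
# describe_placement_groups response shape, not the upstream helper.
from botocore.exceptions import BotoCoreError, ClientError


def get_placement_group_details(connection, module):
    name = module.params.get('name')
    try:
        response = connection.describe_placement_groups(
            Filters=[{'Name': 'group-name', 'Values': [name]}])
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't find placement group named [%s]" % name)
    if not response['PlacementGroups']:
        return None
    placement_group = response['PlacementGroups'][0]
    return {
        'name': placement_group['GroupName'],
        'state': placement_group['State'],
        'strategy': placement_group['Strategy'],
    }
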
Example #11
def main():
    argument_spec = dict(gather_local_disks=dict(type='bool', default=True),
                         gather_tapes=dict(type='bool', default=True),
                         gather_file_shares=dict(type='bool', default=True),
                         gather_volumes=dict(type='bool', default=True))

    module = AnsibleAWSModule(argument_spec=argument_spec)
    if module._name == 'aws_sgw_facts':
        module.deprecate(
            "The 'aws_sgw_facts' module has been renamed to 'aws_sgw_info'",
            version='2.13')
    client = module.client('storagegateway')

    if client is None:  # this should never happen
        module.fail_json(
            msg='Unknown error, failed to create storagegateway client, no information from boto.')

    SGWInformationManager(client, module).fetch()
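
# SGWInformationManager is defined elsewhere in the module. A minimal sketch of the
# fetch() flow -- list the gateways and exit with the gathered facts; the real class
# also collects local disks, tapes, file shares and volumes per gateway depending on
# the gather_* parameters, so this is an assumption, not the upstream code.
from botocore.exceptions import BotoCoreError, ClientError


class SGWInformationManager(object):
    def __init__(self, client, module):
        self.client = client
        self.module = module

    def fetch(self):
        gateways = []
        try:
            paginator = self.client.get_paginator('list_gateways')
            for page in paginator.paginate():
                gateways.extend(page['Gateways'])
        except (BotoCoreError, ClientError) as e:
            self.module.fail_json_aws(e, msg="Couldn't list storage gateways")
        self.module.exit_json(gateways=gateways)
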
Example #12
def main():
    argument_spec = dict(
        state=dict(type='str',
                   default='present',
                   choices=['present', 'absent']),
        filters=dict(type='dict', default={}),
        vpn_gateway_id=dict(type='str'),
        tags=dict(default={}, type='dict'),
        connection_type=dict(default='ipsec.1', type='str'),
        tunnel_options=dict(no_log=True, type='list', default=[]),
        static_only=dict(default=False, type='bool'),
        customer_gateway_id=dict(type='str'),
        vpn_connection_id=dict(type='str'),
        purge_tags=dict(type='bool', default=False),
        routes=dict(type='list', default=[]),
        purge_routes=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=600),
        delay=dict(type='int', default=15),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)
    connection = module.client('ec2')

    state = module.params.get('state')
    parameters = dict(module.params)

    try:
        if state == 'present':
            changed, response = ensure_present(connection, parameters,
                                               module.check_mode)
        elif state == 'absent':
            changed, response = ensure_absent(connection, parameters,
                                              module.check_mode)
    except VPNConnectionException as e:
        if e.exception:
            module.fail_json_aws(e.exception, msg=e.msg)
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
def main():
    argument_spec = dict(state=dict(required=True,
                                    choices=['present', 'absent']),
                         id_to_associate=dict(required=True,
                                              aliases=[
                                                  'link_aggregation_group_id',
                                                  'connection_id'
                                              ]),
                         public=dict(type='bool'),
                         name=dict(),
                         vlan=dict(type='int', default=100),
                         bgp_asn=dict(type='int', default=65000),
                         authentication_key=dict(),
                         amazon_address=dict(),
                         customer_address=dict(),
                         address_type=dict(),
                         cidr=dict(type='list'),
                         virtual_gateway_id=dict(),
                         virtual_interface_id=dict())

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_one_of=[['virtual_interface_id', 'name']],
        required_if=[['state', 'present', ['public']],
                     ['public', False, ['virtual_gateway_id']],
                     ['public', True, ['amazon_address']],
                     ['public', True, ['customer_address']],
                     ['public', True, ['cidr']]])

    connection = module.client('directconnect')

    try:
        changed, latest_state = ensure_state(connection, module)
    except DirectConnectError as e:
        if e.exception:
            module.fail_json_aws(exception=e.exception, msg=e.msg)
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(latest_state))
def main():
    argument_spec = dict(name=dict(required=False),
                         waf_regional=dict(type='bool', default=False))
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)
    if module._name == 'aws_waf_facts':
        module.deprecate(
            "The 'aws_waf_facts' module has been renamed to 'aws_waf_info'",
            version='2.13')

    resource = 'waf' if not module.params['waf_regional'] else 'waf-regional'
    client = module.client(resource)
    web_acls = list_web_acls(client, module)
    name = module.params['name']
    if name:
        web_acls = [web_acl for web_acl in web_acls if web_acl['Name'] == name]
        if not web_acls:
            module.fail_json(msg="WAF named %s not found" % name)
    module.exit_json(wafs=[
        get_web_acl(client, module, web_acl['WebACLId'])
        for web_acl in web_acls
    ])
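
# list_web_acls() and get_web_acl() come from shared WAF helpers and are not shown
# above. A sketch of the listing side, assuming plain NextMarker-based pagination on
# the WAF API (not the upstream helper):
from botocore.exceptions import BotoCoreError, ClientError


def list_web_acls(client, module):
    acls = []
    params = {}
    while True:
        try:
            response = client.list_web_acls(**params)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Couldn't list Web ACLs")
        acls.extend(response.get('WebACLs', []))
        if not response.get('NextMarker'):
            return acls
        params['NextMarker'] = response['NextMarker']
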
def main():
    argument_spec = dict(
        certificate_arn=dict(aliases=['arn']),
        domain_name=dict(aliases=['name']),
        statuses=dict(type='list',
                      choices=[
                          'PENDING_VALIDATION', 'ISSUED', 'INACTIVE',
                          'EXPIRED', 'VALIDATION_TIMED_OUT', 'REVOKED',
                          'FAILED'
                      ]),
        tags=dict(type='dict'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)
    acm_info = ACMServiceManager(module)

    if module._name == 'aws_acm_facts':
        module.deprecate(
            "The 'aws_acm_facts' module has been renamed to 'aws_acm_info'",
            version='2.13')

    client = module.client('acm')

    certificates = acm_info.get_certificates(
        client,
        module,
        domain_name=module.params['domain_name'],
        statuses=module.params['statuses'],
        arn=module.params['certificate_arn'],
        only_tags=module.params['tags'])

    if module.params['certificate_arn'] and len(certificates) != 1:
        module.fail_json(
            msg="No certificate exists in this region with ARN %s" %
            module.params['certificate_arn'])

    module.exit_json(certificates=certificates)
Example #16
def main():
    module = AnsibleAWSModule(
        argument_spec={
            "identity": dict(required=True, type='str'),
            "state": dict(default='present', choices=['present', 'absent']),
            "bounce_notifications": dict(type='dict'),
            "complaint_notifications": dict(type='dict'),
            "delivery_notifications": dict(type='dict'),
            "feedback_forwarding": dict(default=True, type='bool'),
        },
        supports_check_mode=True,
    )

    for notification_type in ('bounce', 'complaint', 'delivery'):
        param_name = notification_type + '_notifications'
        arg_dict = module.params.get(param_name)
        if arg_dict:
            extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
            if extra_keys:
                module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the Ansible build runs multiple instances of the test in parallel, which has caused
    # throttling failures, so apply a jittered backoff to SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    state = module.params.get("state")

    if state == 'present':
        region = get_aws_connection_info(module, boto3=True)[0]
        account_id = get_account_id(module)
        validate_params_for_identity_present(module)
        create_or_update_identity(connection, module, region, account_id)
    else:
        destroy_identity(connection, module)
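
# get_account_id() is not shown above; it is presumably a thin wrapper around STS
# (a sketch, assuming the helper only needs the caller's account ID):
def get_account_id(module):
    sts = module.client('sts')
    return sts.get_caller_identity()['Account']
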
Example #17
def main():
    argument_spec = dict(state=dict(required=True,
                                    choices=['present', 'absent']),
                         name=dict(),
                         location=dict(),
                         bandwidth=dict(choices=['1Gbps', '10Gbps']),
                         link_aggregation_group=dict(),
                         connection_id=dict(),
                         forced_update=dict(type='bool', default=False))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_one_of=[('connection_id', 'name')],
                              required_if=[('state', 'present',
                                            ('location', 'bandwidth'))])

    connection = module.client('directconnect')

    state = module.params.get('state')
    try:
        connection_id = connection_exists(
            connection,
            connection_id=module.params.get('connection_id'),
            connection_name=module.params.get('name'))
        if not connection_id and module.params.get('connection_id'):
            module.fail_json(
                msg="The Direct Connect connection {0} does not exist.".format(
                    module.params.get('connection_id')))

        if state == 'present':
            changed, connection_id = ensure_present(
                connection,
                connection_id=connection_id,
                connection_name=module.params.get('name'),
                location=module.params.get('location'),
                bandwidth=module.params.get('bandwidth'),
                lag_id=module.params.get('link_aggregation_group'),
                forced_update=module.params.get('forced_update'))
            response = connection_status(connection, connection_id)
        elif state == 'absent':
            changed = ensure_absent(connection, connection_id)
            response = {}
    except DirectConnectError as e:
        if e.last_traceback:
            module.fail_json(msg=e.msg,
                             exception=e.last_traceback,
                             **camel_dict_to_snake_dict(e.exception.response))
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(state=dict(required=True,
                        choices=['present', 'absent', 'deleting']),
             name=dict(required=True, type='str'),
             cluster=dict(required=False, type='str'),
             task_definition=dict(required=False, type='str'),
             load_balancers=dict(required=False, default=[], type='list'),
             desired_count=dict(required=False, type='int'),
             client_token=dict(required=False, default='', type='str'),
             role=dict(required=False, default='', type='str'),
             delay=dict(required=False, type='int', default=10),
             repeat=dict(required=False, type='int', default=10),
             force_new_deployment=dict(required=False,
                                       default=False,
                                       type='bool'),
             deployment_configuration=dict(required=False,
                                           default={},
                                           type='dict'),
             placement_constraints=dict(required=False,
                                        default=[],
                                        type='list'),
             placement_strategy=dict(required=False, default=[], type='list'),
             health_check_grace_period_seconds=dict(required=False,
                                                    type='int'),
             network_configuration=dict(
                 required=False,
                 type='dict',
                 options=dict(subnets=dict(type='list'),
                              security_groups=dict(type='list'),
                              assign_public_ip=dict(type='bool'))),
             launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
             service_registries=dict(required=False, type='list', default=[]),
             scheduling_strategy=dict(required=False,
                                      choices=['DAEMON', 'REPLICA'])))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('state', 'present',
                                            ['task_definition']),
                                           ('launch_type', 'FARGATE',
                                            ['network_configuration'])],
                              required_together=[['load_balancers', 'role']])

    if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA':
        if module.params['desired_count'] is None:
            module.fail_json(
                msg='state is present, scheduling_strategy is REPLICA; missing desired_count')

    service_mgr = EcsServiceManager(module)
    if module.params['network_configuration']:
        if not service_mgr.ecs_api_handles_network_configuration():
            module.fail_json(
                msg='botocore needs to be version 1.7.44 or higher to use network configuration')
        network_configuration = service_mgr.format_network_configuration(
            module.params['network_configuration'])
    else:
        network_configuration = None

    deployment_configuration = map_complex_type(
        module.params['deployment_configuration'],
        DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    deploymentConfiguration = snake_dict_to_camel_dict(
        deployment_configuration)
    serviceRegistries = list(
        map(snake_dict_to_camel_dict, module.params['service_registries']))

    try:
        existing = service_mgr.describe_service(module.params['cluster'],
                                                module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '" +
                         module.params['name'] + "' in cluster '" +
                         module.params['cluster'] + "': " + str(e))

    results = dict(changed=False)

    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg='botocore needs to be version 1.8.4 or higher to use launch_type')
    if module.params['force_new_deployment']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg='botocore needs to be version 1.8.4 or higher to use force_new_deployment')
    if module.params['health_check_grace_period_seconds']:
        if not module.botocore_at_least('1.8.20'):
            module.fail_json(
                msg='botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds')

    if module.params['state'] == 'present':

        matching = False
        update = False

        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if module.params['force_new_deployment']:
                update = True
            elif service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = existing
            else:
                update = True

        if not matching:
            if not module.check_mode:

                role = module.params['role']
                clientToken = module.params['client_token']

                loadBalancers = []
                for loadBalancer in module.params['load_balancers']:
                    if 'containerPort' in loadBalancer:
                        loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
                    loadBalancers.append(loadBalancer)

                if update:
                    # check various parameters and botocore versions and give a
                    # helpful error if botocore is not new enough for a feature

                    if module.params['scheduling_strategy']:
                        if not module.botocore_at_least('1.10.37'):
                            module.fail_json(
                                msg='botocore needs to be version 1.10.37 or higher to use scheduling_strategy')
                        elif existing['schedulingStrategy'] != module.params['scheduling_strategy']:
                            module.fail_json(
                                msg="It is not possible to update the scheduling strategy of an existing service")

                    if module.params['service_registries']:
                        if not module.botocore_at_least('1.9.15'):
                            module.fail_json(
                                msg='botocore needs to be version 1.9.15 or higher to use service_registries')
                        elif (existing['serviceRegistries'] or []) != serviceRegistries:
                            module.fail_json(
                                msg="It is not possible to update the service registries of an existing service")

                    if (existing['loadBalancers'] or []) != loadBalancers:
                        module.fail_json(
                            msg="It is not possible to update the load balancers of an existing service")

                    # update required
                    response = service_mgr.update_service(
                        module.params['name'], module.params['cluster'],
                        module.params['task_definition'],
                        module.params['desired_count'],
                        deploymentConfiguration, network_configuration,
                        module.params['health_check_grace_period_seconds'],
                        module.params['force_new_deployment'])

                else:
                    try:
                        response = service_mgr.create_service(
                            module.params['name'], module.params['cluster'],
                            module.params['task_definition'], loadBalancers,
                            module.params['desired_count'], clientToken, role,
                            deploymentConfiguration,
                            module.params['placement_constraints'],
                            module.params['placement_strategy'],
                            module.params['health_check_grace_period_seconds'],
                            network_configuration, serviceRegistries,
                            module.params['launch_type'],
                            module.params['scheduling_strategy'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't create service")

                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the service deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(module.params['name'],
                                                   module.params['cluster'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't delete service")
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            module.fail_json(msg="Service '" + module.params['name'] +
                             " not found.")
            return
        # it exists, so we should delete it and mark changed.
        # return info about the service deleted
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'],
                                                    module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
        if i == repeat - 1:
            module.fail_json(msg="Service still not deleted after " +
                             str(repeat) + " tries of " + str(delay) +
                             " seconds each.")
            return

    module.exit_json(**results)
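
# DEPLOYMENT_CONFIGURATION_TYPE_MAP is not shown above; it tells map_complex_type()
# which types to coerce the deployment_configuration keys to before they are
# camel-cased and sent to the ECS API. Assumed value, not the upstream constant:
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
    'maximum_percent': 'int',
    'minimum_healthy_percent': 'int',
}
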
def main():

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(force=dict(default=False, type='bool'),
             policy=dict(type='json'),
             name=dict(required=True),
             requester_pays=dict(default=False, type='bool'),
             s3_url=dict(aliases=['S3_URL']),
             state=dict(default='present', choices=['present', 'absent']),
             tags=dict(type='dict'),
             purge_tags=dict(type='bool', default=True),
             versioning=dict(type='bool'),
             ceph=dict(default=False, type='bool'),
             encryption=dict(choices=['none', 'AES256', 'aws:kms']),
             encryption_key_id=dict()))

    module = AnsibleAWSModule(argument_spec=argument_spec)

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module,
                                                                  boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    s3_url = module.params.get('s3_url')
    ceph = module.params.get('ceph')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if ceph and not s3_url:
        module.fail_json(msg='ceph flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to Ceph RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph,
                              s3_url)

    if s3_client is None:  # this should never happen
        module.fail_json(
            msg='Unknown error, failed to create s3 connection, no information from boto.')

    state = module.params.get("state")
    encryption = module.params.get("encryption")
    encryption_key_id = module.params.get("encryption_key_id")

    # Parameter validation
    if encryption_key_id is not None and encryption is None:
        module.fail_json(
            msg="You must specify encryption parameter along with encryption_key_id.")
    elif encryption_key_id is not None and encryption != 'aws:kms':
        module.fail_json(
            msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")

    if state == 'present':
        create_or_update_bucket(s3_client, module, location)
    elif state == 'absent':
        destroy_bucket(s3_client, module)
Example #20
def main():

    argument_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default="/"),
        assume_role_policy_document=dict(type='json'),
        managed_policies=dict(type='list', aliases=['managed_policy']),
        max_session_duration=dict(type='int'),
        state=dict(type='str',
                   choices=['present', 'absent'],
                   default='present'),
        description=dict(type='str'),
        boundary=dict(type='str', aliases=['boundary_policy_arn']),
        create_instance_profile=dict(type='bool', default=True),
        delete_instance_profile=dict(type='bool', default=False),
        purge_policies=dict(type='bool',
                            aliases=['purge_policy',
                                     'purge_managed_policies']),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[('state', 'present',
                                            ['assume_role_policy_document'])],
                              supports_check_mode=True)

    if module.params.get('purge_policies') is None:
        module.deprecate(
            'In Ansible 2.14 the default value of purge_policies will change from true to false.'
            '  To maintain the existing behaviour explicitly set purge_policies=true',
            version='2.14')

    if module.params.get('boundary'):
        if module.params.get('create_instance_profile'):
            module.fail_json(
                msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
        if not module.params.get('boundary').startswith('arn:aws:iam'):
            module.fail_json(msg="Boundary policy must be an ARN")
    if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
        module.fail_json(
            msg="When managing tags botocore must be at least v1.12.46. "
            "Current versions: boto3-{boto3_version} botocore-{botocore_version}"
            .format(**module._gather_versions()))
    if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
        module.fail_json(
            msg="When using a boundary policy, botocore must be at least v1.10.57. "
                "Current versions: boto3-{boto3_version} botocore-{botocore_version}"
                .format(**module._gather_versions()))
    if module.params.get('max_session_duration'):
        max_session_duration = module.params.get('max_session_duration')
        if max_session_duration < 3600 or max_session_duration > 43200:
            module.fail_json(
                msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
    if module.params.get('path'):
        path = module.params.get('path')
        if not path.endswith('/') or not path.startswith('/'):
            module.fail_json(msg="path must begin and end with /")

    connection = module.client('iam')

    state = module.params.get("state")

    if state == 'present':
        create_or_update_role(connection, module)
    else:
        destroy_role(connection, module)
Example #21
def main():
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list',
                          choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list'),
        accounts=dict(type='list'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(
            aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(
            aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)
    if not (module.boto3_at_least('1.6.0')
            and module.botocore_at_least('1.10.26')):
        module.fail_json(
            msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26")

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    cfn = module.client('cloudformation',
                        retry_decorator=AWSRetry.jittered_backoff(
                            retries=10, delay=3, max_delay=30))
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg="Can't create a stack set without choosing at least one account. "
                "To get the ID of the current account, use the aws_caller_info module.")

    module.params['accounts'] = [
        to_native(a) for a in module.params['accounts']
    ]

    stack_params['StackSetName'] = module.params['name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg="The Stack Set {0} does not exist, and no template was provided. "
                    "Provide one of `template`, `template_body`, or `template_url`".format(
                        module.params['name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if module.params.get('tags') and isinstance(module.params.get('tags'),
                                                dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(
            module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params[
            'administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params[
            'execution_role_name']

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True,
                             msg='Stack set would be deleted',
                             meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False,
                             msg='Stack set doesn\'t exist',
                             meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True,
                             msg='New stack set would be created',
                             meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True,
                                 msg='New stack instance(s) would be created',
                                 meta=[])
            elif unspecified_stacks and module.params.get('purge_stack_instances'):
                module.exit_json(changed=True,
                                 msg='Old stack instance(s) would be deleted',
                                 meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(
                operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params['OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append(
                'Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct
                                  for acct, region in new_stack_instances)),
                Regions=list(
                    set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append(
                'Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(
                    set(acct for acct, region in existing_stack_instances)),
                Regions=list(
                    set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        for op in operation_ids:
            await_stack_set_operation(
                module,
                cfn,
                operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(
                module.params['name']))
        if module.params.get('purge_stack_instances') is False:
            pass
        try:
            cfn.delete_stack_set(StackSetName=module.params['name'], )
            module.exit_json(
                msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e,
                msg='Cannot delete stack {0} while there is an operation in progress'.format(
                    module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(
                operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op)
            await_stack_set_operation(
                module,
                cfn,
                operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(StackSetName=module.params['name'])
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(
                    StackSetName=module.params['name'], )
                stack_states = ', '.join(
                    '(account={Account}, region={Region}, state={Status})'.
                    format(**i) for i in instances['Summaries'])
                module.fail_json_aws(
                    exc,
                    msg=
                    'Could not purge all stacks, or not all accounts/regions were chosen for deletion: '
                    + stack_states)
            module.exit_json(changed=True,
                             msg='Stack set {0} deleted'.format(
                                 module.params['name']))

    result.update(**describe_stack_tree(
        module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute",
                         **result)
    module.exit_json(changed=changed, **result)
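
# A minimal sketch, under stated assumptions, of what the compare_stack_instances()
# helper used above might look like.  It assumes the helper returns three sets of
# (account, region) tuples -- instances to create, instances already present, and
# instances that exist but were not requested -- which is how main() consumes the
# result.  A complete version would also follow NextToken pagination.
import itertools

def compare_stack_instances_sketch(cfn, stack_set_name, accounts, regions):
    # every (account, region) combination the user asked for
    desired = set(itertools.product(accounts, regions))
    # combinations that already exist in the stack set (first page only)
    summaries = cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']
    existing = set((s['Account'], s['Region']) for s in summaries)
    # to create, already present, present but not requested
    return desired - existing, desired & existing, existing - desired
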
def main():

    argument_spec = (
        dict(
            cross_zone_load_balancing=dict(type='bool'),
            deletion_protection=dict(type='bool'),
            listeners=dict(type='list',
                           elements='dict',
                           options=dict(
                               Protocol=dict(type='str', required=True),
                               Port=dict(type='int', required=True),
                               SslPolicy=dict(type='str'),
                               Certificates=dict(type='list'),
                               DefaultActions=dict(type='list', required=True)
                           )
                           ),
            name=dict(required=True, type='str'),
            purge_listeners=dict(default=True, type='bool'),
            purge_tags=dict(default=True, type='bool'),
            subnets=dict(type='list'),
            subnet_mappings=dict(type='list'),
            scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
            state=dict(choices=['present', 'absent'], type='str'),
            tags=dict(type='dict'),
            wait_timeout=dict(type='int'),
            wait=dict(type='bool')
        )
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              mutually_exclusive=[['subnets', 'subnet_mappings']])

    # Check for subnets or subnet_mappings if state is present
    state = module.params.get("state")
    if state == 'present':
        if module.params.get("subnets") is None and module.params.get("subnet_mappings") is None:
            module.fail_json(msg="'subnets' or 'subnet_mappings' is required when state=present")

    if state is None:
        # See below, unless state==present we delete.  Ouch.
        module.deprecate('State currently defaults to absent.  This is inconsistent with other modules'
                         ' and the default will be changed to `present` in Ansible 2.14',
                         version='2.14')

    # Quick check of listeners parameters
    listeners = module.params.get("listeners")
    if listeners is not None:
        for listener in listeners:
            protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP']
            if listener.get('Protocol') not in protocols_list:
                module.fail_json(msg="'Protocol' must be one of: " + ", ".join(protocols_list))

    connection = module.client('elbv2')
    connection_ec2 = module.client('ec2')

    elb = NetworkLoadBalancer(connection, connection_ec2, module)

    if state == 'present':
        create_or_update_elb(elb)
    else:
        delete_elb(elb)
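
# The inline Protocol check above could be expressed as a small reusable helper.
# This is a hypothetical refactoring, not part of the original module; it assumes
# only that listeners is a list of dicts with a 'Protocol' key, as declared in the
# argument spec.
def validate_nlb_listener_protocols(module, listeners,
                                    allowed=('TCP', 'TLS', 'UDP', 'TCP_UDP')):
    for listener in listeners or []:
        protocol = listener.get('Protocol')
        if protocol not in allowed:
            module.fail_json(msg="'Protocol' must be one of: {0} (got {1!r})".format(
                ", ".join(allowed), protocol))
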
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        device_id=dict(required=False, aliases=['instance_id']),
        public_ip=dict(required=False, aliases=['ip']),
        state=dict(required=False, default='present',
                   choices=['present', 'absent']),
        in_vpc=dict(required=False, type='bool', default=False),
        reuse_existing_ip_allowed=dict(required=False, type='bool',
                                       default=False),
        release_on_disassociation=dict(required=False, type='bool', default=False),
        allow_reassociation=dict(type='bool', default=False),
        wait_timeout=dict(type='int', removed_in_version='2.14'),
        private_ip_address=dict(),
        tag_name=dict(),
        tag_value=dict(),
        public_ipv4_pool=dict()
    ))

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_by={
            'private_ip_address': ['device_id'],
        },
    )

    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())

    device_id = module.params.get('device_id')
    instance_id = module.params.get('instance_id')
    public_ip = module.params.get('public_ip')
    private_ip_address = module.params.get('private_ip_address')
    state = module.params.get('state')
    in_vpc = module.params.get('in_vpc')
    domain = 'vpc' if in_vpc else None
    reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
    release_on_disassociation = module.params.get('release_on_disassociation')
    allow_reassociation = module.params.get('allow_reassociation')
    tag_name = module.params.get('tag_name')
    tag_value = module.params.get('tag_value')
    public_ipv4_pool = module.params.get('public_ipv4_pool')

    if instance_id:
        warnings = ["instance_id is no longer used, please use device_id going forward"]
        is_instance = True
        device_id = instance_id
    else:
        if device_id and device_id.startswith('i-'):
            is_instance = True
        elif device_id:
            if device_id.startswith('eni-') and not in_vpc:
                module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
            is_instance = False

    tag_dict = generate_tag_dict(module, tag_name, tag_value)

    try:
        if device_id:
            address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance)
        else:
            address = find_address(ec2, module, public_ip, None)

        if state == 'present':
            if device_id:
                result = ensure_present(
                    ec2, module, domain, address, private_ip_address, device_id,
                    reuse_existing_ip_allowed, allow_reassociation,
                    module.check_mode, is_instance=is_instance
                )
            else:
                if address:
                    changed = False
                else:
                    address, changed = allocate_address(
                        ec2, module, domain, reuse_existing_ip_allowed,
                        module.check_mode, tag_dict, public_ipv4_pool
                    )
                result = {
                    'changed': changed,
                    'public_ip': address['PublicIp'],
                    'allocation_id': address['AllocationId']
                }
        else:
            if device_id:
                disassociated = ensure_absent(
                    ec2, module, address, device_id, module.check_mode, is_instance=is_instance
                )

                if release_on_disassociation and disassociated['changed']:
                    released = release_address(ec2, module, address, module.check_mode)
                    result = {
                        'changed': True,
                        'disassociated': disassociated,
                        'released': released
                    }
                else:
                    result = {
                        'changed': disassociated['changed'],
                        'disassociated': disassociated,
                        'released': {'changed': False}
                    }
            else:
                released = release_address(ec2, module, address, module.check_mode)
                result = {
                    'changed': released['changed'],
                    'disassociated': {'changed': False},
                    'released': released
                }

    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg='Failed to manage Elastic IP address')

    if instance_id:
        result['warnings'] = warnings
    module.exit_json(**result)
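
# A minimal sketch, under assumptions, of the generate_tag_dict() helper used above.
# It assumes the helper turns the optional tag_name/tag_value pair into the filter
# dict later used when searching for a reusable address, and fails when tag_value
# is given without tag_name; the collection's actual helper may differ in details.
def generate_tag_dict_sketch(module, tag_name, tag_value):
    if tag_name and tag_value:
        return {'tag:' + tag_name: tag_value}
    if tag_name:
        return {'tag-key': tag_name}
    if tag_value:
        module.fail_json(msg="tag_value requires tag_name to be specified")
    return None
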
Beispiel #24
0
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            command=dict(choices=['create', 'facts', 'delete', 'modify'],
                         required=True),
            identifier=dict(required=True),
            node_type=dict(choices=[
                'ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge',
                'dc1.large', 'dc2.large', 'dc1.8xlarge', 'dw1.xlarge',
                'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'
            ],
                           required=False),
            username=dict(required=False),
            password=dict(no_log=True, required=False),
            db_name=dict(required=False),
            cluster_type=dict(choices=['multi-node', 'single-node'],
                              default='single-node'),
            cluster_security_groups=dict(aliases=['security_groups'],
                                         type='list'),
            vpc_security_group_ids=dict(aliases=['vpc_security_groups'],
                                        type='list'),
            skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
                                             type='bool',
                                             default=False),
            final_cluster_snapshot_identifier=dict(
                aliases=['final_snapshot_id'], required=False),
            cluster_subnet_group_name=dict(aliases=['subnet']),
            availability_zone=dict(aliases=['aws_zone', 'zone']),
            preferred_maintenance_window=dict(
                aliases=['maintance_window', 'maint_window']),
            cluster_parameter_group_name=dict(aliases=['param_group_name']),
            automated_snapshot_retention_period=dict(
                aliases=['retention_period'], type='int'),
            port=dict(type='int'),
            cluster_version=dict(aliases=['version'], choices=['1.0']),
            allow_version_upgrade=dict(aliases=['version_upgrade'],
                                       type='bool',
                                       default=True),
            number_of_nodes=dict(type='int'),
            publicly_accessible=dict(type='bool', default=False),
            encrypted=dict(type='bool', default=False),
            elastic_ip=dict(required=False),
            new_cluster_identifier=dict(aliases=['new_identifier']),
            enhanced_vpc_routing=dict(type='bool', default=False),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=300),
        ))

    required_if = [('command', 'delete', ['skip_final_cluster_snapshot']),
                   ('command', 'create', ['node_type', 'username',
                                          'password'])]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=required_if)

    command = module.params.get('command')
    skip_final_cluster_snapshot = module.params.get(
        'skip_final_cluster_snapshot')
    final_cluster_snapshot_identifier = module.params.get(
        'final_cluster_snapshot_identifier')
    # can't use module basic required_if check for this case
    if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
        module.fail_json(
            msg=
            "Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False"
        )

    conn = module.client('redshift')

    changed = True
    if command == 'create':
        (changed, cluster) = create_cluster(module, conn)

    elif command == 'facts':
        (changed, cluster) = describe_cluster(module, conn)

    elif command == 'delete':
        (changed, cluster) = delete_cluster(module, conn)

    elif command == 'modify':
        (changed, cluster) = modify_cluster(module, conn)

    module.exit_json(changed=changed, cluster=cluster)
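
# A rough sketch of the describe_cluster() handler dispatched for command=facts
# above.  It assumes the (changed, cluster) return shape used by main() and that
# botocore and camel_dict_to_snake_dict are imported by the module header, as in
# the other examples in this collection; the real handler may differ.
def describe_cluster_sketch(module, conn):
    identifier = module.params.get('identifier')
    try:
        resource = conn.describe_clusters(
            ClusterIdentifier=identifier)['Clusters'][0]
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to describe cluster {0}".format(identifier))
    # gathering facts never changes the cluster
    return False, camel_dict_to_snake_dict(resource)
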
Beispiel #25
0
def main():
    protocols = [
        'http',
        'https',
        'email',
        'email_json',
        'sms',
        'sqs',
        'application',
        'lambda',
    ]

    argument_spec = dict(
        msg=dict(required=True, aliases=['default']),
        subject=dict(),
        topic=dict(required=True),
        message_attributes=dict(type='dict'),
        message_structure=dict(choices=['json', 'string'], default='json'),
    )

    for p in protocols:
        argument_spec[p] = dict()

    module = AnsibleAWSModule(argument_spec=argument_spec)

    sns_kwargs = dict(
        Message=module.params['msg'],
        Subject=module.params['subject'],
        MessageStructure=module.params['message_structure'],
    )

    if module.params['message_attributes']:
        if module.params['message_structure'] != 'string':
            module.fail_json(
                msg=
                'message_attributes is only supported when the message_structure is "string".'
            )
        sns_kwargs['MessageAttributes'] = module.params['message_attributes']

    dict_msg = {'default': sns_kwargs['Message']}

    for p in protocols:
        if module.params[p]:
            if sns_kwargs['MessageStructure'] != 'json':
                module.fail_json(
                    msg=
                    'Protocol-specific messages are only supported when message_structure is "json".'
                )
            dict_msg[p.replace('_', '-')] = module.params[p]

    client = module.client('sns')

    topic = module.params['topic']
    if ':' in topic:
        # Short names can't contain ':' so we'll assume this is the full ARN
        sns_kwargs['TopicArn'] = topic
    else:
        sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)

    if not sns_kwargs['TopicArn']:
        module.fail_json(msg='Could not find topic: {0}'.format(topic))

    if sns_kwargs['MessageStructure'] == 'json':
        sns_kwargs['Message'] = json.dumps(dict_msg)

    try:
        result = client.publish(**sns_kwargs)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to publish message')

    module.exit_json(msg='OK', message_id=result['MessageId'])
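
# A minimal sketch of the arn_topic_lookup() helper used above, assuming it pages
# through sns:ListTopics and matches the short topic name against the end of each
# ARN, returning None when nothing matches; the real helper may differ in details.
def arn_topic_lookup_sketch(module, client, short_topic):
    lookup_suffix = ':{0}'.format(short_topic)
    try:
        for page in client.get_paginator('list_topics').paginate():
            for topic in page['Topics']:
                if topic['TopicArn'].endswith(lookup_suffix):
                    return topic['TopicArn']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to look up topic ARN')
    return None
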
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=False, type='str'),  # R S P
        task_definition=dict(required=False, type='str'),  # R* S*
        overrides=dict(required=False, type='dict'),  # R S
        count=dict(required=False, type='int'),  # R
        task=dict(required=False, type='str'),  # P*
        container_instances=dict(required=False, type='list'),  # S*
        started_by=dict(required=False, type='str'),  # R S
        network_configuration=dict(required=False, type='dict'),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        tags=dict(required=False, type='dict')
    ))

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE', ['network_configuration'])])

    # Validate Inputs
    if module.params['operation'] == 'run':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To run a task, a task_definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if module.params['operation'] == 'start':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To start a task, a task_definition must be specified")
        if module.params['container_instances'] is None:
            module.fail_json(msg="To start a task, container instances must be specified")
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if module.params['operation'] == 'stop':
        if module.params['task'] is None:
            module.fail_json(msg="To stop a task, a task must be specified")
        if module.params['task_definition'] is None:
            module.fail_json(msg="To stop a task, a task definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)

    if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
        module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')

    if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
        module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')

    if module.params['tags']:
        if not service_mgr.ecs_api_handles_tags():
            module.fail_json(msg=missing_required_lib("botocore >= 1.12.46", reason="to use tags"))
        if not service_mgr.ecs_task_long_format_enabled():
            module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")

    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)

    results = dict(changed=False)
    if module.params['operation'] == 'run':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'],
                    module.params['launch_type'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'start':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'stop':
        if existing:
            results['task'] = existing
        else:
            if not module.check_mode:
                # the task is not already stopped, so stop it and
                # return the details of the stopped task
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'],
                    module.params['task']
                )
            results['changed'] = True

    module.exit_json(**results)
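
# A rough sketch of the EcsExecManager capability checks called in main() above,
# assuming they are thin wrappers around module.botocore_at_least() with the same
# version strings that appear in the fail messages.  The long-ARN-format check
# (ecs_task_long_format_enabled) is omitted because it would need to inspect the
# account's ARN format settings.
class EcsExecManagerSketch(object):
    def __init__(self, module):
        self.module = module
        self.ecs = module.client('ecs')

    def ecs_api_handles_network_configuration(self):
        return self.module.botocore_at_least('1.7.44')

    def ecs_api_handles_launch_type(self):
        return self.module.botocore_at_least('1.8.4')

    def ecs_api_handles_tags(self):
        return self.module.botocore_at_least('1.12.46')
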
def main():
    argument_spec = dict(bucket=dict(required=True),
                         dest=dict(default=None, type='path'),
                         encrypt=dict(default=True, type='bool'),
                         encryption_mode=dict(choices=['AES256', 'aws:kms'],
                                              default='AES256'),
                         expiry=dict(default=600,
                                     type='int',
                                     aliases=['expiration']),
                         headers=dict(type='dict'),
                         marker=dict(default=""),
                         max_keys=dict(default=1000, type='int'),
                         metadata=dict(type='dict'),
                         mode=dict(choices=[
                             'get', 'put', 'delete', 'create', 'geturl',
                             'getstr', 'delobj', 'list'
                         ],
                                   required=True),
                         object=dict(),
                         permission=dict(type='list', default=['private']),
                         version=dict(default=None),
                         overwrite=dict(aliases=['force'], default='always'),
                         prefix=dict(default=""),
                         retries=dict(aliases=['retry'], type='int',
                                      default=0),
                         s3_url=dict(aliases=['S3_URL']),
                         dualstack=dict(default='no', type='bool'),
                         rgw=dict(default='no', type='bool'),
                         src=dict(),
                         ignore_nonexistent_bucket=dict(default=False,
                                                        type='bool'),
                         encryption_kms_key_id=dict())
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[['mode', 'put', ['src', 'object']],
                     ['mode', 'get', ['dest', 'object']],
                     ['mode', 'getstr', ['object']],
                     ['mode', 'geturl', ['object']]],
    )

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = module.params.get('expiry')
    dest = module.params.get('dest', '')
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    dualstack = module.params.get('dualstack')
    rgw = module.params.get('rgw')
    src = module.params.get('src')
    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')

    object_canned_acl = [
        "private", "public-read", "public-read-write", "aws-exec-read",
        "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"
    ]
    bucket_canned_acl = [
        "private", "public-read", "public-read-write", "authenticated-read"
    ]

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    if overwrite == 'different' and not HAS_MD5:
        module.fail_json(
            msg=
            'overwrite=different is unavailable: ETag calculation requires MD5 support'
        )

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module,
                                                                  boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = module.params['object']
        # If the object name starts with '/', strip the leading slash to
        # maintain compatibility with Ansible versions < 2.4
        if obj.startswith('/'):
            obj = obj[1:]

    # Bucket deletion does not require obj.  Prevents ambiguity with delobj.
    if obj and mode == "delete":
        module.fail_json(msg='Parameter obj cannot be used with mode=delete')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
        module.fail_json(msg='dualstack only applies to AWS S3')

    if dualstack and not module.botocore_at_least('1.4.45'):
        module.fail_json(msg='dualstack requires botocore >= 1.4.45')

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)

    validate = not ignore_nonexistent_bucket

    # separate types of ACLs
    bucket_acl = [
        acl for acl in module.params.get('permission')
        if acl in bucket_canned_acl
    ]
    object_acl = [
        acl for acl in module.params.get('permission')
        if acl in object_canned_acl
    ]
    error_acl = [
        acl for acl in module.params.get('permission')
        if acl not in bucket_canned_acl and acl not in object_canned_acl
    ]
    if error_acl:
        module.fail_json(msg='Unknown permission specified: %s' % error_acl)

    # First, we check to see if the bucket exists, we get "bucket" returned.
    bucketrtn = bucket_check(module, s3, bucket, validate=validate)

    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
        module.fail_json(msg="Source bucket cannot be found.")

    if mode == 'get':
        keyrtn = key_check(module,
                           s3,
                           bucket,
                           obj,
                           version=version,
                           validate=validate)
        if keyrtn is False:
            if version:
                module.fail_json(
                    msg="Key %s with version id %s does not exist." %
                    (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

        if path_check(dest) and overwrite != 'always':
            if overwrite == 'never':
                module.exit_json(
                    msg=
                    "Local object already exists and overwrite is disabled.",
                    changed=False)
            if etag_compare(module, dest, s3, bucket, obj, version=version):
                module.exit_json(
                    msg=
                    "Local and remote object are identical, ignoring. Use overwrite=always parameter to force.",
                    changed=False)

        try:
            download_s3file(module,
                            s3,
                            bucket,
                            obj,
                            dest,
                            retries,
                            version=version)
        except Sigv4Required:
            s3 = get_s3_connection(module,
                                   aws_connect_kwargs,
                                   location,
                                   rgw,
                                   s3_url,
                                   sig_4=True)
            download_s3file(module,
                            s3,
                            bucket,
                            obj,
                            dest,
                            retries,
                            version=version)

    if mode == 'put':

        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
        # these were separated into the variables bucket_acl and object_acl above

        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist")

        if bucketrtn:
            keyrtn = key_check(module,
                               s3,
                               bucket,
                               obj,
                               version=version,
                               validate=validate)
        else:
            # If the bucket doesn't exist we should create it.
            # only use valid bucket acls for create_bucket function
            module.params['permission'] = bucket_acl
            create_bucket(module, s3, bucket, location)
            # a freshly created bucket cannot already contain the key
            keyrtn = False

        if keyrtn and overwrite != 'always':
            if overwrite == 'never' or etag_compare(module, src, s3, bucket,
                                                    obj):
                # Return the download URL for the existing object
                get_download_url(module,
                                 s3,
                                 bucket,
                                 obj,
                                 expiry,
                                 changed=False)

        # only use valid object acls for the upload_s3file function
        module.params['permission'] = object_acl
        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt,
                      headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required")
        if bucket:
            deletertn = delete_key(module, s3, bucket, obj)
            if deletertn is True:
                module.exit_json(msg="Object deleted from bucket %s." % bucket,
                                 changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            deletertn = delete_bucket(module, s3, bucket)
            if deletertn is True:
                module.exit_json(
                    msg="Bucket %s and all keys have been deleted." % bucket,
                    changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Support for listing a set of keys
    if mode == 'list':
        exists = bucket_check(module, s3, bucket)

        # If the bucket does not exist then bail out
        if not exists:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)

        list_keys(module, s3, bucket, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':

        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
        # these were separated above into the variables bucket_acl and object_acl

        if bucket and not obj:
            if bucketrtn:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                # only use valid bucket acls when creating the bucket
                module.params['permission'] = bucket_acl
                module.exit_json(msg="Bucket created successfully",
                                 changed=create_bucket(module, s3, bucket,
                                                       location))
        if bucket and obj:
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn:
                if key_check(module, s3, bucket, dirobj):
                    module.exit_json(
                        msg="Bucket %s and key %s already exists." %
                        (bucket, obj),
                        changed=False)
                else:
                    # setting valid object acls for the create_dirkey function
                    module.params['permission'] = object_acl
                    create_dirkey(module, s3, bucket, dirobj, encrypt)
            else:
                # only use valid bucket acls for the create_bucket function
                module.params['permission'] = bucket_acl
                create_bucket(module, s3, bucket, location)
                # only use valid object acls for the create_dirkey function
                module.params['permission'] = object_acl
                create_dirkey(module, s3, bucket, dirobj, encrypt)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if not bucket or not obj:
            module.fail_json(msg="Bucket and Object parameters must be set")

        keyrtn = key_check(module,
                           s3,
                           bucket,
                           obj,
                           version=version,
                           validate=validate)
        if keyrtn:
            get_download_url(module, s3, bucket, obj, expiry)
        else:
            module.fail_json(msg="Key %s does not exist." % obj)

    if mode == 'getstr':
        if bucket and obj:
            keyrtn = key_check(module,
                               s3,
                               bucket,
                               obj,
                               version=version,
                               validate=validate)
            if keyrtn:
                try:
                    download_s3str(module, s3, bucket, obj, version=version)
                except Sigv4Required:
                    s3 = get_s3_connection(module,
                                           aws_connect_kwargs,
                                           location,
                                           rgw,
                                           s3_url,
                                           sig_4=True)
                    download_s3str(module, s3, bucket, obj, version=version)
            elif version is not None:
                module.fail_json(
                    msg="Key %s with version id %s does not exist." %
                    (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

    module.exit_json(failed=False)
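
# A simplified sketch of the etag_compare() check used for overwrite=different and
# for skipping identical transfers above.  It only handles single-part uploads,
# where the S3 ETag is the hex MD5 of the object; multipart ETags (those containing
# '-') are treated as different.  It assumes botocore is imported by the module header.
import hashlib

def etag_compare_sketch(module, local_file, s3, bucket, obj, version=None):
    extra = {'VersionId': version} if version else {}
    try:
        remote_etag = s3.head_object(Bucket=bucket, Key=obj, **extra)['ETag'].strip('"')
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg='Failed to read the remote ETag')
    if '-' in remote_etag:
        return False
    md5 = hashlib.md5()
    with open(local_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            md5.update(chunk)
    return md5.hexdigest() == remote_etag
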
def main():

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(access_logs_enabled=dict(type='bool'),
             access_logs_s3_bucket=dict(type='str'),
             access_logs_s3_prefix=dict(type='str'),
             deletion_protection=dict(type='bool'),
             http2=dict(type='bool'),
             idle_timeout=dict(type='int'),
             listeners=dict(type='list',
                            elements='dict',
                            options=dict(Protocol=dict(type='str',
                                                       required=True),
                                         Port=dict(type='int', required=True),
                                         SslPolicy=dict(type='str'),
                                         Certificates=dict(type='list'),
                                         DefaultActions=dict(type='list',
                                                             required=True),
                                         Rules=dict(type='list'))),
             name=dict(required=True, type='str'),
             purge_listeners=dict(default=True, type='bool'),
             purge_tags=dict(default=True, type='bool'),
             subnets=dict(type='list'),
             security_groups=dict(type='list'),
             scheme=dict(default='internet-facing',
                         choices=['internet-facing', 'internal']),
             state=dict(choices=['present', 'absent'], default='present'),
             tags=dict(type='dict'),
             wait_timeout=dict(type='int'),
             wait=dict(default=False, type='bool'),
             purge_rules=dict(default=True, type='bool')))

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[('state', 'present', ['subnets', 'security_groups'])],
        required_together=[['access_logs_enabled', 'access_logs_s3_bucket']])

    # Quick check of listeners parameters
    listeners = module.params.get("listeners")
    if listeners is not None:
        for listener in listeners:
            if listener.get('Protocol') == 'HTTPS':
                if listener.get('SslPolicy') is None:
                    module.fail_json(
                        msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS")

                if listener.get('Certificates') is None:
                    module.fail_json(
                        msg="'Certificates' is a required listener dict key when Protocol = HTTPS")

    connection = module.client('elbv2')
    connection_ec2 = module.client('ec2')

    state = module.params.get("state")

    elb = ApplicationLoadBalancer(connection, connection_ec2, module)

    if state == 'present':
        create_or_update_elb(elb)
    else:
        delete_elb(elb)
Beispiel #29
0
def main():
    argument_spec = dict(
        name=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        runtime=dict(),
        role=dict(),
        handler=dict(),
        zip_file=dict(aliases=['src']),
        s3_bucket=dict(),
        s3_key=dict(),
        s3_object_version=dict(),
        description=dict(default=''),
        timeout=dict(type='int', default=3),
        memory_size=dict(type='int', default=128),
        vpc_subnet_ids=dict(type='list'),
        vpc_security_group_ids=dict(type='list'),
        environment_variables=dict(type='dict'),
        dead_letter_arn=dict(),
        tracing_mode=dict(choices=['Active', 'PassThrough']),
        tags=dict(type='dict'),
    )

    mutually_exclusive = [['zip_file', 's3_key'], ['zip_file', 's3_bucket'],
                          ['zip_file', 's3_object_version']]

    required_together = [['s3_key', 's3_bucket'],
                         ['vpc_subnet_ids', 'vpc_security_group_ids']]

    required_if = [['state', 'present', ['runtime', 'handler', 'role']]]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=mutually_exclusive,
                              required_together=required_together,
                              required_if=required_if)

    name = module.params.get('name')
    state = module.params.get('state').lower()
    runtime = module.params.get('runtime')
    role = module.params.get('role')
    handler = module.params.get('handler')
    s3_bucket = module.params.get('s3_bucket')
    s3_key = module.params.get('s3_key')
    s3_object_version = module.params.get('s3_object_version')
    zip_file = module.params.get('zip_file')
    description = module.params.get('description')
    timeout = module.params.get('timeout')
    memory_size = module.params.get('memory_size')
    vpc_subnet_ids = module.params.get('vpc_subnet_ids')
    vpc_security_group_ids = module.params.get('vpc_security_group_ids')
    environment_variables = module.params.get('environment_variables')
    dead_letter_arn = module.params.get('dead_letter_arn')
    tracing_mode = module.params.get('tracing_mode')
    tags = module.params.get('tags')

    check_mode = module.check_mode
    changed = False

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module,
                                                                  boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        client = boto3_conn(module,
                            conn_type='client',
                            resource='lambda',
                            region=region,
                            endpoint=ec2_url,
                            **aws_connect_kwargs)
    except (ClientError, ValidationError) as e:
        module.fail_json_aws(e, msg="Trying to connect to AWS")

    if state == 'present':
        if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role):
            role_arn = role
        else:
            # get account ID and assemble ARN
            account_id, partition = get_account_info(module,
                                                     region=region,
                                                     endpoint=ec2_url,
                                                     **aws_connect_kwargs)
            role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(
                partition, account_id, role)

    # Get function configuration if present, False otherwise
    current_function = get_current_function(client, name)

    # Update existing Lambda function
    if state == 'present' and current_function:

        # Get current state
        current_config = current_function['Configuration']
        current_version = None

        # Update function configuration
        func_kwargs = {'FunctionName': name}

        # Update configuration if needed
        if role_arn and current_config['Role'] != role_arn:
            func_kwargs.update({'Role': role_arn})
        if handler and current_config['Handler'] != handler:
            func_kwargs.update({'Handler': handler})
        if description and current_config['Description'] != description:
            func_kwargs.update({'Description': description})
        if timeout and current_config['Timeout'] != timeout:
            func_kwargs.update({'Timeout': timeout})
        if memory_size and current_config['MemorySize'] != memory_size:
            func_kwargs.update({'MemorySize': memory_size})
        if runtime and current_config['Runtime'] != runtime:
            func_kwargs.update({'Runtime': runtime})
        if (environment_variables
                is not None) and (current_config.get('Environment', {}).get(
                    'Variables', {}) != environment_variables):
            func_kwargs.update(
                {'Environment': {
                    'Variables': environment_variables
                }})
        if dead_letter_arn is not None:
            if current_config.get('DeadLetterConfig'):
                if current_config['DeadLetterConfig'][
                        'TargetArn'] != dead_letter_arn:
                    func_kwargs.update(
                        {'DeadLetterConfig': {
                            'TargetArn': dead_letter_arn
                        }})
            else:
                if dead_letter_arn != "":
                    func_kwargs.update(
                        {'DeadLetterConfig': {
                            'TargetArn': dead_letter_arn
                        }})
        if tracing_mode and (current_config.get('TracingConfig', {}).get(
                'Mode', 'PassThrough') != tracing_mode):
            func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})

        # If VPC configuration is desired
        if vpc_subnet_ids or vpc_security_group_ids:
            if not vpc_subnet_ids or not vpc_security_group_ids:
                module.fail_json(
                    msg=
                    'vpc connectivity requires at least one security group and one subnet'
                )

            if 'VpcConfig' in current_config:
                # Compare VPC config with current config
                current_vpc_subnet_ids = current_config['VpcConfig'][
                    'SubnetIds']
                current_vpc_security_group_ids = current_config['VpcConfig'][
                    'SecurityGroupIds']

                subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(
                    current_vpc_subnet_ids)
                vpc_security_group_ids_changed = sorted(
                    vpc_security_group_ids) != sorted(
                        current_vpc_security_group_ids)

            if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
                new_vpc_config = {
                    'SubnetIds': vpc_subnet_ids,
                    'SecurityGroupIds': vpc_security_group_ids
                }
                func_kwargs.update({'VpcConfig': new_vpc_config})
        else:
            # No VPC configuration is desired, assure VPC config is empty when present in current config
            if 'VpcConfig' in current_config and current_config[
                    'VpcConfig'].get('VpcId'):
                func_kwargs.update(
                    {'VpcConfig': {
                        'SubnetIds': [],
                        'SecurityGroupIds': []
                    }})

        # Upload new configuration if configuration has changed
        if len(func_kwargs) > 1:
            try:
                if not check_mode:
                    response = client.update_function_configuration(
                        **func_kwargs)
                    current_version = response['Version']
                changed = True
            except (ParamValidationError, ClientError) as e:
                module.fail_json_aws(
                    e, msg="Trying to update lambda configuration")

        # Update code configuration
        code_kwargs = {'FunctionName': name, 'Publish': True}

        # Update S3 location
        if s3_bucket and s3_key:
            # If function is stored on S3 always update
            code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})

            # If S3 Object Version is given
            if s3_object_version:
                code_kwargs.update({'S3ObjectVersion': s3_object_version})

        # Compare local checksum, update remote code when different
        elif zip_file:
            local_checksum = sha256sum(zip_file)
            remote_checksum = current_config['CodeSha256']

            # Only upload new code when local code is different compared to the remote code
            if local_checksum != remote_checksum:
                try:
                    with open(zip_file, 'rb') as f:
                        encoded_zip = f.read()
                    code_kwargs.update({'ZipFile': encoded_zip})
                except IOError as e:
                    module.fail_json(msg=str(e),
                                     exception=traceback.format_exc())

        # Tag Function
        if tags is not None:
            if set_tag(client, module, tags, current_function):
                changed = True

        # Upload new code if needed (e.g. code checksum has changed)
        if len(code_kwargs) > 2:
            try:
                if not check_mode:
                    response = client.update_function_code(**code_kwargs)
                    current_version = response['Version']
                changed = True
            except (ParamValidationError, ClientError) as e:
                module.fail_json_aws(e, msg="Trying to upload new code")

        # Describe function code and configuration
        response = get_current_function(client,
                                        name,
                                        qualifier=current_version)
        if not response:
            module.fail_json(
                msg='Unable to get function information after updating')

        # We're done
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Function doesn't exist, create new Lambda function
    elif state == 'present':
        if s3_bucket and s3_key:
            # If function is stored on S3
            code = {'S3Bucket': s3_bucket, 'S3Key': s3_key}
            if s3_object_version:
                code.update({'S3ObjectVersion': s3_object_version})
        elif zip_file:
            # If function is stored in local zipfile
            try:
                with open(zip_file, 'rb') as f:
                    zip_content = f.read()

                code = {'ZipFile': zip_content}
            except IOError as e:
                module.fail_json(msg=str(e), exception=traceback.format_exc())

        else:
            module.fail_json(
                msg='Either S3 object or path to zipfile required')

        func_kwargs = {
            'FunctionName': name,
            'Publish': True,
            'Runtime': runtime,
            'Role': role_arn,
            'Code': code,
            'Timeout': timeout,
            'MemorySize': memory_size,
        }

        if description is not None:
            func_kwargs.update({'Description': description})

        if handler is not None:
            func_kwargs.update({'Handler': handler})

        if environment_variables:
            func_kwargs.update(
                {'Environment': {
                    'Variables': environment_variables
                }})

        if dead_letter_arn:
            func_kwargs.update(
                {'DeadLetterConfig': {
                    'TargetArn': dead_letter_arn
                }})

        if tracing_mode:
            func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})

        # If VPC configuration is given
        if vpc_subnet_ids or vpc_security_group_ids:
            if not vpc_subnet_ids or not vpc_security_group_ids:
                module.fail_json(
                    msg=
                    'vpc connectivity requires at least one security group and one subnet'
                )

            func_kwargs.update({
                'VpcConfig': {
                    'SubnetIds': vpc_subnet_ids,
                    'SecurityGroupIds': vpc_security_group_ids
                }
            })

        # Finally try to create function
        current_version = None
        try:
            if not check_mode:
                response = client.create_function(**func_kwargs)
                current_version = response['Version']
            changed = True
        except (ParamValidationError, ClientError) as e:
            module.fail_json_aws(e, msg="Trying to create function")

        # Tag Function
        if tags is not None:
            if set_tag(client, module, tags,
                       get_current_function(client, name)):
                changed = True

        response = get_current_function(client,
                                        name,
                                        qualifier=current_version)
        if not response:
            module.fail_json(
                msg='Unable to get function information after creating')
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Delete existing Lambda function
    if state == 'absent' and current_function:
        try:
            if not check_mode:
                client.delete_function(FunctionName=name)
            changed = True
        except (ParamValidationError, ClientError) as e:
            module.fail_json_aws(e, msg="Trying to delete Lambda function")

        module.exit_json(changed=changed)

    # Function already absent, do nothing
    elif state == 'absent':
        module.exit_json(changed=changed)
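
# A minimal sketch of the get_current_function() helper used above, assuming it
# returns the lambda:GetFunction response when the function exists and False
# otherwise, which is how main() treats the result; error handling in the real
# helper may be broader.
def get_current_function_sketch(connection, function_name, qualifier=None):
    try:
        if qualifier:
            return connection.get_function(FunctionName=function_name,
                                           Qualifier=qualifier)
        return connection.get_function(FunctionName=function_name)
    except connection.exceptions.ResourceNotFoundException:
        return False
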
def main():
    """
    Main entry point.

    :return dict: ansible facts
    """
    argument_spec = dict(function_name=dict(required=False,
                                            default=None,
                                            aliases=['function', 'name']),
                         query=dict(required=False,
                                    choices=[
                                        'aliases', 'all', 'config', 'mappings',
                                        'policy', 'versions'
                                    ],
                                    default='all'),
                         event_source_arn=dict(required=False, default=None))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=[],
                              required_together=[])

    # validate function_name if present
    function_name = module.params['function_name']
    if function_name:
        if not re.search(r"^[\w\-:]+$", function_name):
            module.fail_json(
                msg='Function name {0} is invalid. Names may contain only letters, '
                    'digits, underscores, hyphens and colons.'.format(function_name))
        if len(function_name) > 64:
            module.fail_json(
                msg='Function name "{0}" exceeds 64 character limit'.format(
                    function_name))

    try:
        region, endpoint, aws_connect_kwargs = get_aws_connection_info(
            module, boto3=True)
        aws_connect_kwargs.update(
            dict(region=region,
                 endpoint=endpoint,
                 conn_type='client',
                 resource='lambda'))
        client = boto3_conn(module, **aws_connect_kwargs)
    except ClientError as e:
        module.fail_json_aws(e, "trying to set up boto connection")

    this_module = sys.modules[__name__]

    invocations = dict(
        aliases='alias_details',
        all='all_details',
        config='config_details',
        mappings='mapping_details',
        policy='policy_details',
        versions='version_details',
    )

    this_module_function = getattr(this_module,
                                   invocations[module.params['query']])
    all_facts = fix_return(this_module_function(client, module))

    results = dict(ansible_facts={'lambda_facts': {
        'function': all_facts
    }},
                   changed=False)

    if module.check_mode:
        results['msg'] = 'Check mode is set but ignored: this module only gathers facts.'

    module.exit_json(**results)
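
# A rough sketch of the config_details() handler dispatched for query=config above.
# It assumes the handler returns a plain dict that fix_return() can post-process;
# the real handler returns a richer structure and also covers listing every
# function when no function_name is given.
def config_details_sketch(client, module):
    function_name = module.params.get('function_name')
    if not function_name:
        # no name given: just report how many functions exist in this region
        return {'function_count': len(client.list_functions().get('Functions', []))}
    try:
        return {function_name: client.get_function_configuration(
            FunctionName=function_name)}
    except client.exceptions.ResourceNotFoundException:
        return {function_name: {}}
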