Example #1
def main():
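    # Manages an EC2 Elastic IP: allocates/associates it with an instance or ENI when
    # state=present and disassociates (and optionally releases) it when state=absent
    # (this appears to be the ec2_eip module's entry point).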
    argument_spec = dict(
        device_id=dict(required=False, aliases=['instance_id']),
        public_ip=dict(required=False, aliases=['ip']),
        state=dict(required=False, default='present',
                   choices=['present', 'absent']),
        in_vpc=dict(required=False, type='bool', default=False),
        reuse_existing_ip_allowed=dict(required=False, type='bool',
                                       default=False),
        release_on_disassociation=dict(required=False, type='bool', default=False),
        allow_reassociation=dict(type='bool', default=False),
        wait_timeout=dict(type='int', removed_in_version='2.14'),
        private_ip_address=dict(),
        tag_name=dict(),
        tag_value=dict(),
        public_ipv4_pool=dict()
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_by={
            'private_ip_address': ['device_id'],
        },
    )

    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())

    device_id = module.params.get('device_id')
    instance_id = module.params.get('instance_id')
    public_ip = module.params.get('public_ip')
    private_ip_address = module.params.get('private_ip_address')
    state = module.params.get('state')
    in_vpc = module.params.get('in_vpc')
    domain = 'vpc' if in_vpc else None
    reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
    release_on_disassociation = module.params.get('release_on_disassociation')
    allow_reassociation = module.params.get('allow_reassociation')
    tag_name = module.params.get('tag_name')
    tag_value = module.params.get('tag_value')
    public_ipv4_pool = module.params.get('public_ipv4_pool')

    if instance_id:
        warnings = ["instance_id is no longer used, please use device_id going forward"]
        is_instance = True
        device_id = instance_id
    else:
        if device_id and device_id.startswith('i-'):
            is_instance = True
        elif device_id:
            if device_id.startswith('eni-') and not in_vpc:
                module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
            is_instance = False

    tag_dict = generate_tag_dict(module, tag_name, tag_value)

    try:
        if device_id:
            address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance)
        else:
            address = find_address(ec2, module, public_ip, None)

        if state == 'present':
            if device_id:
                result = ensure_present(
                    ec2, module, domain, address, private_ip_address, device_id,
                    reuse_existing_ip_allowed, allow_reassociation,
                    module.check_mode, is_instance=is_instance
                )
            else:
                if address:
                    changed = False
                else:
                    address, changed = allocate_address(
                        ec2, module, domain, reuse_existing_ip_allowed,
                        module.check_mode, tag_dict, public_ipv4_pool
                    )
                result = {
                    'changed': changed,
                    'public_ip': address['PublicIp'],
                    'allocation_id': address['AllocationId']
                }
        else:
            if device_id:
                disassociated = ensure_absent(
                    ec2, module, address, device_id, module.check_mode, is_instance=is_instance
                )

                if release_on_disassociation and disassociated['changed']:
                    released = release_address(ec2, module, address, module.check_mode)
                    result = {
                        'changed': True,
                        'disassociated': disassociated,
                        'released': released
                    }
                else:
                    result = {
                        'changed': disassociated['changed'],
                        'disassociated': disassociated,
                        'released': {'changed': False}
                    }
            else:
                released = release_address(ec2, module, address, module.check_mode)
                result = {
                    'changed': released['changed'],
                    'disassociated': {'changed': False},
                    'released': released
                }

    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e)

    if instance_id:
        result['warnings'] = warnings
    module.exit_json(**result)
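The generate_tag_dict helper called above is not part of this excerpt. A minimal sketch, assuming it only turns the tag_name/tag_value pair into an EC2 tag filter and rejects a tag_value supplied without a tag_name, might look like this:

def generate_tag_dict(module, tag_name, tag_value):
    # Hypothetical helper: build a filter dict for matching an existing address by tag.
    if tag_name and tag_value:
        return {'tag:' + tag_name: tag_value}
    if tag_name:
        return {'tag-key': tag_name}
    if tag_value:
        module.fail_json(msg="tag_value is only valid together with tag_name")
    return None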
Example #2
def main():
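    # Creates, updates or deletes an Elastic Beanstalk application depending on 'state'
    # (this looks like the aws_elasticbeanstalk_app module's entry point).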
    argument_spec = dict(app_name=dict(aliases=['name'],
                                       type='str',
                                       required=False),
                         description=dict(),
                         state=dict(choices=['present', 'absent'],
                                    default='present'),
                         terminate_by_force=dict(type='bool',
                                                 default=False,
                                                 required=False))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    app_name = module.params['app_name']
    description = module.params['description']
    state = module.params['state']
    terminate_by_force = module.params['terminate_by_force']

    if app_name is None:
        module.fail_json(msg='Module parameter "app_name" is required')

    result = {}

    ebs = module.client('elasticbeanstalk')

    app = describe_app(ebs, app_name, module)

    if module.check_mode:
        check_app(ebs, app, module)
        module.fail_json(
            msg='ASSERTION FAILURE: check_app() should not return control.')

    if state == 'present':
        if app is None:
            try:
                ebs.create_application(**filter_empty(
                    ApplicationName=app_name, Description=description))
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Could not create application")

            app = describe_app(ebs, app_name, module)

            result = dict(changed=True, app=app)
        else:
            if app.get("Description", None) != description:
                try:
                    if not description:
                        ebs.update_application(ApplicationName=app_name)
                    else:
                        ebs.update_application(ApplicationName=app_name,
                                               Description=description)
                except (BotoCoreError, ClientError) as e:
                    module.fail_json_aws(e, msg="Could not update application")

                app = describe_app(ebs, app_name, module)

                result = dict(changed=True, app=app)
            else:
                result = dict(changed=False, app=app)

    else:
        if app is None:
            result = dict(changed=False,
                          output='Application not found',
                          app={})
        else:
            try:
                if terminate_by_force:
                    # Running environments will be terminated before deleting the application
                    ebs.delete_application(
                        ApplicationName=app_name,
                        TerminateEnvByForce=terminate_by_force)
                else:
                    ebs.delete_application(ApplicationName=app_name)
                changed = True
            except BotoCoreError as e:
                module.fail_json_aws(e, msg="Cannot terminate app")
            except ClientError as e:
                if 'It is currently pending deletion.' not in e.response[
                        'Error']['Message']:
                    module.fail_json_aws(e, msg="Cannot terminate app")
                else:
                    changed = False

            result = dict(changed=changed, app=app)

    module.exit_json(**result)
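The filter_empty helper used in the create path above is not shown in this excerpt. A minimal sketch, assuming it simply drops keyword arguments with empty values before they reach create_application (consistent with how it is called), could be:

def filter_empty(**kwargs):
    # Hypothetical helper: keep only keyword arguments with truthy values so that
    # optional fields such as Description are omitted from the boto3 call.
    return {k: v for k, v in kwargs.items() if v}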
Example #3
def main():
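    # Dispatches a Redshift cluster operation (create/facts/delete/modify) based on the
    # 'command' parameter (this appears to be a Redshift management module's entry point).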
    argument_spec = dict(
        command=dict(choices=['create', 'facts', 'delete', 'modify'],
                     required=True),
        identifier=dict(required=True),
        node_type=dict(choices=[
            'ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge',
            'dc1.large', 'dc2.large', 'dc1.8xlarge', 'dw1.xlarge',
            'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'
        ],
                       required=False),
        username=dict(required=False),
        password=dict(no_log=True, required=False),
        db_name=dict(required=False),
        cluster_type=dict(choices=['multi-node', 'single-node'],
                          default='single-node'),
        cluster_security_groups=dict(aliases=['security_groups'], type='list'),
        vpc_security_group_ids=dict(aliases=['vpc_security_groups'],
                                    type='list'),
        skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
                                         type='bool',
                                         default=False),
        final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'],
                                               required=False),
        cluster_subnet_group_name=dict(aliases=['subnet']),
        availability_zone=dict(aliases=['aws_zone', 'zone']),
        preferred_maintenance_window=dict(
            aliases=['maintance_window', 'maint_window']),
        cluster_parameter_group_name=dict(aliases=['param_group_name']),
        automated_snapshot_retention_period=dict(aliases=['retention_period'],
                                                 type='int'),
        port=dict(type='int'),
        cluster_version=dict(aliases=['version'], choices=['1.0']),
        allow_version_upgrade=dict(aliases=['version_upgrade'],
                                   type='bool',
                                   default=True),
        number_of_nodes=dict(type='int'),
        publicly_accessible=dict(type='bool', default=False),
        encrypted=dict(type='bool', default=False),
        elastic_ip=dict(required=False),
        new_cluster_identifier=dict(aliases=['new_identifier']),
        enhanced_vpc_routing=dict(type='bool', default=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
    )

    required_if = [('command', 'delete', ['skip_final_cluster_snapshot']),
                   ('command', 'create', ['node_type', 'username',
                                          'password'])]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=required_if)

    command = module.params.get('command')
    skip_final_cluster_snapshot = module.params.get(
        'skip_final_cluster_snapshot')
    final_cluster_snapshot_identifier = module.params.get(
        'final_cluster_snapshot_identifier')
    # can't use module basic required_if check for this case
    if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
        module.fail_json(
            msg=
            "Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False"
        )

    conn = module.client('redshift')

    changed = True
    if command == 'create':
        (changed, cluster) = create_cluster(module, conn)

    elif command == 'facts':
        (changed, cluster) = describe_cluster(module, conn)

    elif command == 'delete':
        (changed, cluster) = delete_cluster(module, conn)

    elif command == 'modify':
        (changed, cluster) = modify_cluster(module, conn)

    module.exit_json(changed=changed, cluster=cluster)
Example #4
def main():
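    # Creates, updates or deletes a KMS key, including its policy, grants, tags and
    # key rotation settings (this appears to be the aws_kms module's entry point).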
    argument_spec = dict(alias=dict(aliases=['key_alias']),
                         policy_mode=dict(aliases=['mode'],
                                          choices=['grant', 'deny'],
                                          default='grant'),
                         policy_role_name=dict(aliases=['role_name']),
                         policy_role_arn=dict(aliases=['role_arn']),
                         policy_grant_types=dict(aliases=['grant_types'],
                                                 type='list'),
                         policy_clean_invalid_entries=dict(
                             aliases=['clean_invalid_entries'],
                             type='bool',
                             default=True),
                         key_id=dict(aliases=['key_arn']),
                         description=dict(),
                         enabled=dict(type='bool', default=True),
                         tags=dict(type='dict', default={}),
                         purge_tags=dict(type='bool', default=False),
                         grants=dict(type='list', default=[]),
                         policy=dict(type='json'),
                         purge_grants=dict(type='bool', default=False),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         enable_key_rotation=dict(type='bool'))

    module = AnsibleAWSModule(
        supports_check_mode=True,
        argument_spec=argument_spec,
        required_one_of=[['alias', 'key_id']],
    )

    mode = module.params['policy_mode']

    kms = module.client('kms')

    key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'),
                                      module.params.get('alias'))
    # We can't create keys with a specific ID; if we can't access the key, we'll have to fail
    if module.params.get('state') == 'present' and module.params.get(
            'key_id') and not key_metadata:
        module.fail_json(msg="Could not find key with id %s to update")

    if module.params.get('policy_grant_types') or mode == 'deny':
        module.deprecate(
            'Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile'
            ' and has been deprecated in favour of the policy option.',
            version='2.13')
        result = update_policy_grants(kms, module, key_metadata, mode)
        module.exit_json(**result)

    if module.params.get('state') == 'absent':
        if key_metadata is None:
            module.exit_json(changed=False)
        result = delete_key(kms, module, key_metadata)
        module.exit_json(**result)

    if key_metadata:
        key_details = get_key_details(kms, module, key_metadata['Arn'])
        result = update_key(kms, module, key_details)
        module.exit_json(**result)

    result = create_key(kms, module)
    module.exit_json(**result)
Example #5
def main():
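    # Creates, updates or deletes an Application Load Balancer and its listeners
    # (this appears to be the elb_application_lb module's entry point).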

    argument_spec = dict(access_logs_enabled=dict(type='bool'),
                         access_logs_s3_bucket=dict(type='str'),
                         access_logs_s3_prefix=dict(type='str'),
                         deletion_protection=dict(type='bool'),
                         http2=dict(type='bool'),
                         idle_timeout=dict(type='int'),
                         listeners=dict(type='list',
                                        elements='dict',
                                        options=dict(
                                            Protocol=dict(type='str',
                                                          required=True),
                                            Port=dict(type='int',
                                                      required=True),
                                            SslPolicy=dict(type='str'),
                                            Certificates=dict(type='list'),
                                            DefaultActions=dict(type='list',
                                                                required=True),
                                            Rules=dict(type='list'))),
                         name=dict(required=True, type='str'),
                         purge_listeners=dict(default=True, type='bool'),
                         purge_tags=dict(default=True, type='bool'),
                         subnets=dict(type='list'),
                         security_groups=dict(type='list'),
                         scheme=dict(default='internet-facing',
                                     choices=['internet-facing', 'internal']),
                         state=dict(choices=['present', 'absent'],
                                    default='present'),
                         tags=dict(type='dict'),
                         wait_timeout=dict(type='int'),
                         wait=dict(default=False, type='bool'),
                         purge_rules=dict(default=True, type='bool'))

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[('state', 'present', ['subnets', 'security_groups'])],
        required_together=[['access_logs_enabled', 'access_logs_s3_bucket']])

    # Quick check of listeners parameters
    listeners = module.params.get("listeners")
    if listeners is not None:
        for listener in listeners:
            if listener.get('Protocol') == 'HTTPS':
                if listener.get('SslPolicy') is None:
                    module.fail_json(
                        msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS")

                if listener.get('Certificates') is None:
                    module.fail_json(
                        msg="'Certificates' is a required listener dict key when Protocol = HTTPS")

    connection = module.client('elbv2')
    connection_ec2 = module.client('ec2')

    state = module.params.get("state")

    elb = ApplicationLoadBalancer(connection, connection_ec2, module)

    if state == 'present':
        create_or_update_elb(elb)
    else:
        delete_elb(elb)
Example #6
def main():
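    # Creates, updates or deletes a CloudFormation stack, optionally via a change set,
    # wrapping the boto3 client calls with retry/backoff (this appears to be the
    # cloudformation module's entry point).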
    argument_spec = dict(
        stack_name=dict(required=True),
        template_parameters=dict(required=False, type='dict', default={}),
        state=dict(default='present', choices=['present', 'absent']),
        template=dict(default=None, required=False, type='path'),
        notification_arns=dict(default=None, required=False),
        stack_policy=dict(default=None, required=False),
        disable_rollback=dict(default=False, type='bool'),
        on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
        create_timeout=dict(default=None, type='int'),
        template_url=dict(default=None, required=False),
        template_body=dict(default=None, required=False),
        template_format=dict(removed_in_version='2.14'),
        create_changeset=dict(default=False, type='bool'),
        changeset_name=dict(default=None, required=False),
        role_arn=dict(default=None, required=False),
        tags=dict(default=None, type='dict'),
        termination_protection=dict(default=None, type='bool'),
        events_limit=dict(default=200, type='int'),
        backoff_retries=dict(type='int', default=10, required=False),
        backoff_delay=dict(type='int', default=3, required=False),
        backoff_max_delay=dict(type='int', default=30, required=False),
        capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body'],
                            ['disable_rollback', 'on_create_failure']],
        supports_check_mode=True
    )

    invalid_capabilities = []
    user_capabilities = module.params.get('capabilities')
    for user_cap in user_capabilities:
        if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
            invalid_capabilities.append(user_cap)

    if invalid_capabilities:
        module.fail_json(msg="Specified capabilities are invalid : %r,"
                             " please check documentation for valid capabilities" % invalid_capabilities)

    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {
        'Capabilities': user_capabilities,
        'ClientRequestToken': to_native(uuid.uuid4()),
    }
    state = module.params['state']
    stack_params['StackName'] = module.params['stack_name']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as template_fh:
            stack_params['TemplateBody'] = template_fh.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']

    if module.params.get('notification_arns'):
        stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
    else:
        stack_params['NotificationARNs'] = []

    # can't check the policy when verifying.
    if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
        with open(module.params['stack_policy'], 'r') as stack_policy_fh:
            stack_params['StackPolicyBody'] = stack_policy_fh.read()

    template_parameters = module.params['template_parameters']

    stack_params['Parameters'] = []
    for k, v in template_parameters.items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = str(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})

    if isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('role_arn'):
        stack_params['RoleARN'] = module.params['role_arn']

    result = {}

    cfn = module.client('cloudformation')

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    backoff_wrapper = AWSRetry.jittered_backoff(
        retries=module.params.get('backoff_retries'),
        delay=module.params.get('backoff_delay'),
        max_delay=module.params.get('backoff_max_delay')
    )
    cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
    cfn.create_stack = backoff_wrapper(cfn.create_stack)
    cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
    cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
    cfn.update_stack = backoff_wrapper(cfn.update_stack)
    cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
    cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
    cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
    if boto_supports_termination_protection(cfn):
        cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)

    stack_info = get_stack_facts(cfn, stack_params['StackName'])

    if module.check_mode:
        if state == 'absent' and stack_info:
            module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
        elif state == 'absent' and not stack_info:
            module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
        elif state == 'present' and not stack_info:
            module.exit_json(changed=True, msg='New stack would be created', meta=[])
        else:
            module.exit_json(**check_mode_changeset(module, stack_params, cfn))

    if state == 'present':
        if not stack_info:
            result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
        elif module.params.get('create_changeset'):
            result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
        else:
            if module.params.get('termination_protection') is not None:
                update_termination_protection(module, cfn, stack_params['StackName'],
                                              bool(module.params.get('termination_protection')))
            result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))

        # format the stack output

        stack = get_stack_facts(cfn, stack_params['StackName'])
        if stack is not None:
            if result.get('stack_outputs') is None:
                # always define stack_outputs, but it may be empty
                result['stack_outputs'] = {}
            for output in stack.get('Outputs', []):
                result['stack_outputs'][output['OutputKey']] = output['OutputValue']
            stack_resources = []
            reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
            for res in reslist.get('StackResourceSummaries', []):
                stack_resources.append({
                    "logical_resource_id": res['LogicalResourceId'],
                    "physical_resource_id": res.get('PhysicalResourceId', ''),
                    "resource_type": res['ResourceType'],
                    "last_updated_time": res['LastUpdatedTimestamp'],
                    "status": res['ResourceStatus'],
                    "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
                })
            result['stack_resources'] = stack_resources

    elif state == 'absent':
        # The absent state is different because of the way delete_stack works:
        # it doesn't raise an error if the stack isn't found, so we must
        # describe the stack first.

        try:
            stack = get_stack_facts(cfn, stack_params['StackName'])
            if not stack:
                result = {'changed': False, 'output': 'Stack not found.'}
            else:
                if stack_params.get('RoleARN') is None:
                    cfn.delete_stack(StackName=stack_params['StackName'])
                else:
                    cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
                result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
                                         stack_params.get('ClientRequestToken', None))
        except Exception as err:
            module.fail_json_aws(err)

    module.exit_json(**result)
Example #7
def main():
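    # Imports (or re-imports) a certificate into ACM when state=present and deletes
    # matching certificates when state=absent (this appears to be the aws_acm module's
    # entry point).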
    argument_spec = dict(certificate=dict(),
                         certificate_arn=dict(aliases=['arn']),
                         certificate_chain=dict(),
                         domain_name=dict(aliases=['domain']),
                         name_tag=dict(aliases=['name']),
                         private_key=dict(no_log=True),
                         state=dict(default='present',
                                    choices=['present', 'absent']))
    required_if = [
        ['state', 'present', ['certificate', 'name_tag', 'private_key']],
    ]
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=required_if)
    acm = ACMServiceManager(module)

    # Check argument requirements
    if module.params['state'] == 'present':
        if module.params['certificate_arn']:
            module.fail_json(
                msg=
                "Parameter 'certificate_arn' is only valid if parameter 'state' is specified as 'absent'"
            )
    else:  # absent
        # exactly one of these should be specified
        absent_args = ['certificate_arn', 'domain_name', 'name_tag']
        if sum([(module.params[a] is not None) for a in absent_args]) != 1:
            for a in absent_args:
                module.debug("%s is %s" % (a, module.params[a]))
            module.fail_json(
                msg=
                "If 'state' is specified as 'absent' then exactly one of 'name_tag', certificate_arn' or 'domain_name' must be specified"
            )

    if module.params['name_tag']:
        tags = dict(Name=module.params['name_tag'])
    else:
        tags = None

    client = module.client('acm')

    # fetch the list of certificates currently in ACM
    certificates = acm.get_certificates(
        client=client,
        module=module,
        domain_name=module.params['domain_name'],
        arn=module.params['certificate_arn'],
        only_tags=tags)

    module.debug("Found %d corresponding certificates in ACM" %
                 len(certificates))

    if module.params['state'] == 'present':
        if len(certificates) > 1:
            msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params[
                'name_tag']
            module.fail_json(msg=msg, certificates=certificates)
        elif len(certificates) == 1:
            # update the existing certificate
            module.debug("Existing certificate found in ACM")
            old_cert = certificates[0]  # existing cert in ACM
            if ('tags' not in old_cert
                    or 'Name' not in old_cert['tags']
                    or old_cert['tags']['Name'] != module.params['name_tag']):
                # shouldn't happen
                module.fail_json(
                    msg="Internal error, unsure which certificate to update",
                    certificate=old_cert)

            if 'certificate' not in old_cert:
                # shouldn't happen
                module.fail_json(
                    msg=
                    "Internal error, unsure what the existing cert in ACM is",
                    certificate=old_cert)

            # Are the existing certificate in ACM and the local certificate the same?
            same = True
            same &= chain_compare(module, old_cert['certificate'],
                                  module.params['certificate'])
            if module.params['certificate_chain']:
                # Need to test this
                # not sure if Amazon appends the cert itself to the chain when self-signed
                same &= chain_compare(module, old_cert['certificate_chain'],
                                      module.params['certificate_chain'])
            else:
                # When there is no chain with a cert
                # it seems Amazon returns the cert itself as the chain
                same &= chain_compare(module, old_cert['certificate_chain'],
                                      module.params['certificate'])

            if same:
                module.debug(
                    "Existing certificate in ACM is the same, doing nothing")
                domain = acm.get_domain_of_cert(
                    client=client,
                    module=module,
                    arn=old_cert['certificate_arn'])
                module.exit_json(certificate=dict(
                    domain_name=domain, arn=old_cert['certificate_arn']),
                                 changed=False)
            else:
                module.debug(
                    "Existing certificate in ACM is different, overwriting")

                # update cert in ACM
                arn = acm.import_certificate(
                    client,
                    module,
                    certificate=module.params['certificate'],
                    private_key=module.params['private_key'],
                    certificate_chain=module.params['certificate_chain'],
                    arn=old_cert['certificate_arn'],
                    tags=tags)
                domain = acm.get_domain_of_cert(client=client,
                                                module=module,
                                                arn=arn)
                module.exit_json(certificate=dict(domain_name=domain, arn=arn),
                                 changed=True)
        else:  # len(certificates) == 0
            module.debug("No certificate in ACM. Creating new one.")
            arn = acm.import_certificate(
                client=client,
                module=module,
                certificate=module.params['certificate'],
                private_key=module.params['private_key'],
                certificate_chain=module.params['certificate_chain'],
                tags=tags)
            domain = acm.get_domain_of_cert(client=client,
                                            module=module,
                                            arn=arn)

            module.exit_json(certificate=dict(domain_name=domain, arn=arn),
                             changed=True)

    else:  # state == absent
        for cert in certificates:
            acm.delete_certificate(client, module, cert['certificate_arn'])
        module.exit_json(
            arns=[cert['certificate_arn'] for cert in certificates],
            changed=(len(certificates) > 0))
Example #8
def main():
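    # Creates, updates or deletes an ECS service, with extra botocore version checks
    # for newer features (this appears to be the ecs_service module's entry point).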
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False, default=[], type='list'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, default='', type='str'),
        role=dict(required=False, default='', type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10),
        force_new_deployment=dict(required=False, default=False, type='bool'),
        deployment_configuration=dict(required=False, default={}, type='dict'),
        placement_constraints=dict(required=False,
                                   default=[],
                                   type='list',
                                   options=dict(type=dict(type='str'),
                                                expression=dict(type='str'))),
        placement_strategy=dict(required=False,
                                default=[],
                                type='list',
                                options=dict(
                                    type=dict(type='str'),
                                    field=dict(type='str'),
                                )),
        health_check_grace_period_seconds=dict(required=False, type='int'),
        network_configuration=dict(required=False,
                                   type='dict',
                                   options=dict(
                                       subnets=dict(type='list'),
                                       security_groups=dict(type='list'),
                                       assign_public_ip=dict(type='bool'))),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        service_registries=dict(required=False, type='list', default=[]),
        scheduling_strategy=dict(required=False, choices=['DAEMON',
                                                          'REPLICA']))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('state', 'present',
                                            ['task_definition']),
                                           ('launch_type', 'FARGATE',
                                            ['network_configuration'])],
                              required_together=[['load_balancers', 'role']])

    if module.params['state'] == 'present' and module.params[
            'scheduling_strategy'] == 'REPLICA':
        if module.params['desired_count'] is None:
            module.fail_json(
                msg=
                'state is present, scheduling_strategy is REPLICA; missing desired_count'
            )

    service_mgr = EcsServiceManager(module)
    if module.params['network_configuration']:
        if not service_mgr.ecs_api_handles_network_configuration():
            module.fail_json(
                msg=
                'botocore needs to be version 1.7.44 or higher to use network configuration'
            )
        network_configuration = service_mgr.format_network_configuration(
            module.params['network_configuration'])
    else:
        network_configuration = None

    deployment_configuration = map_complex_type(
        module.params['deployment_configuration'],
        DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    deploymentConfiguration = snake_dict_to_camel_dict(
        deployment_configuration)
    serviceRegistries = list(
        map(snake_dict_to_camel_dict, module.params['service_registries']))

    try:
        existing = service_mgr.describe_service(module.params['cluster'],
                                                module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '" +
                         module.params['name'] + "' in cluster '" +
                         module.params['cluster'] + "': " + str(e))

    results = dict(changed=False)

    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.8.4 or higher to use launch_type'
            )
    if module.params['force_new_deployment']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.8.4 or higher to use force_new_deployment'
            )
    if module.params['health_check_grace_period_seconds']:
        if not module.botocore_at_least('1.8.20'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds'
            )

    if module.params['state'] == 'present':

        matching = False
        update = False

        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if module.params['force_new_deployment']:
                update = True
            elif service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = existing
            else:
                update = True

        if not matching:
            if not module.check_mode:

                role = module.params['role']
                clientToken = module.params['client_token']

                loadBalancers = []
                for loadBalancer in module.params['load_balancers']:
                    if 'containerPort' in loadBalancer:
                        loadBalancer['containerPort'] = int(
                            loadBalancer['containerPort'])
                    loadBalancers.append(loadBalancer)

                if update:
                    # check various parameters and botocore versions and give a
                    # helpful error if botocore is not new enough for a feature

                    if module.params['scheduling_strategy']:
                        if not module.botocore_at_least('1.10.37'):
                            module.fail_json(
                                msg=
                                'botocore needs to be version 1.10.37 or higher to use scheduling_strategy'
                            )
                        elif (existing['schedulingStrategy']
                              ) != module.params['scheduling_strategy']:
                            module.fail_json(
                                msg=
                                "It is not possible to update the scheduling strategy of an existing service"
                            )

                    if module.params['service_registries']:
                        if not module.botocore_at_least('1.9.15'):
                            module.fail_json(
                                msg=
                                'botocore needs to be version 1.9.15 or higher to use service_registries'
                            )
                        elif (existing['serviceRegistries']
                              or []) != serviceRegistries:
                            module.fail_json(
                                msg=
                                "It is not possible to update the service registries of an existing service"
                            )

                    if (existing['loadBalancers'] or []) != loadBalancers:
                        module.fail_json(
                            msg=
                            "It is not possible to update the load balancers of an existing service"
                        )

                    # update required
                    response = service_mgr.update_service(
                        module.params['name'], module.params['cluster'],
                        module.params['task_definition'],
                        module.params['desired_count'],
                        deploymentConfiguration, network_configuration,
                        module.params['health_check_grace_period_seconds'],
                        module.params['force_new_deployment'])

                else:
                    try:
                        response = service_mgr.create_service(
                            module.params['name'], module.params['cluster'],
                            module.params['task_definition'], loadBalancers,
                            module.params['desired_count'], clientToken, role,
                            deploymentConfiguration,
                            module.params['placement_constraints'],
                            module.params['placement_strategy'],
                            module.params['health_check_grace_period_seconds'],
                            network_configuration, serviceRegistries,
                            module.params['launch_type'],
                            module.params['scheduling_strategy'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't create service")

                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the service being deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(module.params['name'],
                                                   module.params['cluster'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't delete service")
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            module.fail_json(msg="Service '" + module.params['name'] +
                             " not found.")
            return
        # the service exists; wait for it to become INACTIVE before reporting the result
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'],
                                                    module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
        else:
            # the service never reached INACTIVE within the allotted retries
            module.fail_json(msg="Service still not deleted after " +
                             str(repeat) + " tries of " + str(delay) +
                             " seconds each.")
            return

    module.exit_json(**results)
Example #9
def main():
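    # General-purpose S3 operations: get/put/delete objects, create/delete buckets,
    # list keys and generate presigned URLs, selected via 'mode' (this appears to be
    # the aws_s3 module's entry point).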
    argument_spec = dict(bucket=dict(required=True),
                         dest=dict(default=None, type='path'),
                         encrypt=dict(default=True, type='bool'),
                         encryption_mode=dict(choices=['AES256', 'aws:kms'],
                                              default='AES256'),
                         expiry=dict(default=600,
                                     type='int',
                                     aliases=['expiration']),
                         headers=dict(type='dict'),
                         marker=dict(default=""),
                         max_keys=dict(default=1000, type='int'),
                         metadata=dict(type='dict'),
                         mode=dict(choices=[
                             'get', 'put', 'delete', 'create', 'geturl',
                             'getstr', 'delobj', 'list'
                         ],
                                   required=True),
                         object=dict(),
                         permission=dict(type='list', default=['private']),
                         version=dict(default=None),
                         overwrite=dict(aliases=['force'], default='always'),
                         prefix=dict(default=""),
                         retries=dict(aliases=['retry'], type='int',
                                      default=0),
                         s3_url=dict(aliases=['S3_URL']),
                         dualstack=dict(default='no', type='bool'),
                         rgw=dict(default='no', type='bool'),
                         src=dict(),
                         ignore_nonexistent_bucket=dict(default=False,
                                                        type='bool'),
                         encryption_kms_key_id=dict())
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[['mode', 'put', ['src', 'object']],
                     ['mode', 'get', ['dest', 'object']],
                     ['mode', 'getstr', ['object']],
                     ['mode', 'geturl', ['object']]],
    )

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = module.params.get('expiry')
    dest = module.params.get('dest', '')
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    dualstack = module.params.get('dualstack')
    rgw = module.params.get('rgw')
    src = module.params.get('src')
    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')

    object_canned_acl = [
        "private", "public-read", "public-read-write", "aws-exec-read",
        "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"
    ]
    bucket_canned_acl = [
        "private", "public-read", "public-read-write", "authenticated-read"
    ]

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    if overwrite == 'different' and not HAS_MD5:
        module.fail_json(
            msg=
            'overwrite=different is unavailable: ETag calculation requires MD5 support'
        )

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module,
                                                                  boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = module.params['object']
        # If there is a top level object, do nothing - if the object starts with /
        # remove the leading character to maintain compatibility with Ansible versions < 2.4
        if obj.startswith('/'):
            obj = obj[1:]

    # Bucket deletion does not require obj.  Prevents ambiguity with delobj.
    if obj and mode == "delete":
        module.fail_json(msg='Parameter obj cannot be used with mode=delete')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
        module.fail_json(msg='dualstack only applies to AWS S3')

    if dualstack and not module.botocore_at_least('1.4.45'):
        module.fail_json(msg='dualstack requires botocore >= 1.4.45')

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)

    validate = not ignore_nonexistent_bucket

    # separate types of ACLs
    bucket_acl = [
        acl for acl in module.params.get('permission')
        if acl in bucket_canned_acl
    ]
    object_acl = [
        acl for acl in module.params.get('permission')
        if acl in object_canned_acl
    ]
    error_acl = [
        acl for acl in module.params.get('permission')
        if acl not in bucket_canned_acl and acl not in object_canned_acl
    ]
    if error_acl:
        module.fail_json(msg='Unknown permission specified: %s' % error_acl)

    # First, we check to see if the bucket exists, we get "bucket" returned.
    bucketrtn = bucket_check(module, s3, bucket, validate=validate)

    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
        module.fail_json(msg="Source bucket cannot be found.")

    if mode == 'get':
        keyrtn = key_check(module,
                           s3,
                           bucket,
                           obj,
                           version=version,
                           validate=validate)
        if keyrtn is False:
            if version:
                module.fail_json(
                    msg="Key %s with version id %s does not exist." %
                    (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

        if path_check(dest) and overwrite != 'always':
            if overwrite == 'never':
                module.exit_json(
                    msg=
                    "Local object already exists and overwrite is disabled.",
                    changed=False)
            if etag_compare(module, dest, s3, bucket, obj, version=version):
                module.exit_json(
                    msg=
                    "Local and remote object are identical, ignoring. Use overwrite=always parameter to force.",
                    changed=False)

        try:
            download_s3file(module,
                            s3,
                            bucket,
                            obj,
                            dest,
                            retries,
                            version=version)
        except Sigv4Required:
            s3 = get_s3_connection(module,
                                   aws_connect_kwargs,
                                   location,
                                   rgw,
                                   s3_url,
                                   sig_4=True)
            download_s3file(module,
                            s3,
                            bucket,
                            obj,
                            dest,
                            retries,
                            version=version)

    if mode == 'put':

        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
        # these were separated into the variables bucket_acl and object_acl above

        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist")

        if bucketrtn:
            keyrtn = key_check(module,
                               s3,
                               bucket,
                               obj,
                               version=version,
                               validate=validate)
        else:
            # If the bucket doesn't exist we should create it.
            # only use valid bucket acls for create_bucket function
            module.params['permission'] = bucket_acl
            create_bucket(module, s3, bucket, location)
            # a freshly created bucket has no existing key to compare against
            keyrtn = False

        if keyrtn and overwrite != 'always':
            if overwrite == 'never' or etag_compare(module, src, s3, bucket,
                                                    obj):
                # Return the download URL for the existing object
                get_download_url(module,
                                 s3,
                                 bucket,
                                 obj,
                                 expiry,
                                 changed=False)

        # only use valid object acls for the upload_s3file function
        module.params['permission'] = object_acl
        upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt,
                      headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required")
        if bucket:
            deletertn = delete_key(module, s3, bucket, obj)
            if deletertn is True:
                module.exit_json(msg="Object deleted from bucket %s." % bucket,
                                 changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            deletertn = delete_bucket(module, s3, bucket)
            if deletertn is True:
                module.exit_json(
                    msg="Bucket %s and all keys have been deleted." % bucket,
                    changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Support for listing a set of keys
    if mode == 'list':
        exists = bucket_check(module, s3, bucket)

        # If the bucket does not exist then bail out
        if not exists:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)

        list_keys(module, s3, bucket, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':

        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
        # these were separated above into the variables bucket_acl and object_acl

        if bucket and not obj:
            if bucketrtn:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                # only use valid bucket acls when creating the bucket
                module.params['permission'] = bucket_acl
                module.exit_json(msg="Bucket created successfully",
                                 changed=create_bucket(module, s3, bucket,
                                                       location))
        if bucket and obj:
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn:
                if key_check(module, s3, bucket, dirobj):
                    module.exit_json(
                        msg="Bucket %s and key %s already exist." %
                        (bucket, obj),
                        changed=False)
                else:
                    # setting valid object acls for the create_dirkey function
                    module.params['permission'] = object_acl
                    create_dirkey(module, s3, bucket, dirobj, encrypt)
            else:
                # only use valid bucket acls for the create_bucket function
                module.params['permission'] = bucket_acl
                created = create_bucket(module, s3, bucket, location)
                # only use valid object acls for the create_dirkey function
                module.params['permission'] = object_acl
                create_dirkey(module, s3, bucket, dirobj, encrypt)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if not bucket or not obj:
            module.fail_json(msg="Bucket and Object parameters must be set")

        keyrtn = key_check(module,
                           s3,
                           bucket,
                           obj,
                           version=version,
                           validate=validate)
        if keyrtn:
            get_download_url(module, s3, bucket, obj, expiry)
        else:
            module.fail_json(msg="Key %s does not exist." % obj)

    if mode == 'getstr':
        if bucket and obj:
            keyrtn = key_check(module,
                               s3,
                               bucket,
                               obj,
                               version=version,
                               validate=validate)
            if keyrtn:
                try:
                    download_s3str(module, s3, bucket, obj, version=version)
                except Sigv4Required:
                    s3 = get_s3_connection(module,
                                           aws_connect_kwargs,
                                           location,
                                           rgw,
                                           s3_url,
                                           sig_4=True)
                    download_s3str(module, s3, bucket, obj, version=version)
            elif version is not None:
                module.fail_json(
                    msg="Key %s with version id %s does not exist." %
                    (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

    module.exit_json(failed=False)
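
The put branch above short-circuits when overwrite is not 'always' and etag_compare reports the local file and the remote object as identical. That helper is not shown in this excerpt; a minimal sketch of how such a comparison could look, assuming a boto3 S3 client and a single-part upload (where the ETag is the plain hex MD5), is:

import hashlib

def etag_compare(module, local_file, s3, bucket, obj):
    # Sketch only: hash the local file and compare it with the object's ETag.
    # Multipart uploads use a composite ETag, so this check is only meaningful
    # for single-part objects.
    md5 = hashlib.md5()
    with open(local_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            md5.update(chunk)
    remote_etag = s3.head_object(Bucket=bucket, Key=obj)['ETag'].strip('"')
    return md5.hexdigest() == remote_etag
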
Exemplo n.º 10
0
def main():
    protocols = [
        'http',
        'https',
        'email',
        'email_json',
        'sms',
        'sqs',
        'application',
        'lambda',
    ]

    argument_spec = dict(
        msg=dict(required=True, aliases=['default']),
        subject=dict(),
        topic=dict(required=True),
        message_attributes=dict(type='dict'),
        message_structure=dict(choices=['json', 'string'], default='json'),
    )

    for p in protocols:
        argument_spec[p] = dict()

    module = AnsibleAWSModule(argument_spec=argument_spec)

    sns_kwargs = dict(
        Message=module.params['msg'],
        Subject=module.params['subject'],
        MessageStructure=module.params['message_structure'],
    )

    if module.params['message_attributes']:
        if module.params['message_structure'] != 'string':
            module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
        sns_kwargs['MessageAttributes'] = module.params['message_attributes']

    dict_msg = {
        'default': sns_kwargs['Message']
    }

    for p in protocols:
        if module.params[p]:
            if sns_kwargs['MessageStructure'] != 'json':
                module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
            dict_msg[p.replace('_', '-')] = module.params[p]

    client = module.client('sns')

    topic = module.params['topic']
    if ':' in topic:
        # Short names can't contain ':' so we'll assume this is the full ARN
        sns_kwargs['TopicArn'] = topic
    else:
        sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)

    if not sns_kwargs['TopicArn']:
        module.fail_json(msg='Could not find topic: {0}'.format(topic))

    if sns_kwargs['MessageStructure'] == 'json':
        sns_kwargs['Message'] = json.dumps(dict_msg)

    try:
        result = client.publish(**sns_kwargs)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to publish message')

    module.exit_json(msg='OK', message_id=result['MessageId'])
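
When the topic parameter is a short name rather than a full ARN, the code above defers to arn_topic_lookup, which is not shown here. A minimal sketch, assuming the boto3 SNS client and a match on the trailing segment of each TopicArn:

def arn_topic_lookup(module, client, short_topic):
    # Sketch only: page through list_topics and return the first ARN whose
    # final ":" segment equals the short topic name, or None if nothing matches.
    paginator = client.get_paginator('list_topics')
    for page in paginator.paginate():
        for topic in page['Topics']:
            if topic['TopicArn'].split(':')[-1] == short_topic:
                return topic['TopicArn']
    return None

Returning None lets the caller emit the "Could not find topic" failure shown above.
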
Exemplo n.º 11
0
def main():
    argument_spec = dict(eni_id=dict(default=None, type='str'),
                         instance_id=dict(default=None, type='str'),
                         private_ip_address=dict(type='str'),
                         subnet_id=dict(type='str'),
                         description=dict(type='str'),
                         security_groups=dict(default=[], type='list'),
                         device_index=dict(default=0, type='int'),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         force_detach=dict(default='no', type='bool'),
                         source_dest_check=dict(default=None, type='bool'),
                         delete_on_termination=dict(default=None, type='bool'),
                         secondary_private_ip_addresses=dict(default=None,
                                                             type='list'),
                         purge_secondary_private_ip_addresses=dict(
                             default=False, type='bool'),
                         secondary_private_ip_address_count=dict(default=None,
                                                                 type='int'),
                         allow_reassignment=dict(default=False, type='bool'),
                         attached=dict(default=None, type='bool'))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              check_boto3=False,
                              mutually_exclusive=[[
                                  'secondary_private_ip_addresses',
                                  'secondary_private_ip_address_count'
                              ]],
                              required_if=([
                                  ('state', 'absent', ['eni_id']),
                                  ('attached', True, ['instance_id']),
                                  ('purge_secondary_private_ip_addresses',
                                   True, ['secondary_private_ip_addresses'])
                              ]))

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
            vpc_connection = connect_to_aws(boto.vpc, region,
                                            **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json_aws(e)
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")

    if state == 'present':
        eni = uniquely_find_eni(connection, module)
        if eni is None:
            subnet_id = module.params.get("subnet_id")
            if subnet_id is None:
                module.fail_json(
                    msg="subnet_id is required when creating a new ENI")

            vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
            create_eni(connection, vpc_id, module)
        else:
            vpc_id = eni.vpc_id
            modify_eni(connection, vpc_id, module, eni)

    elif state == 'absent':
        delete_eni(connection, module)
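
Creating a new ENI requires the VPC of the target subnet, which the code above obtains through _get_vpc_id. A minimal sketch against the same boto2 connections used in this example (assuming BotoServerError is imported from boto.exception):

from boto.exception import BotoServerError

def _get_vpc_id(vpc_connection, module, subnet_id):
    # Sketch only: resolve the subnet and return the VPC it belongs to.
    try:
        subnets = vpc_connection.get_all_subnets(subnet_ids=[subnet_id])
    except BotoServerError as e:
        module.fail_json_aws(e)
    if not subnets:
        module.fail_json(msg="Subnet %s could not be found" % subnet_id)
    return subnets[0].vpc_id
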
Exemplo n.º 12
0
def main():
    argument_spec = dict(dhcp_options_id=dict(type='str', default=None),
                         domain_name=dict(type='str', default=None),
                         dns_servers=dict(type='list', default=None),
                         ntp_servers=dict(type='list', default=None),
                         netbios_name_servers=dict(type='list', default=None),
                         netbios_node_type=dict(type='int', default=None),
                         vpc_id=dict(type='str', default=None),
                         delete_old=dict(type='bool', default=True),
                         inherit_existing=dict(type='bool', default=False),
                         tags=dict(type='dict',
                                   default=None,
                                   aliases=['resource_tags']),
                         state=dict(type='str',
                                    default='present',
                                    choices=['present', 'absent']))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              check_boto3=False,
                              supports_check_mode=True)

    params = module.params
    found = False
    changed = False
    new_options = collections.defaultdict(lambda: None)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, boto_params = get_aws_connection_info(module)
    connection = connect_to_aws(boto.vpc, region, **boto_params)

    existing_options = None

    # First check if we were given a dhcp_options_id
    if not params['dhcp_options_id']:
        # No, so create new_options from the parameters
        if params['dns_servers'] is not None:
            new_options['domain-name-servers'] = params['dns_servers']
        if params['netbios_name_servers'] is not None:
            new_options['netbios-name-servers'] = params[
                'netbios_name_servers']
        if params['ntp_servers'] is not None:
            new_options['ntp-servers'] = params['ntp_servers']
        if params['domain_name'] is not None:
            # needs to be a list for comparison with boto objects later
            new_options['domain-name'] = [params['domain_name']]
        if params['netbios_node_type'] is not None:
            # needs to be a list for comparison with boto objects later
            new_options['netbios-node-type'] = [
                str(params['netbios_node_type'])
            ]
        # If we were given a vpc_id then we need to look at the options on that
        if params['vpc_id']:
            existing_options = fetch_dhcp_options_for_vpc(
                connection, params['vpc_id'])
            # if we've been asked to inherit existing options, do that now
            if params['inherit_existing']:
                if existing_options:
                    for option in [
                            'domain-name-servers', 'netbios-name-servers',
                            'ntp-servers', 'domain-name', 'netbios-node-type'
                    ]:
                        if existing_options.options.get(
                                option) and new_options[option] != [] and (
                                    not new_options[option]
                                    or [''] == new_options[option]):
                            new_options[option] = existing_options.options.get(
                                option)

            # Do the vpc's dhcp options already match what we're asked for? if so we are done
            if existing_options and new_options == existing_options.options:
                module.exit_json(changed=changed,
                                 new_options=new_options,
                                 dhcp_options_id=existing_options.id)

        # If no vpc_id was given, or the options don't match then look for an existing set using tags
        found, dhcp_option = match_dhcp_options(connection, params['tags'],
                                                new_options)

    # Now let's cover the case where there are existing options that we were told about by id
    # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
    else:
        supplied_options = connection.get_all_dhcp_options(
            filters={'dhcp-options-id': params['dhcp_options_id']})
        if len(supplied_options) != 1:
            if params['state'] != 'absent':
                module.fail_json(
                    msg="a dhcp_options_id was supplied, but it does not exist")
        else:
            found = True
            dhcp_option = supplied_options[0]
            if params['state'] != 'absent' and params['tags']:
                ensure_tags(module, connection, dhcp_option.id, params['tags'],
                            False, module.check_mode)

    # Now we have the dhcp options set, let's do the necessary

    # if we found options we were asked to remove then try to do so
    if params['state'] == 'absent':
        if not module.check_mode:
            if found:
                changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
        module.exit_json(changed=changed, new_options={})

    # otherwise if we haven't found the required options we have something to do
    elif not module.check_mode and not found:

        # create some dhcp options if we weren't able to use existing ones
        if not found:
            # Convert netbios-node-type and domain-name back to strings
            if new_options['netbios-node-type']:
                new_options['netbios-node-type'] = new_options[
                    'netbios-node-type'][0]
            if new_options['domain-name']:
                new_options['domain-name'] = new_options['domain-name'][0]

            # create the new dhcp options set requested
            dhcp_option = connection.create_dhcp_options(
                new_options['domain-name'], new_options['domain-name-servers'],
                new_options['ntp-servers'],
                new_options['netbios-name-servers'],
                new_options['netbios-node-type'])

            # wait for dhcp option to be accessible
            found_dhcp_opt = False
            start_time = time()
            try:
                found_dhcp_opt = retry_not_found(
                    connection.get_all_dhcp_options,
                    dhcp_options_ids=[dhcp_option.id])
            except EC2ResponseError as e:
                module.fail_json_aws(e, msg="Failed to describe DHCP options")
            if not found_dhcp_opt:
                module.fail_json(msg="Failed to wait for {0} to be available.".
                                 format(dhcp_option.id))

            changed = True
            if params['tags']:
                ensure_tags(module, connection, dhcp_option.id, params['tags'],
                            False, module.check_mode)

    # If we were given a vpc_id, then attach the options we now have to that before we finish
    if params['vpc_id'] and not module.check_mode:
        changed = True
        connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
        # and remove old ones if that was requested
        if params['delete_old'] and existing_options:
            remove_dhcp_options_by_id(connection, existing_options.id)

    module.exit_json(changed=changed,
                     new_options=new_options,
                     dhcp_options_id=dhcp_option.id)
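
The inherit/compare logic above starts from fetch_dhcp_options_for_vpc, which is not included in this excerpt. A minimal sketch against the same boto2 VPC connection:

def fetch_dhcp_options_for_vpc(connection, vpc_id):
    # Sketch only: find the DHCP option set currently associated with the VPC,
    # or return None if the VPC (or its option set) cannot be resolved.
    vpcs = connection.get_all_vpcs(vpc_ids=[vpc_id])
    if len(vpcs) != 1:
        return None
    dhcp_options = connection.get_all_dhcp_options(
        dhcp_options_ids=[vpcs[0].dhcp_options_id])
    if len(dhcp_options) != 1:
        return None
    return dhcp_options[0]
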
Exemplo n.º 13
0
def main():
    argument_spec = dict(
        name=dict(required=True, type='str'),
        expiration_days=dict(type='int'),
        expiration_date=dict(),
        noncurrent_version_expiration_days=dict(type='int'),
        noncurrent_version_storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']),
        noncurrent_version_transition_days=dict(type='int'),
        noncurrent_version_transitions=dict(type='list'),
        prefix=dict(),
        requester_pays=dict(type='bool', removed_in_version='2.14'),
        rule_id=dict(),
        state=dict(default='present', choices=['present', 'absent']),
        status=dict(default='enabled', choices=['enabled', 'disabled']),
        storage_class=dict(default='glacier', type='str', choices=['glacier', 'onezone_ia', 'standard_ia']),
        transition_days=dict(type='int'),
        transition_date=dict(),
        transitions=dict(type='list'),
        purge_transitions=dict(default='yes', type='bool')
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              mutually_exclusive=[
                                  ['expiration_days', 'expiration_date'],
                                  ['expiration_days', 'transition_date'],
                                  ['transition_days', 'transition_date'],
                                  ['transition_days', 'expiration_date'],
                                  ['transition_days', 'transitions'],
                                  ['transition_date', 'transitions'],
                                  ['noncurrent_version_transition_days', 'noncurrent_version_transitions'],
                              ],)

    if not HAS_DATEUTIL:
        module.fail_json(msg='dateutil required for this module')

    client = module.client('s3')

    expiration_date = module.params.get("expiration_date")
    transition_date = module.params.get("transition_date")
    state = module.params.get("state")

    if state == 'present' and module.params["status"] == "enabled":  # allow deleting/disabling a rule by id/prefix

        required_when_present = ('expiration_date', 'expiration_days', 'transition_date',
                                 'transition_days', 'transitions', 'noncurrent_version_expiration_days',
                                 'noncurrent_version_transition_days',
                                 'noncurrent_version_transitions')
        for param in required_when_present:
            if module.params.get(param):
                break
        else:
            msg = "one of the following is required when 'state' is 'present': %s" % ', '.join(required_when_present)
            module.fail_json(msg=msg)
    # If expiration_date set, check string is valid
    if expiration_date is not None:
        try:
            datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError as e:
            module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    if transition_date is not None:
        try:
            datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError as e:
            module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    if state == 'present':
        create_lifecycle_rule(client, module)
    elif state == 'absent':
        destroy_lifecycle_rule(client, module)
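
The two strptime checks above accept only one timestamp shape. A short illustration of what passes and what fails, using the same format string:

import datetime

LIFECYCLE_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.000Z"

# Accepted: the literal ".000Z" suffix is part of the format string.
datetime.datetime.strptime("2024-01-01T00:00:00.000Z", LIFECYCLE_DATE_FORMAT)

# Rejected: a bare date (or any other suffix) raises ValueError, which the
# module turns into the fail_json message above.
try:
    datetime.datetime.strptime("2024-01-01", LIFECYCLE_DATE_FORMAT)
except ValueError:
    pass

Note that the parse itself only enforces the ".000Z" suffix; the midnight requirement quoted in the message comes from S3's lifecycle rules rather than from this check.
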
Exemplo n.º 14
0
def main():
    argument_spec = dict(state=dict(required=True,
                                    choices=['present', 'absent']),
                         arn=dict(required=False, type='str'),
                         family=dict(required=False, type='str'),
                         revision=dict(required=False, type='int'),
                         force_create=dict(required=False,
                                           default=False,
                                           type='bool'),
                         containers=dict(required=False, type='list'),
                         network_mode=dict(required=False,
                                           default='bridge',
                                           choices=[
                                               'default', 'bridge', 'host',
                                               'none', 'awsvpc'
                                           ],
                                           type='str'),
                         task_role_arn=dict(required=False,
                                            default='',
                                            type='str'),
                         execution_role_arn=dict(required=False,
                                                 default='',
                                                 type='str'),
                         volumes=dict(required=False, type='list'),
                         launch_type=dict(required=False,
                                          choices=['EC2', 'FARGATE']),
                         cpu=dict(),
                         memory=dict(required=False, type='str'))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE',
                                            ['cpu', 'memory'])])

    task_to_describe = None
    task_mgr = EcsTaskManager(module)
    results = dict(changed=False)

    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.8.4 or higher to use launch_type'
            )

    if module.params['execution_role_arn']:
        if not module.botocore_at_least('1.10.44'):
            module.fail_json(
                msg=
                'botocore needs to be version 1.10.44 or higher to use execution_role_arn'
            )

    if module.params['containers']:
        for container in module.params['containers']:
            for environment in container.get('environment', []):
                environment['value'] = to_text(environment['value'])

    if module.params['state'] == 'present':
        if 'containers' not in module.params or not module.params['containers']:
            module.fail_json(
                msg=
                "To use task definitions, a list of containers must be specified"
            )

        if 'family' not in module.params or not module.params['family']:
            module.fail_json(
                msg="To use task definitions, a family must be specified")

        network_mode = module.params['network_mode']
        launch_type = module.params['launch_type']
        if launch_type == 'FARGATE' and network_mode != 'awsvpc':
            module.fail_json(
                msg="To use FARGATE launch type, network_mode must be awsvpc")

        family = module.params['family']
        existing_definitions_in_family = task_mgr.describe_task_definitions(
            module.params['family'])

        if 'revision' in module.params and module.params['revision']:
            # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
            revision = int(module.params['revision'])

            # A revision has been explicitly specified. Attempt to locate a matching revision
            tasks_defs_for_revision = [
                td for td in existing_definitions_in_family
                if td['revision'] == revision
            ]
            existing = tasks_defs_for_revision[0] if len(
                tasks_defs_for_revision) > 0 else None

            if existing and existing['status'] != "ACTIVE":
                # We cannot reactivate an inactive revision
                module.fail_json(
                    msg=
                    "A task in family '%s' already exists for revision %d, but it is inactive"
                    % (family, revision))
            elif not existing:
                if not existing_definitions_in_family and revision != 1:
                    module.fail_json(
                        msg=
                        "You have specified a revision of %d but a created revision would be 1"
                        % revision)
                elif existing_definitions_in_family and existing_definitions_in_family[
                        -1]['revision'] + 1 != revision:
                    module.fail_json(
                        msg=
                        "You have specified a revision of %d but a created revision would be %d"
                        % (revision,
                           existing_definitions_in_family[-1]['revision'] + 1))
        else:
            existing = None

            def _right_has_values_of_left(left, right):
                # Make sure the values are equivalent for everything left has
                for k, v in left.items():
                    if not ((not v and (k not in right or not right[k])) or
                            (k in right and v == right[k])):
                        # We don't care about list ordering because ECS can change things
                        if isinstance(v, list) and k in right:
                            left_list = v
                            right_list = right[k] or []

                            if len(left_list) != len(right_list):
                                return False

                            for list_val in left_list:
                                if list_val not in right_list:
                                    return False
                        else:
                            return False

                # Make sure right doesn't have anything that left doesn't
                for k, v in right.items():
                    if v and k not in left:
                        return False

                return True

            def _task_definition_matches(requested_volumes,
                                         requested_containers,
                                         requested_task_role_arn,
                                         existing_task_definition):
                if td['status'] != "ACTIVE":
                    return None

                if requested_task_role_arn != td.get('taskRoleArn', ""):
                    return None

                existing_volumes = td.get('volumes', []) or []

                if len(requested_volumes) != len(existing_volumes):
                    # Nope.
                    return None

                if len(requested_volumes) > 0:
                    for requested_vol in requested_volumes:
                        found = False

                        for actual_vol in existing_volumes:
                            if _right_has_values_of_left(
                                    requested_vol, actual_vol):
                                found = True
                                break

                        if not found:
                            return None

                existing_containers = td.get('containerDefinitions', []) or []

                if len(requested_containers) != len(existing_containers):
                    # Nope.
                    return None

                for requested_container in requested_containers:
                    found = False

                    for actual_container in existing_containers:
                        if _right_has_values_of_left(requested_container,
                                                     actual_container):
                            found = True
                            break

                    if not found:
                        return None

                return existing_task_definition

            # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
            for td in existing_definitions_in_family:
                requested_volumes = module.params['volumes'] or []
                requested_containers = module.params['containers'] or []
                requested_task_role_arn = module.params['task_role_arn']
                existing = _task_definition_matches(requested_volumes,
                                                    requested_containers,
                                                    requested_task_role_arn,
                                                    td)

                if existing:
                    break

        if existing and not module.params.get('force_create'):
            # Awesome. Have an existing one. Nothing to do.
            results['taskdefinition'] = existing
        else:
            if not module.check_mode:
                # Doesn't exist. create it.
                volumes = module.params.get('volumes', []) or []
                results['taskdefinition'] = task_mgr.register_task(
                    module.params['family'], module.params['task_role_arn'],
                    module.params['execution_role_arn'],
                    module.params['network_mode'], module.params['containers'],
                    volumes, module.params['launch_type'],
                    module.params['cpu'], module.params['memory'])
            results['changed'] = True

    elif module.params['state'] == 'absent':
        # When de-registering a task definition, we can specify the ARN OR the family and revision.
        if module.params['arn'] is not None:
            task_to_describe = module.params['arn']
        elif module.params['family'] is not None and module.params['revision'] is not None:
            task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
        else:
            module.fail_json(
                msg="To use task definitions, an arn or family and revision must be specified")

        existing = task_mgr.describe_task(task_to_describe)

        if not existing:
            pass
        else:
            # It exists, so we should delete it and mark changed. Return info about the task definition deleted
            results['taskdefinition'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    task_mgr.deregister_task(task_to_describe)
                results['changed'] = True

    module.exit_json(**results)
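
The revision-matching logic above relies on task_mgr.describe_task_definitions to return every revision in the family. That manager class is not shown here; a minimal sketch of such a helper, assuming a boto3 ECS client:

def describe_task_definitions(ecs_client, family):
    # Sketch only: collect every revision in the family (ACTIVE and INACTIVE)
    # and return the full definitions sorted by revision number.
    arns = []
    for status in ('ACTIVE', 'INACTIVE'):
        paginator = ecs_client.get_paginator('list_task_definitions')
        for page in paginator.paginate(familyPrefix=family, status=status):
            arns.extend(page['taskDefinitionArns'])
    definitions = [
        ecs_client.describe_task_definition(taskDefinition=arn)['taskDefinition']
        for arn in arns
    ]
    return sorted(definitions, key=lambda td: td['revision'])
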
Exemplo n.º 15
0
def main():
    argument_spec = dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=False, type='str'),  # R S P
        task_definition=dict(required=False, type='str'),  # R* S*
        overrides=dict(required=False, type='dict'),  # R S
        count=dict(required=False, type='int'),  # R
        task=dict(required=False, type='str'),  # P*
        container_instances=dict(required=False, type='list'),  # S*
        started_by=dict(required=False, type='str'),  # R S
        network_configuration=dict(required=False, type='dict'),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        tags=dict(required=False, type='dict'))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE',
                                            ['network_configuration'])])

    # Validate Inputs
    if module.params['operation'] == 'run':
        if module.params['task_definition'] is None:
            module.fail_json(
                msg="To run a task, a task_definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if module.params['operation'] == 'start':
        if module.params['task_definition'] is None:
            module.fail_json(
                msg="To start a task, a task_definition must be specified")
        if module.params['container_instances'] is None:
            module.fail_json(
                msg="To start a task, container instances must be specified")
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if module.params['operation'] == 'stop':
        if module.params['task'] is None:
            module.fail_json(msg="To stop a task, a task must be specified")
        if module.params['task_definition'] is None:
            module.fail_json(
                msg="To stop a task, a task definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)

    if module.params[
            'network_configuration'] and not service_mgr.ecs_api_handles_network_configuration(
            ):
        module.fail_json(
            msg=
            'botocore needs to be version 1.7.44 or higher to use network configuration'
        )

    if module.params[
            'launch_type'] and not service_mgr.ecs_api_handles_launch_type():
        module.fail_json(
            msg=
            'botocore needs to be version 1.8.4 or higher to use launch type')

    if module.params['tags']:
        if not service_mgr.ecs_api_handles_tags():
            module.fail_json(msg=missing_required_lib("botocore >= 1.12.46",
                                                      reason="to use tags"))
        if not service_mgr.ecs_task_long_format_enabled():
            module.fail_json(
                msg=
                "Cannot set task tags: long format task arns are required to set tags"
            )

    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list,
                                      status_type)

    results = dict(changed=False)
    if module.params['operation'] == 'run':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'],
                    module.params['launch_type'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'start':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'stop':
        if existing:
            results['task'] = existing
        else:
            if not module.check_mode:
                # it exists, so we should delete it and mark changed.
                # return info about the cluster deleted
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'], module.params['task'])
            results['changed'] = True

    module.exit_json(**results)
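
The ecs_api_handles_* capability checks used above are simple botocore version gates; the minimum versions are spelled out in the fail_json messages. A minimal sketch of one such check, assuming a LooseVersion comparison:

import botocore
from distutils.version import LooseVersion

def ecs_api_handles_network_configuration():
    # Sketch only: awsvpc network configuration requires botocore >= 1.7.44,
    # per the error message emitted above.
    return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44')
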
Exemplo n.º 16
0
def main():
    argument_spec = dict(
        instance=dict(),
        id=dict(),
        name=dict(),
        volume_size=dict(type='int'),
        volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'],
                         default='standard'),
        iops=dict(type='int'),
        encrypted=dict(type='bool', default=False),
        kms_key_id=dict(),
        device_name=dict(),
        delete_on_termination=dict(type='bool', default=False),
        zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
        snapshot=dict(),
        state=dict(choices=['absent', 'present', 'list'], default='present'),
        tags=dict(type='dict', default={}))
    module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    id = module.params.get('id')
    name = module.params.get('name')
    instance = module.params.get('instance')
    volume_size = module.params.get('volume_size')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    device_name = module.params.get('device_name')
    zone = module.params.get('zone')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    tags = module.params.get('tags')

    # Ensure we have the zone or can get the zone
    if instance is None and zone is None and state == 'present':
        module.fail_json(msg="You must specify either instance or zone")

    # Set volume detach flag
    if instance == 'None' or instance == '':
        instance = None
        detach_vol_flag = True
    else:
        detach_vol_flag = False

    # Set changed flag
    changed = False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json_aws(e)
    else:
        module.fail_json(msg="region must be specified")

    if state == 'list':
        returned_volumes = []
        vols = get_volumes(module, ec2)

        for v in vols:
            attachment = v.attach_data

            returned_volumes.append(get_volume_info(v, state))

        module.exit_json(changed=False, volumes=returned_volumes)

    if encrypted and not boto_supports_volume_encryption():
        module.fail_json(
            msg="You must use boto >= v2.29.0 to use encrypted volumes")

    if kms_key_id is not None and not boto_supports_kms_key_id():
        module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")

    # Here we need to get the zone info for the instance. This covers situation where
    # instance is specified but zone isn't.
    # Useful for playbooks chaining instance launch with volume create + attach and where the
    # zone doesn't matter to the user.
    inst = None
    if instance:
        try:
            reservation = ec2.get_all_instances(instance_ids=instance)
        except BotoServerError as e:
            module.fail_json_aws(e)
        inst = reservation[0].instances[0]
        zone = inst.placement

        # Check if there is a volume already mounted there.
        if device_name:
            if device_name in inst.block_device_mapping:
                module.exit_json(
                    msg="Volume mapping for %s already exists on instance %s" %
                    (device_name, instance),
                    volume_id=inst.block_device_mapping[device_name].volume_id,
                    device=device_name,
                    changed=False)

    # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
    # without needing to pass an unused volume_size
    if not volume_size and not (id or name or snapshot):
        module.fail_json(
            msg=
            "You must specify volume_size or identify an existing volume by id, name, or snapshot"
        )

    if volume_size and id:
        module.fail_json(msg="Cannot specify volume_size together with id")

    if state == 'present':
        volume, changed = create_volume(module, ec2, zone)
        if detach_vol_flag:
            volume, changed = detach_volume(module, ec2, volume)
        elif inst is not None:
            volume, changed = attach_volume(module, ec2, volume, inst)

        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
        volume_info = get_volume_info(volume, state)

        # deleteOnTermination is not correctly reflected on attachment
        if module.params.get('delete_on_termination'):
            for attempt in range(0, 8):
                if volume_info['attachment_set'].get(
                        'deleteOnTermination') == 'true':
                    break
                time.sleep(5)
                volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
                volume_info = get_volume_info(volume, state)
        module.exit_json(changed=changed,
                         volume=volume_info,
                         device=volume_info['attachment_set']['device'],
                         volume_id=volume_info['id'],
                         volume_type=volume_info['type'])
    elif state == 'absent':
        delete_volume(module, ec2)
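
The encryption and KMS checks above (boto_supports_volume_encryption, boto_supports_kms_key_id) are version gates on the installed boto2 library; the thresholds come from the error messages. A minimal sketch:

import boto
from distutils.version import LooseVersion

def boto_supports_volume_encryption():
    # Sketch only: encrypted EBS volumes need boto >= 2.29.0.
    return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')

def boto_supports_kms_key_id():
    # Sketch only: passing kms_key_id to create_volume needs boto >= 2.39.0.
    return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
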
Exemplo n.º 17
0
def main():
    template_options = dict(
        block_device_mappings=dict(
            type='list',
            options=dict(
                device_name=dict(),
                ebs=dict(
                    type='dict',
                    options=dict(
                        delete_on_termination=dict(type='bool'),
                        encrypted=dict(type='bool'),
                        iops=dict(type='int'),
                        kms_key_id=dict(),
                        snapshot_id=dict(),
                        volume_size=dict(type='int'),
                        volume_type=dict(),
                    ),
                ),
                no_device=dict(),
                virtual_name=dict(),
            ),
        ),
        cpu_options=dict(
            type='dict',
            options=dict(
                core_count=dict(type='int'),
                threads_per_core=dict(type='int'),
            ),
        ),
        credit_specification=dict(
            type='dict',
            options=dict(cpu_credits=dict()),
        ),
        disable_api_termination=dict(type='bool'),
        ebs_optimized=dict(type='bool'),
        elastic_gpu_specifications=dict(
            options=dict(type=dict()),
            type='list',
        ),
        iam_instance_profile=dict(),
        image_id=dict(),
        instance_initiated_shutdown_behavior=dict(
            choices=['stop', 'terminate']),
        instance_market_options=dict(
            type='dict',
            options=dict(
                market_type=dict(),
                spot_options=dict(
                    type='dict',
                    options=dict(
                        block_duration_minutes=dict(type='int'),
                        instance_interruption_behavior=dict(
                            choices=['hibernate', 'stop', 'terminate']),
                        max_price=dict(),
                        spot_instance_type=dict(
                            choices=['one-time', 'persistent']),
                    ),
                ),
            ),
        ),
        instance_type=dict(),
        kernel_id=dict(),
        key_name=dict(),
        monitoring=dict(
            type='dict',
            options=dict(enabled=dict(type='bool')),
        ),
        network_interfaces=dict(
            type='list',
            options=dict(
                associate_public_ip_address=dict(type='bool'),
                delete_on_termination=dict(type='bool'),
                description=dict(),
                device_index=dict(type='int'),
                groups=dict(type='list'),
                ipv6_address_count=dict(type='int'),
                ipv6_addresses=dict(type='list'),
                network_interface_id=dict(),
                private_ip_address=dict(),
                subnet_id=dict(),
            ),
        ),
        placement=dict(
            options=dict(
                affinity=dict(),
                availability_zone=dict(),
                group_name=dict(),
                host_id=dict(),
                tenancy=dict(),
            ),
            type='dict',
        ),
        ram_disk_id=dict(),
        security_group_ids=dict(type='list'),
        security_groups=dict(type='list'),
        tags=dict(type='dict'),
        user_data=dict(),
    )

    arg_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        template_name=dict(aliases=['name']),
        template_id=dict(aliases=['id']),
        default_version=dict(default='latest'),
    )

    arg_spec.update(template_options)

    module = AnsibleAWSModule(argument_spec=arg_spec,
                              required_one_of=[('template_name', 'template_id')
                                               ],
                              supports_check_mode=True)

    if not module.boto3_at_least('1.6.0'):
        module.fail_json(msg="ec2_launch_template requires boto3 >= 1.6.0")

    for interface in (module.params.get('network_interfaces') or []):
        if interface.get('ipv6_addresses'):
            interface['ipv6_addresses'] = [{
                'ipv6_address': x
            } for x in interface['ipv6_addresses']]

    if module.params.get('state') == 'present':
        out = create_or_update(module, template_options)
        out.update(format_module_output(module))
    elif module.params.get('state') == 'absent':
        out = delete_template(module)
    else:
        module.fail_json(
            msg='Unsupported value "{0}" for `state` parameter'.format(
                module.params.get('state')))

    module.exit_json(**out)
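
The network-interface loop above rewrites plain IPv6 strings into per-address dicts before the template is created or updated. A small illustration of that transformation (the sample addresses are placeholders; the structure is later camelized into the Ipv6Addresses list the EC2 API expects):

interface = {'device_index': 0, 'ipv6_addresses': ['2001:db8::1', '2001:db8::2']}
interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']]
# interface['ipv6_addresses'] is now:
# [{'ipv6_address': '2001:db8::1'}, {'ipv6_address': '2001:db8::2'}]
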
Exemplo n.º 18
0
def main():

    argument_spec = (dict(
        cross_zone_load_balancing=dict(type='bool'),
        deletion_protection=dict(type='bool'),
        listeners=dict(type='list',
                       elements='dict',
                       options=dict(Protocol=dict(type='str', required=True),
                                    Port=dict(type='int', required=True),
                                    SslPolicy=dict(type='str'),
                                    Certificates=dict(type='list'),
                                    DefaultActions=dict(type='list',
                                                        required=True))),
        name=dict(required=True, type='str'),
        purge_listeners=dict(default=True, type='bool'),
        purge_tags=dict(default=True, type='bool'),
        subnets=dict(type='list'),
        subnet_mappings=dict(type='list'),
        scheme=dict(default='internet-facing',
                    choices=['internet-facing', 'internal']),
        state=dict(choices=['present', 'absent'], type='str'),
        tags=dict(type='dict'),
        wait_timeout=dict(type='int'),
        wait=dict(type='bool')))

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['subnets', 'subnet_mappings']])

    # Check for subnets or subnet_mappings if state is present
    state = module.params.get("state")
    if state == 'present':
        if module.params.get("subnets") is None and module.params.get(
                "subnet_mappings") is None:
            module.fail_json(
                msg=
                "'subnets' or 'subnet_mappings' is required when state=present"
            )

    if state is None:
        # See below, unless state==present we delete.  Ouch.
        module.deprecate(
            'State currently defaults to absent.  This is inconsistent with other modules'
            ' and the default will be changed to `present` in Ansible 2.14',
            version='2.14')

    # Quick check of listeners parameters
    listeners = module.params.get("listeners")
    if listeners is not None:
        for listener in listeners:
            for key in listener.keys():
                protocols_list = ['TCP', 'TLS', 'UDP', 'TCP_UDP']
                if key == 'Protocol' and listener[key] not in protocols_list:
                    module.fail_json(msg="'Protocol' must be either " +
                                     ", ".join(protocols_list))

    connection = module.client('elbv2')
    connection_ec2 = module.client('ec2')

    elb = NetworkLoadBalancer(connection, connection_ec2, module)

    if state == 'present':
        create_or_update_elb(elb)
    else:
        delete_elb(elb)
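
The quick listener validation above only checks the Protocol value. For reference, a minimal listener dict that passes that check (the forward action and the target group ARN are illustrative placeholders):

listener = {
    'Protocol': 'TCP',          # must be one of TCP, TLS, UDP, TCP_UDP
    'Port': 80,
    'DefaultActions': [
        {'Type': 'forward', 'TargetGroupArn': 'arn:aws:elasticloadbalancing:...'}
    ],
}
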
Exemplo n.º 19
0
def main():
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list',
                          choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list'),
        accounts=dict(type='list'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(
            aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(
            aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)
    if not (module.boto3_at_least('1.6.0')
            and module.botocore_at_least('1.10.26')):
        module.fail_json(
            msg=
            "Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26"
        )

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    jittered_backoff_decorator = AWSRetry.jittered_backoff(
        retries=10,
        delay=3,
        max_delay=30,
        catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation',
                        retry_decorator=jittered_backoff_decorator)
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg=
            "Can't create a stack set without choosing at least one account. "
            "To get the ID of the current account, use the aws_caller_info module."
        )

    module.params['accounts'] = [
        to_native(a) for a in module.params['accounts']
    ]

    stack_params['StackSetName'] = module.params['name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg=
                "The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                "`template_body`, or `template_url`".format(
                    module.params['name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if module.params.get('tags') and isinstance(module.params.get('tags'),
                                                dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(
            module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params[
            'administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params[
            'execution_role_name']

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True,
                             msg='Stack set would be deleted',
                             meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False,
                             msg='Stack set doesn\'t exist',
                             meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True,
                             msg='New stack set would be created',
                             meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True,
                                 msg='New stack instance(s) would be created',
                                 meta=[])
            elif unspecified_stacks and module.params.get('purge_stacks'):
                module.exit_json(changed=True,
                                 msg='Old stack instance(s) would be deleted',
                                 meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params[
                'ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(
                    operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(
                operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params[
                    'OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
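        # create any missing account/region stack instances, otherwise push an update to the existing ones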
        if new_stack_instances:
            operation_ids.append(
                'Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct
                                  for acct, region in new_stack_instances)),
                Regions=list(
                    set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append(
                'Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(
                    set(acct for acct, region in existing_stack_instances)),
                Regions=list(
                    set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
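        # wait for every queued stack set operation to finish (bounded by wait_timeout)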
        for op in operation_ids:
            await_stack_set_operation(
                module,
                cfn,
                operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(
                module.params['name']))
        # delete_stack_set only succeeds on an empty stack set; if instances remain it raises
        # StackSetNotEmptyException, which is handled below by deleting the stack instances first
        # (retaining the underlying stacks unless purge_stacks is set)
        try:
            cfn.delete_stack_set(StackSetName=module.params['name'])
            module.exit_json(
                msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e,
                msg=
                'Cannot delete stack set {0} while there is an operation in progress'
                .format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(
                operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op)
            await_stack_set_operation(
                module,
                cfn,
                operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(StackSetName=module.params['name'])
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(
                    StackSetName=module.params['name'])
                stack_states = ', '.join(
                    '(account={Account}, region={Region}, state={Status})'.
                    format(**i) for i in instances['Summaries'])
                module.fail_json_aws(
                    exc,
                    msg=
                    'Could not purge all stacks, or not all accounts/regions were chosen for deletion: '
                    + stack_states)
            module.exit_json(changed=True,
                             msg='Stack set {0} deleted'.format(
                                 module.params['name']))

    result.update(**describe_stack_tree(
        module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute",
                         **result)
    module.exit_json(changed=changed, **result)
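
The helpers referenced above (compare_stack_instances, get_operation_preferences, await_stack_set_operation, describe_stack_tree) are defined elsewhere in the module and are not shown here. Purely as a hedged illustration, and not the module's actual implementation, a waiter along the lines of await_stack_set_operation could poll the boto3 describe_stack_set_operation call until the operation leaves its queued/running states; the 15-minute fallback below is an assumption made for the sketch.

import datetime
import time


def await_stack_set_operation(module, cfn, operation_id, stack_set_name, max_wait):
    """Poll a stack set operation until it reaches a terminal state or max_wait seconds pass (sketch)."""
    # fall back to an arbitrary 15-minute limit when no wait_timeout was supplied (assumption)
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=max_wait or 900)
    while datetime.datetime.now() < deadline:
        operation = cfn.describe_stack_set_operation(
            StackSetName=stack_set_name, OperationId=operation_id)
        # SUCCEEDED, FAILED and STOPPED are terminal; keep polling while queued or running
        if operation['StackSetOperation']['Status'] not in ('QUEUED', 'RUNNING', 'STOPPING'):
            return
        time.sleep(15)
    module.warn('Timed out waiting for operation {0} on stack set {1}'.format(
        operation_id, stack_set_name))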
Example no. 20
def main():
    """
     Module action handler
    """
    argument_spec = dict(
        encrypt=dict(required=False, type="bool", default=False),
        state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
        kms_key_id=dict(required=False, type='str', default=None),
        purge_tags=dict(default=True, type='bool'),
        id=dict(required=False, type='str', default=None),
        name=dict(required=False, type='str', default=None),
        tags=dict(required=False, type="dict", default={}),
        targets=dict(required=False, type="list", default=[]),
        performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
        throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None),
        provisioned_throughput_in_mibps=dict(required=False, type='float'),
        wait=dict(required=False, type="bool", default=False),
        wait_timeout=dict(required=False, type="int", default=0)
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)

    connection = EFSConnection(module)

    name = module.params.get('name')
    fs_id = module.params.get('id')
    tags = module.params.get('tags')
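    # translate mount target keys from the module arguments into the CamelCase keys the EFS API expects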
    target_translations = {
        'ip_address': 'IpAddress',
        'security_groups': 'SecurityGroups',
        'subnet_id': 'SubnetId'
    }
    targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
    performance_mode_translations = {
        'general_purpose': 'generalPurpose',
        'max_io': 'maxIO'
    }
    encrypt = module.params.get('encrypt')
    kms_key_id = module.params.get('kms_key_id')
    performance_mode = performance_mode_translations[module.params.get('performance_mode')]
    purge_tags = module.params.get('purge_tags')
    throughput_mode = module.params.get('throughput_mode')
    provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps')
    state = str(module.params.get('state')).lower()
    changed = False

    if state == 'present':
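        # create the file system if needed, then converge tags, mount targets and throughput settings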
        if not name:
            module.fail_json(msg='Name parameter is required for create')

        changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps)
        if connection.supports_provisioned_mode():
            changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed
        changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets,
                                                  throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed
        result = first_or_default(connection.get_file_systems(CreationToken=name))

    elif state == 'absent':
        if not name and not fs_id:
            module.fail_json(msg='Either name or id parameter is required for delete')

        changed = connection.delete_file_system(name, fs_id)
        result = None
    if result:
        result = camel_dict_to_snake_dict(result)
    module.exit_json(changed=changed, efs=result)
Example no. 21
def main():
    """
    Main entry point.

    :return dict: ansible facts
    """
    argument_spec = dict(function_name=dict(required=False,
                                            default=None,
                                            aliases=['function', 'name']),
                         query=dict(required=False,
                                    choices=[
                                        'aliases', 'all', 'config', 'mappings',
                                        'policy', 'versions'
                                    ],
                                    default='all'),
                         event_source_arn=dict(required=False, default=None))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=[],
                              required_together=[])

    # validate function_name if present
    function_name = module.params['function_name']
    if function_name:
        if not re.search(r"^[\w\-:]+$", function_name):
            module.fail_json(
                msg=
                'Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'
                .format(function_name))
        if len(function_name) > 64:
            module.fail_json(
                msg='Function name "{0}" exceeds 64 character limit'.format(
                    function_name))

    client = module.client('lambda')

    this_module = sys.modules[__name__]

    invocations = dict(
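        # map each query choice to the name of the fact-gathering function defined in this module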
        aliases='alias_details',
        all='all_details',
        config='config_details',
        mappings='mapping_details',
        policy='policy_details',
        versions='version_details',
    )

    this_module_function = getattr(this_module,
                                   invocations[module.params['query']])
    all_facts = fix_return(this_module_function(client, module))

    results = dict(ansible_facts={'lambda_facts': {'function': all_facts}},
                   changed=False)

    if module.check_mode:
        results['msg'] = 'Check mode set but ignored for fact gathering only.'

    module.exit_json(**results)
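
The invocations table above dispatches to one fact-gathering function per query type; those functions live elsewhere in the module. As a hedged sketch of the pattern only (the real module shapes its results differently), a config_details helper could wrap the boto3 get_function_configuration and list_functions calls like this:

def config_details(client, module):
    """Return configuration facts for one function, or for every function when no name is given (sketch)."""
    function_name = module.params.get('function_name')
    if function_name:
        try:
            return client.get_function_configuration(FunctionName=function_name)
        except client.exceptions.ResourceNotFoundException:
            return {}
    functions = []
    # page through every function in the account and region
    for page in client.get_paginator('list_functions').paginate():
        functions.extend(page['Functions'])
    return {'function_count': len(functions), 'function_list': functions}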
Example no. 22
def main():

    argument_spec = dict(
        force=dict(default=False, type='bool'),
        policy=dict(type='json'),
        name=dict(required=True),
        requester_pays=dict(default=False, type='bool'),
        s3_url=dict(aliases=['S3_URL']),
        state=dict(default='present', choices=['present', 'absent']),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
        versioning=dict(type='bool'),
        ceph=dict(default=False, type='bool'),
        encryption=dict(choices=['none', 'AES256', 'aws:kms']),
        encryption_key_id=dict()
    )

    required_by = dict(
        encryption_key_id=('encryption',),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec, required_by=required_by
    )

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    s3_url = module.params.get('s3_url')
    ceph = module.params.get('ceph')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    if ceph and not s3_url:
        module.fail_json(msg='ceph flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to Ceph RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)

    if s3_client is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    state = module.params.get("state")
    encryption = module.params.get("encryption")
    encryption_key_id = module.params.get("encryption_key_id")

    if not hasattr(s3_client, "get_bucket_encryption"):
        if encryption is not None:
            module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")

    # Parameter validation
    if encryption_key_id is not None and encryption != 'aws:kms':
        module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")

    if state == 'present':
        create_or_update_bucket(s3_client, module, location)
    elif state == 'absent':
        destroy_bucket(s3_client, module)
Example no. 23
def main():

    argument_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default="/"),
        assume_role_policy_document=dict(type='json'),
        managed_policies=dict(type='list', aliases=['managed_policy']),
        max_session_duration=dict(type='int'),
        state=dict(type='str',
                   choices=['present', 'absent'],
                   default='present'),
        description=dict(type='str'),
        boundary=dict(type='str', aliases=['boundary_policy_arn']),
        create_instance_profile=dict(type='bool', default=True),
        delete_instance_profile=dict(type='bool', default=False),
        purge_policies=dict(type='bool',
                            aliases=['purge_policy',
                                     'purge_managed_policies']),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[('state', 'present',
                                            ['assume_role_policy_document'])],
                              supports_check_mode=True)

    if module.params.get('purge_policies') is None:
        module.deprecate(
            'In Ansible 2.14 the default value of purge_policies will change from true to false.'
            ' To maintain the existing behaviour explicitly set purge_policies=true',
            version='2.14')

    if module.params.get('boundary'):
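        # permissions boundaries cannot be combined with instance profile creation and must be given as an ARN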
        if module.params.get('create_instance_profile'):
            module.fail_json(
                msg=
                "When using a boundary policy, `create_instance_profile` must be set to `false`."
            )
        if not module.params.get('boundary').startswith('arn:aws:iam'):
            module.fail_json(msg="Boundary policy must be an ARN")
    if module.params.get(
            'tags') is not None and not module.botocore_at_least('1.12.46'):
        module.fail_json(
            msg="When managing tags botocore must be at least v1.12.46. "
            "Current versions: boto3-{boto3_version} botocore-{botocore_version}"
            .format(**module._gather_versions()))
    if module.params.get(
            'boundary'
    ) is not None and not module.botocore_at_least('1.10.57'):
        module.fail_json(
            msg=
            "When using a boundary policy, botocore must be at least v1.10.57. "
            "Current versions: boto3-{boto3_version} botocore-{botocore_version}"
            .format(**module._gather_versions()))
    if module.params.get('max_session_duration'):
        max_session_duration = module.params.get('max_session_duration')
        if max_session_duration < 3600 or max_session_duration > 43200:
            module.fail_json(
                msg=
                "max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)"
            )
    if module.params.get('path'):
        path = module.params.get('path')
        if not path.endswith('/') or not path.startswith('/'):
            module.fail_json(msg="path must begin and end with /")

    connection = module.client('iam',
                               retry_decorator=AWSRetry.jittered_backoff())

    state = module.params.get("state")

    if state == 'present':
        create_or_update_role(connection, module)
    else:
        destroy_role(connection, module)
Example no. 24
def main():
    argument_spec = dict(
        name=dict(required=True),
        cidr_block=dict(type='list', required=True),
        ipv6_cidr=dict(type='bool', default=False),
        tenancy=dict(choices=['default', 'dedicated'], default='default'),
        dns_support=dict(type='bool', default=True),
        dns_hostnames=dict(type='bool', default=True),
        dhcp_opts_id=dict(),
        tags=dict(type='dict', aliases=['resource_tags']),
        state=dict(choices=['present', 'absent'], default='present'),
        multi_ok=dict(type='bool', default=False),
        purge_cidrs=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    name = module.params.get('name')
    cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
    ipv6_cidr = module.params.get('ipv6_cidr')
    purge_cidrs = module.params.get('purge_cidrs')
    tenancy = module.params.get('tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    dhcp_id = module.params.get('dhcp_opts_id')
    tags = module.params.get('tags')
    state = module.params.get('state')
    multi = module.params.get('multi_ok')

    changed = False

    connection = module.client(
        'ec2',
        retry_decorator=AWSRetry.jittered_backoff(
            retries=8,
            delay=3,
            catch_extra_error_codes=['InvalidVpcID.NotFound']))

    if dns_hostnames and not dns_support:
        module.fail_json(
            msg=
            'In order to enable DNS Hostnames you must also enable DNS support'
        )

    if state == 'present':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is None:
            vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
            changed = True

        vpc_obj = get_vpc(module, connection, vpc_id)

        associated_cidrs = dict(
            (cidr['CidrBlock'], cidr['AssociationId'])
            for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
            if cidr['CidrBlockState']['State'] != 'disassociated')
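        # work out which requested CIDRs still need associating and which current associations to purge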
        to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
        to_remove = [
            associated_cidrs[cidr] for cidr in associated_cidrs
            if cidr not in cidr_block
        ]
        expected_cidrs = [
            cidr for cidr in associated_cidrs
            if associated_cidrs[cidr] not in to_remove
        ] + to_add

        if len(cidr_block) > 1:
            for cidr in to_add:
                changed = True
                try:
                    connection.associate_vpc_cidr_block(CidrBlock=cidr,
                                                        VpcId=vpc_id)
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
        if ipv6_cidr:
            if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
                module.warn(
                    "Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}"
                    .format(
                        vpc_id, vpc_obj['Ipv6CidrBlockAssociationSet'][0]
                        ['Ipv6CidrBlock']))
            else:
                try:
                    connection.associate_vpc_cidr_block(
                        AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
                    changed = True
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e, "Unable to associate CIDR {0}.".format(ipv6_cidr))

        if purge_cidrs:
            for association_id in to_remove:
                changed = True
                try:
                    connection.disassociate_vpc_cidr_block(
                        AssociationId=association_id)
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e,
                        "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
                        "are associated with the CIDR block before you can disassociate it."
                        .format(association_id))

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to update DHCP options")

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_id, tags, name):
                    changed = True
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Failed to update tags")

        current_dns_enabled = connection.describe_vpc_attribute(
            Attribute='enableDnsSupport', VpcId=vpc_id,
            aws_retry=True)['EnableDnsSupport']['Value']
        current_dns_hostnames = connection.describe_vpc_attribute(
            Attribute='enableDnsHostnames', VpcId=vpc_id,
            aws_retry=True)['EnableDnsHostnames']['Value']
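        # flip the DNS attributes only when they differ from the requested values (respecting check mode)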
        if current_dns_enabled != dns_support:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(
                        VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e, "Failed to update enabled dns support attribute")
        if current_dns_hostnames != dns_hostnames:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(
                        VpcId=vpc_id,
                        EnableDnsHostnames={'Value': dns_hostnames})
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e, "Failed to update enabled dns hostnames attribute")

        # wait for associated cidrs to match
        if to_add or to_remove:
            try:
                connection.get_waiter('vpc_available').wait(
                    VpcIds=[vpc_id],
                    Filters=[{
                        'Name': 'cidr-block-association.cidr-block',
                        'Values': expected_cidrs
                    }])
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to wait for CIDRs to update")

        # try to wait for enableDnsSupport and enableDnsHostnames to match
        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport',
                               dns_support)
        wait_for_vpc_attribute(connection, module, vpc_id,
                               'enableDnsHostnames', dns_hostnames)

        final_state = camel_dict_to_snake_dict(
            get_vpc(module, connection, vpc_id))
        final_state['tags'] = boto3_tag_list_to_ansible_dict(
            final_state.get('tags', []))
        final_state['id'] = final_state.pop('vpc_id')

        module.exit_json(changed=changed, vpc=final_state)

    elif state == 'absent':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is not None:
            try:
                if not module.check_mode:
                    connection.delete_vpc(VpcId=vpc_id)
                changed = True
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(
                    e,
                    msg=
                    "Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                    "and/or ec2_vpc_route_table modules to ensure the other components are absent."
                    .format(vpc_id))

        module.exit_json(changed=changed, vpc={})
Example no. 25
def main():
    argument_spec = dict(
        name=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        runtime=dict(),
        role=dict(),
        handler=dict(),
        zip_file=dict(aliases=['src']),
        s3_bucket=dict(),
        s3_key=dict(),
        s3_object_version=dict(),
        description=dict(default=''),
        timeout=dict(type='int', default=3),
        memory_size=dict(type='int', default=128),
        vpc_subnet_ids=dict(type='list'),
        vpc_security_group_ids=dict(type='list'),
        environment_variables=dict(type='dict'),
        dead_letter_arn=dict(),
        tracing_mode=dict(choices=['Active', 'PassThrough']),
        tags=dict(type='dict'),
    )

    mutually_exclusive = [['zip_file', 's3_key'],
                          ['zip_file', 's3_bucket'],
                          ['zip_file', 's3_object_version']]

    required_together = [['s3_key', 's3_bucket'],
                         ['vpc_subnet_ids', 'vpc_security_group_ids']]

    required_if = [['state', 'present', ['runtime', 'handler', 'role']]]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=mutually_exclusive,
                              required_together=required_together,
                              required_if=required_if)

    name = module.params.get('name')
    state = module.params.get('state').lower()
    runtime = module.params.get('runtime')
    role = module.params.get('role')
    handler = module.params.get('handler')
    s3_bucket = module.params.get('s3_bucket')
    s3_key = module.params.get('s3_key')
    s3_object_version = module.params.get('s3_object_version')
    zip_file = module.params.get('zip_file')
    description = module.params.get('description')
    timeout = module.params.get('timeout')
    memory_size = module.params.get('memory_size')
    vpc_subnet_ids = module.params.get('vpc_subnet_ids')
    vpc_security_group_ids = module.params.get('vpc_security_group_ids')
    environment_variables = module.params.get('environment_variables')
    dead_letter_arn = module.params.get('dead_letter_arn')
    tracing_mode = module.params.get('tracing_mode')
    tags = module.params.get('tags')

    check_mode = module.check_mode
    changed = False

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (ClientError, ValidationError) as e:
        module.fail_json_aws(e, msg="Trying to connect to AWS")

    if state == 'present':
        if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role):
            role_arn = role
        else:
            # get account ID and assemble ARN
            account_id, partition = get_account_info(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
            role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role)

    # Get function configuration if present, False otherwise
    current_function = get_current_function(client, name)

    # Update existing Lambda function
    if state == 'present' and current_function:

        # Get current state
        current_config = current_function['Configuration']
        current_version = None

        # Update function configuration
        func_kwargs = {'FunctionName': name}

        # Update configuration if needed
        if role_arn and current_config['Role'] != role_arn:
            func_kwargs.update({'Role': role_arn})
        if handler and current_config['Handler'] != handler:
            func_kwargs.update({'Handler': handler})
        if description and current_config['Description'] != description:
            func_kwargs.update({'Description': description})
        if timeout and current_config['Timeout'] != timeout:
            func_kwargs.update({'Timeout': timeout})
        if memory_size and current_config['MemorySize'] != memory_size:
            func_kwargs.update({'MemorySize': memory_size})
        if runtime and current_config['Runtime'] != runtime:
            func_kwargs.update({'Runtime': runtime})
        if (environment_variables is not None) and (current_config.get(
                'Environment', {}).get('Variables', {}) != environment_variables):
            func_kwargs.update({'Environment': {'Variables': environment_variables}})
        if dead_letter_arn is not None:
            if current_config.get('DeadLetterConfig'):
                if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn:
                    func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
            else:
                if dead_letter_arn != "":
                    func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})
        if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 'PassThrough') != tracing_mode):
            func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})

        # If VPC configuration is desired
        if vpc_subnet_ids or vpc_security_group_ids:
            if not vpc_subnet_ids or not vpc_security_group_ids:
                module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')

            if 'VpcConfig' in current_config:
                # Compare VPC config with current config
                current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
                current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']

                subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
                vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)

            if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
                new_vpc_config = {'SubnetIds': vpc_subnet_ids,
                                  'SecurityGroupIds': vpc_security_group_ids}
                func_kwargs.update({'VpcConfig': new_vpc_config})
        else:
            # No VPC configuration is desired, assure VPC config is empty when present in current config
            if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
                func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})

        # Upload new configuration if configuration has changed
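        # func_kwargs always contains FunctionName, so more than one key means the configuration changed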
        if len(func_kwargs) > 1:
            try:
                if not check_mode:
                    response = client.update_function_configuration(**func_kwargs)
                    current_version = response['Version']
                changed = True
            except (ParamValidationError, ClientError) as e:
                module.fail_json_aws(e, msg="Trying to update lambda configuration")

        # Update code configuration
        code_kwargs = {'FunctionName': name, 'Publish': True}

        # Update S3 location
        if s3_bucket and s3_key:
            # If function is stored on S3 always update
            code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})

            # If S3 Object Version is given
            if s3_object_version:
                code_kwargs.update({'S3ObjectVersion': s3_object_version})

        # Compare local checksum, update remote code when different
        elif zip_file:
            local_checksum = sha256sum(zip_file)
            remote_checksum = current_config['CodeSha256']

            # Only upload new code when local code is different compared to the remote code
            if local_checksum != remote_checksum:
                try:
                    with open(zip_file, 'rb') as f:
                        encoded_zip = f.read()
                    code_kwargs.update({'ZipFile': encoded_zip})
                except IOError as e:
                    module.fail_json(msg=str(e), exception=traceback.format_exc())

        # Tag Function
        if tags is not None:
            if set_tag(client, module, tags, current_function):
                changed = True

        # Upload new code if needed (e.g. code checksum has changed)
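        # code_kwargs always contains FunctionName and Publish, so more than two keys means new code to upload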
        if len(code_kwargs) > 2:
            try:
                if not check_mode:
                    response = client.update_function_code(**code_kwargs)
                    current_version = response['Version']
                changed = True
            except (ParamValidationError, ClientError) as e:
                module.fail_json_aws(e, msg="Trying to upload new code")

        # Describe function code and configuration
        response = get_current_function(client, name, qualifier=current_version)
        if not response:
            module.fail_json(msg='Unable to get function information after updating')

        # We're done
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Function doesn't exist, create new Lambda function
    elif state == 'present':
        if s3_bucket and s3_key:
            # If function is stored on S3
            code = {'S3Bucket': s3_bucket,
                    'S3Key': s3_key}
            if s3_object_version:
                code.update({'S3ObjectVersion': s3_object_version})
        elif zip_file:
            # If function is stored in local zipfile
            try:
                with open(zip_file, 'rb') as f:
                    zip_content = f.read()

                code = {'ZipFile': zip_content}
            except IOError as e:
                module.fail_json(msg=str(e), exception=traceback.format_exc())

        else:
            module.fail_json(msg='Either S3 object or path to zipfile required')

        func_kwargs = {'FunctionName': name,
                       'Publish': True,
                       'Runtime': runtime,
                       'Role': role_arn,
                       'Code': code,
                       'Timeout': timeout,
                       'MemorySize': memory_size,
                       }

        if description is not None:
            func_kwargs.update({'Description': description})

        if handler is not None:
            func_kwargs.update({'Handler': handler})

        if environment_variables:
            func_kwargs.update({'Environment': {'Variables': environment_variables}})

        if dead_letter_arn:
            func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}})

        if tracing_mode:
            func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})

        # If VPC configuration is given
        if vpc_subnet_ids or vpc_security_group_ids:
            if not vpc_subnet_ids or not vpc_security_group_ids:
                module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')

            func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
                                              'SecurityGroupIds': vpc_security_group_ids}})

        # Finally try to create function
        current_version = None
        try:
            if not check_mode:
                response = client.create_function(**func_kwargs)
                current_version = response['Version']
            changed = True
        except (ParamValidationError, ClientError) as e:
            module.fail_json_aws(e, msg="Trying to create function")

        # Tag Function
        if tags is not None:
            if set_tag(client, module, tags, get_current_function(client, name)):
                changed = True

        response = get_current_function(client, name, qualifier=current_version)
        if not response:
            module.fail_json(msg='Unable to get function information after creating')
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Delete existing Lambda function
    if state == 'absent' and current_function:
        try:
            if not check_mode:
                client.delete_function(FunctionName=name)
            changed = True
        except (ParamValidationError, ClientError) as e:
            module.fail_json_aws(e, msg="Trying to delete Lambda function")

        module.exit_json(changed=changed)

    # Function already absent, do nothing
    elif state == 'absent':
        module.exit_json(changed=changed)
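
get_current_function is used above both to read the existing configuration and to detect whether the function exists at all. A minimal version, assuming the boto3 get_function API and not necessarily matching the module's own helper, could look like:

def get_current_function(connection, function_name, qualifier=None):
    """Return the get_function response for the function, or False when it does not exist (sketch)."""
    try:
        if qualifier:
            return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
        return connection.get_function(FunctionName=function_name)
    except connection.exceptions.ResourceNotFoundException:
        return False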