def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        group_name=dict(required=True, aliases=['name']),
        group_description=dict(required=False, aliases=['description']),
        group_subnets=dict(required=False,
                           aliases=['subnets'],
                           type='list',
                           elements='str'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False)

    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')

    state = module.params.get('state')
    group_name = module.params.get('group_name')
    group_description = module.params.get('group_description')
    group_subnets = module.params.get('group_subnets')

    if state == 'present':
        for required in ('group_name', 'group_description', 'group_subnets'):
            if not module.params.get(required):
                module.fail_json(
                    msg=str("parameter %s required for state='present'" %
                            required))
    else:
        for not_allowed in ('group_description', 'group_subnets'):
            if module.params.get(not_allowed):
                module.fail_json(
                    msg=str("parameter %s not allowed for state='absent'" %
                            not_allowed))

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str(
            "Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file"
        ))

    # Connect to the Redshift endpoint.
    try:
        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    try:
        changed = False
        exists = False
        group = None

        try:
            matching_groups = conn.describe_cluster_subnet_groups(
                group_name, max_records=100)
            exists = len(matching_groups) > 0
        except boto.exception.JSONResponseError as e:
            if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
                # if e.code != 'ClusterSubnetGroupNotFoundFault':
                module.fail_json(msg=str(e))

        if state == 'absent':
            if exists:
                conn.delete_cluster_subnet_group(group_name)
                changed = True

        else:
            if not exists:
                new_group = conn.create_cluster_subnet_group(
                    group_name, group_description, group_subnets)
                group = {
                    'name':
                    new_group['CreateClusterSubnetGroupResponse']
                    ['CreateClusterSubnetGroupResult']['ClusterSubnetGroup']
                    ['ClusterSubnetGroupName'],
                    'vpc_id':
                    new_group['CreateClusterSubnetGroupResponse']
                    ['CreateClusterSubnetGroupResult']['ClusterSubnetGroup']
                    ['VpcId'],
                }
            else:
                changed_group = conn.modify_cluster_subnet_group(
                    group_name, group_subnets, description=group_description)
                group = {
                    'name':
                    changed_group['ModifyClusterSubnetGroupResponse']
                    ['ModifyClusterSubnetGroupResult']['ClusterSubnetGroup']
                    ['ClusterSubnetGroupName'],
                    'vpc_id':
                    changed_group['ModifyClusterSubnetGroupResponse']
                    ['ModifyClusterSubnetGroupResult']['ClusterSubnetGroup']
                    ['VpcId'],
                }

            changed = True

    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, group=group)
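
The nested indexing above mirrors the shape of the boto2 JSON response. The same extraction could be factored into a small helper; a minimal sketch (extract_group is hypothetical, not part of the module):

def extract_group(response, action):
    # The boto2 JSON response nests the result under
    # <Action>Response -> <Action>Result -> ClusterSubnetGroup,
    # e.g. action='CreateClusterSubnetGroup' or 'ModifyClusterSubnetGroup'.
    subnet_group = response['%sResponse' % action]['%sResult' % action]['ClusterSubnetGroup']
    return {
        'name': subnet_group['ClusterSubnetGroupName'],
        'vpc_id': subnet_group['VpcId'],
    }
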
Example #2
def main():
    argument_spec = dict(
        name=dict(),
        function_arn=dict(),
        wait=dict(default=True, type='bool'),
        tail_log=dict(default=False, type='bool'),
        dry_run=dict(default=False, type='bool'),
        version_qualifier=dict(),
        payload=dict(default={}, type='dict'),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=[
                                  ['name', 'function_arn'],
                              ])

    name = module.params.get('name')
    function_arn = module.params.get('function_arn')
    await_return = module.params.get('wait')
    dry_run = module.params.get('dry_run')
    tail_log = module.params.get('tail_log')
    version_qualifier = module.params.get('version_qualifier')
    payload = module.params.get('payload')

    if not (name or function_arn):
        module.fail_json(
            msg="Must provide either a function_arn or a name to invoke.")

    try:
        client = module.client('lambda')
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    invoke_params = {}

    if await_return:
        # await response
        invoke_params['InvocationType'] = 'RequestResponse'
    else:
        # fire and forget
        invoke_params['InvocationType'] = 'Event'
    if dry_run or module.check_mode:
        # dry_run overrides invocation type
        invoke_params['InvocationType'] = 'DryRun'

    if tail_log and await_return:
        invoke_params['LogType'] = 'Tail'
    elif tail_log and not await_return:
        module.fail_json(msg="The `tail_log` parameter is only available if "
                         "the invocation waits for the function to complete. "
                         "Set `wait` to true or turn off `tail_log`.")
    else:
        invoke_params['LogType'] = 'None'

    if version_qualifier:
        invoke_params['Qualifier'] = version_qualifier

    if payload:
        invoke_params['Payload'] = json.dumps(payload)

    if function_arn:
        invoke_params['FunctionName'] = function_arn
    elif name:
        invoke_params['FunctionName'] = name

    try:
        response = client.invoke(**invoke_params)
    except is_boto3_error_code('ResourceNotFoundException') as nfe:
        module.fail_json_aws(nfe,
                             msg="Could not find Lambda to execute. Make sure "
                             "the ARN is correct and your profile has "
                             "permissions to execute this function.")
    except botocore.exceptions.ClientError as ce:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            ce,
            msg=
            "Client-side error when invoking Lambda, check inputs and specific error"
        )
    except botocore.exceptions.ParamValidationError as ve:  # pylint: disable=duplicate-except
        module.fail_json_aws(ve,
                             msg="Parameters to `invoke` failed to validate")
    except Exception as e:
        module.fail_json_aws(
            e, msg="Unexpected failure while invoking Lambda function")

    results = {
        'logs': '',
        'status': response['StatusCode'],
        'output': '',
    }

    if response.get('LogResult'):
        try:
            # logs are base64 encoded in the API response
            results['logs'] = base64.b64decode(response.get('LogResult', ''))
        except Exception as e:
            module.fail_json_aws(e, msg="Failed while decoding logs")

    if invoke_params['InvocationType'] == 'RequestResponse':
        try:
            results['output'] = json.loads(
                response['Payload'].read().decode('utf8'))
        except Exception as e:
            module.fail_json_aws(
                e, msg="Failed while decoding function return value")

        if isinstance(results.get('output'), dict) and any([
                results['output'].get('stackTrace'),
                results['output'].get('errorMessage')
        ]):
            # AWS sends back stack traces and error messages when a function failed
            # in a RequestResponse (synchronous) context.
            template = (
                "Function executed, but there was an error in the Lambda function. "
                "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
            error_data = {
                # format the stacktrace sent back as an array into a multiline string
                'trace':
                '\n'.join([
                    ' '.join([
                        str(x) for x in line  # cast line numbers to strings
                    ]) for line in results.get('output', {}).get(
                        'stackTrace', [])
                ]),
                'errmsg':
                results['output'].get('errorMessage'),
                'type':
                results['output'].get('errorType')
            }
            module.fail_json(msg=template.format(**error_data), result=results)

    module.exit_json(changed=True, result=results)
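
Outside of Ansible, the same invoke-and-decode handling can be exercised against boto3 directly; a minimal sketch, assuming default credentials and a hypothetical function name:

import base64
import json

import boto3

lambda_client = boto3.client('lambda')
response = lambda_client.invoke(
    FunctionName='my-function',        # hypothetical name
    InvocationType='RequestResponse',  # wait for the result
    LogType='Tail',                    # return the tail of the execution log
    Payload=json.dumps({'key': 'value'}),
)
# Logs come back base64-encoded; the payload is a streaming body containing JSON.
print(base64.b64decode(response['LogResult']).decode('utf-8'))
print(json.loads(response['Payload'].read().decode('utf-8')))

Example #3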
def main():
    """
    Get list of S3 buckets
    :return:
    """
    argument_spec = dict(
        name=dict(type='str', default=""),
        name_filter=dict(type='str', default=""),
        bucket_facts=dict(type='dict', options=dict(
            bucket_accelerate_configuration=dict(type='bool', default=False),
            bucket_acl=dict(type='bool', default=False),
            bucket_cors=dict(type='bool', default=False),
            bucket_encryption=dict(type='bool', default=False),
            bucket_lifecycle_configuration=dict(type='bool', default=False),
            bucket_location=dict(type='bool', default=False),
            bucket_logging=dict(type='bool', default=False),
            bucket_notification_configuration=dict(type='bool', default=False),
            bucket_ownership_controls=dict(type='bool', default=False),
            bucket_policy=dict(type='bool', default=False),
            bucket_policy_status=dict(type='bool', default=False),
            bucket_replication=dict(type='bool', default=False),
            bucket_request_payment=dict(type='bool', default=False),
            bucket_tagging=dict(type='bool', default=False),
            bucket_website=dict(type='bool', default=False),
            public_access_block=dict(type='bool', default=False),
        )),
        transform_location=dict(type='bool', default=False)
    )

    # Ensure we have an empty dict
    result = {}

    # Define mutually exclusive options
    mutually_exclusive = [
        ['name', 'name_filter']
    ]

    # Including ec2 argument spec
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=mutually_exclusive)
    is_old_facts = module._name == 'aws_s3_bucket_facts'
    if is_old_facts:
        module.deprecate("The 'aws_s3_bucket_facts' module has been renamed to 'aws_s3_bucket_info', "
                         "and the renamed one no longer returns ansible_facts", date='2021-12-01', collection_name='community.aws')

    # Get parameters
    name = module.params.get("name")
    name_filter = module.params.get("name_filter")
    requested_facts = module.params.get("bucket_facts")
    transform_location = module.params.get("transform_location")

    # Set up connection
    try:
        connection = module.client('s3')
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
        module.fail_json_aws(err_code, msg='Failed to connect to AWS')

    # Get basic bucket list (name + creation date)
    bucket_list = get_bucket_list(module, connection, name, name_filter)

    # Add information about name/name_filter to result
    if name:
        result['bucket_name'] = name
    elif name_filter:
        result['bucket_name_filter'] = name_filter

    # Gather detailed information about buckets if requested
    bucket_facts = module.params.get("bucket_facts")
    if bucket_facts:
        result['buckets'] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location)
    else:
        result['buckets'] = bucket_list

    # Send exit
    if is_old_facts:
        module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result)
    else:
        module.exit_json(msg="Retrieved s3 info.", **result)
Example #4
def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False,
                            default=[],
                            type='list',
                            elements='dict'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, default='', type='str',
                          no_log=False),
        role=dict(required=False, default='', type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10),
        force_new_deployment=dict(required=False, default=False, type='bool'),
        force_deletion=dict(required=False, default=False, type='bool'),
        deployment_configuration=dict(required=False, default={}, type='dict'),
        placement_constraints=dict(required=False,
                                   default=[],
                                   type='list',
                                   elements='dict',
                                   options=dict(type=dict(type='str'),
                                                expression=dict(type='str'))),
        placement_strategy=dict(required=False,
                                default=[],
                                type='list',
                                elements='dict',
                                options=dict(
                                    type=dict(type='str'),
                                    field=dict(type='str'),
                                )),
        health_check_grace_period_seconds=dict(required=False, type='int'),
        network_configuration=dict(required=False,
                                   type='dict',
                                   options=dict(
                                       subnets=dict(type='list',
                                                    elements='str'),
                                       security_groups=dict(type='list',
                                                            elements='str'),
                                       assign_public_ip=dict(type='bool'))),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        platform_version=dict(required=False, type='str'),
        service_registries=dict(required=False,
                                type='list',
                                default=[],
                                elements='dict'),
        scheduling_strategy=dict(required=False, choices=['DAEMON',
                                                          'REPLICA']))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('state', 'present',
                                            ['task_definition']),
                                           ('launch_type', 'FARGATE',
                                            ['network_configuration'])],
                              required_together=[['load_balancers', 'role']])

    if module.params['state'] == 'present' and module.params[
            'scheduling_strategy'] == 'REPLICA':
        if module.params['desired_count'] is None:
            module.fail_json(
                msg=
                'state is present, scheduling_strategy is REPLICA; missing desired_count'
            )

    service_mgr = EcsServiceManager(module)
    if module.params['network_configuration']:
        network_configuration = service_mgr.format_network_configuration(
            module.params['network_configuration'])
    else:
        network_configuration = None

    deployment_configuration = map_complex_type(
        module.params['deployment_configuration'],
        DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    deploymentConfiguration = snake_dict_to_camel_dict(
        deployment_configuration)
    serviceRegistries = list(
        map(snake_dict_to_camel_dict, module.params['service_registries']))

    try:
        existing = service_mgr.describe_service(module.params['cluster'],
                                                module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '" +
                         module.params['name'] + "' in cluster '" +
                         module.params['cluster'] + "': " + str(e))

    results = dict(changed=False)

    if module.params['state'] == 'present':

        matching = False
        update = False

        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if module.params['force_new_deployment']:
                update = True
            elif service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = existing
            else:
                update = True

        if not matching:
            if not module.check_mode:

                role = module.params['role']
                clientToken = module.params['client_token']

                loadBalancers = []
                for loadBalancer in module.params['load_balancers']:
                    if 'containerPort' in loadBalancer:
                        loadBalancer['containerPort'] = int(
                            loadBalancer['containerPort'])
                    loadBalancers.append(loadBalancer)

                if update:
                    # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature

                    if module.params['scheduling_strategy']:
                        if (existing['schedulingStrategy']
                            ) != module.params['scheduling_strategy']:
                            module.fail_json(
                                msg=
                                "It is not possible to update the scheduling strategy of an existing service"
                            )

                    if module.params['service_registries']:
                        if (existing['serviceRegistries']
                                or []) != serviceRegistries:
                            module.fail_json(
                                msg=
                                "It is not possible to update the service registries of an existing service"
                            )

                    if (existing['loadBalancers'] or []) != loadBalancers:
                        module.fail_json(
                            msg=
                            "It is not possible to update the load balancers of an existing service"
                        )

                    # update required
                    response = service_mgr.update_service(
                        module.params['name'], module.params['cluster'],
                        module.params['task_definition'],
                        module.params['desired_count'],
                        deploymentConfiguration, network_configuration,
                        module.params['health_check_grace_period_seconds'],
                        module.params['force_new_deployment'])

                else:
                    try:
                        response = service_mgr.create_service(
                            module.params['name'], module.params['cluster'],
                            module.params['task_definition'], loadBalancers,
                            module.params['desired_count'], clientToken, role,
                            deploymentConfiguration,
                            module.params['placement_constraints'],
                            module.params['placement_strategy'],
                            module.params['health_check_grace_period_seconds'],
                            network_configuration, serviceRegistries,
                            module.params['launch_type'],
                            module.params['platform_version'],
                            module.params['scheduling_strategy'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't create service")

                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(
                            module.params['name'],
                            module.params['cluster'],
                            module.params['force_deletion'],
                        )
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't delete service")
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            module.fail_json(msg="Service '" + module.params['name'] +
                             " not found.")
            return
        # it exists, so we should delete it and mark changed.
        # return info about the cluster deleted
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'],
                                                    module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
        if i == repeat - 1:
            module.fail_json(msg="Service still not deleted after " +
                             str(repeat) + " tries of " + str(delay) +
                             " seconds each.")
            return

    module.exit_json(**results)
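
The wait loop in the 'deleting' branch can be expressed as a standalone poller; a minimal sketch with boto3 (cluster and service names are hypothetical):

import time

import boto3

ecs = boto3.client('ecs')

def wait_until_inactive(cluster, service, delay=10, repeat=10):
    # Poll the service until ECS reports it as INACTIVE, or give up.
    for _ in range(repeat):
        described = ecs.describe_services(cluster=cluster, services=[service])
        if described['services'] and described['services'][0]['status'] == 'INACTIVE':
            return True
        time.sleep(delay)
    return False

wait_until_inactive('my-cluster', 'my-service')
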
Example #5
def main():
    argument_spec = dict(
        state={
            'required': True,
            'choices': ['present', 'absent']
        },
        instance_id={'required': True},
        ec2_elbs={
            'default': None,
            'required': False,
            'type': 'list',
            'elements': 'str'
        },
        enable_availability_zone={
            'default': True,
            'required': False,
            'type': 'bool'
        },
        wait={
            'required': False,
            'default': True,
            'type': 'bool'
        },
        wait_timeout={
            'required': False,
            'default': 0,
            'type': 'int'
        },
    )
    required_if = [
        ('state', 'present', ['ec2_elbs']),
    ]

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
    )

    ec2_elbs = module.params['ec2_elbs']
    wait = module.params['wait']
    enable_availability_zone = module.params['enable_availability_zone']
    timeout = module.params['wait_timeout']
    instance_id = module.params['instance_id']

    elb_man = ElbManager(module, instance_id, ec2_elbs)

    if ec2_elbs is not None:
        for elb in ec2_elbs:
            if not elb_man.exists(elb):
                module.fail_json(msg="ELB {0} does not exist".format(elb))

    if module.params['state'] == 'present':
        elb_man.register(wait, enable_availability_zone, timeout)
    elif module.params['state'] == 'absent':
        elb_man.deregister(wait, timeout)

    # XXX We're not a _facts module; we shouldn't be returning a fact and
    # polluting the namespace
    ansible_facts = {
        'ec2_elbs': [lb['LoadBalancerName'] for lb in elb_man.lbs]
    }

    module.exit_json(
        changed=elb_man.changed,
        ansible_facts=ansible_facts,
        updated_elbs=list(elb_man.updated_elbs),
    )
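
The register/deregister operations that ElbManager wraps correspond to the classic Elastic Load Balancing API; a minimal sketch with boto3 (load balancer name and instance id are hypothetical):

import boto3

elb = boto3.client('elb')  # classic ELB

elb.register_instances_with_load_balancer(
    LoadBalancerName='my-elb',
    Instances=[{'InstanceId': 'i-0123456789abcdef0'}],
)
health = elb.describe_instance_health(LoadBalancerName='my-elb')
for state in health['InstanceStates']:
    print(state['InstanceId'], state['State'])
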
Example #6
def main():
    argument_spec = dict(cluster_name=dict(required=True),
                         resource=dict(required=False),
                         tags=dict(type='dict'),
                         purge_tags=dict(type='bool', default=False),
                         state=dict(default='present',
                                    choices=['present', 'absent']),
                         resource_type=dict(default='cluster',
                                            choices=[
                                                'cluster', 'task', 'service',
                                                'task_definition', 'container'
                                            ]))
    required_if = [('state', 'present', ['tags']),
                   ('state', 'absent', ['tags'])]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=required_if,
                              supports_check_mode=True)

    resource_type = module.params['resource_type']
    cluster_name = module.params['cluster_name']
    if resource_type == 'cluster':
        resource = cluster_name
    else:
        resource = module.params['resource']
    tags = module.params['tags']
    state = module.params['state']
    purge_tags = module.params['purge_tags']

    result = {'changed': False}

    ecs = module.client('ecs')

    resource_arn = get_arn(ecs, module, cluster_name, resource_type, resource)

    current_tags = get_tags(ecs, module, resource_arn)

    add_tags, remove = compare_aws_tags(current_tags,
                                        tags,
                                        purge_tags=purge_tags)

    remove_tags = {}
    if state == 'absent':
        for key in tags:
            if key in current_tags and (tags[key] is None
                                        or current_tags[key] == tags[key]):
                remove_tags[key] = current_tags[key]

    for key in remove:
        remove_tags[key] = current_tags[key]

    if remove_tags:
        result['changed'] = True
        result['removed_tags'] = remove_tags
        if not module.check_mode:
            try:
                ecs.untag_resource(resourceArn=resource_arn,
                                   tagKeys=list(remove_tags.keys()))
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(
                    e,
                    msg='Failed to remove tags {0} from resource {1}'.format(
                        remove_tags, resource))

    if state == 'present' and add_tags:
        result['changed'] = True
        result['added_tags'] = add_tags
        current_tags.update(add_tags)
        if not module.check_mode:
            try:
                tags = ansible_dict_to_boto3_tag_list(
                    add_tags,
                    tag_name_key_name='key',
                    tag_value_key_name='value')
                ecs.tag_resource(resourceArn=resource_arn, tags=tags)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(
                    e,
                    msg='Failed to set tags {0} on resource {1}'.format(
                        add_tags, resource))

    result['tags'] = get_tags(ecs, module, resource_arn)
    module.exit_json(**result)
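
The add/remove split produced by compare_aws_tags can be approximated with plain dict operations; a simplified sketch of that logic (not the library implementation):

def split_tag_changes(current_tags, desired_tags, purge_tags=False):
    # Tags to add or change: desired values that differ from the current ones.
    to_add = {k: v for k, v in desired_tags.items() if current_tags.get(k) != v}
    # Tags to remove: currently present but absent from the desired set,
    # only when purging is requested.
    to_remove = [k for k in current_tags if purge_tags and k not in desired_tags]
    return to_add, to_remove

add_tags, remove_keys = split_tag_changes(
    {'env': 'dev'}, {'env': 'prod', 'team': 'ops'}, purge_tags=True)
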
Example #7
def main():
    argument_spec = dict(
        command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
        identifier=dict(required=True),
        node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
                                'ds2.8xlarge', 'dc1.large', 'dc2.large',
                                'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
                                'dw2.large', 'dw2.8xlarge'], required=False),
        username=dict(required=False),
        password=dict(no_log=True, required=False),
        db_name=dict(required=False),
        cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
        cluster_security_groups=dict(aliases=['security_groups'], type='list', elements='str'),
        vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list', elements='str'),
        skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
                                         type='bool', default=False),
        final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
        cluster_subnet_group_name=dict(aliases=['subnet']),
        availability_zone=dict(aliases=['aws_zone', 'zone']),
        preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
        cluster_parameter_group_name=dict(aliases=['param_group_name']),
        automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
        port=dict(type='int'),
        cluster_version=dict(aliases=['version'], choices=['1.0']),
        allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
        number_of_nodes=dict(type='int'),
        publicly_accessible=dict(type='bool', default=False),
        encrypted=dict(type='bool', default=False),
        elastic_ip=dict(required=False),
        new_cluster_identifier=dict(aliases=['new_identifier']),
        enhanced_vpc_routing=dict(type='bool', default=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
        tags=dict(type='dict', aliases=['resource_tags']),
        purge_tags=dict(type='bool', default=True)
    )

    required_if = [
        ('command', 'delete', ['skip_final_cluster_snapshot']),
        ('command', 'create', ['node_type',
                               'username',
                               'password'])
    ]

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=required_if
    )

    command = module.params.get('command')
    skip_final_cluster_snapshot = module.params.get('skip_final_cluster_snapshot')
    final_cluster_snapshot_identifier = module.params.get('final_cluster_snapshot_identifier')
    # can't use module basic required_if check for this case
    if command == 'delete' and skip_final_cluster_snapshot is False and final_cluster_snapshot_identifier is None:
        module.fail_json(msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False")

    conn = module.client('redshift')

    changed = True
    if command == 'create':
        (changed, cluster) = create_cluster(module, conn)

    elif command == 'facts':
        (changed, cluster) = describe_cluster(module, conn)

    elif command == 'delete':
        (changed, cluster) = delete_cluster(module, conn)

    elif command == 'modify':
        (changed, cluster) = modify_cluster(module, conn)

    module.exit_json(changed=changed, cluster=cluster)
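
For reference, the create path ultimately issues a Redshift CreateCluster call; a minimal sketch with boto3 (identifier, credentials and node type are placeholders, and real code should not hard-code the password):

import boto3

redshift = boto3.client('redshift')
redshift.create_cluster(
    ClusterIdentifier='example-cluster',   # hypothetical identifier
    NodeType='dc2.large',
    MasterUsername='admin',
    MasterUserPassword='ChangeMe12345',    # placeholder only
    ClusterType='single-node',
)
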
Example #8
def main():
    argument_spec = dict(
        name=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        runtime=dict(),
        role=dict(),
        handler=dict(),
        zip_file=dict(aliases=['src']),
        s3_bucket=dict(),
        s3_key=dict(no_log=False),
        s3_object_version=dict(),
        description=dict(default=''),
        timeout=dict(type='int', default=3),
        memory_size=dict(type='int', default=128),
        vpc_subnet_ids=dict(type='list', elements='str'),
        vpc_security_group_ids=dict(type='list', elements='str'),
        environment_variables=dict(type='dict'),
        dead_letter_arn=dict(),
        tracing_mode=dict(choices=['Active', 'PassThrough']),
        tags=dict(type='dict'),
    )

    mutually_exclusive = [['zip_file', 's3_key'], ['zip_file', 's3_bucket'],
                          ['zip_file', 's3_object_version']]

    required_together = [['s3_key', 's3_bucket'],
                         ['vpc_subnet_ids', 'vpc_security_group_ids']]

    required_if = [['state', 'present', ['runtime', 'handler', 'role']]]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              mutually_exclusive=mutually_exclusive,
                              required_together=required_together,
                              required_if=required_if)

    name = module.params.get('name')
    state = module.params.get('state').lower()
    runtime = module.params.get('runtime')
    role = module.params.get('role')
    handler = module.params.get('handler')
    s3_bucket = module.params.get('s3_bucket')
    s3_key = module.params.get('s3_key')
    s3_object_version = module.params.get('s3_object_version')
    zip_file = module.params.get('zip_file')
    description = module.params.get('description')
    timeout = module.params.get('timeout')
    memory_size = module.params.get('memory_size')
    vpc_subnet_ids = module.params.get('vpc_subnet_ids')
    vpc_security_group_ids = module.params.get('vpc_security_group_ids')
    environment_variables = module.params.get('environment_variables')
    dead_letter_arn = module.params.get('dead_letter_arn')
    tracing_mode = module.params.get('tracing_mode')
    tags = module.params.get('tags')

    check_mode = module.check_mode
    changed = False

    try:
        client = module.client('lambda',
                               retry_decorator=AWSRetry.jittered_backoff())
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Trying to connect to AWS")

    if state == 'present':
        if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role):
            role_arn = role
        else:
            # get account ID and assemble ARN
            account_id, partition = get_account_info(module)
            role_arn = 'arn:{0}:iam::{1}:role/{2}'.format(
                partition, account_id, role)

    # Get function configuration if present, False otherwise
    current_function = get_current_function(client, name)

    # Update existing Lambda function
    if state == 'present' and current_function:

        # Get current state
        current_config = current_function['Configuration']
        current_version = None

        # Update function configuration
        func_kwargs = {'FunctionName': name}

        # Update configuration if needed
        if role_arn and current_config['Role'] != role_arn:
            func_kwargs.update({'Role': role_arn})
        if handler and current_config['Handler'] != handler:
            func_kwargs.update({'Handler': handler})
        if description and current_config['Description'] != description:
            func_kwargs.update({'Description': description})
        if timeout and current_config['Timeout'] != timeout:
            func_kwargs.update({'Timeout': timeout})
        if memory_size and current_config['MemorySize'] != memory_size:
            func_kwargs.update({'MemorySize': memory_size})
        if runtime and current_config['Runtime'] != runtime:
            func_kwargs.update({'Runtime': runtime})
        if (environment_variables
                is not None) and (current_config.get('Environment', {}).get(
                    'Variables', {}) != environment_variables):
            func_kwargs.update(
                {'Environment': {
                    'Variables': environment_variables
                }})
        if dead_letter_arn is not None:
            if current_config.get('DeadLetterConfig'):
                if current_config['DeadLetterConfig'][
                        'TargetArn'] != dead_letter_arn:
                    func_kwargs.update(
                        {'DeadLetterConfig': {
                            'TargetArn': dead_letter_arn
                        }})
            else:
                if dead_letter_arn != "":
                    func_kwargs.update(
                        {'DeadLetterConfig': {
                            'TargetArn': dead_letter_arn
                        }})
        if tracing_mode and (current_config.get('TracingConfig', {}).get(
                'Mode', 'PassThrough') != tracing_mode):
            func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})

        # If VPC configuration is desired
        if vpc_subnet_ids:

            if 'VpcConfig' in current_config:
                # Compare VPC config with current config
                current_vpc_subnet_ids = current_config['VpcConfig'][
                    'SubnetIds']
                current_vpc_security_group_ids = current_config['VpcConfig'][
                    'SecurityGroupIds']

                subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(
                    current_vpc_subnet_ids)
                vpc_security_group_ids_changed = sorted(
                    vpc_security_group_ids) != sorted(
                        current_vpc_security_group_ids)

            if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
                new_vpc_config = {
                    'SubnetIds': vpc_subnet_ids,
                    'SecurityGroupIds': vpc_security_group_ids
                }
                func_kwargs.update({'VpcConfig': new_vpc_config})
        else:
            # No VPC configuration is desired, assure VPC config is empty when present in current config
            if 'VpcConfig' in current_config and current_config[
                    'VpcConfig'].get('VpcId'):
                func_kwargs.update(
                    {'VpcConfig': {
                        'SubnetIds': [],
                        'SecurityGroupIds': []
                    }})

        # Upload new configuration if configuration has changed
        if len(func_kwargs) > 1:
            if not check_mode:
                wait_for_lambda(client, module, name)

            try:
                if not check_mode:
                    response = client.update_function_configuration(
                        aws_retry=True, **func_kwargs)
                    current_version = response['Version']
                changed = True
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(
                    e, msg="Trying to update lambda configuration")

        # Update code configuration
        code_kwargs = {'FunctionName': name, 'Publish': True}

        # Update S3 location
        if s3_bucket and s3_key:
            # If function is stored on S3 always update
            code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})

            # If S3 Object Version is given
            if s3_object_version:
                code_kwargs.update({'S3ObjectVersion': s3_object_version})

        # Compare local checksum, update remote code when different
        elif zip_file:
            local_checksum = sha256sum(zip_file)
            remote_checksum = current_config['CodeSha256']

            # Only upload new code when local code is different compared to the remote code
            if local_checksum != remote_checksum:
                try:
                    with open(zip_file, 'rb') as f:
                        encoded_zip = f.read()
                    code_kwargs.update({'ZipFile': encoded_zip})
                except IOError as e:
                    module.fail_json(msg=str(e),
                                     exception=traceback.format_exc())

        # Tag Function
        if tags is not None:
            if set_tag(client, module, tags, current_function):
                changed = True

        # Upload new code if needed (e.g. code checksum has changed)
        if len(code_kwargs) > 2:
            if not check_mode:
                wait_for_lambda(client, module, name)

            try:
                if not check_mode:
                    response = client.update_function_code(aws_retry=True,
                                                           **code_kwargs)
                    current_version = response['Version']
                changed = True
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Trying to upload new code")

        # Describe function code and configuration
        response = get_current_function(client,
                                        name,
                                        qualifier=current_version)
        if not response:
            module.fail_json(
                msg='Unable to get function information after updating')

        # We're done
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Function doesn't exists, create new Lambda function
    elif state == 'present':
        if s3_bucket and s3_key:
            # If function is stored on S3
            code = {'S3Bucket': s3_bucket, 'S3Key': s3_key}
            if s3_object_version:
                code.update({'S3ObjectVersion': s3_object_version})
        elif zip_file:
            # If function is stored in local zipfile
            try:
                with open(zip_file, 'rb') as f:
                    zip_content = f.read()

                code = {'ZipFile': zip_content}
            except IOError as e:
                module.fail_json(msg=str(e), exception=traceback.format_exc())

        else:
            module.fail_json(
                msg='Either S3 object or path to zipfile required')

        func_kwargs = {
            'FunctionName': name,
            'Publish': True,
            'Runtime': runtime,
            'Role': role_arn,
            'Code': code,
            'Timeout': timeout,
            'MemorySize': memory_size,
        }

        if description is not None:
            func_kwargs.update({'Description': description})

        if handler is not None:
            func_kwargs.update({'Handler': handler})

        if environment_variables:
            func_kwargs.update(
                {'Environment': {
                    'Variables': environment_variables
                }})

        if dead_letter_arn:
            func_kwargs.update(
                {'DeadLetterConfig': {
                    'TargetArn': dead_letter_arn
                }})

        if tracing_mode:
            func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}})

        # If VPC configuration is given
        if vpc_subnet_ids:
            func_kwargs.update({
                'VpcConfig': {
                    'SubnetIds': vpc_subnet_ids,
                    'SecurityGroupIds': vpc_security_group_ids
                }
            })

        # Finally try to create function
        current_version = None
        try:
            if not check_mode:
                response = client.create_function(aws_retry=True,
                                                  **func_kwargs)
                current_version = response['Version']
            changed = True
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Trying to create function")

        # Tag Function
        if tags is not None:
            if set_tag(client, module, tags,
                       get_current_function(client, name)):
                changed = True

        response = get_current_function(client,
                                        name,
                                        qualifier=current_version)
        if not response:
            module.fail_json(
                msg='Unable to get function information after creating')
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Delete existing Lambda function
    if state == 'absent' and current_function:
        try:
            if not check_mode:
                client.delete_function(FunctionName=name, aws_retry=True)
            changed = True
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Trying to delete Lambda function")

        module.exit_json(changed=changed)

    # Function already absent, do nothing
    elif state == 'absent':
        module.exit_json(changed=changed)
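
The zip-file comparison above assumes that sha256sum produces the same value as Lambda's CodeSha256, which is the base64-encoded SHA-256 digest of the deployment package; a minimal sketch of that computation (the module's own helper is not reproduced here, and the path is hypothetical):

import base64
import hashlib

def sha256_b64(path):
    # CodeSha256 is the base64-encoded SHA-256 digest of the zip file.
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.b64encode(digest).decode('utf-8')

local_checksum = sha256_b64('lambda.zip')
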
Example #9
def main():
    argument_spec = dict(
        distribution_id=dict(required=False, type='str'),
        invalidation_id=dict(required=False, type='str'),
        origin_access_identity_id=dict(required=False, type='str'),
        domain_name_alias=dict(required=False, type='str'),
        web_acl_id=dict(required=False, type='str'),
        all_lists=dict(required=False, default=False, type='bool'),
        distribution=dict(required=False, default=False, type='bool'),
        distribution_config=dict(required=False, default=False, type='bool'),
        origin_access_identity=dict(required=False, default=False,
                                    type='bool'),
        origin_access_identity_config=dict(required=False,
                                           default=False,
                                           type='bool'),
        invalidation=dict(required=False, default=False, type='bool'),
        streaming_distribution=dict(required=False, default=False,
                                    type='bool'),
        streaming_distribution_config=dict(required=False,
                                           default=False,
                                           type='bool'),
        list_origin_access_identities=dict(required=False,
                                           default=False,
                                           type='bool'),
        list_distributions=dict(required=False, default=False, type='bool'),
        list_distributions_by_web_acl_id=dict(required=False,
                                              default=False,
                                              type='bool'),
        list_invalidations=dict(required=False, default=False, type='bool'),
        list_streaming_distributions=dict(required=False,
                                          default=False,
                                          type='bool'),
        summary=dict(required=False, default=False, type='bool'),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=False)
    is_old_facts = module._name == 'cloudfront_facts'
    if is_old_facts:
        module.deprecate(
            "The 'cloudfront_facts' module has been renamed to 'cloudfront_info', "
            "and the renamed one no longer returns ansible_facts",
            date='2021-12-01',
            collection_name='community.aws')

    service_mgr = CloudFrontServiceManager(module)

    distribution_id = module.params.get('distribution_id')
    invalidation_id = module.params.get('invalidation_id')
    origin_access_identity_id = module.params.get('origin_access_identity_id')
    web_acl_id = module.params.get('web_acl_id')
    domain_name_alias = module.params.get('domain_name_alias')
    all_lists = module.params.get('all_lists')
    distribution = module.params.get('distribution')
    distribution_config = module.params.get('distribution_config')
    origin_access_identity = module.params.get('origin_access_identity')
    origin_access_identity_config = module.params.get(
        'origin_access_identity_config')
    invalidation = module.params.get('invalidation')
    streaming_distribution = module.params.get('streaming_distribution')
    streaming_distribution_config = module.params.get(
        'streaming_distribution_config')
    list_origin_access_identities = module.params.get(
        'list_origin_access_identities')
    list_distributions = module.params.get('list_distributions')
    list_distributions_by_web_acl_id = module.params.get(
        'list_distributions_by_web_acl_id')
    list_invalidations = module.params.get('list_invalidations')
    list_streaming_distributions = module.params.get(
        'list_streaming_distributions')
    summary = module.params.get('summary')

    aliases = []
    result = {'cloudfront': {}}
    facts = {}

    require_distribution_id = (distribution or distribution_config
                               or invalidation or streaming_distribution
                               or streaming_distribution_config
                               or list_invalidations)

    # set default to summary if no option specified
    summary = summary or not (
        distribution or distribution_config or origin_access_identity or
        origin_access_identity_config or invalidation or streaming_distribution
        or streaming_distribution_config or list_origin_access_identities
        or list_distributions_by_web_acl_id or list_invalidations
        or list_streaming_distributions or list_distributions)

    # validations
    if require_distribution_id and distribution_id is None and domain_name_alias is None:
        module.fail_json(
            msg=
            'Error distribution_id or domain_name_alias have not been specified.'
        )
    if (invalidation and invalidation_id is None):
        module.fail_json(msg='Error invalidation_id has not been specified.')
    if (origin_access_identity or origin_access_identity_config
        ) and origin_access_identity_id is None:
        module.fail_json(
            msg='Error origin_access_identity_id has not been specified.')
    if list_distributions_by_web_acl_id and web_acl_id is None:
        module.fail_json(msg='Error web_acl_id has not been specified.')

    # get distribution id from domain name alias
    if require_distribution_id and distribution_id is None:
        distribution_id = service_mgr.get_distribution_id_from_domain_name(
            domain_name_alias)
        if not distribution_id:
            module.fail_json(
                msg=
                'Error unable to source a distribution id from domain_name_alias'
            )

    # set appropriate cloudfront id
    if distribution_id and not list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
        if invalidation_id:
            facts.update({invalidation_id: {}})
    elif distribution_id and list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
    elif origin_access_identity_id:
        facts = {origin_access_identity_id: {}}
    elif web_acl_id:
        facts = {web_acl_id: {}}

    # get details based on options
    if distribution:
        facts_to_set = service_mgr.get_distribution(distribution_id)
    if distribution_config:
        facts_to_set = service_mgr.get_distribution_config(distribution_id)
    if origin_access_identity:
        facts[origin_access_identity_id].update(
            service_mgr.get_origin_access_identity(origin_access_identity_id))
    if origin_access_identity_config:
        facts[origin_access_identity_id].update(
            service_mgr.get_origin_access_identity_config(
                origin_access_identity_id))
    if invalidation:
        facts_to_set = service_mgr.get_invalidation(distribution_id,
                                                    invalidation_id)
        facts[invalidation_id].update(facts_to_set)
    if streaming_distribution:
        facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
    if streaming_distribution_config:
        facts_to_set = service_mgr.get_streaming_distribution_config(
            distribution_id)
    if list_invalidations:
        facts_to_set = {
            'invalidations': service_mgr.list_invalidations(distribution_id)
        }
    if 'facts_to_set' in vars():
        facts = set_facts_for_distribution_id_and_alias(
            facts_to_set, facts, distribution_id, aliases)

    # get list based on options
    if all_lists or list_origin_access_identities:
        facts[
            'origin_access_identities'] = service_mgr.list_origin_access_identities(
            )
    if all_lists or list_distributions:
        facts['distributions'] = service_mgr.list_distributions()
    if all_lists or list_streaming_distributions:
        facts[
            'streaming_distributions'] = service_mgr.list_streaming_distributions(
            )
    if list_distributions_by_web_acl_id:
        facts[
            'distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(
                web_acl_id)
    if list_invalidations:
        facts['invalidations'] = service_mgr.list_invalidations(
            distribution_id)

    # default summary option
    if summary:
        facts['summary'] = service_mgr.summary()

    result['changed'] = False
    result['cloudfront'].update(facts)
    if is_old_facts:
        module.exit_json(msg="Retrieved CloudFront facts.",
                         ansible_facts=result)
    else:
        module.exit_json(msg="Retrieved CloudFront info.", **result)
Example #10
def main():
    """
     Module action handler
    """
    argument_spec = dict(
        encrypt=dict(required=False, type="bool", default=False),
        state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
        kms_key_id=dict(required=False, type='str', default=None),
        purge_tags=dict(default=True, type='bool'),
        id=dict(required=False, type='str', default=None),
        name=dict(required=False, type='str', default=None),
        tags=dict(required=False, type="dict", default={}),
        targets=dict(required=False, type="list", default=[], elements='dict'),
        performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
        transition_to_ia=dict(required=False, type='str', choices=["None", "7", "14", "30", "60", "90"], default=None),
        throughput_mode=dict(required=False, type='str', choices=["bursting", "provisioned"], default=None),
        provisioned_throughput_in_mibps=dict(required=False, type='float'),
        wait=dict(required=False, type="bool", default=False),
        wait_timeout=dict(required=False, type="int", default=0)
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)

    connection = EFSConnection(module)

    name = module.params.get('name')
    fs_id = module.params.get('id')
    tags = module.params.get('tags')
    target_translations = {
        'ip_address': 'IpAddress',
        'security_groups': 'SecurityGroups',
        'subnet_id': 'SubnetId'
    }
    targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
    performance_mode_translations = {
        'general_purpose': 'generalPurpose',
        'max_io': 'maxIO'
    }
    encrypt = module.params.get('encrypt')
    kms_key_id = module.params.get('kms_key_id')
    performance_mode = performance_mode_translations[module.params.get('performance_mode')]
    purge_tags = module.params.get('purge_tags')
    transition_to_ia = module.params.get('transition_to_ia')
    throughput_mode = module.params.get('throughput_mode')
    provisioned_throughput_in_mibps = module.params.get('provisioned_throughput_in_mibps')
    state = str(module.params.get('state')).lower()
    changed = False

    if state == 'present':
        if not name:
            module.fail_json(msg='Name parameter is required for create')

        changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id, throughput_mode, provisioned_throughput_in_mibps)
        changed = connection.update_file_system(name, throughput_mode, provisioned_throughput_in_mibps) or changed
        changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets,
                                                  throughput_mode=throughput_mode, provisioned_throughput_in_mibps=provisioned_throughput_in_mibps) or changed
        if transition_to_ia:
            changed |= connection.update_lifecycle_policy(name, transition_to_ia)
        result = first_or_default(connection.get_file_systems(CreationToken=name))

    elif state == 'absent':
        if not name and not fs_id:
            module.fail_json(msg='Either name or id parameter is required for delete')

        changed = connection.delete_file_system(name, fs_id)
        result = None
    if result:
        result = camel_dict_to_snake_dict(result)
    module.exit_json(changed=changed, efs=result)
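
A quick standalone sketch of the targets key translation used above: the module's snake_case keys are mapped onto the CamelCase keys the EFS mount-target API expects (the sample data below is made up for illustration).

# Standalone sketch of the targets key translation (sample data is illustrative only).
target_translations = {
    'ip_address': 'IpAddress',
    'security_groups': 'SecurityGroups',
    'subnet_id': 'SubnetId',
}
raw_targets = [{'subnet_id': 'subnet-0123', 'security_groups': ['sg-0123']}]
targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in raw_targets]
# targets == [{'SubnetId': 'subnet-0123', 'SecurityGroups': ['sg-0123']}]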
def main():
    argument_spec = dict(
        name=dict(required=False, aliases=['registry_name', 'schema_name', 'database_name']),
        table_name=dict(required=False),
        resource_share_type=dict(required=False, choices=['FOREIGN', 'ALL'], default='ALL'),
        list_workflows=dict(required=False, type='bool'),
        list_triggers=dict(required=False, type='bool'),
        list_schemas=dict(required=False, type='bool'),
        list_registries=dict(required=False, type='bool'),
        list_ml_transforms=dict(required=False, type='bool'),
        list_jobs=dict(required=False, type='bool'),
        list_dev_endpoints=dict(required=False, type='bool'),
        list_crawlers=dict(required=False, type='bool'),
        get_databases=dict(required=False, type='bool'),
        get_tables=dict(required=False, type='bool'),
        get_partitions=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=(
            ('list_schemas', True, ['name']),
            ('get_tables', True, ['name']),
            ('get_partitions', True, ['name', 'table_name']),
        ),
        mutually_exclusive=[
            (
                'list_workflows',
                'list_triggers',
                'list_schemas',
                'list_schema_versions',
                'list_registries',
                'list_ml_transforms',
                'list_jobs',
                'list_dev_endpoints',
                'list_crawlers',
                'get_databases',
                'get_tables',
                'get_partitions',
            )
        ],
    )

    client = module.client('glue', retry_decorator=AWSRetry.exponential_backoff())
    it, paginate = _glue(client, module)

    if module.params['list_workflows']:
        module.exit_json(workflows=aws_response_list_parser(paginate, it, 'Workflows'))
    elif module.params['list_triggers']:
        module.exit_json(triggers=aws_response_list_parser(paginate, it, 'TriggerNames'))
    elif module.params['list_schemas']:
        module.exit_json(schemas=aws_response_list_parser(paginate, it, 'Schemas'))
    elif module.params['list_registries']:
        module.exit_json(registries=aws_response_list_parser(paginate, it, 'Registries'))
    elif module.params['list_ml_transforms']:
        module.exit_json(ml_transforms=aws_response_list_parser(paginate, it, 'TransformIds'))
    elif module.params['list_jobs']:
        module.exit_json(jobs=aws_response_list_parser(paginate, it, 'JobNames'))
    elif module.params['list_dev_endpoints']:
        module.exit_json(dev_endpoints=aws_response_list_parser(paginate, it, 'DevEndpointNames'))
    elif module.params['list_crawlers']:
        module.exit_json(crawlers=aws_response_list_parser(paginate, it, 'CrawlerNames'))
    elif module.params['get_databases']:
        module.exit_json(databases=aws_response_list_parser(paginate, it, 'DatabaseList'))
    elif module.params['get_tables']:
        module.exit_json(tables=aws_response_list_parser(paginate, it, 'TableList'))
    elif module.params['get_partitions']:
        module.exit_json(partitions=aws_response_list_parser(paginate, it, 'Partitions'))
    else:
        module.fail_json(msg="Specify one of the list_*/get_* options")
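
The _glue and aws_response_list_parser helpers are defined elsewhere and are not shown here; the same helpers appear in several of the info modules below. Judging only from how they are called, they wrap a boto3 paginator (or a plain call) and flatten the chosen response key into a snake_cased list. A hypothetical sketch under those assumptions:

# Hypothetical sketch of an aws_response_list_parser-style helper, inferred from its call
# sites above; the real helper in the collection may differ.
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

def aws_response_list_parser(paginate, it, resource_key):
    # `it` is assumed to yield one or more boto3 response dicts; `paginate` says whether
    # it is a paginator (multiple pages) or a single response dict.
    pages = it if paginate else [it]
    results = []
    for page in pages:
        results.extend(page.get(resource_key, []))
    return [camel_dict_to_snake_dict(item) for item in results]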
def main():
    argument_spec = dict(
        id=dict(required=False, aliases=['directory_id']),
        describe_directories=dict(required=False, type='bool'),
        list_certificates=dict(required=False, type='bool'),
        list_ip_routes=dict(required=False, type='bool'),
        list_log_subscriptions=dict(required=False, type='bool'),
        list_schema_extensions=dict(required=False, type='bool'),
        describe_conditional_forwarders=dict(required=False, type='bool'),
        describe_domain_controllers=dict(required=False, type='bool'),
        describe_event_topics=dict(required=False, type='bool'),
        describe_snapshots=dict(required=False, type='bool'),
        describe_trusts=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=(
            ('list_certificates', True, ['id']),
            ('list_ip_routes', True, ['id']),
            ('list_log_subscriptions', True, ['id']),
            ('list_schema_extensions', True, ['id']),
            ('describe_conditional_forwarders', True, ['id']),
            ('describe_domain_controllers', True, ['id']),
            ('describe_event_topics', True, ['id']),
            ('describe_snapshots', True, ['id']),
            ('describe_trusts', True, ['id']),
        ),
        mutually_exclusive=[
            (
                'describe_directories',
                'list_certificates',
                'list_ip_routes',
                'list_log_subscriptions',
                'list_schema_extensions',
                'describe_conditional_forwarders',
                'describe_domain_controllers',
                'describe_event_topics',
                'describe_snapshots',
                'describe_trusts',
            )
        ],
    )

    client = module.client('ds', retry_decorator=AWSRetry.exponential_backoff())
    it, paginate = _ds(client, module)

    if module.params['describe_directories']:
        module.exit_json(directories=aws_response_list_parser(paginate, it, 'DirectoryDescriptions'))
    elif module.params['list_certificates']:
        module.exit_json(certificates=aws_response_list_parser(paginate, it, 'CertificatesInfo'))
    elif module.params['list_ip_routes']:
        module.exit_json(ip_routes=aws_response_list_parser(paginate, it, 'IpRoutesInfo'))
    elif module.params['list_log_subscriptions']:
        module.exit_json(log_subscriptions=aws_response_list_parser(paginate, it, 'LogSubscriptions'))
    elif module.params['list_schema_extensions']:
        module.exit_json(schema_extensions=aws_response_list_parser(paginate, it, 'SchemaExtensionsInfo'))
    elif module.params['describe_conditional_forwarders']:
        module.exit_json(conditional_forwarders=aws_response_list_parser(paginate, it, 'ConditionalForwarders'))
    elif module.params['describe_domain_controllers']:
        module.exit_json(domain_controllers=aws_response_list_parser(paginate, it, 'DomainControllers'))
    elif module.params['describe_event_topics']:
        module.exit_json(event_topics=aws_response_list_parser(paginate, it, 'EventTopics'))
    elif module.params['describe_snapshots']:
        module.exit_json(snapshots=aws_response_list_parser(paginate, it, 'Snapshots'))
    elif module.params['describe_trusts']:
        module.exit_json(trusts=aws_response_list_parser(paginate, it, 'Trusts'))
    else:
        module.fail_json(msg="Specify one of the describe_*/list_* options")
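
The required_if tuples above follow the standard AnsibleModule convention: ('list_certificates', True, ['id']) means "when list_certificates is true, id must also be supplied". A tiny self-contained illustration of that rule (not Ansible's actual validator):

# Minimal illustration of required_if semantics; Ansible's own validator handles more cases.
def check_required_if(rules, params):
    for key, value, requirements in rules:
        if params.get(key) == value:
            missing = [r for r in requirements if params.get(r) is None]
            if missing:
                raise ValueError('%s is %r but the following are missing: %s'
                                 % (key, value, ', '.join(missing)))

check_required_if([('list_certificates', True, ['id'])],
                  {'list_certificates': True, 'id': 'd-1234567890'})  # passes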
Example #13
def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        arn=dict(required=False, type='str'),
        family=dict(required=False, type='str'),
        revision=dict(required=False, type='int'),
        force_create=dict(required=False, default=False, type='bool'),
        containers=dict(required=False, type='list', elements='dict'),
        network_mode=dict(required=False, default='bridge', choices=['default', 'bridge', 'host', 'none', 'awsvpc'], type='str'),
        task_role_arn=dict(required=False, default='', type='str'),
        execution_role_arn=dict(required=False, default='', type='str'),
        volumes=dict(required=False, type='list', elements='dict'),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        cpu=dict(),
        memory=dict(required=False, type='str')
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])]
                              )

    task_to_describe = None
    task_mgr = EcsTaskManager(module)
    results = dict(changed=False)

    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type')

    if module.params['execution_role_arn']:
        if not module.botocore_at_least('1.10.44'):
            module.fail_json(msg='botocore needs to be version 1.10.44 or higher to use execution_role_arn')

    if module.params['containers']:
        for container in module.params['containers']:
            for environment in container.get('environment', []):
                environment['value'] = to_text(environment['value'])

    if module.params['state'] == 'present':
        if 'containers' not in module.params or not module.params['containers']:
            module.fail_json(msg="To use task definitions, a list of containers must be specified")

        if 'family' not in module.params or not module.params['family']:
            module.fail_json(msg="To use task definitions, a family must be specified")

        network_mode = module.params['network_mode']
        launch_type = module.params['launch_type']
        if launch_type == 'FARGATE' and network_mode != 'awsvpc':
            module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc")

        family = module.params['family']
        existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])

        if 'revision' in module.params and module.params['revision']:
            # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
            revision = int(module.params['revision'])

            # A revision has been explicitly specified. Attempt to locate a matching revision
            tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
            existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None

            if existing and existing['status'] != "ACTIVE":
                # We cannot reactivate an inactive revision
                module.fail_json(msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision))
            elif not existing:
                if not existing_definitions_in_family and revision != 1:
                    module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
                elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
                    module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
                                         (revision, existing_definitions_in_family[-1]['revision'] + 1))
        else:
            existing = None

            def _right_has_values_of_left(left, right):
                # Make sure the values are equivalent for everything left has
                for k, v in left.items():
                    if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
                        # We don't care about list ordering because ECS can change things
                        if isinstance(v, list) and k in right:
                            left_list = v
                            right_list = right[k] or []

                            if len(left_list) != len(right_list):
                                return False

                            for list_val in left_list:
                                if list_val not in right_list:
                                    return False
                        else:
                            return False

                # Make sure right doesn't have anything that left doesn't
                for k, v in right.items():
                    if v and k not in left:
                        return False

                return True

            def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, existing_task_definition):
                if existing_task_definition['status'] != "ACTIVE":
                    return None

                if requested_task_role_arn != existing_task_definition.get('taskRoleArn', ""):
                    return None

                existing_volumes = existing_task_definition.get('volumes', []) or []

                if len(requested_volumes) != len(existing_volumes):
                    # Nope.
                    return None

                if len(requested_volumes) > 0:
                    for requested_vol in requested_volumes:
                        found = False

                        for actual_vol in existing_volumes:
                            if _right_has_values_of_left(requested_vol, actual_vol):
                                found = True
                                break

                        if not found:
                            return None

                existing_containers = existing_task_definition.get('containerDefinitions', []) or []

                if len(requested_containers) != len(existing_containers):
                    # Nope.
                    return None

                for requested_container in requested_containers:
                    found = False

                    for actual_container in existing_containers:
                        if _right_has_values_of_left(requested_container, actual_container):
                            found = True
                            break

                    if not found:
                        return None

                return existing_task_definition

            # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
            for td in existing_definitions_in_family:
                requested_volumes = module.params['volumes'] or []
                requested_containers = module.params['containers'] or []
                requested_task_role_arn = module.params['task_role_arn']
                existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, td)

                if existing:
                    break

        if existing and not module.params.get('force_create'):
            # Awesome. Have an existing one. Nothing to do.
            results['taskdefinition'] = existing
        else:
            if not module.check_mode:
                # Doesn't exist. create it.
                volumes = module.params.get('volumes', []) or []
                results['taskdefinition'] = task_mgr.register_task(module.params['family'],
                                                                   module.params['task_role_arn'],
                                                                   module.params['execution_role_arn'],
                                                                   module.params['network_mode'],
                                                                   module.params['containers'],
                                                                   volumes,
                                                                   module.params['launch_type'],
                                                                   module.params['cpu'],
                                                                   module.params['memory'])
            results['changed'] = True

    elif module.params['state'] == 'absent':
        # When de-registering a task definition, we can specify the ARN OR the family and revision.
        if module.params['arn'] is not None:
            task_to_describe = module.params['arn']
        elif module.params['family'] is not None and module.params['revision'] is not None:
            task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
        else:
            module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")

        existing = task_mgr.describe_task(task_to_describe)

        if existing:
            # It exists, so we should delete it and mark changed. Return info about the task definition deleted
            results['taskdefinition'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    task_mgr.deregister_task(task_to_describe)
                results['changed'] = True

    module.exit_json(**results)
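
The loop near the top that rewrites environment['value'] exists because the ECS RegisterTaskDefinition API expects every container environment value to be a string, while YAML happily supplies integers and booleans. A standalone sketch of that coercion (the sample container is made up):

# Standalone sketch of the environment value coercion done above (sample data is illustrative).
from ansible.module_utils._text import to_text

containers = [{'name': 'web', 'environment': [{'name': 'PORT', 'value': 8080}]}]
for container in containers:
    for environment in container.get('environment', []):
        environment['value'] = to_text(environment['value'])
# containers[0]['environment'][0]['value'] is now u'8080'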
Example #14
def main():
    """ elasticache ansible module """
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent', 'rebooted']),
        name=dict(required=True),
        engine=dict(default='memcached'),
        cache_engine_version=dict(default=""),
        node_type=dict(default='cache.t2.small'),
        num_nodes=dict(default=1, type='int'),
        # alias for compat with the original PR 1950
        cache_parameter_group=dict(default="", aliases=['parameter_group']),
        cache_port=dict(type='int'),
        cache_subnet_group=dict(default=""),
        cache_security_groups=dict(default=[], type='list', elements='str'),
        security_group_ids=dict(default=[], type='list', elements='str'),
        zone=dict(),
        wait=dict(default=True, type='bool'),
        hard_modify=dict(type='bool'),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec)

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    name = module.params['name']
    state = module.params['state']
    engine = module.params['engine']
    cache_engine_version = module.params['cache_engine_version']
    node_type = module.params['node_type']
    num_nodes = module.params['num_nodes']
    cache_port = module.params['cache_port']
    cache_subnet_group = module.params['cache_subnet_group']
    cache_security_groups = module.params['cache_security_groups']
    security_group_ids = module.params['security_group_ids']
    zone = module.params['zone']
    wait = module.params['wait']
    hard_modify = module.params['hard_modify']
    cache_parameter_group = module.params['cache_parameter_group']

    if cache_subnet_group and cache_security_groups:
        module.fail_json(
            msg=
            "Can't specify both cache_subnet_group and cache_security_groups")

    if state == 'present' and not num_nodes:
        module.fail_json(
            msg=
            "'num_nodes' is a required parameter. Please specify num_nodes > 0"
        )

    elasticache_manager = ElastiCacheManager(
        module, name, engine, cache_engine_version, node_type, num_nodes,
        cache_port, cache_parameter_group, cache_subnet_group,
        cache_security_groups, security_group_ids, zone, wait, hard_modify,
        region, **aws_connect_kwargs)

    if state == 'present':
        elasticache_manager.ensure_present()
    elif state == 'absent':
        elasticache_manager.ensure_absent()
    elif state == 'rebooted':
        elasticache_manager.ensure_rebooted()

    facts_result = dict(changed=elasticache_manager.changed,
                        elasticache=elasticache_manager.get_info())

    module.exit_json(**facts_result)
Example #15
def main():
    module = AnsibleAWSModule(
        argument_spec=dict(filters=dict(type='dict', default={})),
        supports_check_mode=True)

    module.exit_json(changed=False, addresses=get_eips_details(module))
def main():
    argument_spec = dict(
        name=dict(required=False, aliases=['parameter_group_name']),
        snapshot_type=dict(required=False,
                           choices=['automated', 'manual', 'shared', 'public'],
                           default='automated'),
        source_type=dict(required=False,
                         choices=[
                             'db-instance', 'db-parameter-group',
                             'db-security-group', 'db-snapshot'
                         ],
                         default='db-instance'),
        start_time=dict(required=False),
        end_time=dict(required=False),
        describe_db_cluster_parameter_groups=dict(required=False, type='bool'),
        describe_certificates=dict(required=False, type='bool'),
        describe_db_cluster_parameters=dict(required=False, type='bool'),
        describe_db_cluster_snapshots=dict(required=False, type='bool'),
        describe_db_instances=dict(required=False, type='bool'),
        describe_db_subnet_groups=dict(required=False, type='bool'),
        describe_event_categories=dict(required=False, type='bool'),
        describe_events=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=(
            ('describe_db_cluster_parameters', True, ['name']),
            ('describe_events', True, ['start_time', 'end_time']),
        ),
        mutually_exclusive=[(
            'describe_db_cluster_parameter_groups',
            'describe_certificates',
            'describe_db_cluster_parameters',
            'describe_db_cluster_snapshots',
            'describe_db_instances',
            'describe_db_subnet_groups',
            'describe_event_categories',
            'describe_events',
        )],
    )

    client = module.client('docdb',
                           retry_decorator=AWSRetry.exponential_backoff())
    it, paginate = _docdb(client, module)

    if module.params['describe_db_cluster_parameter_groups']:
        module.exit_json(db_cluster_parameter_groups=aws_response_list_parser(
            paginate, it, 'DBClusterParameterGroups'))
    elif module.params['describe_certificates']:
        module.exit_json(certificates=aws_response_list_parser(
            paginate, it, 'Certificates'))
    elif module.params['describe_db_cluster_parameters']:
        module.exit_json(
            parameters=aws_response_list_parser(paginate, it, 'Parameters'))
    elif module.params['describe_db_cluster_snapshots']:
        module.exit_json(snapshots=aws_response_list_parser(
            paginate, it, 'DBClusterSnapshots'))
    elif module.params['describe_db_instances']:
        module.exit_json(
            instances=aws_response_list_parser(paginate, it, 'DBInstances'))
    elif module.params['describe_db_subnet_groups']:
        module.exit_json(db_subnet_groups=aws_response_list_parser(
            paginate, it, 'DBSubnetGroups'))
    elif module.params['describe_event_categories']:
        module.exit_json(event_categories=aws_response_list_parser(
            paginate, it, 'EventCategoriesMapList'))
    elif module.params['describe_events']:
        module.exit_json(
            events=aws_response_list_parser(paginate, it, 'Events'))
    else:
        module.exit_json(
            clusters=aws_response_list_parser(paginate, it, 'DBClusters'))
Example #17
def main():
    argument_spec = dict(
        name=dict(required=False, aliases=['mesh_name']),
        list_virtual_routers=dict(required=False, type='bool'),
        virtual_router_name=dict(required=False),
        list_routes=dict(required=False, type='bool'),
        list_virtual_nodes=dict(required=False, type='bool'),
        list_virtual_gateways=dict(required=False, type='bool'),
        list_virtual_services=dict(required=False, type='bool'),
        virtual_gateway_name=dict(required=False),
        list_gateway_routes=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[
            ('list_virtual_routers', True, ['name']),
            ('list_virtual_nodes', True, ['name']),
            ('list_virtual_gateways', True, ['name']),
            ('list_virtual_services', True, ['name']),
            ('list_routes', True, ['name', 'virtual_router_name']),
            ('list_gateway_routes', True, ['name', 'virtual_gateway_name']),
        ],
        mutually_exclusive=[
            (
                'list_virtual_routers',
                'list_routes',
                'list_virtual_nodes',
                'list_virtual_gateways',
                'list_virtual_services',
            ),
        ],
    )

    client = module.client('appmesh', retry_decorator=AWSRetry.exponential_backoff())
    _it, paginate = _appmesh(client, module)

    if module.params['list_virtual_routers']:
        module.exit_json(virtual_routers=aws_response_list_parser(paginate, _it, 'virtualRouters'))
    elif module.params['list_routes']:
        module.exit_json(routes=aws_response_list_parser(paginate, _it, 'routes'))
    elif module.params['list_virtual_nodes']:
        module.exit_json(virtual_nodes=aws_response_list_parser(paginate, _it, 'virtualNodes'))
    elif module.params['list_virtual_gateways']:
        module.exit_json(virtual_gateways=aws_response_list_parser(paginate, _it, 'virtualGateways'))
    elif module.params['list_virtual_services']:
        module.exit_json(virtual_services=aws_response_list_parser(paginate, _it, 'virtualServices'))
    elif module.params['list_gateway_routes']:
        module.exit_json(gateway_routes=aws_response_list_parser(paginate, _it, 'gatewayRoutes'))
    else:
        module.exit_json(meshes=aws_response_list_parser(paginate, _it, 'meshes'))
Example #18
def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(required=True),
        description=dict(required=False),
        subnets=dict(required=False, type='list', elements='str'),
        tags=dict(required=False, type='dict'),
        purge_tags=dict(type='bool', default=True),
    )
    required_if = [('state', 'present', ['description', 'subnets'])]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=required_if,
                              supports_check_mode=True)

    state = module.params.get('state')
    group_name = module.params.get('name').lower()
    group_description = module.params.get('description')
    group_subnets = module.params.get('subnets') or []

    try:
        connection = module.client('rds',
                                   retry_decorator=AWSRetry.jittered_backoff())
    except (botocore.exceptions.BotoCoreError,
            botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, 'Failed to instantiate AWS connection.')

    # Default.
    changed = None
    result = create_result(False)
    tags_update = False
    subnet_update = False

    if module.params.get("tags") is not None:
        _tags = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
    else:
        _tags = list()

    matching_groups = get_subnet_group(connection, module)

    if state == 'present':
        if matching_groups:
            # We have one or more subnets at this point.

            # Check if there is any tags update
            tags_update = ensure_tags(connection, module,
                                      matching_groups['db_subnet_group_arn'],
                                      matching_groups['tags'],
                                      module.params.get("tags"),
                                      module.params['purge_tags'])

            # Sort the subnet groups before we compare them
            existing_subnets = create_subnet_list(matching_groups['subnets'])
            existing_subnets.sort()
            group_subnets.sort()

            # See if anything changed.
            if (matching_groups['db_subnet_group_name'] != group_name
                    or matching_groups['db_subnet_group_description'] !=
                    group_description or existing_subnets != group_subnets):
                if not module.check_mode:
                    # Modify existing group.
                    try:
                        connection.modify_db_subnet_group(
                            aws_retry=True,
                            DBSubnetGroupName=group_name,
                            DBSubnetGroupDescription=group_description,
                            SubnetIds=group_subnets)
                    except (botocore.exceptions.BotoCoreError,
                            botocore.exceptions.ClientError) as e:
                        module.fail_json_aws(
                            e, 'Failed to update a subnet group.')
                subnet_update = True
        else:
            if not module.check_mode:
                try:
                    connection.create_db_subnet_group(
                        aws_retry=True,
                        DBSubnetGroupName=group_name,
                        DBSubnetGroupDescription=group_description,
                        SubnetIds=group_subnets,
                        Tags=_tags)
                except (botocore.exceptions.BotoCoreError,
                        botocore.exceptions.ClientError) as e:
                    module.fail_json_aws(
                        e, 'Failed to create a new subnet group.')
            subnet_update = True
    elif state == 'absent':
        if not module.check_mode:
            try:
                connection.delete_db_subnet_group(aws_retry=True,
                                                  DBSubnetGroupName=group_name)
            except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
                module.exit_json(**result)
            except (botocore.exceptions.BotoCoreError,
                    botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
                module.fail_json_aws(e, 'Failed to delete a subnet group.')
        else:
            subnet_group = get_subnet_group(connection, module)
            if subnet_group:
                subnet_update = True
            result = create_result(subnet_update, subnet_group)
            module.exit_json(**result)

        subnet_update = True

    subnet_group = get_subnet_group(connection, module)
    changed = tags_update or subnet_update
    result = create_result(changed, subnet_group)
    module.exit_json(**result)
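
The tags dict is converted with ansible_dict_to_boto3_tag_list because the RDS API expects a list of Key/Value pairs rather than a plain dict. The conversion produces roughly the shape sketched below (this is an equivalent illustration, not the library helper itself, which also supports custom key names):

# Equivalent sketch of what ansible_dict_to_boto3_tag_list produces (illustration only).
def dict_to_boto3_tag_list(tags_dict):
    return [{'Key': k, 'Value': v} for k, v in tags_dict.items()]

print(dict_to_boto3_tag_list({'Environment': 'prod', 'Team': 'data'}))
# [{'Key': 'Environment', 'Value': 'prod'}, {'Key': 'Team', 'Value': 'data'}]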
Example #19
def main():
    argument_spec = dict(
        query=dict(choices=[
            'change',
            'checker_ip_range',
            'health_check',
            'hosted_zone',
            'record_sets',
            'reusable_delegation_set',
        ],
                   required=True),
        change_id=dict(),
        hosted_zone_id=dict(),
        max_items=dict(),
        next_marker=dict(),
        delegation_set_id=dict(),
        start_record_name=dict(),
        type=dict(choices=[
            'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS'
        ]),
        dns_name=dict(),
        resource_id=dict(type='list', aliases=['resource_ids'],
                         elements='str'),
        health_check_id=dict(),
        hosted_zone_method=dict(
            choices=['details', 'list', 'list_by_name', 'count', 'tags'],
            default='list'),
        health_check_method=dict(choices=[
            'list',
            'details',
            'status',
            'failure_reason',
            'count',
            'tags',
        ],
                                 default='list'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['hosted_zone_method', 'health_check_method'],
        ],
        check_boto3=False,
    )
    if module._name == 'route53_facts':
        module.deprecate(
            "The 'route53_facts' module has been renamed to 'route53_info'",
            date='2021-12-01',
            collection_name='community.aws')

    try:
        route53 = module.client('route53')
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    invocations = {
        'change': change_details,
        'checker_ip_range': checker_ip_range_details,
        'health_check': health_check_details,
        'hosted_zone': hosted_zone_details,
        'record_sets': record_sets_details,
        'reusable_delegation_set': reusable_delegation_set_details,
    }

    results = dict(changed=False)
    try:
        results = invocations[module.params.get('query')](route53, module)
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json(msg=to_native(e))

    module.exit_json(**results)
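
The invocations dict above is a simple dispatch table: the validated query value selects which detail function to call, which keeps the module free of a long if/elif chain. The same pattern in isolation, with stand-in functions:

# The dispatch-table pattern used above, shown with stand-in functions.
def change_details(client, module):
    return dict(changed=False, change={})

def hosted_zone_details(client, module):
    return dict(changed=False, hosted_zones=[])

invocations = {
    'change': change_details,
    'hosted_zone': hosted_zone_details,
}
results = invocations['hosted_zone'](client=None, module=None)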
Example #20
def main():
    argument_spec = dict(
        subnet_id=dict(type='str'),
        eip_address=dict(type='str'),
        allocation_id=dict(type='str'),
        if_exist_do_not_create=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent']),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=320, required=False),
        release_eip=dict(type='bool', default=False),
        nat_gateway_id=dict(type='str'),
        client_token=dict(type='str'),
        tags=dict(required=False, type='dict', aliases=['resource_tags']),
        purge_tags=dict(default=True, type='bool'),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['allocation_id', 'eip_address']],
        required_if=[['state', 'absent', ['nat_gateway_id']],
                     ['state', 'present', ['subnet_id']]],
    )

    state = module.params.get('state').lower()
    check_mode = module.check_mode
    subnet_id = module.params.get('subnet_id')
    allocation_id = module.params.get('allocation_id')
    eip_address = module.params.get('eip_address')
    nat_gateway_id = module.params.get('nat_gateway_id')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    release_eip = module.params.get('release_eip')
    client_token = module.params.get('client_token')
    if_exist_do_not_create = module.params.get('if_exist_do_not_create')
    tags = module.params.get('tags')
    purge_tags = module.params.get('purge_tags')

    try:
        client = module.client('ec2',
                               retry_decorator=AWSRetry.jittered_backoff())
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    changed = False
    err_msg = ''

    if state == 'present':
        success, changed, err_msg, results = (pre_create(
            client,
            module,
            subnet_id,
            tags,
            purge_tags,
            allocation_id,
            eip_address,
            if_exist_do_not_create,
            wait,
            wait_timeout,
            client_token,
            check_mode=check_mode))
    else:
        success, changed, err_msg, results = (remove(client,
                                                     nat_gateway_id,
                                                     wait,
                                                     wait_timeout,
                                                     release_eip,
                                                     check_mode=check_mode))

    if not success:
        module.fail_json(msg=err_msg, success=success, changed=changed)
    else:
        module.exit_json(msg=err_msg,
                         success=success,
                         changed=changed,
                         **results)
Example #21
def main():
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list', elements='str',
                          choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list', elements='str'),
        accounts=dict(type='list', elements='str'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(
            aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(
            aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)
    if not (module.boto3_at_least('1.6.0')
            and module.botocore_at_least('1.10.26')):
        module.fail_json(
            msg=
            "Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26"
        )

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    jittered_backoff_decorator = AWSRetry.jittered_backoff(
        retries=10,
        delay=3,
        max_delay=30,
        catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation',
                        retry_decorator=jittered_backoff_decorator)
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg=
            "Can't create a stack set without choosing at least one account. "
            "To get the ID of the current account, use the aws_caller_info module."
        )

    module.params['accounts'] = [
        to_native(a) for a in (module.params['accounts'] or [])
    ]

    stack_params['StackSetName'] = module.params['name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg=
                "The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                "`template_body`, or `template_url`".format(
                    module.params['name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if module.params.get('tags') and isinstance(module.params.get('tags'),
                                                dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(
            module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params[
            'administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params[
            'execution_role_name']

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True,
                             msg='Stack set would be deleted',
                             meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False,
                             msg='Stack set doesn\'t exist',
                             meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True,
                             msg='New stack set would be created',
                             meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True,
                                 msg='New stack instance(s) would be created',
                                 meta=[])
            elif unspecified_stacks and module.params.get('purge_stacks'):
                module.exit_json(changed=True,
                                 msg='Old stack instance(s) would be deleted',
                                 meta=[])
            else:
                # TODO: need to check the template and other settings for correct check mode
                module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params[
                'ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(
                    operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(
                operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params[
                    'OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append(
                'Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct
                                  for acct, region in new_stack_instances)),
                Regions=list(
                    set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append(
                'Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(
                    set(acct for acct, region in existing_stack_instances)),
                Regions=list(
                    set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        for op in operation_ids:
            await_stack_set_operation(
                module,
                cfn,
                operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(
                module.params['name']))
        try:
            cfn.delete_stack_set(StackSetName=module.params['name'], )
            module.exit_json(
                msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e,
                msg=
                'Cannot delete stack {0} while there is an operation in progress'
                .format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(
                operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op)
            await_stack_set_operation(
                module,
                cfn,
                operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(StackSetName=module.params['name'], )
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(
                    StackSetName=module.params['name'], )
                stack_states = ', '.join(
                    '(account={Account}, region={Region}, state={Status})'.
                    format(**i) for i in instances['Summaries'])
                module.fail_json_aws(
                    exc,
                    msg=
                    'Could not purge all stacks, or not all accounts/regions were chosen for deletion: '
                    + stack_states)
            module.exit_json(changed=True,
                             msg='Stack set {0} deleted'.format(
                                 module.params['name']))

    result.update(**describe_stack_tree(
        module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute",
                         **result)
    module.exit_json(changed=changed, **result)
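
The Parameters loop in this example accepts either a plain key/value pair (rendered as ParameterKey/ParameterValue) or a dict that can request use_previous_value. A standalone sketch of that transformation with made-up input:

# Standalone sketch of the stack-set Parameters transformation above (input values are made up).
from ansible.module_utils._text import to_native

parameters = {'InstanceType': 't3.micro',
              'KeyName': {'use_previous_value': True}}
cfn_params = []
for k, v in parameters.items():
    if isinstance(v, dict):
        param = dict(ParameterKey=k)
        if 'value' in v:
            param['ParameterValue'] = to_native(v['value'])
        if v.get('use_previous_value'):
            param['UsePreviousValue'] = True
            param.pop('ParameterValue', None)
        cfn_params.append(param)
    else:
        cfn_params.append({'ParameterKey': k, 'ParameterValue': str(v)})
# cfn_params == [{'ParameterKey': 'InstanceType', 'ParameterValue': 't3.micro'},
#                {'ParameterKey': 'KeyName', 'UsePreviousValue': True}]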
Example #22
def main():
    argument_spec = dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=True, type='str'),  # R S P
        task_definition=dict(required=False, type='str'),  # R* S*
        overrides=dict(required=False, type='dict'),  # R S
        count=dict(required=False, type='int'),  # R
        task=dict(required=False, type='str'),  # P*
        container_instances=dict(required=False, type='list',
                                 elements='str'),  # S*
        started_by=dict(required=False, type='str'),  # R S
        network_configuration=dict(required=False, type='dict'),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        tags=dict(required=False, type='dict'))

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ('launch_type', 'FARGATE', ['network_configuration']),
            ('operation', 'run', ['task_definition']),
            ('operation', 'start', ['task_definition', 'container_instances']),
            ('operation', 'stop', ['task_definition', 'task']),
        ])

    # Validate Inputs
    if module.params['operation'] == 'run':
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if module.params['operation'] == 'start':
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if module.params['operation'] == 'stop':
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)

    if module.params['tags']:
        if not service_mgr.ecs_task_long_format_enabled():
            module.fail_json(
                msg=
                "Cannot set task tags: long format task arns are required to set tags"
            )

    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list,
                                      status_type)

    results = dict(changed=False)
    if module.params['operation'] == 'run':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'],
                    module.params['launch_type'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'start':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'stop':
        if existing:
            results['task'] = existing
        else:
            if not module.check_mode:
                # The task is still running, so stop it and mark changed.
                # Return info about the stopped task.
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'], module.params['task'])
            results['changed'] = True

    module.exit_json(**results)
def main():
    template_options = dict(
        block_device_mappings=dict(
            type='list',
            elements='dict',
            options=dict(
                device_name=dict(),
                ebs=dict(
                    type='dict',
                    options=dict(
                        delete_on_termination=dict(type='bool'),
                        encrypted=dict(type='bool'),
                        iops=dict(type='int'),
                        kms_key_id=dict(),
                        snapshot_id=dict(),
                        volume_size=dict(type='int'),
                        volume_type=dict(),
                    ),
                ),
                no_device=dict(),
                virtual_name=dict(),
            ),
        ),
        cpu_options=dict(
            type='dict',
            options=dict(
                core_count=dict(type='int'),
                threads_per_core=dict(type='int'),
            ),
        ),
        credit_specification=dict(
            type='dict',
            options=dict(
                cpu_credits=dict(),
            ),
        ),
        disable_api_termination=dict(type='bool'),
        ebs_optimized=dict(type='bool'),
        elastic_gpu_specifications=dict(
            options=dict(type=dict()),
            type='list',
            elements='dict',
        ),
        iam_instance_profile=dict(),
        image_id=dict(),
        instance_initiated_shutdown_behavior=dict(choices=['stop', 'terminate']),
        instance_market_options=dict(
            type='dict',
            options=dict(
                market_type=dict(),
                spot_options=dict(
                    type='dict',
                    options=dict(
                        block_duration_minutes=dict(type='int'),
                        instance_interruption_behavior=dict(choices=['hibernate', 'stop', 'terminate']),
                        max_price=dict(),
                        spot_instance_type=dict(choices=['one-time', 'persistent']),
                    ),
                ),
            ),
        ),
        instance_type=dict(),
        kernel_id=dict(),
        key_name=dict(),
        monitoring=dict(
            type='dict',
            options=dict(
                enabled=dict(type='bool')
            ),
        ),
        metadata_options=dict(
            type='dict',
            options=dict(
                http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'),
                http_put_response_hop_limit=dict(type='int', default=1),
                http_tokens=dict(choices=['optional', 'required'], default='optional')
            )
        ),
        network_interfaces=dict(
            type='list',
            elements='dict',
            options=dict(
                associate_public_ip_address=dict(type='bool'),
                delete_on_termination=dict(type='bool'),
                description=dict(),
                device_index=dict(type='int'),
                groups=dict(type='list', elements='str'),
                ipv6_address_count=dict(type='int'),
                ipv6_addresses=dict(type='list', elements='str'),
                network_interface_id=dict(),
                private_ip_address=dict(),
                subnet_id=dict(),
            ),
        ),
        placement=dict(
            options=dict(
                affinity=dict(),
                availability_zone=dict(),
                group_name=dict(),
                host_id=dict(),
                tenancy=dict(),
            ),
            type='dict',
        ),
        ram_disk_id=dict(),
        security_group_ids=dict(type='list', elements='str'),
        security_groups=dict(type='list', elements='str'),
        tags=dict(type='dict'),
        user_data=dict(),
    )

    arg_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        template_name=dict(aliases=['name']),
        template_id=dict(aliases=['id']),
        default_version=dict(default='latest'),
    )

    arg_spec.update(template_options)

    module = AnsibleAWSModule(
        argument_spec=arg_spec,
        required_one_of=[
            ('template_name', 'template_id')
        ],
        supports_check_mode=True
    )

    if not module.boto3_at_least('1.6.0'):
        module.fail_json(msg="ec2_launch_template requires boto3 >= 1.6.0")

    for interface in (module.params.get('network_interfaces') or []):
        if interface.get('ipv6_addresses'):
            interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']]

    if module.params.get('state') == 'present':
        out = create_or_update(module, template_options)
        out.update(format_module_output(module))
    elif module.params.get('state') == 'absent':
        out = delete_template(module)
    else:
        module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state')))

    module.exit_json(**out)
Example #24
def main():
    argument_spec = dict(
        api_id=dict(type='str', required=False),
        state=dict(type='str',
                   default='present',
                   choices=['present', 'absent']),
        swagger_file=dict(type='path',
                          default=None,
                          aliases=['src', 'api_file']),
        swagger_dict=dict(type='json', default=None),
        swagger_text=dict(type='str', default=None),
        stage=dict(type='str', default=None),
        deploy_desc=dict(type='str',
                         default="Automatic deployment by Ansible."),
        cache_enabled=dict(type='bool', default=False),
        cache_size=dict(type='str',
                        default='0.5',
                        choices=[
                            '0.5', '1.6', '6.1', '13.5', '28.4', '58.2', '118',
                            '237'
                        ]),
        stage_variables=dict(type='dict', default={}),
        stage_canary_settings=dict(type='dict', default={}),
        tracing_enabled=dict(type='bool', default=False),
        endpoint_type=dict(type='str',
                           default='EDGE',
                           choices=['EDGE', 'REGIONAL', 'PRIVATE']))

    mutually_exclusive = [['swagger_file', 'swagger_dict', 'swagger_text']]

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        mutually_exclusive=mutually_exclusive,
    )

    api_id = module.params.get('api_id')
    state = module.params.get('state')
    swagger_file = module.params.get('swagger_file')
    swagger_dict = module.params.get('swagger_dict')
    swagger_text = module.params.get('swagger_text')
    endpoint_type = module.params.get('endpoint_type')

    client = module.client('apigateway')

    changed = True  # always report a change for now; the module cannot yet detect when nothing needs updating
    conf_res = None
    dep_res = None
    del_res = None

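    # For state=present, create the API if no api_id was supplied, then configure and deploy it
    # from the given Swagger/OpenAPI definition; for state=absent, delete the REST API.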
    if state == "present":
        if api_id is None:
            api_id = create_empty_api(module, client, endpoint_type)
        api_data = get_api_definitions(module,
                                       swagger_file=swagger_file,
                                       swagger_dict=swagger_dict,
                                       swagger_text=swagger_text)
        conf_res, dep_res = ensure_api_in_correct_state(
            module, client, api_id, api_data)
    if state == "absent":
        del_res = delete_rest_api(module, client, api_id)

    exit_args = {"changed": changed, "api_id": api_id}

    if conf_res is not None:
        exit_args['configure_response'] = camel_dict_to_snake_dict(conf_res)
    if dep_res is not None:
        exit_args['deploy_response'] = camel_dict_to_snake_dict(dep_res)
    if del_res is not None:
        exit_args['delete_response'] = camel_dict_to_snake_dict(del_res)

    module.exit_json(**exit_args)
Example #25
def main():
    argument_spec = dict(
        state=dict(type='str',
                   required=True,
                   choices=['absent', 'create', 'delete', 'get', 'present'],
                   aliases=['command']),
        zone=dict(type='str'),
        hosted_zone_id=dict(type='str'),
        record=dict(type='str', required=True),
        ttl=dict(type='int', default=3600),
        type=dict(type='str',
                  required=True,
                  choices=[
                      'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA',
                      'SPF', 'SRV', 'TXT'
                  ]),
        alias=dict(type='bool'),
        alias_hosted_zone_id=dict(type='str'),
        alias_evaluate_target_health=dict(type='bool', default=False),
        value=dict(type='list', elements='str'),
        overwrite=dict(type='bool'),
        retry_interval=dict(type='int', default=500),
        private_zone=dict(type='bool', default=False),
        identifier=dict(type='str'),
        weight=dict(type='int'),
        region=dict(type='str'),
        health_check=dict(type='str'),
        failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']),
        vpc_id=dict(type='str'),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['zone', 'hosted_zone_id']],
        # If alias is True then you must specify alias_hosted_zone as well
        required_together=[['alias', 'alias_hosted_zone_id']],
        # state=present, absent, create, delete THEN value is required
        required_if=(
            ('state', 'present', ['value']),
            ('state', 'create', ['value']),
        ),
        # failover, region and weight are mutually exclusive
        mutually_exclusive=[
            ('failover', 'region', 'weight'),
            ('alias', 'ttl'),
        ],
        # failover, region and weight require identifier
        required_by=dict(
            failover=('identifier', ),
            region=('identifier', ),
            weight=('identifier', ),
        ),
    )

    if module.params['state'] in ('present', 'create'):
        command_in = 'create'
    elif module.params['state'] in ('absent', 'delete'):
        command_in = 'delete'
    elif module.params['state'] == 'get':
        command_in = 'get'

    zone_in = (module.params.get('zone') or '').lower()
    hosted_zone_id_in = module.params.get('hosted_zone_id')
    ttl_in = module.params.get('ttl')
    record_in = module.params.get('record').lower()
    type_in = module.params.get('type')
    value_in = module.params.get('value') or []
    alias_in = module.params.get('alias')
    alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
    alias_evaluate_target_health_in = module.params.get(
        'alias_evaluate_target_health')
    retry_interval_in = module.params.get('retry_interval')

    if module.params['vpc_id'] is not None:
        private_zone_in = True
    else:
        private_zone_in = module.params.get('private_zone')

    identifier_in = module.params.get('identifier')
    weight_in = module.params.get('weight')
    region_in = module.params.get('region')
    health_check_in = module.params.get('health_check')
    failover_in = module.params.get('failover')
    vpc_id_in = module.params.get('vpc_id')
    wait_in = module.params.get('wait')
    wait_timeout_in = module.params.get('wait_timeout')

    if zone_in[-1:] != '.':
        zone_in += "."

    if record_in[-1:] != '.':
        record_in += "."

    if command_in == 'create' or command_in == 'delete':
        if alias_in and len(value_in) != 1:
            module.fail_json(msg="parameter 'value' must contain a single DNS name for alias records")
        if (weight_in is None and region_in is None and failover_in is None) and identifier_in is not None:
            module.fail_json(msg="You have specified an identifier, which only makes sense if you also specify one of: weight, region or failover.")

    retry_decorator = AWSRetry.jittered_backoff(
        retries=MAX_AWS_RETRIES,
        delay=retry_interval_in,
        catch_extra_error_codes=['PriorRequestNotComplete'],
        max_delay=max(60, retry_interval_in),
    )

    # connect to the route53 endpoint
    try:
        route53 = module.client('route53', retry_decorator=retry_decorator)
    except botocore.exceptions.HTTPClientError as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    # Find the named zone ID
    zone_id = hosted_zone_id_in or get_zone_id_by_name(
        route53, module, zone_in, private_zone_in, vpc_id_in)

    # Verify that the requested zone is already defined in Route53
    if zone_id is None:
        errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in)
        module.fail_json(msg=errmsg)

    aws_record = get_record(route53, zone_id, record_in, type_in,
                            identifier_in)

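    # Build the desired record set; scrub_none_parameters drops any keys whose value is None,
    # so unused routing options are not sent to the Route53 API.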
    resource_record_set = scrub_none_parameters({
        'Name': record_in,
        'Type': type_in,
        'Weight': weight_in,
        'Region': region_in,
        'Failover': failover_in,
        'TTL': ttl_in,
        'ResourceRecords': [dict(Value=value) for value in value_in],
        'HealthCheckId': health_check_in,
        'SetIdentifier': identifier_in,
    })
    if command_in == 'delete' and aws_record is not None:
        resource_record_set['TTL'] = aws_record.get('TTL')
        if not resource_record_set['ResourceRecords']:
            resource_record_set['ResourceRecords'] = aws_record.get(
                'ResourceRecords')

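    # Alias records use an AliasTarget and must not include ResourceRecords or a TTL.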
    if alias_in:
        resource_record_set['AliasTarget'] = dict(
            HostedZoneId=alias_hosted_zone_id_in,
            DNSName=value_in[0],
            EvaluateTargetHealth=alias_evaluate_target_health_in)
        if 'ResourceRecords' in resource_record_set:
            del resource_record_set['ResourceRecords']
        if 'TTL' in resource_record_set:
            del resource_record_set['TTL']

    # On CAA records order doesn't matter
    if type_in == 'CAA':
        resource_record_set['ResourceRecords'] = sorted(
            resource_record_set['ResourceRecords'], key=itemgetter('Value'))
        if aws_record:
            aws_record['ResourceRecords'] = sorted(
                aws_record['ResourceRecords'], key=itemgetter('Value'))

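    # Nothing to do if the requested record already matches what exists in Route53.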
    if command_in == 'create' and aws_record == resource_record_set:
        rr_sets = [camel_dict_to_snake_dict(resource_record_set)]
        module.exit_json(changed=False, resource_record_sets=rr_sets)

    if command_in == 'get':
        if type_in == 'NS':
            ns = aws_record.get('values', [])
        else:
            # Retrieve name servers associated to the zone.
            ns = get_hosted_zone_nameservers(route53, zone_id)

        formatted_aws = format_record(aws_record, zone_in, zone_id)

        if formatted_aws is None:
            # record does not exist
            module.exit_json(changed=False,
                             set=[],
                             nameservers=ns,
                             resource_record_sets=[])

        rr_sets = [camel_dict_to_snake_dict(aws_record)]
        module.exit_json(changed=False,
                         set=formatted_aws,
                         nameservers=ns,
                         resource_record_sets=rr_sets)

    if command_in == 'delete' and not aws_record:
        module.exit_json(changed=False)

    if command_in == 'create' or command_in == 'delete':
        if command_in == 'create' and aws_record:
            if not module.params['overwrite']:
                module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
            command = 'UPSERT'
        else:
            command = command_in.upper()

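    # Apply the change as a single ChangeBatch and optionally wait for it to propagate.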
    if not module.check_mode:
        try:
            change_resource_record_sets = route53.change_resource_record_sets(
                aws_retry=True,
                HostedZoneId=zone_id,
                ChangeBatch=dict(Changes=[
                    dict(Action=command, ResourceRecordSet=resource_record_set)
                ]))

            if wait_in:
                waiter = get_waiter(route53, 'resource_record_sets_changed')
                waiter.wait(Id=change_resource_record_sets['ChangeInfo']['Id'],
                            WaiterConfig=dict(
                                Delay=WAIT_RETRY,
                                MaxAttempts=wait_timeout_in // WAIT_RETRY,
                            ))
        except is_boto3_error_message('but it already exists'):
            module.exit_json(changed=False)
        except botocore.exceptions.WaiterError as e:
            module.fail_json_aws(
                e,
                msg='Timeout waiting for resource records changes to be applied'
            )
        except (botocore.exceptions.BotoCoreError,
                botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg='Failed to update records')
        except Exception as e:
            module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))

    rr_sets = [camel_dict_to_snake_dict(resource_record_set)]
    formatted_aws = format_record(aws_record, zone_in, zone_id)
    formatted_record = format_record(resource_record_set, zone_in, zone_id)

    module.exit_json(
        changed=True,
        diff=dict(
            before=formatted_aws,
            after=formatted_record if command_in != 'delete' else {},
            resource_record_sets=rr_sets,
        ),
    )
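
A minimal illustrative sketch (not part of the module above) of the kind of request the code ends up sending for a simple A record; the plain boto3 client, the zone ID and the address below are hypothetical stand-ins.

import boto3

route53_client = boto3.client('route53')
route53_client.change_resource_record_sets(
    HostedZoneId='Z0EXAMPLE',  # hypothetical hosted zone ID
    ChangeBatch={
        'Changes': [{
            'Action': 'UPSERT',
            'ResourceRecordSet': {
                'Name': 'www.example.com.',
                'Type': 'A',
                'TTL': 3600,
                'ResourceRecords': [{'Value': '192.0.2.10'}],
            },
        }],
    },
)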
Example #26
def main():
    argument_spec = dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=False, type='str'),  # R S P
        task_definition=dict(required=False, type='str'),  # R* S*
        overrides=dict(required=False, type='dict'),  # R S
        count=dict(required=False, type='int'),  # R
        task=dict(required=False, type='str'),  # P*
        container_instances=dict(required=False, type='list',
                                 elements='str'),  # S*
        started_by=dict(required=False, type='str'),  # R S
        network_configuration=dict(required=False, type='dict'),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        tags=dict(required=False, type='dict'))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE',
                                            ['network_configuration'])])

    # Validate Inputs
    if module.params['operation'] == 'run':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To run a task, a task_definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if module.params['operation'] == 'start':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To start a task, a task_definition must be specified")
        if module.params['container_instances'] is None:
            module.fail_json(msg="To start a task, container instances must be specified")
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if module.params['operation'] == 'stop':
        if module.params['task'] is None:
            module.fail_json(msg="To stop a task, a task must be specified")
        if module.params['task_definition'] is None:
            module.fail_json(msg="To stop a task, a task definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)

    if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
        module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')

    if module.params['launch_type'] and not service_mgr.ecs_api_handles_launch_type():
        module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch type')

    if module.params['tags']:
        if not service_mgr.ecs_api_handles_tags():
            module.fail_json(msg=missing_required_lib("botocore >= 1.12.46", reason="to use tags"))
        if not service_mgr.ecs_task_long_format_enabled():
            module.fail_json(msg="Cannot set task tags: long format task arns are required to set tags")

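    # Find tasks already in the target status so the requested operation can be skipped when it has already happened.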
    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list,
                                      status_type)

    results = dict(changed=False)
    if module.params['operation'] == 'run':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'],
                    module.params['launch_type'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'start':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by'],
                    module.params['tags'],
                )
            results['changed'] = True

    elif module.params['operation'] == 'stop':
        if existing:
            results['task'] = existing
        else:
            if not module.check_mode:
                # No already-stopped task was found, so stop the requested task and report a change.
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'], module.params['task'])
            results['changed'] = True

    module.exit_json(**results)
Example #27
def main():
    argument_spec = dict(
        alias=dict(aliases=['key_alias']),
        policy_mode=dict(aliases=['mode'],
                         choices=['grant', 'deny'],
                         default='grant'),
        policy_role_name=dict(aliases=['role_name']),
        policy_role_arn=dict(aliases=['role_arn']),
        policy_grant_types=dict(aliases=['grant_types'],
                                type='list',
                                elements='str'),
        policy_clean_invalid_entries=dict(aliases=['clean_invalid_entries'],
                                          type='bool',
                                          default=True),
        pending_window=dict(aliases=['deletion_delay'], type='int'),
        key_id=dict(aliases=['key_arn']),
        description=dict(),
        enabled=dict(type='bool', default=True),
        tags=dict(type='dict', default={}),
        purge_tags=dict(type='bool', default=False),
        grants=dict(type='list', default=[], elements='dict'),
        policy=dict(type='json'),
        purge_grants=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent']),
        enable_key_rotation=dict(type='bool'),
        key_spec=dict(type='str',
                      default='SYMMETRIC_DEFAULT',
                      aliases=['customer_master_key_spec'],
                      choices=[
                          'SYMMETRIC_DEFAULT', 'RSA_2048', 'RSA_3072',
                          'RSA_4096', 'ECC_NIST_P256', 'ECC_NIST_P384',
                          'ECC_NIST_P521', 'ECC_SECG_P256K1'
                      ]),
        key_usage=dict(type='str',
                       default='ENCRYPT_DECRYPT',
                       choices=['ENCRYPT_DECRYPT', 'SIGN_VERIFY']),
    )

    module = AnsibleAWSModule(
        supports_check_mode=True,
        argument_spec=argument_spec,
        required_one_of=[['alias', 'key_id']],
    )

    mode = module.params['policy_mode']

    kms = module.client('kms')

    key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'),
                                      module.params.get('alias'))
    # We can't create keys with a specific ID; if we can't access the key we'll have to fail.
    if module.params.get('state') == 'present' and module.params.get('key_id') and not key_metadata:
        module.fail_json(msg="Could not find key with id %s to update" % module.params.get('key_id'))

    if module.params.get('policy_grant_types') or mode == 'deny':
        module.deprecate(
            'Managing the KMS IAM Policy via policy_mode and policy_grant_types is fragile'
            ' and has been deprecated in favour of the policy option.',
            date='2021-12-01',
            collection_name='community.aws')
        result = update_policy_grants(kms, module, key_metadata, mode)
        module.exit_json(**result)

    if module.params.get('state') == 'absent':
        if key_metadata is None:
            module.exit_json(changed=False)
        result = delete_key(kms, module, key_metadata)
        module.exit_json(**result)

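    # A key was found, so update its configuration; otherwise fall through and create a new key.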
    if key_metadata:
        key_details = get_key_details(kms, module, key_metadata['Arn'])
        result = update_key(kms, module, key_details)
        module.exit_json(**result)

    result = create_key(kms, module)
    module.exit_json(**result)
Example #28
def main():
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(),
        link_aggregation_group_id=dict(),
        num_connections=dict(type='int'),
        min_links=dict(type='int'),
        location=dict(),
        bandwidth=dict(),
        connection_id=dict(),
        delete_with_disassociation=dict(type='bool', default=False),
        force_delete=dict(type='bool', default=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=120),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_one_of=[('link_aggregation_group_id', 'name')],
        required_if=[('state', 'present', ('location', 'bandwidth'))],
    )

    try:
        connection = module.client('directconnect')
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

    state = module.params.get('state')
    response = {}
    try:
        if state == 'present':
            changed, lag_id = ensure_present(
                connection,
                num_connections=module.params.get("num_connections"),
                lag_id=module.params.get("link_aggregation_group_id"),
                lag_name=module.params.get("name"),
                location=module.params.get("location"),
                bandwidth=module.params.get("bandwidth"),
                connection_id=module.params.get("connection_id"),
                min_links=module.params.get("min_links"),
                wait=module.params.get("wait"),
                wait_timeout=module.params.get("wait_timeout"))
            response = lag_status(connection, lag_id)
        elif state == "absent":
            changed = ensure_absent(
                connection,
                lag_id=module.params.get("link_aggregation_group_id"),
                lag_name=module.params.get("name"),
                force_delete=module.params.get("force_delete"),
                delete_with_disassociation=module.params.get(
                    "delete_with_disassociation"),
                wait=module.params.get('wait'),
                wait_timeout=module.params.get('wait_timeout'))
    except DirectConnectError as e:
        if e.last_traceback:
            module.fail_json(msg=e.msg,
                             exception=e.last_traceback,
                             **camel_dict_to_snake_dict(e.exception))
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
def main():
    argument_spec = dict(
        id=dict(required=False, aliases=['network_id']),
        proposal_id=dict(required=False),
        list_invitations=dict(required=False, type='bool'),
        list_members=dict(required=False, type='bool'),
        list_networks=dict(required=False, type='bool'),
        list_nodes=dict(required=False, type='bool'),
        list_proposal_votes=dict(required=False, type='bool'),
        list_proposals=dict(required=False, type='bool'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=(
            ('list_members', True, ['id']),
            ('list_nodes', True, ['id']),
            ('list_proposal_votes', True, ['id', 'proposal_id']),
            ('list_proposals', True, ['id']),
        ),
        mutually_exclusive=[(
            'list_invitations',
            'list_members',
            'list_networks',
            'list_nodes',
            'list_proposal_votes',
            'list_proposals',
        )],
    )

    client = module.client('managedblockchain',
                           retry_decorator=AWSRetry.exponential_backoff())
    it, paginate = _managedblockchain(client, module)

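    # At most one list_* option can be set (enforced by mutually_exclusive above); dispatch to the requested listing.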
    if module.params['list_invitations']:
        module.exit_json(
            invitations=aws_response_list_parser(paginate, it, 'Invitations'))
    elif module.params['list_members']:
        module.exit_json(
            members=aws_response_list_parser(paginate, it, 'Members'))
    elif module.params['list_networks']:
        module.exit_json(
            networks=aws_response_list_parser(paginate, it, 'Networks'))
    elif module.params['list_nodes']:
        module.exit_json(nodes=aws_response_list_parser(paginate, it, 'Nodes'))
    elif module.params['list_proposal_votes']:
        module.exit_json(proposal_votes=aws_response_list_parser(
            paginate, it, 'ProposalVotes'))
    elif module.params['list_proposals']:
        module.exit_json(
            proposals=aws_response_list_parser(paginate, it, 'Proposals'))
    else:
        module.fail_json("unknown options are passed")
def main():
    argument_spec = dict(
        policy_name=dict(required=True),
        policy_description=dict(default=''),
        policy=dict(type='json'),
        make_default=dict(type='bool', default=True),
        only_version=dict(type='bool', default=False),
        fail_on_delete=dict(type='bool',
                            removed_at_date='2022-06-01',
                            removed_from_collection='community.aws'),
        state=dict(default='present', choices=['present', 'absent']),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[['state', 'present', ['policy']]],
    )

    name = module.params.get('policy_name')
    description = module.params.get('policy_description')
    state = module.params.get('state')
    default = module.params.get('make_default')
    only = module.params.get('only_version')

    policy = None

    if module.params.get('policy') is not None:
        policy = json.dumps(json.loads(module.params.get('policy')))

    try:
        iam = module.client('iam')
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')

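    # Look up any existing managed policy with this name to decide between create, update and delete.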
    p = get_policy_by_name(module, iam, name)
    if state == 'present':
        if p is None:
            # No Policy so just create one
            try:
                rvalue = iam.create_policy(PolicyName=name,
                                           Path='/',
                                           PolicyDocument=policy,
                                           Description=description)
            except Exception as e:
                module.fail_json(msg="Couldn't create policy %s: %s" %
                                 (name, to_native(e)),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))

            module.exit_json(changed=True,
                             policy=camel_dict_to_snake_dict(rvalue['Policy']))
        else:
            policy_version, changed = get_or_create_policy_version(
                module, iam, p, policy)
            changed = set_if_default(module, iam, p, policy_version,
                                     default) or changed
            changed = set_if_only(module, iam, p, policy_version,
                                  only) or changed
            # If anything has changed we need to refresh the policy
            if changed:
                try:
                    p = iam.get_policy(PolicyArn=p['Arn'])['Policy']
                except Exception as e:
                    module.fail_json(msg="Couldn't get policy: %s" %
                                     to_native(e),
                                     exception=traceback.format_exc(),
                                     **camel_dict_to_snake_dict(e.response))

            module.exit_json(changed=changed,
                             policy=camel_dict_to_snake_dict(p))
    else:
        # Check for existing policy
        if p:
            # Detach policy
            detach_all_entities(module, iam, p)
            # Delete Versions
            try:
                versions = iam.list_policy_versions(
                    PolicyArn=p['Arn'])['Versions']
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Couldn't list policy versions: %s" %
                                 to_native(e),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            for v in versions:
                if not v['IsDefaultVersion']:
                    try:
                        iam.delete_policy_version(PolicyArn=p['Arn'],
                                                  VersionId=v['VersionId'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(
                            msg="Couldn't delete policy version %s: %s" %
                            (v['VersionId'], to_native(e)),
                            exception=traceback.format_exc(),
                            **camel_dict_to_snake_dict(e.response))
            # Delete policy
            try:
                iam.delete_policy(PolicyArn=p['Arn'])
            except Exception as e:
                module.fail_json(msg="Couldn't delete policy %s: %s" %
                                 (p['PolicyName'], to_native(e)),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            # This is the one case where we will return the old policy
            module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p))
        else:
            module.exit_json(changed=False, policy=None)