def main():
    """Entry point: look up EC2 VPC Network ACLs and return them as facts."""
    spec = dict(
        nacl_ids=dict(default=[], type='list', aliases=['nacl_id']),
        filters=dict(default={}, type='dict'),
    )
    module = AnsibleAWSModule(argument_spec=spec, supports_check_mode=True)

    # Warn users still invoking this module under its pre-rename alias.
    if module._name == 'ec2_vpc_nacl_facts':
        module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'",
                         version='2.13')

    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    list_ec2_vpc_nacls(connection, module)
def convert_tg_name_to_arn(connection, module, tg_name):
    """
    Resolve a target group name to its ARN.

    :param connection: AWS boto3 elbv2 connection
    :param module: Ansible module
    :param tg_name: Name of the target group
    :return: target group ARN string
    """
    describe_with_retry = AWSRetry.jittered_backoff()(connection.describe_target_groups)
    try:
        response = describe_with_retry(Names=[tg_name])
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e)

    # describe_target_groups with an explicit name returns exactly one match.
    return response['TargetGroups'][0]['TargetGroupArn']
def main():
    """Entry point: manage an SES identity and its notification settings."""
    module = AnsibleAWSModule(
        argument_spec={
            "identity": dict(required=True, type='str'),
            "state": dict(default='present', choices=['present', 'absent']),
            "bounce_notifications": dict(type='dict'),
            "complaint_notifications": dict(type='dict'),
            "delivery_notifications": dict(type='dict'),
            "feedback_forwarding": dict(default=True, type='bool'),
        },
        supports_check_mode=True,
    )

    # Each *_notifications dict may only carry 'topic' and 'include_headers';
    # fail early on anything else rather than silently ignoring it.
    for notification_type in ('bounce', 'complaint', 'delivery'):
        param_name = notification_type + '_notifications'
        arg_dict = module.params.get(param_name)
        if not arg_dict:
            continue
        extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
        if extra_keys:
            module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get("state") == 'present':
        region = get_aws_connection_info(module, boto3=True)[0]
        account_id = get_account_id(module)
        validate_params_for_identity_present(module)
        create_or_update_identity(connection, module, region, account_id)
    else:
        destroy_identity(connection, module)
def create_connection(client, location, bandwidth, name, lag_id):
    """Create a Direct Connect connection, optionally attached to a LAG.

    :param client: boto3 Direct Connect client
    :param location: Direct Connect location code
    :param bandwidth: requested port bandwidth
    :param name: connection name (required)
    :param lag_id: optional LAG to place the connection in
    :return: the new connection's ID
    :raises DirectConnectError: when no name is given or the API call fails
    """
    if not name:
        raise DirectConnectError(
            msg="Failed to create a Direct Connect connection: name required.")

    params = dict(location=location, bandwidth=bandwidth, connectionName=name)
    if lag_id:
        params['lagId'] = lag_id

    call_with_retry = AWSRetry.backoff(**retry_params)(client.create_connection)
    try:
        result = call_with_retry(**params)
    except (BotoCoreError, ClientError) as e:
        raise DirectConnectError(
            msg="Failed to create DirectConnect connection {0}".format(name),
            last_traceback=traceback.format_exc(),
            exception=e)
    return result['connectionId']
def describe_stack_tree(module, stack_set_name, operation_ids=None):
    """Build a full snapshot of a CloudFormation stack set: its metadata and
    tags, the operations log (sorted by creation time), its stack instances
    (sorted by region+account) and, when requested, specific operation details."""
    cfn = module.client('cloudformation',
                        retry_decorator=AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5))
    result = {}

    stack_set = cfn.describe_stack_set(
        StackSetName=stack_set_name,
        aws_retry=True,
    )['StackSet']
    result['stack_set'] = camel_dict_to_snake_dict(stack_set)
    result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])

    operations = cfn.list_stack_set_operations(
        StackSetName=stack_set_name,
        aws_retry=True,
    )
    result['operations_log'] = sorted(
        camel_dict_to_snake_dict(operations)['summaries'],
        key=lambda entry: entry['creation_timestamp'])

    instance_summaries = cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']
    result['stack_instances'] = sorted(
        (camel_dict_to_snake_dict(summary) for summary in instance_summaries),
        key=lambda summary: summary['region'] + summary['account'])

    if operation_ids:
        result['operations'] = []
        for op_id in operation_ids:
            try:
                detail = cfn.describe_stack_set_operation(
                    StackSetName=stack_set_name,
                    OperationId=op_id,
                )['StackSetOperation']
                result['operations'].append(camel_dict_to_snake_dict(detail))
            except is_boto3_error_code('OperationNotFoundException'):  # pylint: disable=duplicate-except
                # An operation id may have expired or never existed; skip it.
                pass
    return result
def main():
    """Entry point: ensure an SES receipt rule set is present or absent."""
    spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        active=dict(type='bool'),
        force=dict(type='bool', default=False),
    )
    module = AnsibleAWSModule(argument_spec=spec, supports_check_mode=True)

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get('state') == 'absent':
        remove_rule_set(client, module)
    else:
        create_or_update_rule_set(client, module)
def list_elbs(self):
    """Page through all classic ELBs (optionally filtered by self.names) and
    return detailed info for each via self._get_elb_info."""
    fetch_page = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(
        self.connection.get_all_load_balancers)

    selected = []
    marker = None
    while True:
        page = fetch_page(marker=marker)
        marker = page.next_marker
        if not page:
            break
        if self.names:
            # Keep only the load balancers the caller explicitly asked for.
            selected.extend(lb for lb in page if lb.name in self.names)
        else:
            selected.extend(page)
        if marker is None:
            break

    return list(map(self._get_elb_info, selected))
def update_vpc_tags(connection, module, vpc_id, tags, name):
    """Reconcile a VPC's tags with the desired set, always forcing the Name tag.

    Purge is disabled, so tags already on the VPC but absent from the desired
    set are left alone. Returns True when tags were changed (or would be, in
    check mode), False otherwise.
    """
    if tags is None:
        tags = dict()

    tags.update({'Name': name})
    tags = dict((k, to_native(v)) for k, v in tags.items())
    try:
        response = connection.describe_tags(
            Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])
        current_tags = dict((t['Key'], t['Value']) for t in response['Tags'])
        tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
        if not tags_to_update:
            return False

        if not module.check_mode:
            tags = ansible_dict_to_boto3_tag_list(tags_to_update)
            vpc_obj = AWSRetry.backoff(
                delay=1, tries=5,
                catch_extra_error_codes=['InvalidVpcID.NotFound'],
            )(connection.create_tags)(Resources=[vpc_id], Tags=tags)

            # Block until the new tags are actually visible on the VPC.
            expected_tags = boto3_tag_list_to_ansible_dict(tags)
            filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]}
                       for key, value in expected_tags.items()]
            connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)

        return True
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to update tags")
def main():
    """Entry point: create, update or delete an AWS Step Functions state machine."""
    module = AnsibleAWSModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            definition=dict(type='json'),
            role_arn=dict(type='str'),
            state=dict(choices=['present', 'absent'], default='present'),
            tags=dict(default=None, type='dict'),
            purge_tags=dict(default=True, type='bool'),
        ),
        # A state machine can only be created/updated with both a role and a
        # definition; neither is needed for deletion.
        required_if=[
            ('state', 'present', ['role_arn']),
            ('state', 'present', ['definition']),
        ],
        supports_check_mode=True)

    sfn_client = module.client(
        'stepfunctions',
        retry_decorator=AWSRetry.jittered_backoff(retries=5))

    try:
        manage_state_machine(module.params.get('state'), sfn_client, module)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to manage state machine')
def main():
    """Entry point: ensure an RDS instance snapshot is present or absent."""
    spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True),
        db_instance_identifier=dict(aliases=['instance_id']),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
    )
    module = AnsibleAWSModule(
        argument_spec=spec,
        # Creating a snapshot needs a source instance; deleting does not.
        required_if=[['state', 'present', ['db_instance_identifier']]])

    client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))

    if module.params['state'] == 'absent':
        outcome = ensure_snapshot_absent(client, module)
    else:
        outcome = ensure_snapshot_present(client, module)

    module.exit_json(**outcome)
def main():
    """Entry point: create, update or delete an EC2 VPC.

    Handles IPv4 CIDR association/purging, optional Amazon-provided IPv6 CIDR,
    DHCP options, tags, and the DNS support/hostname attributes, then waits
    for the changes to become visible before reporting final state.
    """
    argument_spec = dict(
        name=dict(required=True),
        cidr_block=dict(type='list', required=True),
        ipv6_cidr=dict(type='bool', default=False),
        tenancy=dict(choices=['default', 'dedicated'], default='default'),
        dns_support=dict(type='bool', default=True),
        dns_hostnames=dict(type='bool', default=True),
        dhcp_opts_id=dict(),
        tags=dict(type='dict', aliases=['resource_tags']),
        state=dict(choices=['present', 'absent'], default='present'),
        multi_ok=dict(type='bool', default=False),
        purge_cidrs=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    name = module.params.get('name')
    cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
    ipv6_cidr = module.params.get('ipv6_cidr')
    purge_cidrs = module.params.get('purge_cidrs')
    tenancy = module.params.get('tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    dhcp_id = module.params.get('dhcp_opts_id')
    tags = module.params.get('tags')
    state = module.params.get('state')
    multi = module.params.get('multi_ok')

    changed = False

    connection = module.client(
        'ec2',
        retry_decorator=AWSRetry.jittered_backoff(
            retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']))

    # EC2 requires DNS support to be on before DNS hostnames can be enabled.
    if dns_hostnames and not dns_support:
        module.fail_json(
            msg='In order to enable DNS Hostnames you must also enable DNS support')

    if state == 'present':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is None:
            vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
            changed = True

        vpc_obj = get_vpc(module, connection, vpc_id)

        # Map every currently-associated IPv4 CIDR to its association id,
        # skipping associations that are already disassociated.
        associated_cidrs = dict(
            (cidr['CidrBlock'], cidr['AssociationId'])
            for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
            if cidr['CidrBlockState']['State'] != 'disassociated')
        to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
        to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs
                     if cidr not in cidr_block]
        expected_cidrs = [cidr for cidr in associated_cidrs
                          if associated_cidrs[cidr] not in to_remove] + to_add

        if len(cidr_block) > 1:
            for cidr in to_add:
                changed = True
                try:
                    connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    # BUGFIX: report the IPv4 CIDR that failed, not the
                    # ipv6_cidr boolean flag the original formatted here.
                    module.fail_json_aws(
                        e, "Unable to associate CIDR {0}.".format(cidr))
        if ipv6_cidr:
            if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
                # AWS allows only one IPv6 CIDR block per VPC; warn, don't fail.
                module.warn(
                    "Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}"
                    .format(
                        vpc_id,
                        vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
            else:
                try:
                    connection.associate_vpc_cidr_block(
                        AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
                    changed = True
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e, "Unable to associate CIDR {0}.".format(ipv6_cidr))

        if purge_cidrs:
            for association_id in to_remove:
                changed = True
                try:
                    connection.disassociate_vpc_cidr_block(
                        AssociationId=association_id)
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e,
                        "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
                        "are associated with the CIDR block before you can disassociate it."
                        .format(association_id))

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to update DHCP options")

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_id, tags, name):
                    changed = True
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Failed to update tags")

        current_dns_enabled = connection.describe_vpc_attribute(
            Attribute='enableDnsSupport', VpcId=vpc_id,
            aws_retry=True)['EnableDnsSupport']['Value']
        current_dns_hostnames = connection.describe_vpc_attribute(
            Attribute='enableDnsHostnames', VpcId=vpc_id,
            aws_retry=True)['EnableDnsHostnames']['Value']

        if current_dns_enabled != dns_support:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(
                        VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e, "Failed to update enabled dns support attribute")
        if current_dns_hostnames != dns_hostnames:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(
                        VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
                except (botocore.exceptions.ClientError,
                        botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(
                        e, "Failed to update enabled dns hostnames attribute")

        # wait for associated cidrs to match
        if to_add or to_remove:
            try:
                connection.get_waiter('vpc_available').wait(
                    VpcIds=[vpc_id],
                    Filters=[{
                        'Name': 'cidr-block-association.cidr-block',
                        'Values': expected_cidrs
                    }])
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to wait for CIDRs to update")

        # try to wait for enableDnsSupport and enableDnsHostnames to match
        wait_for_vpc_attribute(connection, module, vpc_id,
                               'enableDnsSupport', dns_support)
        wait_for_vpc_attribute(connection, module, vpc_id,
                               'enableDnsHostnames', dns_hostnames)

        final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
        final_state['tags'] = boto3_tag_list_to_ansible_dict(
            final_state.get('tags', []))
        final_state['id'] = final_state.pop('vpc_id')

        module.exit_json(changed=changed, vpc=final_state)

    elif state == 'absent':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is not None:
            try:
                if not module.check_mode:
                    connection.delete_vpc(VpcId=vpc_id)
                changed = True
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(
                    e,
                    msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                        "and/or ec2_vpc_route_table modules to ensure the other components are absent."
                        .format(vpc_id))

        module.exit_json(changed=changed, vpc={})
def main():
    """Entry point: create, update or delete an AWS Config configuration aggregator.

    Builds the boto3 put_configuration_aggregator payload from module params
    and dispatches to the create/update/delete helpers.
    """
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'account_sources': dict(type='list', required=True),
            'organization_source': dict(type='dict', required=True)
        },
        supports_check_mode=False,
    )

    result = {'changed': False}

    name = module.params.get('name')
    state = module.params.get('state')

    params = {}
    if name:
        params['ConfigurationAggregatorName'] = name
    if module.params.get('account_sources'):
        # Translate each account source entry into the CamelCase API shape.
        params['AccountAggregationSources'] = []
        for i in module.params.get('account_sources'):
            tmp_dict = {}
            if i.get('account_ids'):
                tmp_dict['AccountIds'] = i.get('account_ids')
            if i.get('aws_regions'):
                tmp_dict['AwsRegions'] = i.get('aws_regions')
            if i.get('all_aws_regions') is not None:
                tmp_dict['AllAwsRegions'] = i.get('all_aws_regions')
            params['AccountAggregationSources'].append(tmp_dict)
    if module.params.get('organization_source'):
        params['OrganizationAggregationSource'] = {}
        if module.params.get('organization_source').get('role_arn'):
            params['OrganizationAggregationSource'].update({
                'RoleArn': module.params.get('organization_source').get('role_arn')
            })
        if module.params.get('organization_source').get('aws_regions'):
            params['OrganizationAggregationSource'].update({
                'AwsRegions': module.params.get('organization_source').get('aws_regions')
            })
        if module.params.get('organization_source').get('all_aws_regions') is not None:
            # BUGFIX: the original indexed params['OrganizationAggregationSourcep']
            # (trailing 'p' typo), which raised KeyError whenever
            # organization_source.all_aws_regions was supplied.
            params['OrganizationAggregationSource'].update({
                'AllAwsRegions': module.params.get('organization_source').get('all_aws_regions')
            })

    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())

    resource_status = resource_exists(client, module, params)

    if state == 'present':
        if not resource_status:
            create_resource(client, module, params, result)
        else:
            update_resource(client, module, params, result)

    if state == 'absent':
        if resource_status:
            delete_resource(client, module, params, result)

    module.exit_json(changed=result['changed'])
def create_or_update(module, template_options):
    """Create an EC2 launch template, or add a new version to an existing one.

    Builds the launch template data from the module params listed in
    `template_options`. If no template exists, creates one; if one exists and
    the data differs from the most recent version, creates a new version and
    optionally repoints the default version ('latest' or an explicit integer).

    Returns a dict with a 'changed' key.
    """
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    template, template_versions = existing_templates(module)
    out = {}
    # Only forward the params that belong to the launch template data itself.
    lt_data = params_to_launch_data(
        module,
        dict((k, v) for k, v in module.params.items() if k in template_options))
    if not (template or template_versions):
        # create a full new one
        try:
            resp = ec2.create_launch_template(
                LaunchTemplateName=module.params['template_name'],
                LaunchTemplateData=lt_data,
                ClientToken=uuid4().hex,
                aws_retry=True,
            )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create launch template")
        template, template_versions = existing_templates(module)
        out['changed'] = True
    elif template and template_versions:
        most_recent = sorted(template_versions, key=lambda x: x['VersionNumber'])[-1]
        # If the desired data matches the latest version exactly, nothing to do.
        if lt_data == most_recent['LaunchTemplateData']:
            out['changed'] = False
            return out
        try:
            resp = ec2.create_launch_template_version(
                LaunchTemplateId=template['LaunchTemplateId'],
                LaunchTemplateData=lt_data,
                ClientToken=uuid4().hex,
                aws_retry=True,
            )
            if module.params.get('default_version') in (None, ''):
                # no need to do anything, leave the existing version as default
                pass
            elif module.params.get('default_version') == 'latest':
                # Point the default at the version just created.
                set_default = ec2.modify_launch_template(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    DefaultVersion=to_text(
                        resp['LaunchTemplateVersion']['VersionNumber']),
                    ClientToken=uuid4().hex,
                    aws_retry=True,
                )
            else:
                # An explicit default_version must parse as an integer.
                try:
                    int(module.params.get('default_version'))
                except ValueError:
                    module.fail_json(
                        msg=
                        'default_version param was not a valid integer, got "{0}"'
                        .format(module.params.get('default_version')))
                set_default = ec2.modify_launch_template(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    DefaultVersion=to_text(
                        int(module.params.get('default_version'))),
                    ClientToken=uuid4().hex,
                    aws_retry=True,
                )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(
                e, msg="Couldn't create subsequent launch template version")
        template, template_versions = existing_templates(module)
        out['changed'] = True
    return out
def main():
    """Entry point: create, update or delete an AWS Config rule.

    Translates module params into the boto3 PutConfigRule payload and
    dispatches to the create/update/delete helpers.
    """
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'description': dict(type='str'),
            'scope': dict(type='dict'),
            'source': dict(type='dict', required=True),
            'input_parameters': dict(type='str'),
            'execution_frequency': dict(
                type='str',
                choices=[
                    'One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours',
                    'TwentyFour_Hours'
                ]
            ),
        },
        supports_check_mode=False,
    )

    result = {'changed': False}

    # NOTE: removed dead assignment `resource_type = module.params.get('resource_type')`
    # — 'resource_type' is not declared in argument_spec and the value was never used.
    name = module.params.get('name')
    state = module.params.get('state')

    params = {}
    if name:
        params['ConfigRuleName'] = name
    if module.params.get('description'):
        params['Description'] = module.params.get('description')
    if module.params.get('scope'):
        # Scope restricts which resources/tags the rule evaluates.
        params['Scope'] = {}
        if module.params.get('scope').get('compliance_types'):
            params['Scope'].update({
                'ComplianceResourceTypes': module.params.get('scope').get('compliance_types')
            })
        if module.params.get('scope').get('tag_key'):
            params['Scope'].update({
                'TagKey': module.params.get('scope').get('tag_key')
            })
        if module.params.get('scope').get('tag_value'):
            params['Scope'].update({
                'TagValue': module.params.get('scope').get('tag_value')
            })
        if module.params.get('scope').get('compliance_id'):
            params['Scope'].update({
                'ComplianceResourceId': module.params.get('scope').get('compliance_id')
            })
    if module.params.get('source'):
        params['Source'] = {}
        if module.params.get('source').get('owner'):
            params['Source'].update({
                'Owner': module.params.get('source').get('owner')
            })
        if module.params.get('source').get('identifier'):
            params['Source'].update({
                'SourceIdentifier': module.params.get('source').get('identifier')
            })
        if module.params.get('source').get('details'):
            params['Source'].update({
                'SourceDetails': module.params.get('source').get('details')
            })
    if module.params.get('input_parameters'):
        params['InputParameters'] = module.params.get('input_parameters')
    if module.params.get('execution_frequency'):
        params['MaximumExecutionFrequency'] = module.params.get('execution_frequency')
    params['ConfigRuleState'] = 'ACTIVE'

    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())

    existing_rule = rule_exists(client, module, params)

    if state == 'present':
        if not existing_rule:
            create_resource(client, module, params, result)
        else:
            update_resource(client, module, params, result)

    if state == 'absent':
        if existing_rule:
            delete_resource(client, module, params, result)

    module.exit_json(**result)
def main():
    """Entry point for the cloudformation module: create, update or delete a
    CloudFormation stack, optionally via change sets, with check-mode support.

    Builds the boto3 call parameters from module params, wires retry/backoff
    around the cloudformation client calls, then dispatches on state.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(stack_name=dict(required=True),
             template_parameters=dict(required=False, type='dict', default={}),
             state=dict(default='present', choices=['present', 'absent']),
             template=dict(default=None, required=False, type='path'),
             notification_arns=dict(default=None, required=False),
             stack_policy=dict(default=None, required=False),
             disable_rollback=dict(default=False, type='bool'),
             on_create_failure=dict(
                 default=None,
                 required=False,
                 choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
             create_timeout=dict(default=None, type='int'),
             template_url=dict(default=None, required=False),
             template_body=dict(default=None, required=False),
             template_format=dict(removed_in_version='2.14'),
             create_changeset=dict(default=False, type='bool'),
             changeset_name=dict(default=None, required=False),
             role_arn=dict(default=None, required=False),
             tags=dict(default=None, type='dict'),
             termination_protection=dict(default=None, type='bool'),
             events_limit=dict(default=200, type='int'),
             backoff_retries=dict(type='int', default=10, required=False),
             backoff_delay=dict(type='int', default=3, required=False),
             backoff_max_delay=dict(type='int', default=30, required=False),
             capabilities=dict(
                 type='list',
                 default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body'],
                            ['disable_rollback', 'on_create_failure']],
        supports_check_mode=True)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')

    # Only this fixed set of IAM capabilities may be acknowledged.
    invalid_capabilities = []
    user_capabilities = module.params.get('capabilities')
    for user_cap in user_capabilities:
        if user_cap not in [
                'CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM',
                'CAPABILITY_AUTO_EXPAND'
        ]:
            invalid_capabilities.append(user_cap)

    if invalid_capabilities:
        module.fail_json(msg="Specified capabilities are invalid : %r,"
                         " please check documentation for valid capabilities" %
                         invalid_capabilities)

    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {
        'Capabilities': user_capabilities,
        'ClientRequestToken': to_native(uuid.uuid4()),
    }
    state = module.params['state']
    stack_params['StackName'] = module.params['stack_name']

    # Template source precedence: local file > inline body > URL.
    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as template_fh:
            stack_params['TemplateBody'] = template_fh.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']

    if module.params.get('notification_arns'):
        stack_params['NotificationARNs'] = module.params[
            'notification_arns'].split(',')
    else:
        stack_params['NotificationARNs'] = []

    # can't check the policy when verifying.
    if module.params[
            'stack_policy'] is not None and not module.check_mode and not module.params[
                'create_changeset']:
        with open(module.params['stack_policy'], 'r') as stack_policy_fh:
            stack_params['StackPolicyBody'] = stack_policy_fh.read()

    template_parameters = module.params['template_parameters']

    stack_params['Parameters'] = []
    for k, v in template_parameters.items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = str(v['value'])

            # use_previous_value wins over an explicit value when both are set.
            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(
            module.params['tags'])

    if module.params.get('role_arn'):
        stack_params['RoleARN'] = module.params['role_arn']

    result = {}

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(
            module, boto3=True)
        cfn = boto3_conn(module,
                         conn_type='client',
                         resource='cloudformation',
                         region=region,
                         endpoint=ec2_url,
                         **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=boto_exception(e))

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    backoff_wrapper = AWSRetry.jittered_backoff(
        retries=module.params.get('backoff_retries'),
        delay=module.params.get('backoff_delay'),
        max_delay=module.params.get('backoff_max_delay'))
    cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
    cfn.create_stack = backoff_wrapper(cfn.create_stack)
    cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
    cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
    cfn.update_stack = backoff_wrapper(cfn.update_stack)
    cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
    cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
    cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
    if boto_supports_termination_protection(cfn):
        cfn.update_termination_protection = backoff_wrapper(
            cfn.update_termination_protection)

    stack_info = get_stack_facts(cfn, stack_params['StackName'])

    # In check mode, report what would happen without touching anything.
    if module.check_mode:
        if state == 'absent' and stack_info:
            module.exit_json(changed=True,
                             msg='Stack would be deleted',
                             meta=[])
        elif state == 'absent' and not stack_info:
            module.exit_json(changed=False,
                             msg='Stack doesn\'t exist',
                             meta=[])
        elif state == 'present' and not stack_info:
            module.exit_json(changed=True,
                             msg='New stack would be created',
                             meta=[])
        else:
            module.exit_json(**check_mode_changeset(module, stack_params, cfn))

    if state == 'present':
        if not stack_info:
            result = create_stack(module, stack_params, cfn,
                                  module.params.get('events_limit'))
        elif module.params.get('create_changeset'):
            result = create_changeset(module, stack_params, cfn,
                                      module.params.get('events_limit'))
        else:
            if module.params.get('termination_protection') is not None:
                update_termination_protection(
                    module, cfn, stack_params['StackName'],
                    bool(module.params.get('termination_protection')))
            result = update_stack(module, stack_params, cfn,
                                  module.params.get('events_limit'))

        # format the stack output
        stack = get_stack_facts(cfn, stack_params['StackName'])
        if stack is not None:
            if result.get('stack_outputs') is None:
                # always define stack_outputs, but it may be empty
                result['stack_outputs'] = {}
            for output in stack.get('Outputs', []):
                result['stack_outputs'][
                    output['OutputKey']] = output['OutputValue']
            stack_resources = []
            reslist = cfn.list_stack_resources(
                StackName=stack_params['StackName'])
            for res in reslist.get('StackResourceSummaries', []):
                stack_resources.append({
                    "logical_resource_id": res['LogicalResourceId'],
                    "physical_resource_id": res.get('PhysicalResourceId', ''),
                    "resource_type": res['ResourceType'],
                    "last_updated_time": res['LastUpdatedTimestamp'],
                    "status": res['ResourceStatus'],
                    "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
                })
            result['stack_resources'] = stack_resources

    elif state == 'absent':
        # absent state is different because of the way delete_stack works.
        # problem is it doesn't give an error if stack isn't found
        # so must describe the stack first
        try:
            stack = get_stack_facts(cfn, stack_params['StackName'])
            if not stack:
                result = {'changed': False, 'output': 'Stack not found.'}
            else:
                if stack_params.get('RoleARN') is None:
                    cfn.delete_stack(StackName=stack_params['StackName'])
                else:
                    cfn.delete_stack(StackName=stack_params['StackName'],
                                     RoleARN=stack_params['RoleARN'])
                result = stack_operation(
                    cfn, stack_params['StackName'], 'DELETE',
                    module.params.get('events_limit'),
                    stack_params.get('ClientRequestToken', None))
        except Exception as err:
            module.fail_json(msg=boto_exception(err),
                            exception=traceback.format_exc())

    module.exit_json(**result)
def main():
    """Entry point for the cloudformation_stack_set module.

    Creates/updates a CloudFormation StackSet and its stack instances when
    ``state=present``, or deletes the StackSet (optionally purging its stack
    instances first) when ``state=absent``.

    Fix vs. previous revision: the code consulted
    ``module.params.get('purge_stack_instances')``, a key that does not exist
    in ``argument_spec`` (the declared option is ``purge_stacks``), so the
    purge logic could never trigger. All lookups now use ``purge_stacks``.
    """
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list'),
        accounts=dict(type='list'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)

    if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')):
        module.fail_json(msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26")

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    cfn = module.client('cloudformation', retry_decorator=AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30))
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    # A single UUID ties together every operation issued during this run so
    # they can be found in the StackSet operation log.
    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg="Can't create a stack set without choosing at least one account. "
                "To get the ID of the current account, use the aws_caller_info module.")

    module.params['accounts'] = [to_native(a) for a in module.params['accounts']]

    stack_params['StackSetName'] = module.params['name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                    "`template_body`, or `template_url`".format(module.params['name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})

    if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params['execution_role_name']

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True, msg='New stack set would be created', meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
            # was `purge_stack_instances` (not a declared option) -- use the
            # real `purge_stacks` option so this branch can actually fire.
            elif unspecified_stacks and module.params.get('purge_stacks'):
                module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params['OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in new_stack_instances)),
                Regions=list(set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in existing_stack_instances)),
                Regions=list(set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        for op in operation_ids:
            await_stack_set_operation(
                module, cfn, operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
        # Deletion is attempted directly; a non-empty stack set is handled by
        # the StackSetNotEmptyException branch below, which purges instances
        # (retaining the stacks themselves when purge_stacks is false).
        try:
            cfn.delete_stack_set(StackSetName=module.params['name'])
            module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op)
            await_stack_set_operation(
                module, cfn, operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(StackSetName=module.params['name'])
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(StackSetName=module.params['name'])
                stack_states = ', '.join(
                    '(account={Account}, region={Region}, state={Status})'.format(**i)
                    for i in instances['Summaries'])
                module.fail_json_aws(
                    exc,
                    msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
            module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))

    result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute", **result)
    module.exit_json(changed=changed, **result)
def ensure_tags(self, igw_id, tags, add_only):
    """Reconcile the tags on an internet gateway with the requested set.

    :param igw_id: ID of the internet gateway whose tags are managed
    :param tags: dict of desired tag key/value pairs
    :param add_only: when true, existing tags not in ``tags`` are kept
        (no purge); when false, they are deleted
    :return: dict of the tags present on the gateway after reconciliation
    """
    final_tags = []

    # IGW tags are read via describe_tags filtered on resource id/type.
    filters = ansible_dict_to_boto3_filter_list({
        'resource-id': igw_id,
        'resource-type': 'internet-gateway'
    })
    cur_tags = None
    try:
        cur_tags = self._connection.describe_tags(Filters=filters)
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        self._module.fail_json_aws(e, msg="Couldn't describe tags")

    # add_only suppresses the purge of tags that are not in `tags`.
    purge_tags = bool(not add_only)
    to_update, to_delete = compare_aws_tags(
        boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags,
        purge_tags)
    final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))

    if to_update:
        try:
            if self._check_mode:
                # check mode: simulate the update on the local copy only
                final_tags.update(to_update)
            else:
                AWSRetry.exponential_backoff()(
                    self._connection.create_tags)(
                        Resources=[igw_id],
                        Tags=ansible_dict_to_boto3_tag_list(to_update))

            self._results['changed'] = True
        except (botocore.exceptions.ClientError,
                botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't create tags")

    if to_delete:
        try:
            if self._check_mode:
                # check mode: simulate the deletions on the local copy only
                for key in to_delete:
                    del final_tags[key]
            else:
                # delete_tags takes Key-only entries; values are irrelevant
                tags_list = []
                for key in to_delete:
                    tags_list.append({'Key': key})

                AWSRetry.exponential_backoff()(
                    self._connection.delete_tags)(Resources=[igw_id],
                                                  Tags=tags_list)

            self._results['changed'] = True
        except (botocore.exceptions.ClientError,
                botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't delete tags")

    if not self._check_mode and (to_update or to_delete):
        # Re-read from AWS so the returned tags reflect the real state,
        # not our local prediction.
        try:
            response = self._connection.describe_tags(Filters=filters)
            final_tags = boto3_tag_list_to_ansible_dict(
                response.get('Tags'))
        except (botocore.exceptions.ClientError,
                botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't describe tags")

    return final_tags
def main():
    """Entry point for the iam_role module.

    Validates parameter combinations (boundary policy, tag/boundary botocore
    version floors, session duration, path shape) and then creates/updates or
    deletes the IAM role depending on ``state``.
    """
    argument_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default="/"),
        assume_role_policy_document=dict(type='json'),
        managed_policies=dict(type='list', aliases=['managed_policy']),
        max_session_duration=dict(type='int'),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        description=dict(type='str'),
        boundary=dict(type='str', aliases=['boundary_policy_arn']),
        create_instance_profile=dict(type='bool', default=True),
        delete_instance_profile=dict(type='bool', default=False),
        purge_policies=dict(type='bool', aliases=['purge_policy', 'purge_managed_policies']),
        tags=dict(type='dict'),
        purge_tags=dict(type='bool', default=True),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[('state', 'present', ['assume_role_policy_document'])],
                              supports_check_mode=True)

    # purge_policies has no default: an unset value triggers the deprecation
    # warning about the upcoming default change.
    if module.params.get('purge_policies') is None:
        module.deprecate(
            'In Ansible 2.14 the default value of purge_policies will change from true to false.'
            ' To maintain the existing behaviour explicity set purge_policies=true',
            version='2.14')

    if module.params.get('boundary'):
        if module.params.get('create_instance_profile'):
            module.fail_json(
                msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
        # NOTE(review): this prefix check rejects ARNs from non-default
        # partitions (arn:aws-us-gov:iam, arn:aws-cn:iam) -- confirm whether
        # that restriction is intentional.
        if not module.params.get('boundary').startswith('arn:aws:iam'):
            module.fail_json(msg="Boundary policy must be an ARN")
    # Tag management and permission boundaries each need a minimum botocore.
    if module.params.get('tags') is not None and not module.botocore_at_least('1.12.46'):
        module.fail_json(
            msg="When managing tags botocore must be at least v1.12.46. "
                "Current versions: boto3-{boto3_version} botocore-{botocore_version}"
                .format(**module._gather_versions()))
    if module.params.get('boundary') is not None and not module.botocore_at_least('1.10.57'):
        module.fail_json(
            msg="When using a boundary policy, botocore must be at least v1.10.57. "
                "Current versions: boto3-{boto3_version} botocore-{botocore_version}"
                .format(**module._gather_versions()))
    if module.params.get('max_session_duration'):
        max_session_duration = module.params.get('max_session_duration')
        # IAM allows role session durations between 1 and 12 hours.
        if max_session_duration < 3600 or max_session_duration > 43200:
            module.fail_json(
                msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
    if module.params.get('path'):
        path = module.params.get('path')
        if not path.endswith('/') or not path.startswith('/'):
            module.fail_json(msg="path must begin and end with /")

    connection = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())

    state = module.params.get("state")

    if state == 'present':
        create_or_update_role(connection, module)
    else:
        destroy_role(connection, module)
def delete(self):
    """Delete this listener from its load balancer.

    Fails the module (via fail_json_aws) on any boto3/botocore error.
    """
    # Wrap the boto3 call with jittered retries to ride out API throttling.
    retry = AWSRetry.jittered_backoff()
    try:
        retry(self.connection.delete_listener)(ListenerArn=self.listener)
    except (BotoCoreError, ClientError) as err:
        self.module.fail_json_aws(err)
def main():
    """Entry point for the ec2_eip module.

    Allocates/associates an Elastic IP when ``state=present`` and
    disassociates/releases it when ``state=absent``.

    Fix vs. previous revision: the AWS error handler called
    ``module.fail_json_aws(str(e))`` -- fail_json_aws expects the exception
    object itself (it reads the botocore response/traceback from it), so the
    stringified form broke the error report. It now receives ``e`` directly.
    """
    argument_spec = dict(
        device_id=dict(required=False, aliases=['instance_id']),
        public_ip=dict(required=False, aliases=['ip']),
        state=dict(required=False, default='present', choices=['present', 'absent']),
        in_vpc=dict(required=False, type='bool', default=False),
        reuse_existing_ip_allowed=dict(required=False, type='bool', default=False),
        release_on_disassociation=dict(required=False, type='bool', default=False),
        allow_reassociation=dict(type='bool', default=False),
        wait_timeout=dict(type='int', removed_in_version='2.14'),
        private_ip_address=dict(),
        tag_name=dict(),
        tag_value=dict(),
        public_ipv4_pool=dict())

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_by={
            'private_ip_address': ['device_id'],
        },
    )

    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())

    device_id = module.params.get('device_id')
    instance_id = module.params.get('instance_id')
    public_ip = module.params.get('public_ip')
    private_ip_address = module.params.get('private_ip_address')
    state = module.params.get('state')
    in_vpc = module.params.get('in_vpc')
    domain = 'vpc' if in_vpc else None
    reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
    release_on_disassociation = module.params.get('release_on_disassociation')
    allow_reassociation = module.params.get('allow_reassociation')
    tag_name = module.params.get('tag_name')
    tag_value = module.params.get('tag_value')
    public_ipv4_pool = module.params.get('public_ipv4_pool')

    if instance_id:
        # legacy alias: keep working, but warn the user
        warnings = ["instance_id is no longer used, please use device_id going forward"]
        is_instance = True
        device_id = instance_id
    else:
        # infer the device kind from the ID prefix: i-* instance, eni-* ENI
        if device_id and device_id.startswith('i-'):
            is_instance = True
        elif device_id:
            if device_id.startswith('eni-') and not in_vpc:
                module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
            is_instance = False

    tag_dict = generate_tag_dict(module, tag_name, tag_value)

    try:
        if device_id:
            address = find_address(ec2, module, public_ip, device_id, is_instance=is_instance)
        else:
            address = find_address(ec2, module, public_ip, None)

        if state == 'present':
            if device_id:
                result = ensure_present(ec2, module, domain, address, private_ip_address, device_id,
                                        reuse_existing_ip_allowed, allow_reassociation,
                                        module.check_mode, is_instance=is_instance)
            else:
                # no device: just make sure an address exists
                if address:
                    changed = False
                else:
                    address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed,
                                                        module.check_mode, tag_dict, public_ipv4_pool)
                result = {
                    'changed': changed,
                    'public_ip': address['PublicIp'],
                    'allocation_id': address['AllocationId']
                }
        else:
            if device_id:
                disassociated = ensure_absent(ec2, module, address, device_id, module.check_mode,
                                              is_instance=is_instance)
                if release_on_disassociation and disassociated['changed']:
                    released = release_address(ec2, module, address, module.check_mode)
                    result = {
                        'changed': True,
                        'disassociated': disassociated,
                        'released': released
                    }
                else:
                    result = {
                        'changed': disassociated['changed'],
                        'disassociated': disassociated,
                        'released': {'changed': False}
                    }
            else:
                released = release_address(ec2, module, address, module.check_mode)
                result = {
                    'changed': released['changed'],
                    'disassociated': {'changed': False},
                    'released': released
                }
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        # pass the exception object itself so fail_json_aws can extract the
        # botocore error details and traceback
        module.fail_json_aws(e)

    if instance_id:
        result['warnings'] = warnings
    module.exit_json(**result)
''' RETURN = '''#''' try: import botocore from botocore.exceptions import BotoCoreError, ClientError except ImportError: pass # handled by AnsibleAWSModule from ansible_collections.notmintest.not_a_real_collection.plugins.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code from ansible_collections.notmintest.not_a_real_collection.plugins.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry # this waits for an IAM role to become fully available, at the cost of # taking a long time to fail when the IAM role/policy really is invalid retry_unavailable_iam_on_put_delivery = AWSRetry.backoff( catch_extra_error_codes=['InsufficientDeliveryPolicyException'], ) def resource_exists(client, module, params): try: channel = client.describe_delivery_channels( DeliveryChannelNames=[params['name']], aws_retry=True, ) return channel['DeliveryChannels'][0] except is_boto3_error_code('NoSuchDeliveryChannelException'): return except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e)
def _get_elb_listener_rules(self):
    """Fetch the rules attached to the current listener.

    Returns the list of rule dicts from describe_rules; fails the module
    on any boto3/botocore error.
    """
    # retry-wrapped describe_rules call, keyed on the current listener ARN
    describe = AWSRetry.jittered_backoff()(self.connection.describe_rules)
    try:
        response = describe(ListenerArn=self.current_listener['ListenerArn'])
    except (BotoCoreError, ClientError) as err:
        self.module.fail_json_aws(err)
    else:
        return response['Rules']
def main():
    """Entry point for the AWS Config recorder module.

    Builds the boto3 parameter dict from the module options, then
    creates, updates, or deletes the configuration recorder to match
    the requested state.
    """
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'role_arn': dict(type='str'),
            'recording_group': dict(type='dict'),
        },
        supports_check_mode=False,
        required_if=[
            ('state', 'present', ['role_arn', 'recording_group']),
        ],
    )

    result = {'changed': False}

    name = module.params.get('name')
    state = module.params.get('state')
    role_arn = module.params.get('role_arn')
    recording_group = module.params.get('recording_group')

    # Translate module options into the camelCase keys boto3 expects.
    params = {}
    if name:
        params['name'] = name
    if role_arn:
        params['roleARN'] = role_arn
    if recording_group:
        group = {}
        all_supported = recording_group.get('all_supported')
        if all_supported is not None:
            group['allSupported'] = all_supported
        include_global = recording_group.get('include_global_types')
        if include_global is not None:
            group['includeGlobalResourceTypes'] = include_global
        # resourceTypes is always present; empty list when none were given
        resource_types = recording_group.get('resource_types')
        group['resourceTypes'] = resource_types if resource_types else []
        params['recordingGroup'] = group

    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())

    resource_status = resource_exists(client, module, params)

    if state == 'present':
        if not resource_status:
            create_resource(client, module, params, result)
        if resource_status:
            update_resource(client, module, params, result)

    if state == 'absent':
        if resource_status:
            delete_resource(client, module, params, result)

    module.exit_json(changed=result['changed'])