def __init__(self, module, instance_id, get_unused_target_groups):
    self.module = module
    try:
        self.ec2 = self.module.client(
            "ec2",
            retry_decorator=AWSRetry.jittered_backoff(retries=10)
        )
    except (ClientError, BotoCoreError) as e:
        self.module.fail_json_aws(e, msg="Couldn't connect to ec2")

    try:
        self.elbv2 = self.module.client(
            "elbv2",
            retry_decorator=AWSRetry.jittered_backoff(retries=10)
        )
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e, msg="Could not connect to elbv2")

    self.instance_id = instance_id
    self.get_unused_target_groups = get_unused_target_groups
    self.tgs = self._get_target_groups()
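# --- Illustrative sketch, not part of the original module ---
# module.client(..., retry_decorator=AWSRetry.jittered_backoff(...)) attaches the retry
# behaviour to the boto3 client; the same decorator can also wrap a single botocore call
# directly. The helper below is a hypothetical example added only to show that pattern.
def example_describe_target_health_with_retry(elbv2_client, target_group_arn):
    describe = AWSRetry.jittered_backoff(retries=10)(elbv2_client.describe_target_health)
    return describe(TargetGroupArn=target_group_arn)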
def ensure_tags(self, igw_id, tags, add_only):
    final_tags = []

    filters = ansible_dict_to_boto3_filter_list({'resource-id': igw_id, 'resource-type': 'internet-gateway'})
    cur_tags = None
    try:
        cur_tags = self._connection.describe_tags(Filters=filters)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        self._module.fail_json_aws(e, msg="Couldn't describe tags")

    purge_tags = bool(not add_only)
    to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
    final_tags = boto3_tag_list_to_ansible_dict(cur_tags.get('Tags'))

    if to_update:
        try:
            if self._check_mode:
                # update tags
                final_tags.update(to_update)
            else:
                AWSRetry.exponential_backoff()(self._connection.create_tags)(
                    Resources=[igw_id],
                    Tags=ansible_dict_to_boto3_tag_list(to_update)
                )

            self._results['changed'] = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't create tags")

    if to_delete:
        try:
            if self._check_mode:
                # update tags
                for key in to_delete:
                    del final_tags[key]
            else:
                tags_list = []
                for key in to_delete:
                    tags_list.append({'Key': key})

                AWSRetry.exponential_backoff()(self._connection.delete_tags)(Resources=[igw_id], Tags=tags_list)

            self._results['changed'] = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't delete tags")

    if not self._check_mode and (to_update or to_delete):
        try:
            response = self._connection.describe_tags(Filters=filters)
            final_tags = boto3_tag_list_to_ansible_dict(response.get('Tags'))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't describe tags")

    return final_tags
def ensure_tags(self, tgw_id, tags, purge_tags):
    """
    Ensures tags are applied to the transit gateway. Optionally removes any
    existing tags not in the tags argument if purge_tags is set to true.

    :param tgw_id: The AWS id of the transit gateway
    :param tags: list of tags to apply to the transit gateway
    :param purge_tags: when true, existing tags not in the tags param are removed
    :return: true if tags were updated
    """
    tags_changed = False
    filters = ansible_dict_to_boto3_filter_list({'resource-id': tgw_id})
    try:
        cur_tags = self._connection.describe_tags(Filters=filters)
    except (ClientError, BotoCoreError) as e:
        self._module.fail_json_aws(e, msg="Couldn't describe tags")

    to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)

    if to_update:
        try:
            if not self._check_mode:
                AWSRetry.exponential_backoff()(self._connection.create_tags)(
                    Resources=[tgw_id],
                    Tags=ansible_dict_to_boto3_tag_list(to_update)
                )
            self._results['changed'] = True
            tags_changed = True
        except (ClientError, BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't create tags {0} for resource {1}".format(
                ansible_dict_to_boto3_tag_list(to_update), tgw_id))

    if to_delete:
        try:
            if not self._check_mode:
                tags_list = []
                for key in to_delete:
                    tags_list.append({'Key': key})

                AWSRetry.exponential_backoff()(self._connection.delete_tags)(
                    Resources=[tgw_id],
                    Tags=tags_list
                )
            self._results['changed'] = True
            tags_changed = True
        except (ClientError, BotoCoreError) as e:
            self._module.fail_json_aws(e, msg="Couldn't delete tags {0} for resource {1}".format(
                ansible_dict_to_boto3_tag_list(to_delete), tgw_id))

    return tags_changed
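# --- Illustrative sketch, not part of the original module ---
# The AWSRetry.exponential_backoff()(fn)(...) pattern used above first builds a decorator,
# then wraps the bound client method, then calls the wrapped method. The helper below is a
# hypothetical stand-alone version of that tagging call.
def example_create_tags_with_retry(ec2_client, resource_id, tag_dict):
    create_tags = AWSRetry.exponential_backoff()(ec2_client.create_tags)
    return create_tags(Resources=[resource_id], Tags=ansible_dict_to_boto3_tag_list(tag_dict))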
def main():
    module = AnsibleAWSModule(
        argument_spec={
            'identity': dict(required=True, type='str'),
            'state': dict(default='present', choices=['present', 'absent']),
            'policy_name': dict(required=True, type='str'),
            'policy': dict(type='json', default=None),
        },
        required_if=[['state', 'present', ['policy']]],
        supports_check_mode=True,
    )

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
    # failures, so apply a jittered backoff to SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    state = module.params.get("state")

    if state == 'present':
        create_or_update_identity_policy(connection, module)
    else:
        delete_identity_policy(connection, module)
def update_vpc_tags(connection, module, vpc_id, tags, name):
    if tags is None:
        tags = dict()

    tags.update({'Name': name})
    tags = dict((k, to_native(v)) for k, v in tags.items())
    try:
        current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
        tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
        if tags_to_update:
            if not module.check_mode:
                tags = ansible_dict_to_boto3_tag_list(tags_to_update)
                vpc_obj = AWSRetry.backoff(
                    delay=1, tries=5,
                    catch_extra_error_codes=['InvalidVpcID.NotFound'],
                )(connection.create_tags)(Resources=[vpc_id], Tags=tags)

                # Wait for tags to be updated
                expected_tags = boto3_tag_list_to_ansible_dict(tags)
                filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
                connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)

            return True
        else:
            return False
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to update tags")
def main():
    argument_spec = dict(
        db_snapshot_identifier=dict(aliases=['snapshot_name']),
        db_instance_identifier=dict(),
        db_cluster_identifier=dict(),
        db_cluster_snapshot_identifier=dict(),
        snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier',
                             'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
    )
    if module._name == 'rds_snapshot_facts':
        module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'", version='2.13')

    conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
    results = dict()
    if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
        results['snapshots'] = standalone_snapshot_info(module, conn)
    if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
        results['cluster_snapshots'] = cluster_snapshot_info(module, conn)

    module.exit_json(changed=False, **results)
def delete_template(module):
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    template, template_versions = existing_templates(module)
    deleted_versions = []
    if template or template_versions:
        non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']]
        if non_default_versions:
            try:
                v_resp = ec2.delete_launch_template_versions(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    Versions=non_default_versions,
                )
                if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']:
                    module.warn('Failed to delete template versions {0} on launch template {1}'.format(
                        v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'],
                        template['LaunchTemplateId'],
                    ))
                deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']]
            except (ClientError, BotoCoreError) as e:
                module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(
                    template['LaunchTemplateId']))
        try:
            resp = ec2.delete_launch_template(
                LaunchTemplateId=template['LaunchTemplateId'],
            )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId']))
        return {
            'deleted_versions': deleted_versions,
            'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']),
            'changed': True,
        }
    else:
        return {'changed': False}
def existing_templates(module):
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    matches = None
    try:
        if module.params.get('template_id'):
            matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')])
        elif module.params.get('template_name'):
            matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')])
    except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e:
        # no named template was found, return nothing/empty versions
        return None, []
    except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format(
            module.params.get('launch_template_id')))
    except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            e, msg='Launch template with ID {0} could not be found, please supply a name '
                   'instead so that a new template can be created'.format(module.params.get('launch_template_id')))
    except (ClientError, BotoCoreError, WaiterError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
    else:
        template = matches['LaunchTemplates'][0]
        template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber']
        try:
            return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id)['LaunchTemplateVersions']
        except (ClientError, BotoCoreError, WaiterError) as e:
            module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(
                template['LaunchTemplateName'], template_id))
def main():
    argument_spec = dict(
        db_instance_identifier=dict(aliases=['id']),
        filters=dict(type='dict')
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if module._name == 'rds_instance_facts':
        module.deprecate("The 'rds_instance_facts' module has been renamed to 'rds_instance_info'", version='2.13')

    conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))

    module.exit_json(**instance_info(module, conn))
def create_connection(client, location, bandwidth, name, lag_id):
    if not name:
        raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
    params = {
        'location': location,
        'bandwidth': bandwidth,
        'connectionName': name,
    }
    if lag_id:
        params['lagId'] = lag_id

    try:
        connection = AWSRetry.backoff(**retry_params)(client.create_connection)(**params)
    except (BotoCoreError, ClientError) as e:
        raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
                                 last_traceback=traceback.format_exc(),
                                 exception=e)
    return connection['connectionId']
def determine_iam_role(module, name_or_arn):
    if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
        return name_or_arn
    iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
    try:
        role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
        return {'arn': role['InstanceProfile']['Arn']}
    except is_boto3_error_code('NoSuchEntity') as e:
        module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. "
                                    "Please try supplying the full ARN.".format(name_or_arn))
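# --- Illustrative sketch, not part of the original module ---
# When a client is built via module.client(..., retry_decorator=...), individual calls opt in
# to the retry wrapper by passing aws_retry=True, as get_instance_profile does above. The
# helper below is a hypothetical example of the same opt-in on another IAM call.
def example_list_instance_profiles(iam_client):
    return iam_client.list_instance_profiles(aws_retry=True)['InstanceProfiles']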
def get_vpc(module, connection, vpc_id):
    # wait for vpc to be available
    try:
        connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))

    try:
        vpc_obj = AWSRetry.backoff(
            delay=3, tries=8,
            catch_extra_error_codes=['InvalidVpcID.NotFound'],
        )(connection.describe_vpcs)(VpcIds=[vpc_id])['Vpcs'][0]
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")
    try:
        vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")

    return vpc_obj
def list_elbs(self):
    elb_array, token = [], None
    get_elb_with_backoff = AWSRetry.backoff(tries=5, delay=5, backoff=2.0)(self.connection.get_all_load_balancers)
    while True:
        all_elbs = get_elb_with_backoff(marker=token)
        token = all_elbs.next_marker

        if all_elbs:
            if self.names:
                for existing_lb in all_elbs:
                    if existing_lb.name in self.names:
                        elb_array.append(existing_lb)
            else:
                elb_array.extend(all_elbs)
        else:
            break

        if token is None:
            break

    return list(map(self._get_elb_info, elb_array))
def describe_stack_tree(module, stack_set_name, operation_ids=None):
    cfn = module.client('cloudformation', retry_decorator=AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5))
    result = dict()
    result['stack_set'] = camel_dict_to_snake_dict(
        cfn.describe_stack_set(
            StackSetName=stack_set_name,
            aws_retry=True,
        )['StackSet']
    )
    result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])
    result['operations_log'] = sorted(
        camel_dict_to_snake_dict(
            cfn.list_stack_set_operations(
                StackSetName=stack_set_name,
                aws_retry=True,
            )
        )['summaries'],
        key=lambda x: x['creation_timestamp']
    )
    result['stack_instances'] = sorted(
        [camel_dict_to_snake_dict(i) for i in cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']],
        key=lambda i: i['region'] + i['account']
    )

    if operation_ids:
        result['operations'] = []
        for op_id in operation_ids:
            try:
                result['operations'].append(camel_dict_to_snake_dict(
                    cfn.describe_stack_set_operation(
                        StackSetName=stack_set_name,
                        OperationId=op_id,
                    )['StackSetOperation']
                ))
            except is_boto3_error_code('OperationNotFoundException'):  # pylint: disable=duplicate-except
                pass
    return result
def connection_exists(client, connection_id=None, connection_name=None, verify=True):
    params = {}
    if connection_id:
        params['connectionId'] = connection_id
    try:
        response = AWSRetry.backoff(**retry_params)(client.describe_connections)(**params)
    except (BotoCoreError, ClientError) as e:
        if connection_id:
            msg = "Failed to describe DirectConnect ID {0}".format(connection_id)
        else:
            msg = "Failed to describe DirectConnect connections"
        raise DirectConnectError(msg=msg,
                                 last_traceback=traceback.format_exc(),
                                 exception=e)

    match = []
    connection = []

    # look for matching connections
    if len(response.get('connections', [])) == 1 and connection_id:
        if response['connections'][0]['connectionState'] != 'deleted':
            match.append(response['connections'][0]['connectionId'])
            connection.extend(response['connections'])

    for conn in response.get('connections', []):
        if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
            match.append(conn['connectionId'])
            connection.append(conn)

    # verifying if the connections exists; if true, return connection identifier, otherwise return False
    if verify and len(match) == 1:
        return match[0]
    elif verify:
        return False
    # not verifying if the connection exists; just return current connection info
    elif len(connection) == 1:
        return {'connection': connection[0]}
    return {'connection': {}}
def main():
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        active=dict(type='bool'),
        force=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    state = module.params.get('state')

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
    # failures, so apply a jittered backoff to SES calls.
    client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if state == 'absent':
        remove_rule_set(client, module)
    else:
        create_or_update_rule_set(client, module)
def main():
    module = AnsibleAWSModule(
        argument_spec={
            "identity": dict(required=True, type='str'),
            "state": dict(default='present', choices=['present', 'absent']),
            "bounce_notifications": dict(type='dict'),
            "complaint_notifications": dict(type='dict'),
            "delivery_notifications": dict(type='dict'),
            "feedback_forwarding": dict(default=True, type='bool'),
        },
        supports_check_mode=True,
    )

    for notification_type in ('bounce', 'complaint', 'delivery'):
        param_name = notification_type + '_notifications'
        arg_dict = module.params.get(param_name)
        if arg_dict:
            extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
            if extra_keys:
                module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ', valid keys are topic or include_headers')

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
    # failures, so apply a jittered backoff to SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    state = module.params.get("state")

    if state == 'present':
        region = get_aws_connection_info(module, boto3=True)[0]
        account_id = get_account_id(module)
        validate_params_for_identity_present(module)
        create_or_update_identity(connection, module, region, account_id)
    else:
        destroy_identity(connection, module)
def run_func(*args, **kwargs):
    try:
        result = AWSRetry.backoff(tries=8, delay=5, catch_extra_error_codes=['DirectConnectClientException'])(f)(*args, **kwargs)
    except (ClientError, BotoCoreError) as e:
        raise DirectConnectError(failure_msg, traceback.format_exc(), e)

    return result
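# --- Illustrative sketch, not part of the original module ---
# AWSRetry.backoff(...) takes its settings as keyword arguments, so modules often keep them
# in a dict (retry_params elsewhere in this file) and splat it into the decorator. The dict
# contents below are an assumption used only to show the shape, not values from the source.
example_retry_params = {'tries': 8, 'delay': 5, 'catch_extra_error_codes': ['DirectConnectClientException']}

def example_describe_connections_with_retry(client):
    return AWSRetry.backoff(**example_retry_params)(client.describe_connections)()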
def create_or_update(module, template_options):
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    template, template_versions = existing_templates(module)
    out = {}
    lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options))
    if not (template or template_versions):
        # create a full new one
        try:
            resp = ec2.create_launch_template(
                LaunchTemplateName=module.params['template_name'],
                LaunchTemplateData=lt_data,
                ClientToken=uuid4().hex,
                aws_retry=True,
            )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create launch template")
        template, template_versions = existing_templates(module)
        out['changed'] = True
    elif template and template_versions:
        most_recent = sorted(template_versions, key=lambda x: x['VersionNumber'])[-1]
        if lt_data == most_recent['LaunchTemplateData']:
            out['changed'] = False
            return out
        try:
            resp = ec2.create_launch_template_version(
                LaunchTemplateId=template['LaunchTemplateId'],
                LaunchTemplateData=lt_data,
                ClientToken=uuid4().hex,
                aws_retry=True,
            )
            if module.params.get('default_version') in (None, ''):
                # no need to do anything, leave the existing version as default
                pass
            elif module.params.get('default_version') == 'latest':
                set_default = ec2.modify_launch_template(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']),
                    ClientToken=uuid4().hex,
                    aws_retry=True,
                )
            else:
                try:
                    int(module.params.get('default_version'))
                except ValueError:
                    module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(
                        module.params.get('default_version')))
                set_default = ec2.modify_launch_template(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    DefaultVersion=to_text(int(module.params.get('default_version'))),
                    ClientToken=uuid4().hex,
                    aws_retry=True,
                )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create subsequent launch template version")
        template, template_versions = existing_templates(module)
        out['changed'] = True
    return out
def main():
    argument_spec = dict(
        name=dict(required=True),
        cidr_block=dict(type='list', required=True),
        tenancy=dict(choices=['default', 'dedicated'], default='default'),
        dns_support=dict(type='bool', default=True),
        dns_hostnames=dict(type='bool', default=True),
        dhcp_opts_id=dict(),
        tags=dict(type='dict', aliases=['resource_tags']),
        state=dict(choices=['present', 'absent'], default='present'),
        multi_ok=dict(type='bool', default=False),
        purge_cidrs=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    name = module.params.get('name')
    cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
    purge_cidrs = module.params.get('purge_cidrs')
    tenancy = module.params.get('tenancy')
    dns_support = module.params.get('dns_support')
    dns_hostnames = module.params.get('dns_hostnames')
    dhcp_id = module.params.get('dhcp_opts_id')
    tags = module.params.get('tags')
    state = module.params.get('state')
    multi = module.params.get('multi_ok')

    changed = False

    connection = module.client(
        'ec2',
        retry_decorator=AWSRetry.jittered_backoff(
            retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
        )
    )

    if dns_hostnames and not dns_support:
        module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')

    if state == 'present':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is None:
            vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
            changed = True

        vpc_obj = get_vpc(module, connection, vpc_id)

        associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
                                if cidr['CidrBlockState']['State'] != 'disassociated')
        to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
        to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
        expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add

        if len(cidr_block) > 1:
            for cidr in to_add:
                changed = True
                connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)

        if purge_cidrs:
            for association_id in to_remove:
                changed = True
                try:
                    connection.disassociate_vpc_cidr_block(AssociationId=association_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
                                            "are associated with the CIDR block before you can disassociate it.".format(association_id))

        if dhcp_id is not None:
            try:
                if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
                    changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to update DHCP options")

        if tags is not None or name is not None:
            try:
                if update_vpc_tags(connection, module, vpc_id, tags, name):
                    changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Failed to update tags")

        current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
        current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
        if current_dns_enabled != dns_support:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Failed to update enabled dns support attribute")
        if current_dns_hostnames != dns_hostnames:
            changed = True
            if not module.check_mode:
                try:
                    connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")

        # wait for associated cidrs to match
        if to_add or to_remove:
            try:
                connection.get_waiter('vpc_available').wait(
                    VpcIds=[vpc_id],
                    Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
                )
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, "Failed to wait for CIDRs to update")

        # try to wait for enableDnsSupport and enableDnsHostnames to match
        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
        wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)

        final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
        final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
        final_state['id'] = final_state.pop('vpc_id')

        module.exit_json(changed=changed, vpc=final_state)

    elif state == 'absent':

        # Check if VPC exists
        vpc_id = vpc_exists(module, connection, name, cidr_block, multi)

        if vpc_id is not None:
            try:
                if not module.check_mode:
                    connection.delete_vpc(VpcId=vpc_id)
                changed = True
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
                                            "and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))

        module.exit_json(changed=changed, vpc={})
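# --- Illustrative sketch, not part of the original module ---
# The 'vpc_available' waiter accepts the same Filters as describe_vpcs, which is how the code
# above waits for CIDR associations to converge before reporting the final state. The helper
# below is a hypothetical minimal version of that wait.
def example_wait_for_vpc_cidrs(ec2_client, vpc_id, cidrs):
    ec2_client.get_waiter('vpc_available').wait(
        VpcIds=[vpc_id],
        Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': cidrs}],
    )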
def main():
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list'),
        accounts=dict(type='list'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True
    )
    if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')):
        module.fail_json(msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26")

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    cfn = module.client('cloudformation', retry_decorator=AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30))
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg="Can't create a stack set without choosing at least one account. "
                "To get the ID of the current account, use the aws_caller_info module."
        )

    module.params['accounts'] = [to_native(a) for a in module.params['accounts']]

    stack_params['StackSetName'] = module.params['name']

    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                    "`template_body`, or `template_url`".format(module.params['name'])
            )

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})

    if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params['execution_role_name']

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True, msg='New stack set would be created', meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
            elif unspecified_stacks and module.params.get('purge_stack_instances'):
                module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params['OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in new_stack_instances)),
                Regions=list(set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in existing_stack_instances)),
                Regions=list(set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        for op in operation_ids:
            await_stack_set_operation(
                module, cfn, operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
        if module.params.get('purge_stack_instances') is False:
            pass
        try:
            cfn.delete_stack_set(
                StackSetName=module.params['name'],
            )
            module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op
            )

            await_stack_set_operation(
                module, cfn, operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(
                    StackSetName=module.params['name'],
                )
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(
                    StackSetName=module.params['name'],
                )
                stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i) for i in instances['Summaries'])
                module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)

            module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))

    result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute", **result)
    module.exit_json(changed=changed, **result)
def get_role_with_backoff(connection, module, name):
    try:
        return AWSRetry.jittered_backoff(catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)(RoleName=name)['Role']
    except (BotoCoreError, ClientError) as e:
        module.fail_json(msg="Unable to get role {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc())
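# --- Illustrative sketch, not part of the original module ---
# AWSRetry retries throttling errors by default; catch_extra_error_codes extends the retried
# set, here covering IAM's eventually consistent 'NoSuchEntity' responses for a just-created
# role. The helper below is a hypothetical stand-alone version of the call above.
def example_wait_for_role(iam_client, role_name):
    get_role = AWSRetry.jittered_backoff(retries=5, catch_extra_error_codes=['NoSuchEntity'])(iam_client.get_role)
    return get_role(RoleName=role_name)['Role']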
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        stack_name=dict(required=True),
        template_parameters=dict(required=False, type='dict', default={}),
        state=dict(default='present', choices=['present', 'absent']),
        template=dict(default=None, required=False, type='path'),
        notification_arns=dict(default=None, required=False),
        stack_policy=dict(default=None, required=False),
        disable_rollback=dict(default=False, type='bool'),
        on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
        create_timeout=dict(default=None, type='int'),
        template_url=dict(default=None, required=False),
        template_body=dict(default=None, required=False),
        template_format=dict(default=None, choices=['json', 'yaml'], required=False),
        create_changeset=dict(default=False, type='bool'),
        changeset_name=dict(default=None, required=False),
        role_arn=dict(default=None, required=False),
        tags=dict(default=None, type='dict'),
        termination_protection=dict(default=None, type='bool'),
        events_limit=dict(default=200, type='int'),
        backoff_retries=dict(type='int', default=10, required=False),
        backoff_delay=dict(type='int', default=3, required=False),
        backoff_max_delay=dict(type='int', default=30, required=False),
        capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')

    invalid_capabilities = []
    user_capabilities = module.params.get('capabilities')
    for user_cap in user_capabilities:
        if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
            invalid_capabilities.append(user_cap)

    if invalid_capabilities:
        module.fail_json(msg="Specified capabilities are invalid : %r,"
                             " please check documentation for valid capabilities" % invalid_capabilities)

    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {
        'Capabilities': user_capabilities,
        'ClientRequestToken': to_native(uuid.uuid4()),
    }
    state = module.params['state']
    stack_params['StackName'] = module.params['stack_name']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as template_fh:
            stack_params['TemplateBody'] = template_fh.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']

    if module.params.get('notification_arns'):
        stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
    else:
        stack_params['NotificationARNs'] = []

    # can't check the policy when verifying.
    if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
        with open(module.params['stack_policy'], 'r') as stack_policy_fh:
            stack_params['StackPolicyBody'] = stack_policy_fh.read()

    template_parameters = module.params['template_parameters']

    stack_params['Parameters'] = []
    for k, v in template_parameters.items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = str(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})

    if isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('role_arn'):
        stack_params['RoleARN'] = module.params['role_arn']

    result = {}

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        cfn = boto3_conn(module, conn_type='client', resource='cloudformation',
                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=boto_exception(e))

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    backoff_wrapper = AWSRetry.jittered_backoff(
        retries=module.params.get('backoff_retries'),
        delay=module.params.get('backoff_delay'),
        max_delay=module.params.get('backoff_max_delay')
    )
    cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
    cfn.create_stack = backoff_wrapper(cfn.create_stack)
    cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
    cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
    cfn.update_stack = backoff_wrapper(cfn.update_stack)
    cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
    cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
    cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
    if boto_supports_termination_protection(cfn):
        cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)

    stack_info = get_stack_facts(cfn, stack_params['StackName'])

    if module.check_mode:
        if state == 'absent' and stack_info:
            module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
        elif state == 'absent' and not stack_info:
            module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
        elif state == 'present' and not stack_info:
            module.exit_json(changed=True, msg='New stack would be created', meta=[])
        else:
            module.exit_json(**check_mode_changeset(module, stack_params, cfn))

    if state == 'present':
        if not stack_info:
            result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
        elif module.params.get('create_changeset'):
            result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
        else:
            if module.params.get('termination_protection') is not None:
                update_termination_protection(module, cfn, stack_params['StackName'],
                                              bool(module.params.get('termination_protection')))
            result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))

        # format the stack output
        stack = get_stack_facts(cfn, stack_params['StackName'])
        if stack is not None:
            if result.get('stack_outputs') is None:
                # always define stack_outputs, but it may be empty
                result['stack_outputs'] = {}
            for output in stack.get('Outputs', []):
                result['stack_outputs'][output['OutputKey']] = output['OutputValue']
            stack_resources = []
            reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
            for res in reslist.get('StackResourceSummaries', []):
                stack_resources.append({
                    "logical_resource_id": res['LogicalResourceId'],
                    "physical_resource_id": res.get('PhysicalResourceId', ''),
                    "resource_type": res['ResourceType'],
                    "last_updated_time": res['LastUpdatedTimestamp'],
                    "status": res['ResourceStatus'],
                    "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
                })
            result['stack_resources'] = stack_resources

    elif state == 'absent':
        # absent state is different because of the way delete_stack works.
        # problem is it doesn't give an error if stack isn't found
        # so must describe the stack first
        try:
            stack = get_stack_facts(cfn, stack_params['StackName'])
            if not stack:
                result = {'changed': False, 'output': 'Stack not found.'}
            else:
                if stack_params.get('RoleARN') is None:
                    cfn.delete_stack(StackName=stack_params['StackName'])
                else:
                    cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
                result = stack_operation(cfn, stack_params['StackName'], 'DELETE',
                                         module.params.get('events_limit'),
                                         stack_params.get('ClientRequestToken', None))
        except Exception as err:
            module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())

    if module.params['template_format'] is not None:
        result['warnings'] = [('Argument `template_format` is deprecated '
                               'since Ansible 2.3, JSON and YAML templates are now passed '
                               'directly to the CloudFormation API.')]
    module.exit_json(**result)
def main():
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'description': dict(type='str'),
            'scope': dict(type='dict'),
            'source': dict(type='dict', required=True),
            'input_parameters': dict(type='str'),
            'execution_frequency': dict(
                type='str',
                choices=['One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours', 'TwentyFour_Hours']
            ),
        },
        supports_check_mode=False,
    )

    result = {'changed': False}

    name = module.params.get('name')
    resource_type = module.params.get('resource_type')
    state = module.params.get('state')

    params = {}
    if name:
        params['ConfigRuleName'] = name
    if module.params.get('description'):
        params['Description'] = module.params.get('description')
    if module.params.get('scope'):
        params['Scope'] = {}
        if module.params.get('scope').get('compliance_types'):
            params['Scope'].update({'ComplianceResourceTypes': module.params.get('scope').get('compliance_types')})
        if module.params.get('scope').get('tag_key'):
            params['Scope'].update({'TagKey': module.params.get('scope').get('tag_key')})
        if module.params.get('scope').get('tag_value'):
            params['Scope'].update({'TagValue': module.params.get('scope').get('tag_value')})
        if module.params.get('scope').get('compliance_id'):
            params['Scope'].update({'ComplianceResourceId': module.params.get('scope').get('compliance_id')})
    if module.params.get('source'):
        params['Source'] = {}
        if module.params.get('source').get('owner'):
            params['Source'].update({'Owner': module.params.get('source').get('owner')})
        if module.params.get('source').get('identifier'):
            params['Source'].update({'SourceIdentifier': module.params.get('source').get('identifier')})
        if module.params.get('source').get('details'):
            params['Source'].update({'SourceDetails': module.params.get('source').get('details')})
    if module.params.get('input_parameters'):
        params['InputParameters'] = module.params.get('input_parameters')
    if module.params.get('execution_frequency'):
        params['MaximumExecutionFrequency'] = module.params.get('execution_frequency')
    params['ConfigRuleState'] = 'ACTIVE'

    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())

    existing_rule = rule_exists(client, module, params)

    if state == 'present':
        if not existing_rule:
            create_resource(client, module, params, result)
        else:
            update_resource(client, module, params, result)

    if state == 'absent':
        if existing_rule:
            delete_resource(client, module, params, result)

    module.exit_json(**result)