def call_method(client, module, method_name, parameters):
    """Invoke a boto3 RDS client method with retries and optional waiting.

    :param client: boto3 RDS client
    :param module: AnsibleAWSModule instance (reads 'wait' and, for
        modify_db_instance, 'db_instance_identifier' from module.params)
    :param method_name: name of the client method to call
    :param parameters: kwargs passed to the client method
    :return: tuple (result, changed); in check mode nothing is called and
        (empty dict, True) is returned
    """
    result = {}
    changed = True
    if not module.check_mode:
        wait = module.params['wait']
        # TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes
        method = getattr(client, method_name)
        try:
            if method_name == 'modify_db_instance':
                # check if instance is in an available state first, if possible
                if wait:
                    wait_for_status(client, module, module.params['db_instance_identifier'], method_name)
                # modify_db_instance can transiently fail with InvalidDBInstanceState,
                # so that code is retried in addition to the defaults.
                result = AWSRetry.jittered_backoff(
                    catch_extra_error_codes=['InvalidDBInstanceState'])(
                        method)(**parameters)
            else:
                result = AWSRetry.jittered_backoff()(method)(**parameters)
        except (BotoCoreError, ClientError) as e:
            # handle_errors decides whether the error means "no change needed"
            # (returns False) or fails the module outright.
            changed = handle_errors(module, e, method_name, parameters)

        if wait and changed:
            identifier = get_final_identifier(method_name, module)
            wait_for_status(client, module, identifier, method_name)
    return result, changed
def __init__(self, module, instance_id, get_unused_target_groups):
    """Create ec2/elbv2 clients and load the instance's target groups.

    :param module: AnsibleAWSModule instance
    :param instance_id: EC2 instance id whose target groups are managed
    :param get_unused_target_groups: whether to also consider target groups
        with no registered targets
    """
    self.module = module
    retry_decorator = AWSRetry.jittered_backoff(retries=10)
    try:
        self.ec2 = self.module.client("ec2", retry_decorator=retry_decorator)
    except (ClientError, BotoCoreError) as e:
        self.module.fail_json_aws(e, msg="Couldn't connect to ec2")
    try:
        self.elbv2 = self.module.client("elbv2", retry_decorator=retry_decorator)
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e, msg="Could not connect to elbv2")
    self.instance_id = instance_id
    self.get_unused_target_groups = get_unused_target_groups
    # Cache the target groups up front; all later operations work off this.
    self.tgs = self._get_target_groups()
def delete(self):
    """Delete the listener, failing the module on any AWS error."""
    remove = AWSRetry.jittered_backoff()(self.connection.delete_listener)
    try:
        remove(ListenerArn=self.listener)
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
    """Reconcile the tags on a subnet with the desired tag set.

    :param conn: boto3 EC2 client
    :param module: AnsibleAWSModule instance
    :param subnet: subnet dict (only subnet['id'] is read)
    :param tags: desired tags as an ansible dict
    :param purge_tags: when true, tags not in `tags` are removed
    :param start_time: start time handed to the waiter for timeout accounting
    :return: True if any tag was created or deleted
    """
    changed = False

    filters = ansible_dict_to_boto3_filter_list({
        'resource-id': subnet['id'],
        'resource-type': 'subnet'
    })
    try:
        cur_tags = conn.describe_tags(Filters=filters)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't describe tags")

    # Diff current vs. desired tags.
    to_update, to_delete = compare_aws_tags(
        boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)

    if to_update:
        try:
            if not module.check_mode:
                # Retry on InvalidSubnetID.NotFound: a freshly created subnet may
                # not be visible to the tagging API yet (eventual consistency).
                AWSRetry.exponential_backoff(
                    catch_extra_error_codes=['InvalidSubnetID.NotFound'])(
                        conn.create_tags)(
                            Resources=[subnet['id']],
                            Tags=ansible_dict_to_boto3_tag_list(to_update))
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create tags")

    if to_delete:
        try:
            if not module.check_mode:
                # delete_tags takes Key-only entries; values are not needed.
                tags_list = []
                for key in to_delete:
                    tags_list.append({'Key': key})
                AWSRetry.exponential_backoff(
                    catch_extra_error_codes=['InvalidSubnetID.NotFound'])(
                        conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
            changed = True
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't delete tags")

    if module.params['wait'] and not module.check_mode:
        # Wait for tags to be updated
        filters = [{
            'Name': 'tag:{0}'.format(k),
            'Values': [v]
        } for k, v in tags.items()]
        handle_waiter(conn, module, 'subnet_exists', {
            'SubnetIds': [subnet['id']],
            'Filters': filters
        }, start_time)

    return changed
def ensure_tags(self, tgw_id, tags, purge_tags):
    """
    Ensures tags are applied to the transit gateway.  Optionally will remove any
    existing tags not in the tags argument if purge_tags is set to true

    :param tgw_id: The AWS id of the transit gateway
    :param tags: list of tags to apply to the transit gateway.
    :param purge_tags: when true existing tags not in tags parms are removed
    :return: true if tags were updated
    """
    tags_changed = False
    filters = ansible_dict_to_boto3_filter_list({'resource-id': tgw_id})
    try:
        cur_tags = self._connection.describe_tags(Filters=filters)
    except (ClientError, BotoCoreError) as e:
        self._module.fail_json_aws(e, msg="Couldn't describe tags")

    # Diff current vs. desired tags.
    to_update, to_delete = compare_aws_tags(
        boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)

    if to_update:
        try:
            if not self._check_mode:
                AWSRetry.exponential_backoff()(
                    self._connection.create_tags)(
                        Resources=[tgw_id],
                        Tags=ansible_dict_to_boto3_tag_list(to_update))
            self._results['changed'] = True
            tags_changed = True
        except (ClientError, BotoCoreError) as e:
            self._module.fail_json_aws(
                e, msg="Couldn't create tags {0} for resource {1}".format(
                    ansible_dict_to_boto3_tag_list(to_update), tgw_id))

    if to_delete:
        try:
            if not self._check_mode:
                # delete_tags takes Key-only entries; values are not needed.
                tags_list = []
                for key in to_delete:
                    tags_list.append({'Key': key})
                AWSRetry.exponential_backoff()(
                    self._connection.delete_tags)(Resources=[tgw_id], Tags=tags_list)
            self._results['changed'] = True
            tags_changed = True
        except (ClientError, BotoCoreError) as e:
            self._module.fail_json_aws(
                e, msg="Couldn't delete tags {0} for resource {1}".format(
                    ansible_dict_to_boto3_tag_list(to_delete), tgw_id))

    return tags_changed
def delete(self):
    """
    Delete elb
    :return:
    """
    remove_lb = AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)
    try:
        remove_lb(LoadBalancerArn=self.elb['LoadBalancerArn'])
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)

    self.changed = True
def delete(self):
    """
    Delete a listener rule
    :return:
    """
    remove_rule = AWSRetry.jittered_backoff()(self.connection.delete_rule)
    try:
        remove_rule(RuleArn=self.rule['RuleArn'])
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)

    self.changed = True
def modify_security_groups(self):
    """
    Modify elb security groups to match module parameters
    :return:
    """
    set_groups = AWSRetry.jittered_backoff()(self.connection.set_security_groups)
    try:
        set_groups(LoadBalancerArn=self.elb['LoadBalancerArn'],
                   SecurityGroups=self.security_groups)
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)

    self.changed = True
def modify_tags(self):
    """
    Modify elb tags
    :return:
    """
    add_tags = AWSRetry.jittered_backoff()(self.connection.add_tags)
    try:
        add_tags(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags)
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)

    self.changed = True
def delete_tags(self, tags_to_delete):
    """
    Delete elb tags
    :return:
    """
    remove_tags = AWSRetry.jittered_backoff()(self.connection.remove_tags)
    try:
        remove_tags(ResourceArns=[self.elb['LoadBalancerArn']],
                    TagKeys=tags_to_delete)
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)

    self.changed = True
def create_elb(self):
    """
    Create a load balancer
    :return:
    """
    # Required parameters.
    params = {
        'Name': self.name,
        'Type': self.type,
        'Scheme': self.scheme,
    }

    # Optional parameters, only sent when set.
    if self.subnets is not None:
        params['Subnets'] = self.subnets
    if self.subnet_mappings is not None:
        params['SubnetMappings'] = self.subnet_mappings
    if self.tags:
        params['Tags'] = self.tags

    create_lb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)
    try:
        self.elb = create_lb(**params)['LoadBalancers'][0]
        self.changed = True
        self.new_load_balancer = True
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)

    if self.wait:
        self.wait_for_status(self.elb['LoadBalancerArn'])
def get_vpc(module, connection, vpc_id):
    """Return the described VPC (with 'ClassicLinkEnabled' added) once it is available.

    :param module: AnsibleAWSModule instance
    :param connection: boto3 EC2 client
    :param vpc_id: id of the VPC to describe
    :return: VPC dict from describe_vpcs, augmented with ClassicLinkEnabled
    """
    # wait for vpc to be available
    try:
        connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(
            e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))

    try:
        # Retry on InvalidVpcID.NotFound: a VPC that just became available may
        # not yet be visible to describe_vpcs (eventual consistency).
        vpc_obj = AWSRetry.backoff(
            delay=3, tries=8,
            catch_extra_error_codes=['InvalidVpcID.NotFound'],
        )(connection.describe_vpcs)(VpcIds=[vpc_id])['Vpcs'][0]
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to describe VPCs")

    try:
        vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(
            connection, vpc_id)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        # Fix: previously reported the misleading generic "Failed to describe VPCs"
        # for a failure that actually occurred while querying ClassicLink status.
        module.fail_json_aws(
            e, msg="Failed to describe ClassicLink status for VPC {0}".format(vpc_id))

    return vpc_obj
def delete_template(module):
    """Delete a launch template and its non-default versions.

    :param module: AnsibleAWSModule instance
    :return: result dict with 'changed' plus, when a template existed,
        'deleted_versions' and 'deleted_template'
    """
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    template, template_versions = existing_templates(module)
    deleted_versions = []
    if template or template_versions:
        # The default version cannot be deleted individually; it goes away with
        # the template itself below.
        non_default_versions = [to_text(t['VersionNumber']) for t in template_versions if not t['DefaultVersion']]
        if non_default_versions:
            try:
                v_resp = ec2.delete_launch_template_versions(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    Versions=non_default_versions,
                )
                # Partial failure is a warning, not a module failure.
                if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']:
                    module.warn('Failed to delete template versions {0} on launch template {1}'.format(
                        v_resp['UnsuccessfullyDeletedLaunchTemplateVersions'],
                        template['LaunchTemplateId'],
                    ))
                deleted_versions = [camel_dict_to_snake_dict(v) for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']]
            except (ClientError, BotoCoreError) as e:
                module.fail_json_aws(e, msg="Could not delete existing versions of the launch template {0}".format(template['LaunchTemplateId']))
        try:
            resp = ec2.delete_launch_template(
                LaunchTemplateId=template['LaunchTemplateId'],
            )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template['LaunchTemplateId']))
        return {
            'deleted_versions': deleted_versions,
            'deleted_template': camel_dict_to_snake_dict(resp['LaunchTemplate']),
            'changed': True,
        }
    else:
        # Nothing to delete.
        return {'changed': False}
def existing_templates(module):
    """Look up the launch template referenced by the module parameters.

    :param module: AnsibleAWSModule instance; reads 'template_id' or 'template_name'
    :return: tuple (template dict or None, list of version dicts); (None, [])
        when no template with the given name exists
    """
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    matches = None
    try:
        if module.params.get('template_id'):
            matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')])
        elif module.params.get('template_name'):
            matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')])
    except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e:
        # no named template was found, return nothing/empty versions
        return None, []
    except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e:  # pylint: disable=duplicate-except
        # Fix: these messages previously interpolated the non-existent param
        # 'launch_template_id' (always None); the module param is 'template_id'.
        module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format(
            module.params.get('template_id')))
    except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            e, msg='Launch template with ID {0} could not be found, please supply a name '
            'instead so that a new template can be created'.format(module.params.get('template_id')))
    except (ClientError, BotoCoreError, WaiterError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
    else:
        template = matches['LaunchTemplates'][0]
        template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber']
        try:
            return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id)['LaunchTemplateVersions']
        except (ClientError, BotoCoreError, WaiterError) as e:
            module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(template['LaunchTemplateName'], template_id))
def modify(self):
    """Apply the stored listener configuration via modify_listener.

    Translates the known botocore "Order" validation error into a hint to
    upgrade botocore; any other AWS error fails the module as-is.
    """
    try:
        # Rules is not a valid parameter for modify_listener
        self.listener.pop('Rules', None)
        AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener)
    except (BotoCoreError, ClientError) as e:
        if '"Order", must be one of: Type, TargetGroupArn' not in str(e):
            self.module.fail_json_aws(e)
        else:
            self.module.fail_json(
                msg="installed version of botocore does not support "
                    "multiple actions, please upgrade botocore to version "
                    "1.10.30 or higher")
def main():
    """Entry point: gather RDS instance and/or cluster snapshot information."""
    argument_spec = dict(
        db_snapshot_identifier=dict(aliases=['snapshot_name']),
        db_instance_identifier=dict(),
        db_cluster_identifier=dict(),
        db_cluster_snapshot_identifier=dict(),
        snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public']),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[[
            'db_snapshot_identifier', 'db_instance_identifier',
            'db_cluster_identifier', 'db_cluster_snapshot_identifier'
        ]],
    )
    if module._name == 'rds_snapshot_facts':
        module.deprecate(
            "The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'",
            version='2.13')

    conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))

    results = dict()
    # Instance snapshots, unless the user filtered to cluster-level only.
    if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
        results['snapshots'] = standalone_snapshot_info(module, conn)
    # Cluster snapshots, unless the user filtered to instance-level only.
    if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
        results['cluster_snapshots'] = cluster_snapshot_info(module, conn)

    module.exit_json(changed=False, **results)
def get_elb_listener(connection, module, elb_arn, listener_port):
    """
    Get an ELB listener based on the port provided. If not found, return None.

    :param connection: AWS boto3 elbv2 connection
    :param module: Ansible module
    :param elb_arn: ARN of the ELB to look at
    :param listener_port: Port of the listener to look for
    :return: boto3 ELB listener dict or None if not found
    """
    try:
        paginator = connection.get_paginator('describe_listeners')
        full_result = AWSRetry.jittered_backoff()(paginator.paginate)(
            LoadBalancerArn=elb_arn).build_full_result()
        listeners = full_result['Listeners']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e)

    # First listener on the requested port, or None.
    return next(
        (listener for listener in listeners if listener['Port'] == listener_port),
        None)
def main():
    """Entry point: create, update or remove an SES receipt rule set."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        active=dict(type='bool'),
        force=dict(type='bool', default=False),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get('state') == 'absent':
        remove_rule_set(client, module)
    else:
        create_or_update_rule_set(client, module)
def get_role_with_backoff(connection, module, name):
    """Fetch an IAM role by name, retrying on NoSuchEntity (eventual consistency)."""
    fetch = AWSRetry.jittered_backoff(
        catch_extra_error_codes=['NoSuchEntity'])(connection.get_role)
    try:
        return fetch(RoleName=name)['Role']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Unable to get role {0}".format(name))
def main():
    """Entry point: manage a sending authorization policy on an SES identity."""
    module = AnsibleAWSModule(
        argument_spec={
            'identity': dict(required=True, type='str'),
            'state': dict(default='present', choices=['present', 'absent']),
            'policy_name': dict(required=True, type='str'),
            'policy': dict(type='json', default=None),
        },
        required_if=[['state', 'present', ['policy']]],
        supports_check_mode=True,
    )

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel that's caused throttling
    # failures so apply a jittered backoff to call SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get("state") == 'present':
        create_or_update_identity_policy(connection, module)
    else:
        delete_identity_policy(connection, module)
def _get_elb_listener_rules(self):
    """Return the rules attached to the current listener."""
    describe = AWSRetry.jittered_backoff()(self.connection.describe_rules)
    try:
        return describe(ListenerArn=self.current_listener['ListenerArn'])['Rules']
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
def main():
    """Entry point: manage an AWS Config delivery channel."""
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            's3_bucket': dict(type='str', required=True),
            's3_prefix': dict(type='str'),
            'sns_topic_arn': dict(type='str'),
            'delivery_frequency': dict(type='str', choices=[
                'One_Hour', 'Three_Hours', 'Six_Hours', 'Twelve_Hours',
                'TwentyFour_Hours'
            ]),
        },
        supports_check_mode=False,
    )

    result = {'changed': False}

    name = module.params.get('name')
    state = module.params.get('state')

    # Translate module params into the boto3-style request parameters.
    params = {}
    if name:
        params['name'] = name
    if module.params.get('s3_bucket'):
        params['s3BucketName'] = module.params.get('s3_bucket')
    if module.params.get('s3_prefix'):
        params['s3KeyPrefix'] = module.params.get('s3_prefix')
    if module.params.get('sns_topic_arn'):
        params['snsTopicARN'] = module.params.get('sns_topic_arn')
    if module.params.get('delivery_frequency'):
        params['configSnapshotDeliveryProperties'] = {
            'deliveryFrequency': module.params.get('delivery_frequency')
        }

    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())

    resource_status = resource_exists(client, module, params)

    if state == 'present':
        # Create when absent, update when present; the helpers populate `result`.
        if not resource_status:
            create_resource(client, module, params, result)
        if resource_status:
            update_resource(client, module, params, result)

    if state == 'absent':
        if resource_status:
            delete_resource(client, module, params, result)

    module.exit_json(**result)
def create_or_update(module, template_options):
    """Create a launch template, or add a new version to an existing one.

    :param module: AnsibleAWSModule instance
    :param template_options: iterable of param names that belong in the
        launch template data
    :return: dict with at least 'changed'
    """
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    template, template_versions = existing_templates(module)
    out = {}
    lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options))
    if not (template or template_versions):
        # create a full new one
        try:
            resp = ec2.create_launch_template(
                LaunchTemplateName=module.params['template_name'],
                LaunchTemplateData=lt_data,
                ClientToken=uuid4().hex,
                aws_retry=True,
            )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create launch template")
        template, template_versions = existing_templates(module)
        out['changed'] = True
    elif template and template_versions:
        most_recent = sorted(template_versions, key=lambda x: x['VersionNumber'])[-1]
        # If the desired data matches the latest version, nothing to do.
        if lt_data == most_recent['LaunchTemplateData']:
            out['changed'] = False
            return out
        try:
            resp = ec2.create_launch_template_version(
                LaunchTemplateId=template['LaunchTemplateId'],
                LaunchTemplateData=lt_data,
                ClientToken=uuid4().hex,
                aws_retry=True,
            )
            if module.params.get('default_version') in (None, ''):
                # no need to do anything, leave the existing version as default
                pass
            elif module.params.get('default_version') == 'latest':
                # Promote the just-created version to default.
                set_default = ec2.modify_launch_template(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    DefaultVersion=to_text(resp['LaunchTemplateVersion']['VersionNumber']),
                    ClientToken=uuid4().hex,
                    aws_retry=True,
                )
            else:
                # Explicit version number requested; validate it first.
                try:
                    int(module.params.get('default_version'))
                except ValueError:
                    module.fail_json(msg='default_version param was not a valid integer, got "{0}"'.format(module.params.get('default_version')))
                set_default = ec2.modify_launch_template(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    DefaultVersion=to_text(int(module.params.get('default_version'))),
                    ClientToken=uuid4().hex,
                    aws_retry=True,
                )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(e, msg="Couldn't create subsequent launch template version")
        template, template_versions = existing_templates(module)
        out['changed'] = True
    return out
def run_func(*args, **kwargs):
    """Run the wrapped function with backoff, raising DirectConnectError on AWS failure."""
    retry = AWSRetry.backoff(
        tries=8, delay=5,
        catch_extra_error_codes=['DirectConnectClientException'])
    try:
        return retry(f)(*args, **kwargs)
    except (ClientError, BotoCoreError) as e:
        raise DirectConnectError(failure_msg, traceback.format_exc(), e)
def determine_iam_role(module, name_or_arn):
    """Resolve an instance profile name or ARN.

    If name_or_arn already looks like an instance-profile ARN it is returned
    unchanged; otherwise IAM is queried for a profile with that name and the
    module fails if it cannot be found.

    :param module: AnsibleAWSModule instance (unused for ARN input)
    :param name_or_arn: instance profile name or full ARN
    :return: the ARN string as given, or {'arn': ...} from the IAM lookup
    """
    # Generalized: accept ARNs from any AWS partition (aws, aws-cn,
    # aws-us-gov, ...) instead of only the commercial 'aws' partition.
    if re.match(r'^arn:aws(-[a-z]+)*:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
        # NOTE(review): ARN input returns a plain string while a name lookup
        # returns {'arn': ...}; callers appear to handle both — confirm before
        # unifying the return type.
        return name_or_arn
    iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
    try:
        role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
        return {'arn': role['InstanceProfile']['Arn']}
    except is_boto3_error_code('NoSuchEntity') as e:
        module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
def modify_elb_attributes(self):
    """
    Update Network ELB attributes if required
    :return:
    """
    update_attributes = []

    # Compare desired values (stringified, lowercase) against the current
    # attribute values and queue only the ones that differ.
    if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \
            self.elb_attributes['load_balancing_cross_zone_enabled']:
        update_attributes.append({
            'Key': 'load_balancing.cross_zone.enabled',
            'Value': str(self.cross_zone_load_balancing).lower()
        })
    if self.deletion_protection is not None and str(
            self.deletion_protection).lower(
    ) != self.elb_attributes['deletion_protection_enabled']:
        update_attributes.append({
            'Key': 'deletion_protection.enabled',
            'Value': str(self.deletion_protection).lower()
        })

    if update_attributes:
        try:
            AWSRetry.jittered_backoff()(
                self.connection.modify_load_balancer_attributes)(
                    LoadBalancerArn=self.elb['LoadBalancerArn'],
                    Attributes=update_attributes)
            self.changed = True
        except (BotoCoreError, ClientError) as e:
            # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
            if self.new_load_balancer:
                AWSRetry.jittered_backoff()(
                    self.connection.delete_load_balancer)(
                        LoadBalancerArn=self.elb['LoadBalancerArn'])
            self.module.fail_json_aws(e)
def modify(self):
    """
    Modify a listener rule
    :return:
    """
    try:
        # 'Priority' is not accepted by modify_rule, so drop it first.
        del self.rule['Priority']
        modify_rule = AWSRetry.jittered_backoff()(self.connection.modify_rule)
        modify_rule(**self.rule)
    except (BotoCoreError, ClientError) as e:
        if '"Order", must be one of: Type, TargetGroupArn' not in str(e):
            self.module.fail_json_aws(e)
        else:
            self.module.fail_json(
                msg="installed version of botocore does not support "
                    "multiple actions, please upgrade botocore to version "
                    "1.10.30 or higher")

    self.changed = True
def wait(client, db_instance_id, waiter_name, extra_retry_codes):
    """Block until the named waiter completes for the given DB instance.

    :param client: boto3 RDS client
    :param db_instance_id: DB instance identifier to wait on
    :param waiter_name: boto3 waiter name, or a waiter defined in
        ansible.module_utils.aws.waiters
    :param extra_retry_codes: unused; kept for interface compatibility
    """
    # Fix: the original built an AWSRetry decorator from extra_retry_codes but
    # never applied it — a dead assignment, now removed. The waiter itself
    # already polls, so no retry wrapper is needed here.
    try:
        waiter = client.get_waiter(waiter_name)
    except ValueError:
        # using a waiter in ansible.module_utils.aws.waiters
        waiter = get_waiter(client, waiter_name)
    waiter.wait(
        WaiterConfig={'Delay': 60, 'MaxAttempts': 60},
        DBInstanceIdentifier=db_instance_id,
    )
def get_elb_tags(self):
    """
    Get load balancer tags
    :return:
    """
    describe = AWSRetry.jittered_backoff()(self.connection.describe_tags)
    try:
        tag_descriptions = describe(ResourceArns=[self.elb['LoadBalancerArn']])
        return tag_descriptions['TagDescriptions'][0]['Tags']
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
def _get_elb_listeners(self):
    """
    Get ELB listeners
    :return:
    """
    try:
        paginator = self.connection.get_paginator('describe_listeners')
        paginate = AWSRetry.jittered_backoff()(paginator.paginate)
        full_result = paginate(LoadBalancerArn=self.elb_arn).build_full_result()
        return full_result['Listeners']
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)