def delete(self):
    """
    Remove this listener from AWS.

    Fails the module (via fail_json_aws) on any Boto error.
    """
    delete_with_backoff = AWSRetry.jittered_backoff()(self.connection.delete_listener)
    try:
        delete_with_backoff(ListenerArn=self.listener)
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
def delete(self):
    """
    Delete elb
    :return:
    """
    delete_with_backoff = AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)
    try:
        delete_with_backoff(LoadBalancerArn=self.elb['LoadBalancerArn'])
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
    # Reaching this point means the API accepted the delete.
    self.changed = True
def delete(self):
    """
    Delete a listener rule
    :return:
    """
    delete_with_backoff = AWSRetry.jittered_backoff()(self.connection.delete_rule)
    try:
        delete_with_backoff(RuleArn=self.rule['RuleArn'])
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
    self.changed = True
def modify_security_groups(self):
    """
    Modify elb security groups to match module parameters
    :return:
    """
    set_groups_with_backoff = AWSRetry.jittered_backoff()(self.connection.set_security_groups)
    try:
        set_groups_with_backoff(
            LoadBalancerArn=self.elb['LoadBalancerArn'],
            SecurityGroups=self.security_groups,
        )
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
    self.changed = True
def modify_tags(self):
    """
    Modify elb tags
    :return:
    """
    add_tags_with_backoff = AWSRetry.jittered_backoff()(self.connection.add_tags)
    try:
        add_tags_with_backoff(
            ResourceArns=[self.elb['LoadBalancerArn']],
            Tags=self.tags,
        )
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
    self.changed = True
def delete_tags(self, tags_to_delete):
    """
    Delete elb tags

    :param tags_to_delete: list of tag keys to remove from the load balancer
    :return:
    """
    remove_tags_with_backoff = AWSRetry.jittered_backoff()(self.connection.remove_tags)
    try:
        remove_tags_with_backoff(
            ResourceArns=[self.elb['LoadBalancerArn']],
            TagKeys=tags_to_delete,
        )
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
    self.changed = True
def modify(self):
    """
    Modify an existing listener to match the parameters held in self.listener.

    Fails the module on error; suggests a botocore upgrade when the installed
    botocore is too old to understand multi-action ('Order') listeners.
    """
    # 'Rules' is not a valid parameter for modify_listener. pop() with a
    # default replaces the membership-test-then-pop (LBYL) dance and is a
    # no-op when the key is absent.
    self.listener.pop('Rules', None)
    try:
        AWSRetry.jittered_backoff()(
            self.connection.modify_listener)(**self.listener)
    except (BotoCoreError, ClientError) as e:
        # botocore < 1.10.30 rejects the 'Order' key on listener actions;
        # translate that parameter-validation error into an upgrade hint.
        if '"Order", must be one of: Type, TargetGroupArn' in str(e):
            self.module.fail_json(
                msg="installed version of botocore does not support "
                    "multiple actions, please upgrade botocore to version "
                    "1.10.30 or higher")
        else:
            self.module.fail_json_aws(e)
def get_elb_listener(connection, module, elb_arn, listener_port):
    """
    Get an ELB listener based on the port provided. If not found, return None.

    :param connection: AWS boto3 elbv2 connection
    :param module: Ansible module
    :param elb_arn: ARN of the ELB to look at
    :param listener_port: Port of the listener to look for
    :return: boto3 ELB listener dict or None if not found
    """
    try:
        listener_paginator = connection.get_paginator('describe_listeners')
        listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(
            LoadBalancerArn=elb_arn).build_full_result())['Listeners']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e)

    # next() with a default replaces the manual search-and-break loop and the
    # ambiguous accumulator name 'l' (PEP 8 E741).
    return next((listener for listener in listeners
                 if listener['Port'] == listener_port), None)
def main():
    """Entry point: create/update or remove an SES receipt rule set."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        active=dict(type='bool'),
        force=dict(type='bool', default=False),
    )
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    # SES throttles far more aggressively than most AWS APIs (the docs say one
    # call per second). Parallel CI runs of this test have tripped that limit,
    # so every SES call goes through a jittered backoff.
    client = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get('state') == 'absent':
        remove_rule_set(client, module)
    else:
        create_or_update_rule_set(client, module)
def create_elb(self):
    """
    Create a load balancer
    :return:
    """
    # Name, Type and Scheme are always sent; the rest only when supplied.
    params = {
        'Name': self.name,
        'Type': self.type,
        'Scheme': self.scheme,
    }
    if self.subnets is not None:
        params['Subnets'] = self.subnets
    if self.subnet_mappings is not None:
        params['SubnetMappings'] = self.subnet_mappings
    if self.tags:
        params['Tags'] = self.tags

    try:
        response = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)
        self.elb = response['LoadBalancers'][0]
        self.changed = True
        # Remembered so a later attribute-update failure can roll this back.
        self.new_load_balancer = True
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)

    if self.wait:
        self.wait_for_status(self.elb['LoadBalancerArn'])
def main():
    """Entry point: gather RDS instance and/or cluster snapshot information."""
    argument_spec = dict(
        db_snapshot_identifier=dict(aliases=['snapshot_name']),
        db_instance_identifier=dict(),
        db_cluster_identifier=dict(),
        db_cluster_snapshot_identifier=dict(),
        snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public']),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier',
                             'db_cluster_identifier', 'db_cluster_snapshot_identifier']],
    )
    if module._name == 'rds_snapshot_facts':
        module.deprecate("The 'rds_snapshot_facts' module has been renamed to 'rds_snapshot_info'",
                         version='2.13')

    conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
    params = module.params

    results = {}
    # Instance snapshots, unless a cluster-only selector was given.
    if not params['db_cluster_identifier'] and not params['db_cluster_snapshot_identifier']:
        results['snapshots'] = standalone_snapshot_info(module, conn)
    # Cluster snapshots, unless an instance-only selector was given.
    if not params['db_snapshot_identifier'] and not params['db_instance_identifier']:
        results['cluster_snapshots'] = cluster_snapshot_info(module, conn)

    module.exit_json(changed=False, **results)
def main():
    """Entry point: create/update or delete an SES sending-identity policy."""
    module = AnsibleAWSModule(
        argument_spec={
            'identity': dict(required=True, type='str'),
            'state': dict(default='present', choices=['present', 'absent']),
            'policy_name': dict(required=True, type='str'),
            'policy': dict(type='json', default=None),
        },
        required_if=[['state', 'present', ['policy']]],
        supports_check_mode=True,
    )

    # SES throttles far more aggressively than most AWS APIs (the docs say one
    # call per second). Parallel CI runs of this test have tripped that limit,
    # so every SES call goes through a jittered backoff.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get("state") == 'absent':
        delete_identity_policy(connection, module)
    else:
        create_or_update_identity_policy(connection, module)
def main():
    """Entry point: manage an AWS Config aggregation authorization."""
    module = AnsibleAWSModule(
        argument_spec={
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            'authorized_account_id': dict(type='str', required=True),
            'authorized_aws_region': dict(type='str', required=True),
        },
        supports_check_mode=False,
    )

    result = {'changed': False}
    params = {
        'AuthorizedAccountId': module.params.get('authorized_account_id'),
        'AuthorizedAwsRegion': module.params.get('authorized_aws_region'),
    }

    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
    exists = resource_exists(client, module, params)

    if module.params.get('state') == 'present':
        # Create when missing, otherwise reconcile the existing authorization.
        if exists:
            update_resource(client, module, params, result)
        else:
            create_resource(client, module, params, result)
    elif exists:
        delete_resource(client, module, params, result)

    module.exit_json(changed=result['changed'])
def _get_elb_listener_rules(self):
    """Return the rules attached to the current listener, failing the module on error."""
    describe_with_backoff = AWSRetry.jittered_backoff()(self.connection.describe_rules)
    try:
        return describe_with_backoff(
            ListenerArn=self.current_listener['ListenerArn'])['Rules']
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
def __init__(self, module, instance_id, get_unused_target_groups):
    """
    Build the ec2/elbv2 clients and collect the instance's target groups.

    :param module: AnsibleAWSModule used for client creation and failure reporting
    :param instance_id: ID of the EC2 instance to inspect
    :param get_unused_target_groups: include target groups with no registered targets
    """
    self.module = module
    try:
        self.ec2 = module.client(
            "ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10))
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Couldn't connect to ec2")
    try:
        self.elbv2 = module.client(
            "elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10))
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg="Could not connect to elbv2")

    self.instance_id = instance_id
    self.get_unused_target_groups = get_unused_target_groups
    # Eagerly resolve the target groups so the object is ready to use.
    self.tgs = self._get_target_groups()
def wait(client, db_instance_id, waiter_name, extra_retry_codes):
    """
    Block until the named waiter reports the DB instance reached its state.

    :param client: boto3 RDS client
    :param db_instance_id: identifier of the DB instance to wait on
    :param waiter_name: name of a boto3 or ansible.module_utils.aws waiter
    :param extra_retry_codes: accepted for interface compatibility; currently
        unused by this function (the removed local that consumed it was dead code)
    """
    try:
        waiter = client.get_waiter(waiter_name)
    except ValueError:
        # Not a native boto3 waiter: fall back to the custom waiters defined
        # in ansible.module_utils.aws.waiters.
        waiter = get_waiter(client, waiter_name)
    waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60},
                DBInstanceIdentifier=db_instance_id)
def main():
    """Entry point: manage an AWS Config delivery channel."""
    module = AnsibleAWSModule(
        argument_spec={
            'name': dict(type='str', required=True),
            'state': dict(type='str', choices=['present', 'absent'], default='present'),
            's3_bucket': dict(type='str', required=True),
            's3_prefix': dict(type='str'),
            'sns_topic_arn': dict(type='str'),
            'delivery_frequency': dict(
                type='str',
                choices=[
                    'One_Hour',
                    'Three_Hours',
                    'Six_Hours',
                    'Twelve_Hours',
                    'TwentyFour_Hours'
                ],
            ),
        },
        supports_check_mode=False,
    )

    result = {'changed': False}
    state = module.params.get('state')

    # Map the module options onto boto3 delivery-channel parameter names,
    # omitting anything the user did not supply.
    params = {}
    if module.params.get('name'):
        params['name'] = module.params.get('name')
    if module.params.get('s3_bucket'):
        params['s3BucketName'] = module.params.get('s3_bucket')
    if module.params.get('s3_prefix'):
        params['s3KeyPrefix'] = module.params.get('s3_prefix')
    if module.params.get('sns_topic_arn'):
        params['snsTopicARN'] = module.params.get('sns_topic_arn')
    if module.params.get('delivery_frequency'):
        params['configSnapshotDeliveryProperties'] = {
            'deliveryFrequency': module.params.get('delivery_frequency')
        }

    client = module.client('config', retry_decorator=AWSRetry.jittered_backoff())
    resource_status = resource_exists(client, module, params)

    if state == 'present':
        if resource_status:
            update_resource(client, module, params, result)
        else:
            create_resource(client, module, params, result)
    elif resource_status:
        delete_resource(client, module, params, result)

    module.exit_json(**result)
def modify_elb_attributes(self):
    """
    Update Network ELB attributes if required

    Compares the module parameters against the attributes currently on the
    load balancer and applies only the ones that differ. If applying fails
    and this task created the ELB, the ELB is deleted again so the failure
    leaves no half-configured resource behind.
    :return:
    """
    update_attributes = []

    # The API reports boolean attributes as the strings 'true'/'false', so
    # compare the module parameter in the same lower-cased string form.
    if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \
            self.elb_attributes['load_balancing_cross_zone_enabled']:
        update_attributes.append({
            'Key': 'load_balancing.cross_zone.enabled',
            'Value': str(self.cross_zone_load_balancing).lower()
        })
    if self.deletion_protection is not None and str(
            self.deletion_protection).lower(
    ) != self.elb_attributes['deletion_protection_enabled']:
        update_attributes.append({
            'Key': 'deletion_protection.enabled',
            'Value': str(self.deletion_protection).lower()
        })

    if update_attributes:
        try:
            AWSRetry.jittered_backoff()(
                self.connection.modify_load_balancer_attributes)(
                LoadBalancerArn=self.elb['LoadBalancerArn'],
                Attributes=update_attributes)
            self.changed = True
        except (BotoCoreError, ClientError) as e:
            # Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
            if self.new_load_balancer:
                AWSRetry.jittered_backoff()(
                    self.connection.delete_load_balancer)(
                    LoadBalancerArn=self.elb['LoadBalancerArn'])
            self.module.fail_json_aws(e)
def modify(self):
    """
    Modify a listener rule

    Fails the module on error; suggests a botocore upgrade when the installed
    botocore is too old to understand multi-action ('Order') rules.
    :return:
    """
    try:
        # 'Priority' is not a valid parameter for modify_rule. pop() with a
        # default removes it safely; the original `del` raised an uncaught
        # KeyError when the rule dict had no 'Priority' key.
        self.rule.pop('Priority', None)
        AWSRetry.jittered_backoff()(
            self.connection.modify_rule)(**self.rule)
    except (BotoCoreError, ClientError) as e:
        # botocore < 1.10.30 rejects the 'Order' key on rule actions;
        # translate that parameter-validation error into an upgrade hint.
        if '"Order", must be one of: Type, TargetGroupArn' in str(e):
            self.module.fail_json(
                msg="installed version of botocore does not support "
                    "multiple actions, please upgrade botocore to version "
                    "1.10.30 or higher")
        else:
            self.module.fail_json_aws(e)
    self.changed = True
def call_method(client, module, method_name, parameters):
    """
    Call an RDS client method by name with retries and optional waiting.

    :param client: boto3 RDS client
    :param module: AnsibleAWSModule (check_mode and the 'wait' param are read)
    :param method_name: name of the client method to invoke
    :param parameters: kwargs passed through to the method
    :return: tuple (result, changed) — in check mode this is ({}, True)
             without calling AWS at all
    """
    result = {}
    changed = True
    if not module.check_mode:
        wait = module.params['wait']
        # TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes
        method = getattr(client, method_name)
        try:
            if method_name == 'modify_db_instance':
                # check if instance is in an available state first, if possible
                if wait:
                    wait_for_status(client, module, module.params['db_instance_identifier'], method_name)
                # modify can race with in-flight state changes, so also retry
                # on InvalidDBInstanceState in addition to the default codes.
                result = AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidDBInstanceState'])(method)(**parameters)
            else:
                result = AWSRetry.jittered_backoff()(method)(**parameters)
        except (BotoCoreError, ClientError) as e:
            # handle_errors decides whether the error means "no change"
            # (changed=False) or fails the module outright.
            changed = handle_errors(module, e, method_name, parameters)

        if wait and changed:
            identifier = get_final_identifier(method_name, module)
            wait_for_status(client, module, identifier, method_name)
    return result, changed
def get_elb_tags(self):
    """
    Get load balancer tags
    :return:
    """
    describe_with_backoff = AWSRetry.jittered_backoff()(self.connection.describe_tags)
    try:
        return describe_with_backoff(
            ResourceArns=[self.elb['LoadBalancerArn']]
        )['TagDescriptions'][0]['Tags']
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
def get_elb_listener_rules(connection, module, listener_arn):
    """
    Get rules for a particular ELB listener using the listener ARN.

    :param connection: AWS boto3 elbv2 connection
    :param module: Ansible module
    :param listener_arn: ARN of the ELB listener
    :return: boto3 ELB rules list
    """
    describe_with_backoff = AWSRetry.jittered_backoff()(connection.describe_rules)
    try:
        return describe_with_backoff(ListenerArn=listener_arn)['Rules']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e)
def _get_elb_listeners(self):
    """
    Get ELB listeners
    :return:
    """
    try:
        paginator = self.connection.get_paginator('describe_listeners')
        paginate_with_backoff = AWSRetry.jittered_backoff()(paginator.paginate)
        # build_full_result merges all pages into a single response dict.
        full_result = paginate_with_backoff(
            LoadBalancerArn=self.elb_arn).build_full_result()
        return full_result['Listeners']
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)
def delete_template(module):
    """
    Delete the launch template selected by the module parameters.

    Non-default versions are deleted first (the default version can only go
    with the template itself), partially failed version deletes produce a
    warning, and then the template is removed.

    :param module: AnsibleAWSModule
    :return: result dict with 'changed', plus 'deleted_versions' and
             'deleted_template' when something was deleted
    """
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    template, template_versions = existing_templates(module)
    deleted_versions = []
    if template or template_versions:
        # Only non-default versions can be deleted individually.
        non_default_versions = [
            to_text(t['VersionNumber']) for t in template_versions
            if not t['DefaultVersion']
        ]
        if non_default_versions:
            try:
                v_resp = ec2.delete_launch_template_versions(
                    LaunchTemplateId=template['LaunchTemplateId'],
                    Versions=non_default_versions,
                )
                # Version deletes can partially fail; surface those as a warning
                # rather than aborting the template delete.
                if v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']:
                    module.warn(
                        'Failed to delete template versions {0} on launch template {1}'
                        .format(
                            v_resp[
                                'UnsuccessfullyDeletedLaunchTemplateVersions'],
                            template['LaunchTemplateId'],
                        ))
                deleted_versions = [
                    camel_dict_to_snake_dict(v)
                    for v in v_resp['SuccessfullyDeletedLaunchTemplateVersions']
                ]
            except (ClientError, BotoCoreError) as e:
                module.fail_json_aws(
                    e,
                    msg=
                    "Could not delete existing versions of the launch template {0}"
                    .format(template['LaunchTemplateId']))
        try:
            # Deleting the template also removes its default version.
            resp = ec2.delete_launch_template(
                LaunchTemplateId=template['LaunchTemplateId'],
            )
        except (ClientError, BotoCoreError) as e:
            module.fail_json_aws(
                e,
                msg="Could not delete launch template {0}".format(
                    template['LaunchTemplateId']))
        return {
            'deleted_versions': deleted_versions,
            'deleted_template':
            camel_dict_to_snake_dict(resp['LaunchTemplate']),
            'changed': True,
        }
    else:
        # Nothing matched: idempotent no-op.
        return {'changed': False}
def existing_templates(module):
    """
    Look up the launch template selected by 'template_id' / 'template_name'.

    :param module: AnsibleAWSModule
    :return: tuple (template dict, list of version dicts), or (None, [])
             when no matching template exists
    """
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    matches = None
    try:
        if module.params.get('template_id'):
            matches = ec2.describe_launch_templates(
                LaunchTemplateIds=[module.params.get('template_id')])
        elif module.params.get('template_name'):
            matches = ec2.describe_launch_templates(
                LaunchTemplateNames=[module.params.get('template_name')])
    except is_boto3_error_code(
            'InvalidLaunchTemplateName.NotFoundException') as e:
        # no named template was found, return nothing/empty versions
        return None, []
    except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e:  # pylint: disable=duplicate-except
        # Bug fix: the module parameter is 'template_id' — the old code read
        # the nonexistent 'launch_template_id' key and always formatted None
        # into this message.
        module.fail_json_aws(
            e,
            msg=
            'Launch template with ID {0} is not a valid ID. It should start with `lt-....`'
            .format(module.params.get('template_id')))
    except is_boto3_error_code(
            'InvalidLaunchTemplateId.NotFoundException') as e:  # pylint: disable=duplicate-except
        # Bug fix: same wrong parameter key as above.
        module.fail_json_aws(
            e,
            msg=
            'Launch template with ID {0} could not be found, please supply a name '
            'instead so that a new template can be created'.format(
                module.params.get('template_id')))
    except (ClientError, BotoCoreError, WaiterError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            e,
            msg=
            'Could not check existing launch templates. This may be an IAM permission problem.'
        )
    else:
        if matches is None:
            # Neither template_id nor template_name was supplied; previously
            # this fell through to an unhandled TypeError on subscripting None.
            return None, []
        template = matches['LaunchTemplates'][0]
        template_id, template_version, template_default = template[
            'LaunchTemplateId'], template['LatestVersionNumber'], template[
                'DefaultVersionNumber']
        try:
            return template, ec2.describe_launch_template_versions(
                LaunchTemplateId=template_id)['LaunchTemplateVersions']
        except (ClientError, BotoCoreError, WaiterError) as e:
            module.fail_json_aws(
                e,
                msg='Could not find launch template versions for {0} (ID: {1}).'
                .format(template['LaunchTemplateName'], template_id))
def main():
    """Entry point: gather RDS instance information."""
    argument_spec = dict(
        db_instance_identifier=dict(aliases=['id']),
        filters=dict(type='dict'),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # Warn users still invoking the module under its pre-rename *_facts name.
    if module._name == 'rds_instance_facts':
        module.deprecate(
            "The 'rds_instance_facts' module has been renamed to 'rds_instance_info'",
            version='2.13')

    conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
    module.exit_json(**instance_info(module, conn))
def convert_tg_name_to_arn(connection, module, tg_name):
    """
    Get ARN of a target group using the target group's name

    :param connection: AWS boto3 elbv2 connection
    :param module: Ansible module
    :param tg_name: Name of the target group
    :return: target group ARN string
    """
    describe_with_backoff = AWSRetry.jittered_backoff()(connection.describe_target_groups)
    try:
        response = describe_with_backoff(Names=[tg_name])
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e)

    # Exactly one group is described, so the first entry is the match.
    return response['TargetGroups'][0]['TargetGroupArn']
def determine_iam_role(module, name_or_arn):
    """
    Resolve an instance-profile name or ARN into the profile's identity.

    NOTE(review): the two code paths return different types — the raw ARN
    string when the input already matches the instance-profile ARN pattern,
    but a dict ``{'arn': ...}`` after the IAM lookup. Confirm which shape
    callers expect; one of the two branches looks wrong.
    """
    # Already a full instance-profile ARN: use it as-is (no IAM lookup).
    if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
        return name_or_arn
    iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
    try:
        role = iam.get_instance_profile(InstanceProfileName=name_or_arn,
                                        aws_retry=True)
        return {'arn': role['InstanceProfile']['Arn']}
    except is_boto3_error_code('NoSuchEntity') as e:
        # Named profile does not exist.
        module.fail_json_aws(
            e, msg="Could not find instance_role {0}".format(name_or_arn))
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            e,
            msg=
            "An error occurred while searching for instance_role {0}. Please try supplying the full ARN."
            .format(name_or_arn))
def get_elb_attributes(self):
    """
    Get load balancer attributes
    :return:
    """
    try:
        describe_with_backoff = AWSRetry.jittered_backoff()(
            self.connection.describe_load_balancer_attributes)
        raw_attributes = describe_with_backoff(
            LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes']
        attributes = boto3_tag_list_to_ansible_dict(raw_attributes)
    except (BotoCoreError, ClientError) as e:
        self.module.fail_json_aws(e)

    # Replace '.' with '_' in attribute key names to make it more Ansibley
    return {key.replace('.', '_'): value for key, value in attributes.items()}
def main():
    """Entry point: create/update or destroy an SES identity and its notifications."""
    module = AnsibleAWSModule(
        argument_spec={
            "identity": dict(required=True, type='str'),
            "state": dict(default='present', choices=['present', 'absent']),
            "bounce_notifications": dict(type='dict'),
            "complaint_notifications": dict(type='dict'),
            "delivery_notifications": dict(type='dict'),
            "feedback_forwarding": dict(default=True, type='bool'),
        },
        supports_check_mode=True,
    )

    # Reject unknown keys in each of the three notification dicts up front.
    for notification_type in ('bounce', 'complaint', 'delivery'):
        param_name = notification_type + '_notifications'
        arg_dict = module.params.get(param_name)
        if not arg_dict:
            continue
        extra_keys = [x for x in arg_dict.keys()
                      if x not in ('topic', 'include_headers')]
        if extra_keys:
            module.fail_json(msg='Unexpected keys ' + str(extra_keys) +
                             ' in ' + param_name +
                             ' valid keys are topic or include_headers')

    # SES throttles far more aggressively than most AWS APIs (the docs say one
    # call per second). Parallel CI runs of this test have tripped that limit,
    # so every SES call goes through a jittered backoff.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    if module.params.get("state") == 'present':
        region = get_aws_connection_info(module, boto3=True)[0]
        account_id = get_account_id(module)
        validate_params_for_identity_present(module)
        create_or_update_identity(connection, module, region, account_id)
    else:
        destroy_identity(connection, module)