def existing_templates(module):
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    matches = None
    try:
        if module.params.get('template_id'):
            matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')])
        elif module.params.get('template_name'):
            matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')])
    except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException') as e:
        # no named template was found, return nothing/empty versions
        return None, []
    except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format(
            module.params.get('template_id')))
    except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            e, msg='Launch template with ID {0} could not be found, please supply a name '
                   'instead so that a new template can be created'.format(module.params.get('template_id')))
    except (ClientError, BotoCoreError, WaiterError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
    else:
        template = matches['LaunchTemplates'][0]
        template_id, template_version, template_default = template['LaunchTemplateId'], template['LatestVersionNumber'], template['DefaultVersionNumber']
        try:
            return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id)['LaunchTemplateVersions']
        except (ClientError, BotoCoreError, WaiterError) as e:
            module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(
                template['LaunchTemplateName'], template_id))
def update_resource(client, module, params, result):
    current_params = client.describe_delivery_channels(
        DeliveryChannelNames=[params['name']],
        aws_retry=True,
    )

    if params != current_params['DeliveryChannels'][0]:
        try:
            retry_unavailable_iam_on_put_delivery(
                client.put_delivery_channel,
            )(
                DeliveryChannel=params,
            )
            result['changed'] = True
            result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
            return result
        except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
            module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
        except is_boto3_error_code('InsufficientDeliveryPolicyException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
                                        "Make sure the bucket exists and is available")
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
def update_stack_set(module, stack_params, cfn):
    # if the state is present and the stack already exists, we try to update it.
    # AWS will tell us if the stack template and parameters are the same and
    # don't need to be updated.
    try:
        cfn.update_stack_set(**stack_params)
    except is_boto3_error_code('StackSetNotFound') as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.")
    except is_boto3_error_code('StackInstanceNotFound') as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(err, msg="One or more stack instances were not found for this stack set. Double check "
                                      "the `accounts` and `regions` parameters.")
    except is_boto3_error_code('OperationInProgressException') as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            err, msg="Another operation is already in progress on this stack set - please try again later. When making "
                     "multiple cloudformation_stack_set calls, it's best to enable `wait: yes` to avoid unfinished op errors.")
    except (ClientError, BotoCoreError) as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(err, msg="Could not update stack set.")
    if module.params.get('wait'):
        await_stack_set_operation(
            module, cfn,
            operation_id=stack_params['OperationId'],
            stack_set_name=stack_params['StackSetName'],
            max_wait=module.params.get('wait_timeout'),
        )

    return True
def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait):
    wait_start = datetime.datetime.now()
    operation = None
    for i in range(max_wait // 15):
        try:
            operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id)
            if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'):
                # Stack set has completed operation
                break
        except is_boto3_error_code('StackSetNotFound'):  # pylint: disable=duplicate-except
            pass
        except is_boto3_error_code('OperationNotFound'):  # pylint: disable=duplicate-except
            pass
        time.sleep(15)

    if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'):
        await_stack_instance_completion(
            module, cfn,
            stack_set_name=stack_set_name,
            # subtract however long we waited already
            max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()),
        )
    elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'):
        pass
    else:
        module.warn(
            "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format(
                operation_id, stack_set_name, max_wait
            )
        )
def common_snapshot_facts(module, conn, method, prefix, params):
    paginator = conn.get_paginator(method)
    try:
        results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
    except is_boto3_error_code('%sNotFound' % prefix):
        results = []
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, "trying to get snapshot information")

    for snapshot in results:
        try:
            snapshot['Tags'] = boto3_tag_list_to_ansible_dict(
                conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix], aws_retry=True)['TagList'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])

    return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
def find_address(ec2, module, public_ip, device_id, is_instance=True):
    """ Find an existing Elastic IP address """
    filters = []
    kwargs = {}

    if public_ip:
        kwargs["PublicIps"] = [public_ip]
    elif device_id:
        if is_instance:
            filters.append({"Name": 'instance-id', "Values": [device_id]})
        else:
            filters.append({'Name': 'network-interface-id', "Values": [device_id]})

    if len(filters) > 0:
        kwargs["Filters"] = filters
    elif len(filters) == 0 and public_ip is None:
        return None

    try:
        addresses = ec2.describe_addresses(**kwargs)
    except is_boto3_error_code('InvalidAddress.NotFound') as e:
        module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")

    addresses = addresses["Addresses"]
    if len(addresses) == 1:
        return addresses[0]
    elif len(addresses) > 1:
        msg = "Found more than one address using args {0}. ".format(kwargs)
        msg += "Addresses found: {0}".format(addresses)
        module.fail_json(msg=msg)
def instance_facts(module, conn):
    instance_name = module.params.get('db_instance_identifier')
    filters = module.params.get('filters')

    params = dict()
    if instance_name:
        params['DBInstanceIdentifier'] = instance_name
    if filters:
        params['Filters'] = ansible_dict_to_boto3_filter_list(filters)

    paginator = conn.get_paginator('describe_db_instances')
    try:
        results = paginator.paginate(**params).build_full_result()['DBInstances']
    except is_boto3_error_code('DBInstanceNotFound'):
        results = []
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, "Couldn't get instance information")

    for instance in results:
        try:
            instance['Tags'] = boto3_tag_list_to_ansible_dict(
                conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'], aws_retry=True)['TagList'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])

    return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
def paginated_versions_list(s3_client, **pagination_params):
    try:
        pg = s3_client.get_paginator('list_object_versions')
        for page in pg.paginate(**pagination_params):
            # We have to merge the Versions and DeleteMarker lists here,
            # as DeleteMarkers can still prevent a bucket deletion
            yield [(data['Key'], data['VersionId'])
                   for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
    except is_boto3_error_code('NoSuchBucket'):
        yield []
def get_kms_policies(connection, module, key_id):
    try:
        policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
        return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for policy in policies]
    except is_boto3_error_code('AccessDeniedException'):
        return []
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to obtain key policies")
def peer_status(client, module):
    params = dict()
    params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')]
    try:
        vpc_peering_connection = client.describe_vpc_peering_connections(**params)
        return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code']
    except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e:  # pylint: disable=duplicate-except
        module.fail_json(msg='Malformed connection ID: {0}'.format(e), traceback=traceback.format_exc())
    except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
        module.fail_json(msg='Error while describing peering connection by peering_id: {0}'.format(e), traceback=traceback.format_exc())
def get_final_snapshot(client, module, snapshot_identifier):
    try:
        snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier)
        if len(snapshots.get('DBSnapshots', [])) == 1:
            return snapshots['DBSnapshots'][0]
        return {}
    except is_boto3_error_code('DBSnapshotNotFound') as e:  # May not be using wait: True
        return {}
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot')
def get_cluster(client, module):
    name = module.params.get('name')
    try:
        return client.describe_cluster(name=name)['cluster']
    except is_boto3_error_code('ResourceNotFoundException'):
        return None
    except botocore.exceptions.EndpointConnectionError as e:  # pylint: disable=duplicate-except
        module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Couldn't get cluster %s" % name)
def resource_exists(client, module, params):
    try:
        recorder = client.describe_configuration_recorders(
            ConfigurationRecorderNames=[params['name']]
        )
        return recorder['ConfigurationRecorders'][0]
    except is_boto3_error_code('NoSuchConfigurationRecorderException'):
        return
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def resource_exists(client, module, params):
    try:
        channel = client.describe_delivery_channels(
            DeliveryChannelNames=[params['name']],
            aws_retry=True,
        )
        return channel['DeliveryChannels'][0]
    except is_boto3_error_code('NoSuchDeliveryChannelException'):
        return
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def determine_iam_role(module, name_or_arn):
    if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
        return name_or_arn
    iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
    try:
        role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
        return {'arn': role['InstanceProfile']['Arn']}
    except is_boto3_error_code('NoSuchEntity') as e:
        module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
def rule_exists(client, module, params):
    try:
        rule = client.describe_config_rules(
            ConfigRuleNames=[params['ConfigRuleName']],
            aws_retry=True,
        )
        return rule['ConfigRules'][0]
    except is_boto3_error_code('NoSuchConfigRuleException'):
        return
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def _list_topic_subscriptions(self):
    try:
        return self._list_topic_subscriptions_with_backoff()
    except is_boto3_error_code('AuthorizationError'):
        try:
            # potentially AuthorizationError when listing subscriptions for third party topic
            return [sub for sub in self._list_subscriptions_with_backoff()
                    if sub['TopicArn'] == self.topic_arn]
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
def describe_pipeline(client, name, version, module):
    pipeline = {}
    try:
        if version is not None:
            pipeline = client.get_pipeline(name=name, version=version)
            return pipeline
        else:
            pipeline = client.get_pipeline(name=name)
            return pipeline
    except is_boto3_error_code('PipelineNotFoundException'):
        return pipeline
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def promote_replication_instance(client, module, instance, read_replica):
    changed = False
    if read_replica is False:
        changed = bool(instance.get('ReadReplicaSourceDBInstanceIdentifier') or instance.get('StatusInfos'))
    if changed:
        try:
            call_method(client, module, method_name='promote_read_replica',
                        parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']})
            changed = True
        except is_boto3_error_code('InvalidDBInstanceState') as e:
            if 'DB Instance is not a read replica' in e.response['Error']['Message']:
                pass
            else:
                raise e
    return changed
def _add_tags_for_hosts(connection, hosts, strict):
    for host in hosts:
        if 'DBInstanceArn' in host:
            resource_arn = host['DBInstanceArn']
        else:
            resource_arn = host['DBClusterArn']

        try:
            tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']
        except is_boto3_error_code('AccessDenied') as e:
            if not strict:
                tags = []
            else:
                raise e
        host['Tags'] = tags
def find_existing_policy(module, client):
    if module.params['policy_id']:
        try:
            response = client.get_lifecycle_policy(PolicyId=module.params['policy_id'])
        except is_boto3_error_code('ResourceNotFoundException'):
            return None
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to fetch existing policy.')
        return response['Policy']

    query_tags = []
    for tag in process_target_tags(module.params['target_tags']):
        query_tags.append('{Key}={Value}'.format(**tag))

    try:
        response = client.get_lifecycle_policies(
            ResourceTypes=[module.params['resource_type'].upper()],
            TargetTags=query_tags,
        )
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to fetch existing policies.')

    for policy in response['Policies']:
        try:
            response = client.get_lifecycle_policy(PolicyId=policy['PolicyId'])
        except is_boto3_error_code('ResourceNotFoundException'):
            continue
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to fetch existing policy.')
        # If it has more tags than we requested it's not a match
        if len(response['Policy']['PolicyDetails']['TargetTags']) == len(query_tags):
            return response['Policy']
    return None
def wrapper(f, *args, **kwargs):
    try:
        results = f(*args, **kwargs)
        if 'DBInstances' in results:
            results = results['DBInstances']
        else:
            results = results['DBClusters']
        _add_tags_for_hosts(connection, results, strict)
    except is_boto3_error_code('AccessDenied') as e:  # pylint: disable=duplicate-except
        if not strict:
            results = []
        else:
            raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
    return results
def get_kms_tags(connection, module, key_id):
    # Handle pagination here as list_resource_tags does not have
    # a paginator
    kwargs = {}
    tags = []
    more = True
    while more:
        try:
            tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
            tags.extend(tag_response['Tags'])
        except is_boto3_error_code('AccessDeniedException'):
            tag_response = {}
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg="Failed to obtain key tags")
        if tag_response.get('NextMarker'):
            kwargs['Marker'] = tag_response['NextMarker']
        else:
            more = False
    return tags
def await_stack_instance_completion(module, cfn, stack_set_name, max_wait):
    to_await = None
    for i in range(max_wait // 15):
        try:
            stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name)
            to_await = [inst for inst in stack_instances['Summaries'] if inst['Status'] != 'CURRENT']
            if not to_await:
                return stack_instances['Summaries']
        except is_boto3_error_code('StackSetNotFound'):  # pylint: disable=duplicate-except
            # this means the deletion beat us, or the stack set is not yet propagated
            pass
        time.sleep(15)

    module.warn(
        "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format(
            stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait
        )
    )
def describe_stack_tree(module, stack_set_name, operation_ids=None):
    jittered_backoff_decorator = AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5, catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)

    result = dict()
    result['stack_set'] = camel_dict_to_snake_dict(
        cfn.describe_stack_set(
            StackSetName=stack_set_name,
            aws_retry=True,
        )['StackSet']
    )
    result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])
    result['operations_log'] = sorted(
        camel_dict_to_snake_dict(
            cfn.list_stack_set_operations(
                StackSetName=stack_set_name,
                aws_retry=True,
            )
        )['summaries'],
        key=lambda x: x['creation_timestamp']
    )
    result['stack_instances'] = sorted(
        [camel_dict_to_snake_dict(i) for i in cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']],
        key=lambda i: i['region'] + i['account']
    )

    if operation_ids:
        result['operations'] = []
        for op_id in operation_ids:
            try:
                result['operations'].append(camel_dict_to_snake_dict(
                    cfn.describe_stack_set_operation(
                        StackSetName=stack_set_name,
                        OperationId=op_id,
                    )['StackSetOperation']
                ))
            except is_boto3_error_code('OperationNotFoundException'):  # pylint: disable=duplicate-except
                pass
    return result
def delete_cluster(module, redshift):
    """
    Delete a cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """

    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    params = {}
    for p in ('skip_final_cluster_snapshot', 'final_cluster_snapshot_identifier'):
        if p in module.params:
            # https://github.com/boto/boto3/issues/400
            if module.params.get(p) is not None:
                params[p] = module.params.get(p)

    try:
        _delete_cluster(
            redshift,
            ClusterIdentifier=identifier,
            **snake_dict_to_camel_dict(params, capitalize_first=True))
    except is_boto3_error_code('ClusterNotFound'):
        return (False, {})
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to delete cluster")

    if wait:
        attempts = wait_timeout // 60
        waiter = redshift.get_waiter('cluster_deleted')
        try:
            waiter.wait(
                ClusterIdentifier=identifier,
                WaiterConfig=dict(MaxAttempts=attempts)
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Timeout deleting the cluster")

    return (True, {})
def get_instance(client, module, db_instance_id):
    try:
        for i in range(3):
            try:
                instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0]
                instance['Tags'] = get_tags(client, module, instance['DBInstanceArn'])
                if instance.get('ProcessorFeatures'):
                    instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures'])
                if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
                    instance['PendingModifiedValues']['ProcessorFeatures'] = dict(
                        (feature['Name'], feature['Value'])
                        for feature in instance['PendingModifiedValues']['ProcessorFeatures']
                    )
                break
            except is_boto3_error_code('DBInstanceNotFound'):
                sleep(3)
        else:
            instance = {}
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Failed to describe DB instances')
    return instance
def create_vgw(client, module):
    params = dict()
    params['Type'] = module.params.get('type')
    if module.params.get('asn'):
        params['AmazonSideAsn'] = module.params.get('asn')

    try:
        response = client.create_vpn_gateway(**params)
        get_waiter(
            client, 'vpn_gateway_exists'
        ).wait(
            VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']]
        )
    except botocore.exceptions.WaiterError as e:
        module.fail_json(msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId']),
                         exception=traceback.format_exc())
    except is_boto3_error_code('VpnGatewayLimitExceeded'):
        module.fail_json(msg="Too many VPN gateways exist in this account.", exception=traceback.format_exc())
    except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    result = response
    return result
def create_cluster(module, redshift):
    """
    Create a new cluster

    module: AnsibleModule object
    redshift: authenticated redshift connection object

    Returns:
    """

    identifier = module.params.get('identifier')
    node_type = module.params.get('node_type')
    username = module.params.get('username')
    password = module.params.get('password')
    d_b_name = module.params.get('db_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    changed = True
    # Package up the optional parameters
    params = {}
    for p in ('cluster_type', 'cluster_security_groups', 'vpc_security_group_ids',
              'cluster_subnet_group_name', 'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name', 'automated_snapshot_retention_period', 'port',
              'cluster_version', 'allow_version_upgrade', 'number_of_nodes', 'publicly_accessible',
              'encrypted', 'elastic_ip', 'enhanced_vpc_routing'):
        # https://github.com/boto/boto3/issues/400
        if module.params.get(p) is not None:
            params[p] = module.params.get(p)

    if d_b_name:
        params['d_b_name'] = d_b_name

    try:
        redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
        changed = False
    except is_boto3_error_code('ClusterNotFound'):
        try:
            redshift.create_cluster(ClusterIdentifier=identifier,
                                    NodeType=node_type,
                                    MasterUsername=username,
                                    MasterUserPassword=password,
                                    **snake_dict_to_camel_dict(params, capitalize_first=True))
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json_aws(e, msg="Failed to create cluster")
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to describe cluster")

    if wait:
        attempts = wait_timeout // 60
        waiter = redshift.get_waiter('cluster_available')
        try:
            waiter.wait(
                ClusterIdentifier=identifier,
                WaiterConfig=dict(MaxAttempts=attempts)
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")

    try:
        resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to describe cluster")

    return (changed, _collect_facts(resource))
def get_target_from_rule(module, client, rule, name, group, groups, vpc_id):
    """
    Returns tuple of (target_type, target, group_created) after validating rule params.

    rule: Dict describing a rule.
    name: Name of the security group being managed.
    groups: Dict of all available security groups.

    AWS accepts an ip range or a security group as target of a rule. This
    function validate the rule specification and return either a non-None
    group_id or a non-None ip range.
    """
    FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)'
    group_id = None
    group_name = None
    target_group_created = False

    validate_rule(module, rule)
    if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
        # this is a foreign Security Group. Since you can't fetch it you must create an instance of it
        owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
        group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name)
        groups[group_id] = group_instance
        groups[group_name] = group_instance
        # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific
        if group_id and group_name:
            group_name = None
        return 'group', (owner_id, group_id, group_name), False
    elif 'group_id' in rule:
        return 'group', rule['group_id'], False
    elif 'group_name' in rule:
        group_name = rule['group_name']
        if group_name == name:
            group_id = group['GroupId']
            groups[group_id] = group
            groups[group_name] = group
        elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'):
            # both are VPC groups, this is ok
            group_id = groups[group_name]['GroupId']
        elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')):
            # both are EC2 classic, this is ok
            group_id = groups[group_name]['GroupId']
        else:
            auto_group = None
            filters = {'group-name': group_name}
            if vpc_id:
                filters['vpc-id'] = vpc_id
            # if we got here, either the target group does not exist, or there
            # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC
            # is bad, so we have to create a new SG because no compatible group
            # exists
            if not rule.get('group_desc', '').strip():
                # retry describing the group once
                try:
                    auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
                except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError):
                    module.fail_json(msg="group %s will be automatically created by rule %s but "
                                         "no description was provided" % (group_name, rule))
                except ClientError as e:  # pylint: disable=duplicate-except
                    module.fail_json_aws(e)
            elif not module.check_mode:
                params = dict(GroupName=group_name, Description=rule['group_desc'])
                if vpc_id:
                    params['VpcId'] = vpc_id
                try:
                    auto_group = client.create_security_group(**params)
                    get_waiter(
                        client, 'security_group_exists',
                    ).wait(
                        GroupIds=[auto_group['GroupId']],
                    )
                except is_boto3_error_code('InvalidGroup.Duplicate'):
                    # The group exists, but didn't show up in any of our describe-security-groups calls
                    # Try searching on a filter for the name, and allow a retry window for AWS to update
                    # the model on their end.
                    try:
                        auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0]
                    except IndexError as e:
                        module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
                    except ClientError as e:
                        module.fail_json_aws(
                            e,
                            msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name))
            if auto_group is not None:
                group_id = auto_group['GroupId']
                groups[group_id] = auto_group
                groups[group_name] = auto_group
                target_group_created = True
        return 'group', group_id, target_group_created
    elif 'cidr_ip' in rule:
        return 'ipv4', validate_ip(module, rule['cidr_ip']), False
    elif 'cidr_ipv6' in rule:
        return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False
    elif 'ip_prefix' in rule:
        return 'ip_prefix', rule['ip_prefix'], False

    module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule)
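# A minimal sketch of the shared pattern used by the functions above, written against a
# hypothetical client call (describe_things / 'ThingNotFound' are placeholders, not a real
# AWS API): is_boto3_error_code('SomeCode') builds an exception class that only matches a
# botocore ClientError carrying that specific error code, so an expected "not found" error
# can be swallowed while every other ClientError/BotoCoreError falls through to the generic
# handler and is reported via module.fail_json_aws.
def example_not_found_pattern(client, module, thing_id):
    try:
        return client.describe_things(ThingIds=[thing_id])['Things'][0]
    except is_boto3_error_code('ThingNotFound'):
        # the expected, recoverable case: treat a missing resource as "does not exist"
        return None
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Couldn't describe thing %s" % thing_id)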