# Consolidated imports. These functions are excerpted from several Ansible AWS modules
# and plugins; the helper import paths below assume the pre-collections module_utils
# layout. Module-local helpers referenced but not defined here (e.g. stack_set_facts,
# create_stack_set, compare_stack_instances, get_operation_preferences, _collect_facts,
# and the *_with_backoff wrappers) live elsewhere in their source modules.
import datetime
import re
import sys
import time
import traceback
import uuid

import botocore.exceptions
from botocore.exceptions import BotoCoreError, ClientError, WaiterError

from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.ec2 import (
    AWSRetry,
    ansible_dict_to_boto3_filter_list,
    ansible_dict_to_boto3_tag_list,
    boto3_tag_list_to_ansible_dict,
    camel_dict_to_snake_dict,
    snake_dict_to_camel_dict,
)


def existing_templates(module):
    ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
    matches = None
    try:
        if module.params.get('template_id'):
            matches = ec2.describe_launch_templates(LaunchTemplateIds=[module.params.get('template_id')])
        elif module.params.get('template_name'):
            matches = ec2.describe_launch_templates(LaunchTemplateNames=[module.params.get('template_name')])
    except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'):
        # no named template was found, return nothing/empty versions
        return None, []
    except is_boto3_error_code('InvalidLaunchTemplateId.Malformed') as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Launch template with ID {0} is not a valid ID. It should start with `lt-....`'.format(
            module.params.get('template_id')))
    except is_boto3_error_code('InvalidLaunchTemplateId.NotFoundException') as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            e, msg='Launch template with ID {0} could not be found, please supply a name '
                   'instead so that a new template can be created'.format(module.params.get('template_id')))
    except (ClientError, BotoCoreError, WaiterError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Could not check existing launch templates. This may be an IAM permission problem.')
    else:
        template = matches['LaunchTemplates'][0]
        template_id = template['LaunchTemplateId']
        try:
            return template, ec2.describe_launch_template_versions(LaunchTemplateId=template_id)['LaunchTemplateVersions']
        except (ClientError, BotoCoreError, WaiterError) as e:
            module.fail_json_aws(e, msg='Could not find launch template versions for {0} (ID: {1}).'.format(
                template['LaunchTemplateName'], template_id))
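
# The `except is_boto3_error_code('SomeCode')` pattern above recurs throughout this file,
# so it is worth spelling out. The helper returns an exception *class*: ClientError when
# the exception currently being handled carries the named AWS error code, otherwise a
# freshly minted class that nothing ever raises, so the except clause silently fails to
# match. A minimal sketch of that behaviour (illustrative only; the real helper lives in
# Ansible's module_utils, and the name below is hypothetical):
def _is_boto3_error_code_sketch(code):
    e = sys.exc_info()[1]  # the exception currently in flight, if any
    if isinstance(e, botocore.exceptions.ClientError) and e.response['Error']['Code'] == code:
        return botocore.exceptions.ClientError
    return type('NeverEverRaisedException', (Exception,), {})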
def update_stack_set(module, stack_params, cfn):
    # if the state is present and the stack already exists, we try to update it.
    # AWS will tell us if the stack template and parameters are the same and
    # don't need to be updated.
    try:
        cfn.update_stack_set(**stack_params)
    except is_boto3_error_code('StackSetNotFound') as err:
        module.fail_json_aws(err, msg="Failed to find stack set. Check the name & region.")
    except is_boto3_error_code('StackInstanceNotFound') as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            err, msg="One or more stack instances were not found for this stack set. Double check "
                     "the `accounts` and `regions` parameters.")
    except is_boto3_error_code('OperationInProgressException') as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(
            err, msg="Another operation is already in progress on this stack set - please try again later. When making "
                     "multiple cloudformation_stack_set calls, it's best to enable `wait: yes` to avoid unfinished op errors.")
    except (ClientError, BotoCoreError) as err:  # pylint: disable=duplicate-except
        module.fail_json_aws(err, msg="Could not update stack set.")

    if module.params.get('wait'):
        await_stack_set_operation(
            module, cfn,
            operation_id=stack_params['OperationId'],
            stack_set_name=stack_params['StackSetName'],
            max_wait=module.params.get('wait_timeout'),
        )
    return True
def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wait):
    wait_start = datetime.datetime.now()
    operation = None
    for i in range(max_wait // 15):
        try:
            operation = cfn.describe_stack_set_operation(StackSetName=stack_set_name, OperationId=operation_id)
            if operation['StackSetOperation']['Status'] not in ('RUNNING', 'STOPPING'):
                # Stack set has completed operation
                break
        except is_boto3_error_code('StackSetNotFound'):
            pass
        except is_boto3_error_code('OperationNotFound'):  # pylint: disable=duplicate-except
            pass
        time.sleep(15)

    if operation and operation['StackSetOperation']['Status'] not in ('FAILED', 'STOPPED'):
        await_stack_instance_completion(
            module, cfn,
            stack_set_name=stack_set_name,
            # subtract however long we waited already
            max_wait=int(max_wait - (datetime.datetime.now() - wait_start).total_seconds()),
        )
    elif operation and operation['StackSetOperation']['Status'] in ('FAILED', 'STOPPED'):
        # terminal state; the caller inspects the operations log for details
        pass
    else:
        module.warn(
            "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format(
                operation_id, stack_set_name, max_wait))
def update_resource(client, module, params, result):
    current_params = client.describe_delivery_channels(
        DeliveryChannelNames=[params['name']],
        aws_retry=True,
    )

    if params != current_params['DeliveryChannels'][0]:
        try:
            retry_unavailable_iam_on_put_delivery(
                client.put_delivery_channel,
            )(DeliveryChannel=params)
            result['changed'] = True
            result['channel'] = camel_dict_to_snake_dict(resource_exists(client, module, params))
            return result
        except is_boto3_error_code('InvalidS3KeyPrefixException') as e:
            module.fail_json_aws(e, msg="The `s3_prefix` parameter was invalid. Try '/' for no prefix")
        except is_boto3_error_code('InsufficientDeliveryPolicyException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg="The `s3_prefix` or `s3_bucket` parameter is invalid. "
                                        "Make sure the bucket exists and is available")
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg="Couldn't create AWS Config delivery channel")
def common_snapshot_info(module, conn, method, prefix, params):
    paginator = conn.get_paginator(method)
    try:
        results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
    except is_boto3_error_code('%sNotFound' % prefix):
        results = []
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, "trying to get snapshot information")

    for snapshot in results:
        try:
            snapshot['Tags'] = boto3_tag_list_to_ansible_dict(
                conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix], aws_retry=True)['TagList'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])

    return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
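
# A hedged usage sketch for common_snapshot_info: the `prefix` argument is spliced into
# the result keys ('%ss', '%sArn', '%sIdentifier'), so the caller pairs a paginated boto3
# method with its matching CamelCase prefix. The identifier value below is illustrative:
#
#     db_snapshots = common_snapshot_info(
#         module, conn,
#         method='describe_db_snapshots',
#         prefix='DBSnapshot',  # keys become 'DBSnapshots', 'DBSnapshotArn', 'DBSnapshotIdentifier'
#         params={'DBSnapshotIdentifier': 'my-snapshot'},
#     )
#
#     cluster_snapshots = common_snapshot_info(
#         module, conn,
#         method='describe_db_cluster_snapshots',
#         prefix='DBClusterSnapshot',
#         params={},
#     )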
def instance_info(module, conn):
    instance_name = module.params.get('db_instance_identifier')
    filters = module.params.get('filters')

    params = dict()
    if instance_name:
        params['DBInstanceIdentifier'] = instance_name
    if filters:
        params['Filters'] = ansible_dict_to_boto3_filter_list(filters)

    paginator = conn.get_paginator('describe_db_instances')
    try:
        results = paginator.paginate(**params).build_full_result()['DBInstances']
    except is_boto3_error_code('DBInstanceNotFound'):
        results = []
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, "Couldn't get instance information")

    for instance in results:
        try:
            instance['Tags'] = boto3_tag_list_to_ansible_dict(
                conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'], aws_retry=True)['TagList'])
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier'])

    return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results])
def find_address(ec2, module, public_ip, device_id, is_instance=True):
    """ Find an existing Elastic IP address """
    filters = []
    kwargs = {}

    if public_ip:
        kwargs["PublicIps"] = [public_ip]
    elif device_id:
        if is_instance:
            filters.append({"Name": 'instance-id', "Values": [device_id]})
        else:
            filters.append({'Name': 'network-interface-id', "Values": [device_id]})

    if len(filters) > 0:
        kwargs["Filters"] = filters
    elif len(filters) == 0 and public_ip is None:
        return None

    try:
        addresses = ec2.describe_addresses(**kwargs)
    except is_boto3_error_code('InvalidAddress.NotFound') as e:
        # If we're releasing and we can't find it, it's already gone...
        if module.params.get('state') == 'absent':
            module.exit_json(changed=False)
        module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses")

    addresses = addresses["Addresses"]
    if len(addresses) == 1:
        return addresses[0]
    elif len(addresses) > 1:
        msg = "Found more than one address using args {0}. ".format(kwargs)
        msg += "Addresses found: {0}".format(addresses)
        # there is no exception instance to report here, so fail without one
        module.fail_json(msg=msg)
def resource_exists(client, module, params):
    try:
        recorder = client.describe_configuration_recorders(
            ConfigurationRecorderNames=[params['name']]
        )
        return recorder['ConfigurationRecorders'][0]
    except is_boto3_error_code('NoSuchConfigurationRecorderException'):
        return None
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def paginated_versions_list(s3_client, **pagination_params):
    try:
        pg = s3_client.get_paginator('list_object_versions')
        for page in pg.paginate(**pagination_params):
            # We have to merge the Versions and DeleteMarkers lists here,
            # as DeleteMarkers can still prevent a bucket deletion
            yield [(data['Key'], data['VersionId'])
                   for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
    except is_boto3_error_code('NoSuchBucket'):
        yield []
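
# A possible consumer of paginated_versions_list (a sketch, not taken from the source):
# each yielded page is a list of (key, version_id) tuples, which maps directly onto the
# request shape that S3's delete_objects expects, and list_object_versions pages are at
# most 1000 entries, matching delete_objects' per-request limit. The bucket name here
# is illustrative:
#
#     for key_version_pairs in paginated_versions_list(s3_client, Bucket='my-bucket'):
#         if key_version_pairs:
#             s3_client.delete_objects(
#                 Bucket='my-bucket',
#                 Delete={'Objects': [{'Key': k, 'VersionId': v} for k, v in key_version_pairs]},
#             )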
def get_cluster(client, module):
    name = module.params.get('name')
    try:
        return client.describe_cluster(name=name)['cluster']
    except is_boto3_error_code('ResourceNotFoundException'):
        return None
    except botocore.exceptions.EndpointConnectionError:  # pylint: disable=duplicate-except
        module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Couldn't get cluster %s" % name)
def determine_iam_role(module, name_or_arn):
    if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
        return name_or_arn
    iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
    try:
        role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
        return {'arn': role['InstanceProfile']['Arn']}
    except is_boto3_error_code('NoSuchEntity') as e:
        module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
def _list_topic_subscriptions(self):
    try:
        return self._list_topic_subscriptions_with_backoff()
    except is_boto3_error_code('AuthorizationError'):
        try:
            # potentially AuthorizationError when listing subscriptions for third party topic
            return [sub for sub in self._list_subscriptions_with_backoff()
                    if sub['TopicArn'] == self.topic_arn]
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn)
def rule_exists(client, module, params):
    try:
        rule = client.describe_config_rules(
            ConfigRuleNames=[params['ConfigRuleName']],
            aws_retry=True,
        )
        return rule['ConfigRules'][0]
    except is_boto3_error_code('NoSuchConfigRuleException'):
        return None
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def resource_exists(client, module, params):
    try:
        channel = client.describe_delivery_channels(
            DeliveryChannelNames=[params['name']],
            aws_retry=True,
        )
        return channel['DeliveryChannels'][0]
    except is_boto3_error_code('NoSuchDeliveryChannelException'):
        return None
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def get_kms_policies(connection, module, key_id):
    try:
        policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames']
        return [get_key_policy_with_backoff(connection, key_id, policy)['Policy']
                for policy in policies]
    except is_boto3_error_code('AccessDeniedException'):
        return []
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to obtain key policies")
def describe_pipeline(client, name, version, module):
    pipeline = {}
    try:
        if version is not None:
            pipeline = client.get_pipeline(name=name, version=version)
        else:
            pipeline = client.get_pipeline(name=name)
        return pipeline
    except is_boto3_error_code('PipelineNotFoundException'):
        return pipeline
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e)
def peer_status(client, module):
    params = dict()
    params['VpcPeeringConnectionIds'] = [module.params.get('peering_id')]
    try:
        vpc_peering_connection = client.describe_vpc_peering_connections(**params)
        return vpc_peering_connection['VpcPeeringConnections'][0]['Status']['Code']
    except is_boto3_error_code('InvalidVpcPeeringConnectionId.Malformed') as e:
        module.fail_json(msg='Malformed connection ID: {0}'.format(e),
                         traceback=traceback.format_exc())
    except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
        module.fail_json(msg='Error while describing peering connection by peering_id: {0}'.format(e),
                         traceback=traceback.format_exc())
def _add_tags_for_hosts(connection, hosts, strict):
    for host in hosts:
        if 'DBInstanceArn' in host:
            resource_arn = host['DBInstanceArn']
        else:
            resource_arn = host['DBClusterArn']

        try:
            tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']
        except is_boto3_error_code('AccessDenied') as e:
            if not strict:
                tags = []
            else:
                raise e
        host['Tags'] = tags
def wrapper(f, *args, **kwargs):
    try:
        results = f(*args, **kwargs)
        if 'DBInstances' in results:
            results = results['DBInstances']
        else:
            results = results['DBClusters']
        _add_tags_for_hosts(connection, results, strict)
    except is_boto3_error_code('AccessDenied') as e:  # pylint: disable=duplicate-except
        if not strict:
            results = []
        else:
            raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
    return results
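
# `wrapper` reads `connection` and `strict` as free variables, and it raises AnsibleError
# rather than calling fail_json, which suggests it comes from an inventory-plugin context
# where it is defined inside an enclosing scope that supplies both names. A hedged usage
# sketch, assuming `f` is any callable returning a DescribeDBInstances- or
# DescribeDBClusters-shaped response:
#
#     instances = wrapper(connection.describe_db_instances)  # -> list of instance dicts
#     clusters = wrapper(connection.describe_db_clusters)    # -> list of cluster dicts
#
# Each returned host dict carries the raw boto3 TagList under its 'Tags' key, merged in
# by _add_tags_for_hosts above.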
def delete_cluster(module, redshift):
    """
    Delete a cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """
    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    params = {}
    for p in ('skip_final_cluster_snapshot', 'final_cluster_snapshot_identifier'):
        if p in module.params:
            # https://github.com/boto/boto3/issues/400
            if module.params.get(p) is not None:
                params[p] = module.params.get(p)

    try:
        redshift.delete_cluster(
            ClusterIdentifier=identifier,
            **snake_dict_to_camel_dict(params, capitalize_first=True))
    except is_boto3_error_code('ClusterNotFound'):
        return (False, {})
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to delete cluster")

    if wait:
        attempts = wait_timeout // 60
        waiter = redshift.get_waiter('cluster_deleted')
        try:
            waiter.wait(ClusterIdentifier=identifier,
                        WaiterConfig=dict(MaxAttempts=attempts))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Timeout deleting the cluster")

    return (True, {})
def await_stack_instance_completion(module, cfn, stack_set_name, max_wait):
    # start from an empty list so the timeout warning below cannot fail on a
    # never-assigned value if every poll raised StackSetNotFound
    to_await = []
    for i in range(max_wait // 15):
        try:
            stack_instances = cfn.list_stack_instances(StackSetName=stack_set_name)
            to_await = [inst for inst in stack_instances['Summaries']
                        if inst['Status'] != 'CURRENT']
            if not to_await:
                return stack_instances['Summaries']
        except is_boto3_error_code('StackSetNotFound'):
            # this means the deletion beat us, or the stack set is not yet propagated
            pass
        time.sleep(15)

    module.warn(
        "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format(
            stack_set_name, ', '.join(s['StackId'] for s in to_await), max_wait))
def get_kms_tags(connection, module, key_id):
    # Handle pagination here as list_resource_tags does not have a paginator
    kwargs = {}
    tags = []
    more = True
    while more:
        try:
            tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs)
            tags.extend(tag_response['Tags'])
        except is_boto3_error_code('AccessDeniedException'):
            tag_response = {}
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(e, msg="Failed to obtain key tags")
        if tag_response.get('NextMarker'):
            kwargs['Marker'] = tag_response['NextMarker']
        else:
            more = False
    return tags
def describe_stack_tree(module, stack_set_name, operation_ids=None):
    cfn = module.client('cloudformation', retry_decorator=AWSRetry.jittered_backoff(retries=5, delay=3, max_delay=5))
    result = dict()
    result['stack_set'] = camel_dict_to_snake_dict(
        cfn.describe_stack_set(
            StackSetName=stack_set_name,
            aws_retry=True,
        )['StackSet'])
    result['stack_set']['tags'] = boto3_tag_list_to_ansible_dict(result['stack_set']['tags'])
    result['operations_log'] = sorted(
        camel_dict_to_snake_dict(
            cfn.list_stack_set_operations(
                StackSetName=stack_set_name,
                aws_retry=True,
            ))['summaries'],
        key=lambda x: x['creation_timestamp'])
    result['stack_instances'] = sorted(
        [camel_dict_to_snake_dict(i) for i in
         cfn.list_stack_instances(StackSetName=stack_set_name)['Summaries']],
        key=lambda i: i['region'] + i['account'])

    if operation_ids:
        result['operations'] = []
        for op_id in operation_ids:
            try:
                result['operations'].append(camel_dict_to_snake_dict(
                    cfn.describe_stack_set_operation(
                        StackSetName=stack_set_name,
                        OperationId=op_id,
                    )['StackSetOperation']))
            except is_boto3_error_code('OperationNotFoundException'):
                pass
    return result
def create_vgw(client, module):
    params = dict()
    params['Type'] = module.params.get('type')
    if module.params.get('asn'):
        params['AmazonSideAsn'] = module.params.get('asn')

    try:
        response = client.create_vpn_gateway(**params)
        get_waiter(client, 'vpn_gateway_exists').wait(
            VpnGatewayIds=[response['VpnGateway']['VpnGatewayId']])
    except botocore.exceptions.WaiterError:
        module.fail_json(msg="Failed to wait for Vpn Gateway {0} to be available".format(response['VpnGateway']['VpnGatewayId']),
                         exception=traceback.format_exc())
    except is_boto3_error_code('VpnGatewayLimitExceeded'):
        module.fail_json(msg="Too many VPN gateways exist in this account.",
                         exception=traceback.format_exc())
    except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    return response
def describe_eni_with_backoff(ec2, module, device_id):
    try:
        return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id])
    except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e:
        module.fail_json_aws(e, msg="Couldn't get list of network interfaces.")
def create_cluster(module, redshift):
    """
    Create a new cluster.

    module: AnsibleModule object
    redshift: authenticated redshift connection object

    Returns: (changed, cluster_facts)
    """
    identifier = module.params.get('identifier')
    node_type = module.params.get('node_type')
    username = module.params.get('username')
    password = module.params.get('password')
    d_b_name = module.params.get('db_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    changed = True
    # Package up the optional parameters
    params = {}
    for p in ('cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port',
              'cluster_version', 'allow_version_upgrade',
              'number_of_nodes', 'publicly_accessible', 'encrypted',
              'elastic_ip', 'enhanced_vpc_routing'):
        # https://github.com/boto/boto3/issues/400
        if module.params.get(p) is not None:
            params[p] = module.params.get(p)

    if d_b_name:
        # 'd_b_name' camelizes to the 'DBName' key that the API expects
        params['d_b_name'] = d_b_name

    try:
        redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
        changed = False
    except is_boto3_error_code('ClusterNotFound'):
        try:
            redshift.create_cluster(ClusterIdentifier=identifier,
                                    NodeType=node_type,
                                    MasterUsername=username,
                                    MasterUserPassword=password,
                                    **snake_dict_to_camel_dict(params, capitalize_first=True))
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json_aws(e, msg="Failed to create cluster")
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to describe cluster")

    if wait:
        attempts = wait_timeout // 60
        waiter = redshift.get_waiter('cluster_available')
        try:
            waiter.wait(ClusterIdentifier=identifier,
                        WaiterConfig=dict(MaxAttempts=attempts))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")

    try:
        resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to describe cluster")

    return (changed, _collect_facts(resource))
def main():
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list'),
        accounts=dict(type='list'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True
    )
    if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')):
        module.fail_json(msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26")

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    cfn = module.client('cloudformation', retry_decorator=AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30))
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg="Can't create a stack set without choosing at least one account. "
                "To get the ID of the current account, use the aws_caller_info module.")

    module.params['accounts'] = [to_native(a) for a in module.params['accounts']]

    stack_params['StackSetName'] = module.params['name']

    if module.params.get('description'):
        stack_params['Description'] = module.params['description']

    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                    "`template_body`, or `template_url`".format(module.params['name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})

    if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params['execution_role_name']

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True, msg='New stack set would be created', meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
            elif unspecified_stacks and module.params.get('purge_stacks'):
                module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
            else:
                # TODO: need to check the template and other settings for correct check mode
                module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params['OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in new_stack_instances)),
                Regions=list(set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in existing_stack_instances)),
                Regions=list(set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        for op in operation_ids:
            await_stack_set_operation(
                module, cfn, operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
        try:
            cfn.delete_stack_set(StackSetName=module.params['name'])
            module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:
            module.fail_json_aws(e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op,
            )
            await_stack_set_operation(
                module, cfn, operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(StackSetName=module.params['name'])
            except is_boto3_error_code('StackSetNotEmptyException') as exc:
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(StackSetName=module.params['name'])
                stack_states = ', '.join('(account={Account}, region={Region}, state={Status})'.format(**i)
                                         for i in instances['Summaries'])
                module.fail_json_aws(exc, msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
            module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))

    result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute", **result)
    module.exit_json(changed=changed, **result)
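
# Ansible modules conventionally end with this guard so main() runs when the module is
# executed by the Ansible runner. The excerpt ends at main(), so this footer is assumed:
if __name__ == '__main__':
    main()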