def main():
    """Entry point for the ec2_launch_template module.

    Builds the launch-template option spec, validates module arguments,
    normalizes IPv6 addresses into the boto3 shape, and dispatches to
    create_or_update/delete_template based on ``state``.
    """
    # Sub-spec describing every launch-template field the module accepts.
    # These are merged into arg_spec below and also passed to
    # create_or_update so it knows which params belong to the template.
    template_options = dict(
        block_device_mappings=dict(
            type='list',
            options=dict(
                device_name=dict(),
                ebs=dict(
                    type='dict',
                    options=dict(
                        delete_on_termination=dict(type='bool'),
                        encrypted=dict(type='bool'),
                        iops=dict(type='int'),
                        kms_key_id=dict(),
                        snapshot_id=dict(),
                        volume_size=dict(type='int'),
                        volume_type=dict(),
                    ),
                ),
                no_device=dict(),
                virtual_name=dict(),
            ),
        ),
        cpu_options=dict(
            type='dict',
            options=dict(
                core_count=dict(type='int'),
                threads_per_core=dict(type='int'),
            ),
        ),
        # was: dict(dict(type='dict'), options=...) — a positional-mapping
        # construction that happened to produce the same dict; use the
        # keyword form for consistency with every other option here.
        credit_specification=dict(
            type='dict',
            options=dict(
                cpu_credits=dict(),
            ),
        ),
        disable_api_termination=dict(type='bool'),
        ebs_optimized=dict(type='bool'),
        elastic_gpu_specifications=dict(
            # NOTE: the sub-option really is named "type" (AWS field name).
            options=dict(type=dict()),
            type='list',
        ),
        iam_instance_profile=dict(),
        image_id=dict(),
        instance_initiated_shutdown_behavior=dict(
            choices=['stop', 'terminate']),
        instance_market_options=dict(
            type='dict',
            options=dict(
                market_type=dict(),
                spot_options=dict(
                    type='dict',
                    options=dict(
                        block_duration_minutes=dict(type='int'),
                        instance_interruption_behavior=dict(
                            choices=['hibernate', 'stop', 'terminate']),
                        max_price=dict(),
                        spot_instance_type=dict(
                            choices=['one-time', 'persistent']),
                    ),
                ),
            ),
        ),
        instance_type=dict(),
        kernel_id=dict(),
        key_name=dict(),
        monitoring=dict(
            type='dict',
            options=dict(enabled=dict(type='bool')),
        ),
        network_interfaces=dict(
            type='list',
            options=dict(
                associate_public_ip_address=dict(type='bool'),
                delete_on_termination=dict(type='bool'),
                description=dict(),
                device_index=dict(type='int'),
                groups=dict(type='list'),
                ipv6_address_count=dict(type='int'),
                ipv6_addresses=dict(type='list'),
                network_interface_id=dict(),
                private_ip_address=dict(),
                subnet_id=dict(),
            ),
        ),
        placement=dict(
            options=dict(
                affinity=dict(),
                availability_zone=dict(),
                group_name=dict(),
                host_id=dict(),
                tenancy=dict(),
            ),
            type='dict',
        ),
        ram_disk_id=dict(),
        security_group_ids=dict(type='list'),
        security_groups=dict(type='list'),
        tags=dict(type='dict'),
        user_data=dict(),
    )

    arg_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        template_name=dict(aliases=['name']),
        template_id=dict(aliases=['id']),
        default_version=dict(default='latest'),
    )
    arg_spec.update(template_options)

    module = AnsibleAWSModule(
        argument_spec=arg_spec,
        required_one_of=[
            ('template_name', 'template_id')
        ],
        supports_check_mode=True
    )

    if not module.boto3_at_least('1.6.0'):
        module.fail_json(msg="ec2_launch_template requires boto3 >= 1.6.0")

    # boto3 expects ipv6 addresses as [{'ipv6_address': addr}, ...] rather
    # than a flat list of strings, so rewrite them in place.
    for interface in (module.params.get('network_interfaces') or []):
        if interface.get('ipv6_addresses'):
            interface['ipv6_addresses'] = [{'ipv6_address': x} for x in interface['ipv6_addresses']]

    if module.params.get('state') == 'present':
        out = create_or_update(module, template_options)
        out.update(format_module_output(module))
    elif module.params.get('state') == 'absent':
        out = delete_template(module)
    else:
        # Unreachable in practice (choices restricts state), kept defensively.
        module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get('state')))

    module.exit_json(**out)
def main():
    """Entry point for the rds_instance module.

    Builds the argument spec, sanitizes identifiers, resolves the RDS API
    method to call for the requested state/creation source, applies it,
    waits for the instance to be visible, and exits with a snake_cased
    description of the instance.
    """
    arg_spec = dict(
        state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'),
        creation_source=dict(choices=['snapshot', 's3', 'instance']),
        force_update_password=dict(type='bool', default=False),
        purge_cloudwatch_logs_exports=dict(type='bool', default=True),
        purge_tags=dict(type='bool', default=True),
        read_replica=dict(type='bool'),
        wait=dict(type='bool', default=True),
    )

    # Options that map 1:1 onto RDS API parameters; kept separate from
    # arg_spec so they can be filtered out and converted below.
    parameter_options = dict(
        allocated_storage=dict(type='int'),
        allow_major_version_upgrade=dict(type='bool'),
        apply_immediately=dict(type='bool', default=False),
        auto_minor_version_upgrade=dict(type='bool'),
        availability_zone=dict(aliases=['az', 'zone']),
        backup_retention_period=dict(type='int'),
        ca_certificate_identifier=dict(),
        character_set_name=dict(),
        copy_tags_to_snapshot=dict(type='bool'),
        db_cluster_identifier=dict(aliases=['cluster_id']),
        db_instance_class=dict(aliases=['class', 'instance_type']),
        db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']),
        db_name=dict(),
        db_parameter_group_name=dict(),
        db_security_groups=dict(type='list'),
        db_snapshot_identifier=dict(),
        db_subnet_group_name=dict(aliases=['subnet_group']),
        domain=dict(),
        domain_iam_role_name=dict(),
        enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports']),
        enable_iam_database_authentication=dict(type='bool'),
        enable_performance_insights=dict(type='bool'),
        engine=dict(),
        engine_version=dict(),
        final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']),
        force_failover=dict(type='bool'),
        iops=dict(type='int'),
        kms_key_id=dict(),
        license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license']),
        master_user_password=dict(aliases=['password'], no_log=True),
        master_username=dict(aliases=['username']),
        monitoring_interval=dict(type='int'),
        monitoring_role_arn=dict(),
        multi_az=dict(type='bool'),
        new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']),
        option_group_name=dict(),
        performance_insights_kms_key_id=dict(),
        performance_insights_retention_period=dict(),
        port=dict(type='int'),
        preferred_backup_window=dict(aliases=['backup_window']),
        preferred_maintenance_window=dict(aliases=['maintenance_window']),
        processor_features=dict(type='dict'),
        promotion_tier=dict(),
        publicly_accessible=dict(type='bool'),
        restore_time=dict(),
        s3_bucket_name=dict(),
        s3_ingestion_role_arn=dict(),
        s3_prefix=dict(),
        skip_final_snapshot=dict(type='bool', default=False),
        snapshot_identifier=dict(),
        source_db_instance_identifier=dict(),
        source_engine=dict(choices=['mysql']),
        source_engine_version=dict(),
        source_region=dict(),
        storage_encrypted=dict(type='bool'),
        storage_type=dict(choices=['standard', 'gp2', 'io1']),
        tags=dict(type='dict'),
        tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']),
        tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']),
        timezone=dict(),
        use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']),
        vpc_security_group_ids=dict(type='list')
    )
    arg_spec.update(parameter_options)

    required_if = [
        ('engine', 'aurora', ('db_cluster_identifier',)),
        ('engine', 'aurora-mysql', ('db_cluster_identifier',)),
        # was 'aurora-postresql' — the AWS engine identifier is
        # 'aurora-postgresql', so the misspelled rule could never match.
        ('engine', 'aurora-postgresql', ('db_cluster_identifier',)),
        ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')),
        ('creation_source', 's3', (
            's3_bucket_name', 'engine', 'master_username', 'master_user_password',
            'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
    ]
    mutually_exclusive = [
        ('s3_bucket_name', 'source_db_instance_identifier', 'snapshot_identifier'),
        # was 'restore_to_time', which is not a declared parameter (the
        # declared name is 'restore_time'), making the rule a no-op.
        ('use_latest_restorable_time', 'restore_time'),
        ('availability_zone', 'multi_az'),
    ]

    module = AnsibleAWSModule(
        argument_spec=arg_spec,
        required_if=required_if,
        mutually_exclusive=mutually_exclusive,
        supports_check_mode=True
    )

    if not module.boto3_at_least('1.5.0'):
        module.fail_json(msg="rds_instance requires boto3 > 1.5.0")

    # Sanitize instance identifiers — RDS identifiers are case-insensitive
    # and returned lowercased, so normalize up front for comparisons.
    module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower()
    if module.params['new_db_instance_identifier']:
        module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower()

    # Sanitize processor features — the API expects string values.
    if module.params['processor_features'] is not None:
        module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items())

    client = module.client('rds')
    changed = False
    state = module.params['state']
    instance_id = module.params['db_instance_identifier']
    instance = get_instance(client, module, instance_id)
    validate_options(client, module, instance)
    method_name = get_rds_method_attribute_name(
        instance, state, module.params['creation_source'], module.params['read_replica'])

    if method_name:
        raw_parameters = arg_spec_to_rds_params(
            dict((k, module.params[k]) for k in module.params if k in parameter_options))
        parameters = get_parameters(client, module, raw_parameters, method_name)
        if parameters:
            result, changed = call_method(client, module, method_name, parameters)
        instance_id = get_final_identifier(method_name, module)

        # Check tagging/promoting/rebooting/starting/stopping instance
        if state != 'absent' and (not module.check_mode or instance):
            changed |= update_instance(client, module, instance, instance_id)
        if changed:
            instance = get_instance(client, module, instance_id)
        if state != 'absent' and (instance or not module.check_mode):
            # Newly created instances may take a moment to become visible;
            # poll a bounded number of times before giving up.
            for attempt_to_wait in range(0, 10):
                instance = get_instance(client, module, instance_id)
                if instance:
                    break
                else:
                    sleep(5)
        if state == 'absent' and changed and not module.params['skip_final_snapshot']:
            instance.update(FinalSnapshot=get_final_snapshot(
                client, module, module.params['final_db_snapshot_identifier']))

    # ProcessorFeatures must keep its original casing, so pop it out before
    # the snake_case conversion and restore it afterwards.
    pending_processor_features = None
    if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
        pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures')
    instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])
    if pending_processor_features is not None:
        instance['pending_modified_values']['processor_features'] = pending_processor_features

    module.exit_json(changed=changed, **instance)
def main():
    """Entry point for the cloudformation_stack_set module.

    Creates/updates a CloudFormation stack set and its per-account,
    per-region stack instances, or deletes the stack set (purging
    instances first when required), then reports the resulting tree.
    """
    argument_spec = dict(
        name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        purge_stacks=dict(type='bool', default=True),
        parameters=dict(type='dict', default={}),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list', choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        regions=dict(type='list'),
        accounts=dict(type='list'),
        failure_tolerance=dict(
            type='dict',
            default={},
            options=dict(
                fail_count=dict(type='int'),
                fail_percentage=dict(type='int'),
                parallel_percentage=dict(type='int'),
                parallel_count=dict(type='int'),
            ),
            mutually_exclusive=[
                ['fail_count', 'fail_percentage'],
                ['parallel_count', 'parallel_percentage'],
            ],
        ),
        administration_role_arn=dict(
            aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(
            aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)

    if not (module.boto3_at_least('1.6.0') and module.botocore_at_least('1.10.26')):
        module.fail_json(
            msg="Boto3 or botocore version is too low. This module requires at least boto3 1.6 and botocore 1.10.26"
        )

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    jittered_backoff_decorator = AWSRetry.jittered_backoff(
        retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
    existing_stack_set = stack_set_facts(cfn, module.params['name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']
    if state == 'present' and not module.params['accounts']:
        module.fail_json(
            msg="Can't create a stack set without choosing at least one account. "
                "To get the ID of the current account, use the aws_caller_info module."
        )

    # Guard the coercion: accounts may legitimately be None for state=absent,
    # and iterating None would raise TypeError.
    if module.params['accounts']:
        module.params['accounts'] = [to_native(a) for a in module.params['accounts']]

    stack_params['StackSetName'] = module.params['name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']
    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                    "`template_body`, or `template_url`".format(module.params['name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)
            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])
            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)
            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params['execution_role_name']

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True, msg='New stack set would be created', meta=[])
        elif state == 'present' and existing_stack_set:
            new_stacks, existing_stacks, unspecified_stacks = compare_stack_instances(
                cfn,
                module.params['name'],
                module.params['accounts'],
                module.params['regions'],
            )
            if new_stacks:
                module.exit_json(changed=True, msg='New stack instance(s) would be created', meta=[])
            # was module.params.get('purge_stack_instances') — a key that is
            # never declared in argument_spec (always None), so check mode
            # never reported pending deletions; the declared key is
            # 'purge_stacks'.
            elif unspecified_stacks and module.params.get('purge_stacks'):
                module.exit_json(changed=True, msg='Old stack instance(s) would be deleted', meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            if module.params.get('regions'):
                stack_params['OperationPreferences'] = get_operation_preferences(module)
            changed |= update_stack_set(module, stack_params, cfn)

        # now create/update any appropriate stack instances
        new_stack_instances, existing_stack_instances, unspecified_stack_instances = compare_stack_instances(
            cfn,
            module.params['name'],
            module.params['accounts'],
            module.params['regions'],
        )
        if new_stack_instances:
            operation_ids.append('Ansible-StackInstance-Create-{0}'.format(operation_uuid))
            changed = True
            cfn.create_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in new_stack_instances)),
                Regions=list(set(region for acct, region in new_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        else:
            operation_ids.append('Ansible-StackInstance-Update-{0}'.format(operation_uuid))
            cfn.update_stack_instances(
                StackSetName=module.params['name'],
                Accounts=list(set(acct for acct, region in existing_stack_instances)),
                Regions=list(set(region for acct, region in existing_stack_instances)),
                OperationPreferences=get_operation_preferences(module),
                OperationId=operation_ids[-1],
            )
        for op in operation_ids:
            await_stack_set_operation(
                module, cfn, operation_id=op,
                stack_set_name=module.params['name'],
                max_wait=module.params.get('wait_timeout'),
            )

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(module.params['name']))
        # (removed dead `if module.params.get('purge_stack_instances') is False: pass`
        # — the key was never declared, and the branch did nothing; instance
        # retention is handled via RetainStacks below.)
        try:
            cfn.delete_stack_set(StackSetName=module.params['name'], )
            module.exit_json(msg='Stack set {0} deleted'.format(module.params['name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e, msg='Cannot delete stack {0} while there is an operation in progress'.format(module.params['name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            # the stack set still has instances — delete them first, then retry.
            delete_instances_op = 'Ansible-StackInstance-Delete-{0}'.format(operation_uuid)
            cfn.delete_stack_instances(
                StackSetName=module.params['name'],
                Accounts=module.params['accounts'],
                Regions=module.params['regions'],
                RetainStacks=(not module.params.get('purge_stacks')),
                OperationId=delete_instances_op)
            await_stack_set_operation(
                module, cfn, operation_id=delete_instances_op,
                stack_set_name=stack_params['StackSetName'],
                max_wait=module.params.get('wait_timeout'),
            )
            try:
                cfn.delete_stack_set(StackSetName=module.params['name'], )
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                # this time, it is likely that either the delete failed or there are more stacks.
                instances = cfn.list_stack_instances(StackSetName=module.params['name'], )
                stack_states = ', '.join(
                    '(account={Account}, region={Region}, state={Status})'.format(**i)
                    for i in instances['Summaries'])
                module.fail_json_aws(
                    exc,
                    msg='Could not purge all stacks, or not all accounts/regions were chosen for deletion: ' + stack_states)
            module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['name']))

    result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
    if any(o['status'] == 'FAILED' for o in result['operations']):
        module.fail_json(msg="One or more operations failed to execute", **result)
    module.exit_json(changed=changed, **result)
def main():
    """Entry point for the SERVICE_MANAGED-capable cloudformation stack-set module.

    Creates/updates or deletes a CloudFormation stack set, supporting the
    permission_model and auto_deployment options, then reports the
    resulting stack tree.
    """
    argument_spec = dict(
        stack_set_name=dict(required=True),
        description=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=900),
        state=dict(default='present', choices=['present', 'absent']),
        parameters=dict(type='dict', default={}),
        permission_model=dict(type='str', choices=['SERVICE_MANAGED', 'SELF_MANAGED']),
        auto_deployment=dict(
            # was type=dict (the builtin) — Ansible argument specs take the
            # type name as a string; the sub-`options` validation only
            # applies with type='dict'.
            type='dict',
            default={},
            options=dict(
                enabled=dict(type='bool'),
                retain_stacks_on_account_removal=dict(type='bool'))),
        template=dict(type='path'),
        template_url=dict(),
        template_body=dict(),
        capabilities=dict(type='list', elements='str',
                          choices=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']),
        administration_role_arn=dict(
            aliases=['admin_role_arn', 'administration_role', 'admin_role']),
        execution_role_name=dict(
            aliases=['execution_role', 'exec_role', 'exec_role_name']),
        tags=dict(type='dict'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)

    if not (module.boto3_at_least('1.14.0') and module.botocore_at_least('1.17.7')):
        # message updated to match the versions actually checked above
        module.fail_json(
            msg="Boto3 or botocore version is too low. This module requires at least boto3 1.14.0 and botocore 1.17.7"
        )

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    jittered_backoff_decorator = AWSRetry.jittered_backoff(
        retries=10, delay=3, max_delay=30, catch_extra_error_codes=['StackSetNotFound'])
    cfn = module.client('cloudformation', retry_decorator=jittered_backoff_decorator)
    existing_stack_set = stack_set_facts(cfn, module.params['stack_set_name'])

    operation_uuid = to_native(uuid.uuid4())
    operation_ids = []
    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {}
    state = module.params['state']

    stack_params['StackSetName'] = module.params['stack_set_name']
    if module.params.get('description'):
        stack_params['Description'] = module.params['description']
    if module.params.get('capabilities'):
        stack_params['Capabilities'] = module.params['capabilities']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as tpl:
            stack_params['TemplateBody'] = tpl.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']
    else:
        # no template is provided, but if the stack set exists already, we can use the existing one.
        if existing_stack_set:
            stack_params['UsePreviousTemplate'] = True
        else:
            module.fail_json(
                msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, "
                    "`template_body`, or `template_url`".format(module.params['stack_set_name']))

    stack_params['Parameters'] = []
    for k, v in module.params['parameters'].items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)
            if 'value' in v:
                param['ParameterValue'] = to_native(v['value'])
            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)
            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if module.params.get('tags') and isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])

    if module.params.get('administration_role_arn'):
        # TODO loosen the semantics here to autodetect the account ID and build the ARN
        stack_params['AdministrationRoleARN'] = module.params['administration_role_arn']
    if module.params.get('execution_role_name'):
        stack_params['ExecutionRoleName'] = module.params['execution_role_name']
    if module.params.get('permission_model'):
        stack_params['PermissionModel'] = module.params.get('permission_model')
    if module.params.get('auto_deployment'):
        # translate the snake_case sub-options into the boto3 AutoDeployment shape
        param_auto_deployment = {}
        auto_deployment = module.params.get('auto_deployment')
        if 'enabled' in auto_deployment.keys():
            param_auto_deployment['Enabled'] = auto_deployment['enabled']
        if 'retain_stacks_on_account_removal' in auto_deployment.keys():
            param_auto_deployment['RetainStacksOnAccountRemoval'] = auto_deployment['retain_stacks_on_account_removal']
        stack_params['AutoDeployment'] = param_auto_deployment

    result = {}

    if module.check_mode:
        if state == 'absent' and existing_stack_set:
            module.exit_json(changed=True, msg='Stack set would be deleted', meta=[])
        elif state == 'absent' and not existing_stack_set:
            module.exit_json(changed=False, msg='Stack set doesn\'t exist', meta=[])
        elif state == 'present' and not existing_stack_set:
            module.exit_json(changed=True, msg='New stack set would be created', meta=[])
        elif state == 'present' and existing_stack_set:
            module.exit_json(changed=True, msg='Existing stack set would be updated', meta=[])
        else:
            # TODO: need to check the template and other settings for correct check mode
            module.exit_json(changed=False, msg='No changes detected', meta=[])

    changed = False
    if state == 'present':
        if not existing_stack_set:
            # on create this parameter has a different name, and cannot be referenced later in the job log
            stack_params['ClientRequestToken'] = 'Ansible-StackSet-Create-{0}'.format(operation_uuid)
            changed = True
            create_stack_set(module, stack_params, cfn)
        else:
            stack_params['OperationId'] = 'Ansible-StackSet-Update-{0}'.format(operation_uuid)
            operation_ids.append(stack_params['OperationId'])
            changed |= update_stack_set(module, stack_params, cfn)

    elif state == 'absent':
        if not existing_stack_set:
            module.exit_json(msg='Stack set {0} does not exist'.format(module.params['stack_set_name']))
        try:
            cfn.delete_stack_set(StackSetName=module.params['stack_set_name'], )
            module.exit_json(msg='Stack set {0} deleted'.format(module.params['stack_set_name']))
        except is_boto3_error_code('OperationInProgressException') as e:  # pylint: disable=duplicate-except
            module.fail_json_aws(
                e, msg='Cannot delete stack {0} while there is an operation in progress'.format(
                    module.params['stack_set_name']))
        except is_boto3_error_code('StackSetNotEmptyException'):  # pylint: disable=duplicate-except
            try:
                cfn.delete_stack_set(StackSetName=module.params['stack_set_name'], )
            except is_boto3_error_code('StackSetNotEmptyException') as exc:  # pylint: disable=duplicate-except
                module.fail_json_aws(
                    exc, msg='Could not purge stacks, or not all accounts/regions were chosen for deletion')
            module.exit_json(changed=True, msg='Stack set {0} deleted'.format(module.params['stack_set_name']))

    result.update(**describe_stack_tree(module, stack_params['StackSetName'], operation_ids=operation_ids))
    if 'operations' in result:
        if any(o['status'] == 'FAILED' for o in result['operations']):
            module.fail_json(msg="One or more operations failed to execute", **result)
    module.exit_json(changed=changed, **result)