def main():
    """Module entry point: create a CloudFront invalidation for the requested paths."""
    argument_spec = dict(
        caller_reference=dict(),
        distribution_id=dict(),
        alias=dict(),
        target_paths=dict(required=True, type='list'),
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        mutually_exclusive=[['distribution_id', 'alias']],
    )

    validation_mgr = CloudFrontInvalidationValidationManager(module)
    service_mgr = CloudFrontInvalidationServiceManager(module)

    caller_reference = module.params.get('caller_reference')
    distribution_id = module.params.get('distribution_id')
    alias = module.params.get('alias')
    target_paths = module.params.get('target_paths')

    result = {}

    # Resolve the distribution id (the alias may have been supplied instead).
    distribution_id = validation_mgr.validate_distribution_id(distribution_id, alias)
    valid_target_paths = validation_mgr.validate_invalidation_batch(target_paths, caller_reference)
    # The CloudFront API expects PascalCase keys.
    valid_pascal_target_paths = snake_dict_to_camel_dict(valid_target_paths, True)
    result, changed = service_mgr.create_invalidation(distribution_id, valid_pascal_target_paths)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
def create_or_update_bucket_cors(connection, module):
    """Ensure the S3 bucket's CORS configuration matches the requested rules."""
    name = module.params.get("name")
    rules = module.params.get("rules", [])
    changed = False

    try:
        existing_rules = connection.get_bucket_cors(Bucket=name)['CORSRules']
    except ClientError:
        # No CORS configuration present (or unreadable): compare against empty.
        existing_rules = []

    desired_rules = snake_dict_to_camel_dict(rules, capitalize_first=True)

    # compare_policies() takes two dicts and makes them hashable for comparison
    if compare_policies(desired_rules, existing_rules):
        changed = True

    if changed:
        try:
            connection.put_bucket_cors(
                Bucket=name,
                CORSConfiguration={'CORSRules': desired_rules})
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name))

    module.exit_json(changed=changed, name=name, rules=rules)
def update_sqs_queue(module, client, queue_url):
    """Reconcile the queue's attributes with the module parameters.

    Returns a tuple ``(changed, queue_arn)``.
    """
    check_mode = module.check_mode
    changed = False
    existing_attributes = client.get_queue_attributes(
        QueueUrl=queue_url, AttributeNames=['All'], aws_retry=True)['Attributes']
    new_attributes = snake_dict_to_camel_dict(module.params, capitalize_first=True)
    attributes_to_set = dict()

    # Boto3 SQS deals with policies as strings, we want to deal with them as
    # dicts
    if module.params.get('policy') is not None:
        policy = module.params.get('policy')
        current_value = existing_attributes.get('Policy', '{}')
        current_policy = json.loads(current_value)
        if compare_policies(current_policy, policy):
            attributes_to_set['Policy'] = json.dumps(policy)
            changed = True
    if module.params.get('redrive_policy') is not None:
        policy = module.params.get('redrive_policy')
        current_value = existing_attributes.get('RedrivePolicy', '{}')
        current_policy = json.loads(current_value)
        if compare_policies(current_policy, policy):
            attributes_to_set['RedrivePolicy'] = json.dumps(policy)
            changed = True

    for attribute, value in existing_attributes.items():
        # We handle these as a special case because they're IAM policies
        if attribute in ['Policy', 'RedrivePolicy']:
            continue
        if attribute not in new_attributes.keys():
            continue
        if new_attributes.get(attribute) is None:
            continue

        new_value = new_attributes[attribute]
        if isinstance(new_value, bool):
            # Normalize both sides so True compares equal to "true".
            # BUG FIX: the original lowercased an undefined name
            # (`existing_value`), raising NameError for any boolean attribute.
            new_value = str(new_value).lower()
            value = str(value).lower()

        if new_value == value:
            continue
        # Boto3 expects strings
        attributes_to_set[attribute] = str(new_value)
        changed = True

    if changed and not check_mode:
        client.set_queue_attributes(
            QueueUrl=queue_url, Attributes=attributes_to_set, aws_retry=True)

    # NOTE(review): attributes returned by AWS use CamelCase keys ('QueueArn'),
    # so 'queue_arn' looks like it is always None here — confirm against callers.
    return changed, existing_attributes.get('queue_arn')
def set_api_params(module, module_params):
    """
    Sets module parameters to those expected by the boto3 API.

    :param module:
    :param module_params:
    :return:
    """
    # Keep only the recognised parameters that were actually supplied.
    api_params = {
        key: value
        for key, value in dict(module.params).items()
        if key in module_params and value is not None
    }
    return snake_dict_to_camel_dict(api_params)
def create_route_spec(connection, module, vpc_id):
    """Convert the module's 'routes' parameter into the CamelCase spec boto3 expects.

    Mutates the route dicts from module.params in place before conversion.
    """
    routes = module.params.get('routes')

    for route_spec in routes:
        # The module exposes 'dest'; the API wants 'destination_cidr_block'.
        rename_key(route_spec, 'dest', 'destination_cidr_block')

        gateway = route_spec.get('gateway_id')
        if gateway and gateway.lower() == 'igw':
            # 'igw' is shorthand for the VPC's internet gateway — look it up.
            route_spec['gateway_id'] = find_igw(connection, module, vpc_id)

        # NAT gateway ids are passed under a different key.
        if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'):
            rename_key(route_spec, 'gateway_id', 'nat_gateway_id')

    return snake_dict_to_camel_dict(routes, capitalize_first=True)
def create_or_update_project(client, params, module):
    """Create a CodeBuild project, or update the existing one with the same name.

    Returns a tuple ``(response, changed)``.
    """
    resp = {}
    name = params['name']

    # clean up params: drop unset options and convert to the API's CamelCase.
    formatted_params = snake_dict_to_camel_dict(
        {k: v for k, v in params.items() if v is not None})

    # Restrict to the arguments each API call actually accepts.
    permitted_create_params = get_boto3_client_method_parameters(client, 'create_project')
    permitted_update_params = get_boto3_client_method_parameters(client, 'update_project')

    formatted_create_params = {k: v for k, v in formatted_params.items()
                               if k in permitted_create_params}
    formatted_update_params = {k: v for k, v in formatted_params.items()
                               if k in permitted_update_params}

    # Check if project with that name already exists and if so update existing:
    found = describe_project(client=client, name=name, module=module)
    changed = False

    if 'name' in found:
        found_project = found
        resp = update_project(client=client, params=formatted_update_params, module=module)
        updated_project = resp['project']

        # Prep both dicts for sensible change comparison:
        found_project.pop('lastModified')
        updated_project.pop('lastModified')
        if 'tags' not in updated_project:
            updated_project['tags'] = []

        if updated_project != found_project:
            changed = True
        return resp, changed

    # Or create new project:
    try:
        resp = client.create_project(**formatted_create_params)
        changed = True
        return resp, changed
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to create CodeBuild project")
def delete_cluster(module, redshift):
    """
    Delete a cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """
    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    params = {}
    for p in ('skip_final_cluster_snapshot', 'final_cluster_snapshot_identifier'):
        # https://github.com/boto/boto3/issues/400
        if p in module.params and module.params.get(p) is not None:
            params[p] = module.params.get(p)

    try:
        _delete_cluster(
            redshift,
            ClusterIdentifier=identifier,
            **snake_dict_to_camel_dict(params, capitalize_first=True))
    except is_boto3_error_code('ClusterNotFound'):
        # Cluster is already gone: nothing to delete.
        return(False, {})
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to delete cluster")

    if wait:
        attempts = wait_timeout // 60
        waiter = redshift.get_waiter('cluster_deleted')
        try:
            waiter.wait(
                ClusterIdentifier=identifier,
                WaiterConfig=dict(MaxAttempts=attempts))
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Timeout deleting the cluster")

    return(True, {})
def create_launch_config(connection, module):
    """Create the launch configuration if it does not already exist and report it.

    Exits via module.exit_json() with the (snake_cased) launch configuration
    details, or module.fail_json() on error.
    """
    name = module.params.get('name')
    vpc_id = module.params.get('vpc_id')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        ec2_connection = boto3_conn(module, 'client', 'ec2', region, ec2_url,
                                    **aws_connect_kwargs)
        security_groups = get_ec2_security_group_ids_from_names(
            module.params.get('security_groups'), ec2_connection, vpc_id=vpc_id, boto3=True)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to get Security Group IDs",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))
    except ValueError as e:
        module.fail_json(msg="Failed to get Security Group IDs",
                         exception=traceback.format_exc())

    user_data = module.params.get('user_data')
    user_data_path = module.params.get('user_data_path')
    volumes = module.params['volumes']
    instance_monitoring = module.params.get('instance_monitoring')
    assign_public_ip = module.params.get('assign_public_ip')
    instance_profile_name = module.params.get('instance_profile_name')
    ebs_optimized = module.params.get('ebs_optimized')
    classic_link_vpc_id = module.params.get('classic_link_vpc_id')
    classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')

    block_device_mapping = []

    # Scalar parameters converted wholesale to the API's CamelCase form.
    # BUG FIX: 'instance_type' was listed twice; the duplicate has been removed.
    convert_list = [
        'image_id', 'instance_type', 'instance_id', 'placement_tenancy',
        'key_name', 'kernel_id', 'ramdisk_id', 'spot_price'
    ]

    launch_config = (snake_dict_to_camel_dict(
        dict((k.capitalize(), str(v))
             for k, v in module.params.items()
             if v is not None and k in convert_list)))

    if user_data_path:
        try:
            # File contents take precedence over the inline user_data parameter.
            with open(user_data_path, 'r') as user_data_file:
                user_data = user_data_file.read()
        except IOError as e:
            module.fail_json(msg="Failed to open file for reading",
                             exception=traceback.format_exc())

    if volumes:
        for volume in volumes:
            if 'device_name' not in volume:
                module.fail_json(msg='Device name must be set for volume')
            # Minimum volume size is 1GiB. We'll use volume size explicitly set
            # to 0 to be a signal not to create this volume
            if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                block_device_mapping.append(create_block_device_meta(module, volume))

    try:
        launch_configs = connection.describe_launch_configurations(
            LaunchConfigurationNames=[name]).get('LaunchConfigurations')
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Failed to describe launch configuration by name",
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))

    changed = False
    result = {}

    launch_config['LaunchConfigurationName'] = name

    # Only pass optional settings that were actually supplied.
    if security_groups is not None:
        launch_config['SecurityGroups'] = security_groups
    if classic_link_vpc_id is not None:
        launch_config['ClassicLinkVPCId'] = classic_link_vpc_id
    if instance_monitoring is not None:
        launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring}
    if classic_link_vpc_security_groups is not None:
        launch_config['ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups
    if block_device_mapping:
        launch_config['BlockDeviceMappings'] = block_device_mapping
    if instance_profile_name is not None:
        launch_config['IamInstanceProfile'] = instance_profile_name
    if assign_public_ip is not None:
        launch_config['AssociatePublicIpAddress'] = assign_public_ip
    if user_data is not None:
        launch_config['UserData'] = user_data
    if ebs_optimized is not None:
        launch_config['EbsOptimized'] = ebs_optimized

    if len(launch_configs) == 0:
        # No existing launch configuration with this name: create one.
        try:
            connection.create_launch_configuration(**launch_config)
            launch_configs = connection.describe_launch_configurations(
                LaunchConfigurationNames=[name]).get('LaunchConfigurations')
            changed = True
            if launch_configs:
                launch_config = launch_configs[0]
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg="Failed to create launch configuration",
                             exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(e.response))

    # Build the result, handling the fields below specially.
    result = (dict((k, v) for k, v in launch_config.items()
                   if k not in ['Connection', 'CreatedTime',
                                'InstanceMonitoring', 'BlockDeviceMappings']))

    result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))

    try:
        result['InstanceMonitoring'] = module.boolean(
            launch_config.get('InstanceMonitoring').get('Enabled'))
    except AttributeError:
        # InstanceMonitoring absent or not a dict.
        result['InstanceMonitoring'] = False

    result['BlockDeviceMappings'] = []
    for block_device_mapping in launch_config.get('BlockDeviceMappings', []):
        result['BlockDeviceMappings'].append(
            dict(device_name=block_device_mapping.get('DeviceName'),
                 virtual_name=block_device_mapping.get('VirtualName')))
        if block_device_mapping.get('Ebs') is not None:
            result['BlockDeviceMappings'][-1]['ebs'] = dict(
                snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'),
                volume_size=block_device_mapping.get('Ebs').get('VolumeSize'))

    if user_data_path:
        result['UserData'] = "hidden"  # Otherwise, we dump binary to the user's terminal

    return_object = {
        'Name': result.get('LaunchConfigurationName'),
        'CreatedTime': result.get('CreatedTime'),
        'ImageId': result.get('ImageId'),
        'Arn': result.get('LaunchConfigurationARN'),
        'SecurityGroups': result.get('SecurityGroups'),
        'InstanceType': result.get('InstanceType'),
        'Result': result
    }

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(return_object))
def modify_cluster(module, redshift):
    """
    Modify an existing cluster.

    module: Ansible module object
    redshift: authenticated redshift connection object
    """
    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    tags = module.params.get('tags')
    purge_tags = module.params.get('purge_tags')
    # BUG FIX: was a duplicated chained assignment ('region = region = ...').
    region = module.params.get('region')

    # Package up the optional parameters
    params = {}
    for p in ('cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port', 'cluster_version',
              'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
        # https://github.com/boto/boto3/issues/400
        if module.params.get(p) is not None:
            params[p] = module.params.get(p)

    # enhanced_vpc_routing parameter change needs an exclusive request
    if module.params.get('enhanced_vpc_routing') is not None:
        try:
            _modify_cluster(
                redshift,
                ClusterIdentifier=identifier,
                EnhancedVpcRouting=module.params.get('enhanced_vpc_routing'))
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)
        if wait:
            attempts = wait_timeout // 60
            waiter = redshift.get_waiter('cluster_available')
            try:
                waiter.wait(
                    ClusterIdentifier=identifier,
                    WaiterConfig=dict(MaxAttempts=attempts))
            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                module.fail_json_aws(e, msg="Timeout waiting for cluster enhanced vpc routing modification")

    # change the rest
    try:
        _modify_cluster(
            redshift,
            ClusterIdentifier=identifier,
            **snake_dict_to_camel_dict(params, capitalize_first=True))
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)

    # The cluster may have been renamed; wait on (and describe) the new name.
    if module.params.get('new_cluster_identifier'):
        identifier = module.params.get('new_cluster_identifier')

    if wait:
        attempts = wait_timeout // 60
        waiter2 = redshift.get_waiter('cluster_available')
        try:
            waiter2.wait(
                ClusterIdentifier=identifier,
                WaiterConfig=dict(MaxAttempts=attempts)
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Timeout waiting for cluster modification")

    try:
        resource = _describe_cluster(redshift, identifier)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        # BUG FIX: was module.fail_json(e, ...), which passes the exception as a
        # positional argument fail_json does not accept; fail_json_aws is the
        # correct API for AWS errors (and matches the rest of this function).
        module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier)

    if _ensure_tags(redshift, identifier, resource['Tags'], module):
        resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]

    return(True, _collect_facts(resource))
def create_cluster(module, redshift):
    """
    Create a new cluster

    module: AnsibleAWSModule object
    redshift: authenticated redshift connection object

    Returns:
    """
    identifier = module.params.get('identifier')
    node_type = module.params.get('node_type')
    username = module.params.get('username')
    password = module.params.get('password')
    d_b_name = module.params.get('db_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    tags = module.params.get('tags')
    changed = True

    # Package up the optional parameters
    optional = ('cluster_type', 'cluster_security_groups',
                'vpc_security_group_ids', 'cluster_subnet_group_name',
                'availability_zone', 'preferred_maintenance_window',
                'cluster_parameter_group_name',
                'automated_snapshot_retention_period', 'port',
                'cluster_version', 'allow_version_upgrade', 'number_of_nodes',
                'publicly_accessible', 'encrypted', 'elastic_ip',
                'enhanced_vpc_routing')
    params = {}
    for p in optional:
        # https://github.com/boto/boto3/issues/400
        if module.params.get(p) is not None:
            params[p] = module.params.get(p)

    if d_b_name:
        # snake_dict_to_camel_dict turns 'd_b_name' into the 'DBName' the API wants.
        params['d_b_name'] = d_b_name
    if tags:
        tags = ansible_dict_to_boto3_tag_list(tags)
        params['tags'] = tags

    try:
        _describe_cluster(redshift, identifier)
        # Describe succeeded: the cluster already exists, nothing to create.
        changed = False
    except is_boto3_error_code('ClusterNotFound'):
        try:
            _create_cluster(
                redshift,
                ClusterIdentifier=identifier,
                NodeType=node_type,
                MasterUsername=username,
                MasterUserPassword=password,
                **snake_dict_to_camel_dict(params, capitalize_first=True))
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json_aws(e, msg="Failed to create cluster")
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to describe cluster")

    if wait:
        attempts = wait_timeout // 60
        waiter = redshift.get_waiter('cluster_available')
        try:
            waiter.wait(
                ClusterIdentifier=identifier,
                WaiterConfig=dict(MaxAttempts=attempts)
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")

    try:
        resource = _describe_cluster(redshift, identifier)
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to describe cluster")

    if tags:
        if _ensure_tags(redshift, identifier, resource['Tags'], module):
            changed = True
            resource = _describe_cluster(redshift, identifier)

    return(changed, _collect_facts(resource))
def main():
    # WAFv2 web ACL module entry point: create, update, or delete a web ACL
    # (and optionally manage individual rules on state=absent).
    arg_spec = dict(
        state=dict(type='str', required=True, choices=['present', 'absent']),
        name=dict(type='str', required=True),
        scope=dict(type='str', required=True, choices=['CLOUDFRONT', 'REGIONAL']),
        description=dict(type='str'),
        default_action=dict(type='str', choices=['Block', 'Allow']),
        rules=dict(type='list', elements='dict'),
        sampled_requests=dict(type='bool', default=False),
        cloudwatch_metrics=dict(type='bool', default=True),
        metric_name=dict(type='str'),
        tags=dict(type='dict'),
        purge_rules=dict(default=True, type='bool'))

    module = AnsibleAWSModule(
        argument_spec=arg_spec,
        supports_check_mode=True,
        required_if=[['state', 'present', ['default_action', 'rules']]])

    state = module.params.get("state")
    name = module.params.get("name")
    scope = module.params.get("scope")
    description = module.params.get("description")
    default_action = module.params.get("default_action")
    rules = module.params.get("rules")
    sampled_requests = module.params.get("sampled_requests")
    cloudwatch_metrics = module.params.get("cloudwatch_metrics")
    metric_name = module.params.get("metric_name")
    tags = module.params.get("tags")
    purge_rules = module.params.get("purge_rules")
    check_mode = module.check_mode

    # Translate the string choice into the API's action-object form.
    if default_action == 'Block':
        default_action = {'Block': {}}
    elif default_action == 'Allow':
        default_action = {'Allow': {}}

    if rules:
        # Rebuild the rule list in the (WAFv2-specific) CamelCase the API expects.
        rules = []
        for rule in module.params.get("rules"):
            rules.append(
                wafv2_snake_dict_to_camel_dict(
                    snake_dict_to_camel_dict(rule, capitalize_first=True)))

    # CloudWatch metric name defaults to the web ACL name.
    if not metric_name:
        metric_name = name

    web_acl = WebACL(module.client('wafv2'), name, scope, module.fail_json_aws)
    change = False
    retval = {}

    if state == 'present':
        if web_acl.get():
            # Web ACL exists: detect drift in rules, description, default action.
            change, rules = compare_priority_rules(
                web_acl.get().get('WebACL').get('Rules'), rules, purge_rules, state)
            change = change or web_acl.get().get('WebACL').get(
                'Description') != description
            change = change or web_acl.get().get('WebACL').get(
                'DefaultAction') != default_action

            if change and not check_mode:
                retval = web_acl.update(default_action, description, rules,
                                        sampled_requests, cloudwatch_metrics,
                                        metric_name)
            else:
                # No change (or check mode): report the current state.
                retval = web_acl.get().get('WebACL')
        else:
            change = True
            if not check_mode:
                retval = web_acl.create(default_action, rules, sampled_requests,
                                        cloudwatch_metrics, metric_name, tags,
                                        description)

    elif state == 'absent':
        if web_acl.get():
            if rules:
                # With rules given, absent removes just those rules (via update),
                # not the whole web ACL.
                if len(rules) > 0:
                    change, rules = compare_priority_rules(
                        web_acl.get().get('WebACL').get('Rules'), rules,
                        purge_rules, state)
                    if change and not check_mode:
                        retval = web_acl.update(default_action, description,
                                                rules, sampled_requests,
                                                cloudwatch_metrics, metric_name)
            else:
                # No rules given: remove the whole web ACL.
                change = True
                if not check_mode:
                    retval = web_acl.remove()

    module.exit_json(changed=change, **camel_dict_to_snake_dict(retval))
def main():
    """ECS service module entry point: create, update, or delete an ECS service."""
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False, default=[], type='list', elements='dict'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, default='', type='str', no_log=False),
        role=dict(required=False, default='', type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10),
        force_new_deployment=dict(required=False, default=False, type='bool'),
        deployment_configuration=dict(required=False, default={}, type='dict'),
        placement_constraints=dict(
            required=False,
            default=[],
            type='list',
            elements='dict',
            options=dict(type=dict(type='str'), expression=dict(type='str'))),
        placement_strategy=dict(
            required=False,
            default=[],
            type='list',
            elements='dict',
            options=dict(
                type=dict(type='str'),
                field=dict(type='str'),
            )),
        health_check_grace_period_seconds=dict(required=False, type='int'),
        network_configuration=dict(
            required=False,
            type='dict',
            options=dict(
                subnets=dict(type='list', elements='str'),
                security_groups=dict(type='list', elements='str'),
                assign_public_ip=dict(type='bool'))),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        service_registries=dict(required=False, type='list', default=[], elements='dict'),
        scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']))

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[('state', 'present', ['task_definition']),
                     ('launch_type', 'FARGATE', ['network_configuration'])],
        required_together=[['load_balancers', 'role']])

    if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA':
        if module.params['desired_count'] is None:
            module.fail_json(
                msg='state is present, scheduling_strategy is REPLICA; missing desired_count')

    service_mgr = EcsServiceManager(module)
    if module.params['network_configuration']:
        if not service_mgr.ecs_api_handles_network_configuration():
            module.fail_json(
                msg='botocore needs to be version 1.7.44 or higher to use network configuration')
        network_configuration = service_mgr.format_network_configuration(
            module.params['network_configuration'])
    else:
        network_configuration = None

    deployment_configuration = map_complex_type(
        module.params['deployment_configuration'],
        DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
    serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries']))

    try:
        existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '" +
                             module.params['name'] + "' in cluster '" +
                             module.params['cluster'] + "': " + str(e))

    results = dict(changed=False)

    # Feature gates: give a helpful error if botocore is too old for a requested option.
    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg='botocore needs to be version 1.8.4 or higher to use launch_type')
    if module.params['force_new_deployment']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(
                msg='botocore needs to be version 1.8.4 or higher to use force_new_deployment')
    if module.params['health_check_grace_period_seconds']:
        if not module.botocore_at_least('1.8.20'):
            module.fail_json(
                msg='botocore needs to be version 1.8.20 or higher to use health_check_grace_period_seconds')

    if module.params['state'] == 'present':

        matching = False
        update = False

        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if module.params['force_new_deployment']:
                update = True
            elif service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = existing
            else:
                update = True

        if not matching:
            if not module.check_mode:

                role = module.params['role']
                clientToken = module.params['client_token']

                loadBalancers = []
                for loadBalancer in module.params['load_balancers']:
                    # The API requires an integer port.
                    if 'containerPort' in loadBalancer:
                        loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
                    loadBalancers.append(loadBalancer)
                # BUG FIX: a second loop re-converting 'containerPort' over the
                # same (shared) dicts was redundant and has been removed.

                if update:
                    # check various parameters and boto versions and give a helpful error in boto is not new enough for feature
                    if module.params['scheduling_strategy']:
                        if not module.botocore_at_least('1.10.37'):
                            module.fail_json(
                                msg='botocore needs to be version 1.10.37 or higher to use scheduling_strategy')
                        elif (existing['schedulingStrategy']) != module.params['scheduling_strategy']:
                            module.fail_json(
                                msg="It is not possible to update the scheduling strategy of an existing service")

                    if module.params['service_registries']:
                        if not module.botocore_at_least('1.9.15'):
                            module.fail_json(
                                msg='botocore needs to be version 1.9.15 or higher to use service_registries')
                        elif (existing['serviceRegistries'] or []) != serviceRegistries:
                            module.fail_json(
                                msg="It is not possible to update the service registries of an existing service")

                    if (existing['loadBalancers'] or []) != loadBalancers:
                        module.fail_json(
                            msg="It is not possible to update the load balancers of an existing service")

                    # update required
                    response = service_mgr.update_service(
                        module.params['name'], module.params['cluster'],
                        module.params['task_definition'],
                        module.params['desired_count'], deploymentConfiguration,
                        network_configuration,
                        module.params['health_check_grace_period_seconds'],
                        module.params['force_new_deployment'])
                else:
                    try:
                        response = service_mgr.create_service(
                            module.params['name'], module.params['cluster'],
                            module.params['task_definition'], loadBalancers,
                            module.params['desired_count'], clientToken, role,
                            deploymentConfiguration,
                            module.params['placement_constraints'],
                            module.params['placement_strategy'],
                            module.params['health_check_grace_period_seconds'],
                            network_configuration, serviceRegistries,
                            module.params['launch_type'],
                            module.params['scheduling_strategy'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't create service")

                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(module.params['name'],
                                                   module.params['cluster'])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't delete service")
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            module.fail_json(msg="Service '" + module.params['name'] + " not found.")
            return
        # it exists, so we should delete it and mark changed.
        # return info about the cluster deleted
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'],
                                                    module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
            # BUG FIX: used 'is' for integer comparison, which relies on
            # CPython small-int caching; '==' is the correct equality test.
            if i == repeat - 1:
                module.fail_json(
                    msg="Service still not deleted after " + str(repeat) +
                        " tries of " + str(delay) + " seconds each.")
                return

    module.exit_json(**results)
def as_dict(self):
    """Return this object's attributes as a camelCase dict, excluding tags.

    BUG FIX: operate on a copy of ``self.__dict__`` — the original popped
    "tags" from the instance's attribute dict itself, destroying the
    instance's ``tags`` attribute as a side effect of a read-only call.
    """
    result = dict(self.__dict__)
    # Default of None makes this safe even if the instance has no tags attribute.
    result.pop("tags", None)
    return snake_dict_to_camel_dict(result)