def convert_snake_to_camel(self, data):
    """Convert snake_case keys to camelCase in a dict or a list of dicts.

    :type data: dict or list
    :return: the converted structure; any other type is returned unchanged
    """
    if isinstance(data, list):
        return [snake_dict_to_camel_dict(entry) for entry in data]
    if isinstance(data, dict):
        return snake_dict_to_camel_dict(data)
    return data
def construct_connection(module):
    """
    Construct a Connection from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: Connection
    """
    params = module.params
    scalar_fields = (
        'id', 'name', 'description', 'speed', 'high_availability',
        'billing_term', 'customer_networks', 'service_key'
    )
    connection = {field: params.get(field) for field in scalar_fields}
    nat_mappings = [dict(native_cidr=cidr) for cidr in params.get('nat_mappings')]
    connection.update(
        type='AZURE_EXPRESS_ROUTE',
        peering=dict(type=params.get('peering_type')),
        location=dict(href=params.get('location_href')),
        nat=dict(enabled=params.get('nat_enabled'), mappings=nat_mappings),
    )
    return snake_dict_to_camel_dict(connection)
def construct_connection(module):
    """
    Construct a Connection from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: pureport.api.client.Connection
    """
    # Copy the plain scalar/list arguments straight from the module params.
    connection = dict(
        (k, module.params.get(k))
        for k in ('id', 'name', 'description', 'speed', 'high_availability',
                  'billing_term', 'customer_asn', 'customer_networks',
                  'primary_customer_vlan', 'secondary_customer_vlan'))
    connection.update(
        dict(type='PORT',
             location=get_object_link(module, '/locations', 'location_id',
                                      'location_href'),
             primary_port=get_object_link(module, '/ports', 'primary_port_id',
                                          'primary_port_href'),
             nat=dict(enabled=module.params.get('nat_enabled'),
                      mappings=[
                          dict(native_cidr=nat_mapping)
                          for nat_mapping in module.params.get('nat_mappings')
                      ])))
    secondary_port = get_object_link(module, '/ports', 'secondary_port_id',
                                     'secondary_port_href')
    # Only attach a secondary port when one was actually supplied.
    if secondary_port is not None:
        connection.update(dict(secondary_port=secondary_port, ))
    connection = snake_dict_to_camel_dict(connection)
    # Correct naming: the camel-case helper emits 'customerAsn', but the API
    # expects 'customerASN', so the key is re-added after conversion; tags
    # are added here too so their keys are not camel-cased.
    connection.update(
        dict(customerASN=connection.pop('customerAsn'),
             tags=module.params.get('tags')))
    return connection
def main():
    """Module entry point: create a CloudFront invalidation batch."""
    argument_spec = dict(
        caller_reference=dict(),
        distribution_id=dict(),
        alias=dict(),
        target_paths=dict(required=True, type='list', elements='str')
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=False,
                              mutually_exclusive=[['distribution_id', 'alias']])

    validator = CloudFrontInvalidationValidationManager(module)
    invalidator = CloudFrontInvalidationServiceManager(module)

    caller_reference = module.params.get('caller_reference')
    distribution_id = module.params.get('distribution_id')
    alias = module.params.get('alias')
    target_paths = module.params.get('target_paths')

    result = {}

    # Resolve the distribution (by id or alias) and validate the batch
    # before converting the batch keys to PascalCase for the API.
    distribution_id = validator.validate_distribution_id(distribution_id, alias)
    batch = validator.validate_invalidation_batch(target_paths, caller_reference)
    pascal_batch = snake_dict_to_camel_dict(batch, True)
    result, changed = invalidator.create_invalidation(distribution_id, pascal_batch)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(result))
def build_launch_specification(launch_spec):
    """
    Remove keys that have a value of None from Launch Specification
    Descend into these subkeys:
    network_interfaces
    block_device_mappings
    monitoring
    placement
    iam_instance_profile
    """
    def _strip_none(mapping):
        # Drop entries whose value is None; keep all falsy-but-set values.
        return dict((key, val) for key, val in mapping.items() if val is not None)

    cleaned = _strip_none(launch_spec)

    for nested in ('placement', 'iam_instance_profile', 'monitoring'):
        if launch_spec[nested] is not None:
            cleaned[nested] = _strip_none(launch_spec[nested])

    if launch_spec['network_interfaces'] is not None:
        cleaned['network_interfaces'] = [
            _strip_none(iface) for iface in launch_spec['network_interfaces']
        ]

    if launch_spec['block_device_mappings'] is not None:
        cleaned['block_device_mappings'] = [
            _strip_none(dev) for dev in launch_spec['block_device_mappings']
        ]

    return snake_dict_to_camel_dict(cleaned, capitalize_first=True)
def construct_connection(module):
    """
    Construct a Connection from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: Connection
    """
    params = module.params
    scalar_fields = ('id', 'name', 'description', 'speed',
                     'high_availability', 'billing_term', 'customer_asn',
                     'customer_networks', 'aws_account_id', 'aws_region')
    connection = {field: params.get(field) for field in scalar_fields}
    cloud_services = [dict(href=href) for href in params.get('cloud_service_hrefs')]
    nat_mappings = [dict(native_cidr=cidr) for cidr in params.get('nat_mappings')]
    connection.update(
        type='AWS_DIRECT_CONNECT',
        peering=dict(type=params.get('peering_type')),
        location=dict(href=params.get('location_href')),
        cloud_services=cloud_services,
        nat=dict(enabled=params.get('nat_enabled'), mappings=nat_mappings),
    )
    connection = snake_dict_to_camel_dict(connection)
    # The API expects 'customerASN'; the camel-case helper emits 'customerAsn'.
    connection['customerASN'] = connection.pop('customerAsn')
    return connection
def set_snmp(meraki, org_id):
    """Configure SNMP (v2c / v3 / peer IPs) for a Meraki organization.

    Builds the desired payload from the module params, compares it against
    the current SNMP settings and only PUTs when an update is required.
    Exits early in check mode with a diff; returns the (new or unchanged)
    SNMP settings dict otherwise.

    :param meraki: the MerakiModule helper (holds params, request, result)
    :param org_id: the organization id used to build the API path
    """
    payload = dict()
    if meraki.params['peer_ips']:
        # peer_ips is a semi-colon delimited string here (see the error
        # message); anything long enough to hold more than one address
        # must contain the delimiter.
        if len(meraki.params['peer_ips']) > 7:
            if ';' not in meraki.params['peer_ips']:
                meraki.fail_json(
                    msg='Peer IP addresses are semi-colon delimited.')
    if meraki.params['v2c_enabled'] is not None:
        payload = {
            'v2cEnabled': meraki.params['v2c_enabled'],
        }
    if meraki.params['v3_enabled'] is True:
        # Validate presence before length so a missing passphrase fails
        # cleanly instead of raising TypeError from len(None).
        if meraki.params['v3_auth_mode'] is None or \
                meraki.params['v3_auth_pass'] is None or \
                meraki.params['v3_priv_mode'] is None or \
                meraki.params['v3_priv_pass'] is None:
            meraki.fail_json(
                msg='v3_auth_mode, v3_auth_pass, v3_priv_mode, and v3_priv_pass are required'
            )
        if len(meraki.params['v3_auth_pass']) < 8 or len(
                meraki.params['v3_priv_pass']) < 8:
            meraki.fail_json(
                msg='v3_auth_pass and v3_priv_pass must both be at least 8 characters long.'
            )
        payload = {
            'v3Enabled': meraki.params['v3_enabled'],
            'v3AuthMode': meraki.params['v3_auth_mode'].upper(),
            'v3AuthPass': meraki.params['v3_auth_pass'],
            'v3PrivMode': meraki.params['v3_priv_mode'].upper(),
            'v3PrivPass': meraki.params['v3_priv_pass'],
        }
        if meraki.params['peer_ips'] is not None:
            payload['peerIps'] = meraki.params['peer_ips']
    elif meraki.params['v3_enabled'] is False:
        payload = {'v3Enabled': False}
    full_compare = snake_dict_to_camel_dict(payload)
    path = meraki.construct_path('create', org_id=org_id)
    snmp = get_snmp(meraki, org_id)
    # Secrets and server-assigned values are excluded from idempotency checks.
    ignored_parameters = [
        'v3AuthPass', 'v3PrivPass', 'hostname', 'port', 'v2CommunityString',
        'v3User'
    ]
    if meraki.is_update_required(snmp, full_compare,
                                 optional_ignore=ignored_parameters):
        if meraki.module.check_mode is True:
            meraki.generate_diff(snmp, full_compare)
            snmp.update(payload)
            meraki.result['data'] = snmp
            meraki.result['changed'] = True
            meraki.exit_json(**meraki.result)
        r = meraki.request(path, method='PUT', payload=json.dumps(payload))
        if meraki.status == 200:
            meraki.generate_diff(snmp, r)
            meraki.result['changed'] = True
            return r
    else:
        return snmp
def build_actions(actions):
    """Return the given action dicts with their keys converted to camelCase."""
    return [snake_dict_to_camel_dict(action) for action in actions]
def update_sqs_queue(module, client, queue_url):
    """Reconcile an SQS queue's attributes with the module parameters.

    :param module: the AnsibleModule (params, check_mode)
    :param client: a boto3 SQS client (with aws_retry support)
    :param queue_url: URL of the queue to update
    :returns: tuple (changed, queue_arn)
    """
    check_mode = module.check_mode
    changed = False
    existing_attributes = client.get_queue_attributes(
        QueueUrl=queue_url, AttributeNames=['All'],
        aws_retry=True)['Attributes']
    new_attributes = snake_dict_to_camel_dict(module.params,
                                              capitalize_first=True)
    attributes_to_set = dict()

    # Boto3 SQS deals with policies as strings, we want to deal with them as
    # dicts
    if module.params.get('policy') is not None:
        policy = module.params.get('policy')
        current_value = existing_attributes.get('Policy', '{}')
        current_policy = json.loads(current_value)
        if compare_policies(current_policy, policy):
            attributes_to_set['Policy'] = json.dumps(policy)
            changed = True
    if module.params.get('redrive_policy') is not None:
        policy = module.params.get('redrive_policy')
        current_value = existing_attributes.get('RedrivePolicy', '{}')
        current_policy = json.loads(current_value)
        if compare_policies(current_policy, policy):
            attributes_to_set['RedrivePolicy'] = json.dumps(policy)
            changed = True

    for attribute, value in existing_attributes.items():
        # We handle these as a special case because they're IAM policies
        if attribute in ['Policy', 'RedrivePolicy']:
            continue
        if attribute not in new_attributes:
            continue
        if new_attributes.get(attribute) is None:
            continue
        new_value = new_attributes[attribute]
        if isinstance(new_value, bool):
            new_value = str(new_value).lower()
            value = str(value).lower()
        if str(new_value) == str(value):
            continue
        # Boto3 expects strings
        attributes_to_set[attribute] = str(new_value)
        changed = True

    if changed and not check_mode:
        client.set_queue_attributes(QueueUrl=queue_url,
                                    Attributes=attributes_to_set,
                                    aws_retry=True)
    # GetQueueAttributes returns PascalCase keys, so the ARN lives under
    # 'QueueArn'; the previous 'queue_arn' lookup always returned None.
    return changed, existing_attributes.get('QueueArn')
def build_group_from_params(params):
    """Build the camelCase group payload from the module parameters."""
    wanted = ("display_name", "description", "group_types", "mail_enabled",
              "mail_nickname", "security_enabled", "owners", "members")
    group = {name: params[name] for name in wanted}
    # An explicitly empty member list must be omitted from the payload.
    if group["members"] == []:
        del group["members"]
    return snake_dict_to_camel_dict(group)
def set_api_params(module, module_params):
    """
    Sets module parameters to those expected by the boto3 API.
    :param module: the AnsibleModule whose params are read
    :param module_params: names of the parameters to forward
    :return: dict of the selected, non-None params with camelCase keys
    """
    api_params = {
        key: value
        for key, value in dict(module.params).items()
        if key in module_params and value is not None
    }
    return snake_dict_to_camel_dict(api_params)
def validate_input(module):
    """Validate the syslog_servers input and index it by server id.

    Exits with NO_CHANGES_MSG when no servers are given, and with
    DUP_ID_MSG when two entries share the same id.
    """
    servers = module.params.get("syslog_servers")
    if not servers:
        module.exit_json(msg=NO_CHANGES_MSG)
    by_id = {}
    for server in servers:
        cleaned = {key: val for key, val in server.items() if val is not None}
        by_id[server.get('id')] = snake_dict_to_camel_dict(
            cleaned, capitalize_first=True)
    # Duplicate ids collapse into one dict entry, shrinking the mapping.
    if len(by_id) < len(servers):
        module.exit_json(msg=DUP_ID_MSG, failed=True)
    return by_id
def main():
    """Expose snake_dict_to_camel_dict as a standalone module."""
    spec = dict(
        data=dict(type='dict', required=True),
        capitalize_first=dict(type='bool', default=False),
    )
    module = AnsibleModule(argument_spec=spec)
    converted = snake_dict_to_camel_dict(
        module.params['data'],
        module.params['capitalize_first'],
    )
    module.exit_json(data=converted)
def create_route_spec(connection, module, vpc_id):
    """Build boto3 CreateRoute specs (PascalCase) from the module's routes.

    Mutates each route dict in place: renames 'dest' to
    'destination_cidr_block', resolves the literal 'igw' shorthand to the
    VPC's internet gateway id, and moves NAT gateway ids ('nat-...') from
    'gateway_id' to 'nat_gateway_id'.
    """
    routes = module.params.get('routes')

    for route_spec in routes:
        rename_key(route_spec, 'dest', 'destination_cidr_block')

        # 'igw' is user shorthand for the VPC's internet gateway.
        if route_spec.get(
                'gateway_id') and route_spec['gateway_id'].lower() == 'igw':
            igw = find_igw(connection, module, vpc_id)
            route_spec['gateway_id'] = igw
        # NAT gateways must be passed under nat_gateway_id, not gateway_id.
        if route_spec.get(
                'gateway_id') and route_spec['gateway_id'].startswith('nat-'):
            rename_key(route_spec, 'gateway_id', 'nat_gateway_id')

    return snake_dict_to_camel_dict(routes, capitalize_first=True)
def arg_spec_to_rds_params(options_dict):
    """Convert argument-spec options into RDS API (PascalCase) parameters.

    'tags' and 'processor_features' are passed through untouched under
    their API names, and the abbreviations Db/Iam/Az produced by the
    camel-case helper are upper-cased to DB/IAM/AZ as the RDS API expects.

    :param options_dict: module options (mutated: tags/processor_features
        are popped)
    :return: dict of RDS API parameters
    """
    tags = options_dict.pop('tags')
    has_processor_features = False
    if 'processor_features' in options_dict:
        has_processor_features = True
        processor_features = options_dict.pop('processor_features')
    camel_options = snake_dict_to_camel_dict(options_dict,
                                             capitalize_first=True)
    for key in list(camel_options.keys()):
        # Apply every abbreviation fix to a working copy of the key and
        # rename once at the end; popping per-match (the old behavior)
        # raised KeyError when a key contained two of the abbreviations.
        new_key = key
        for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
            new_key = new_key.replace(old, new)
        if new_key != key:
            camel_options[new_key] = camel_options.pop(key)
    camel_options['Tags'] = tags
    if has_processor_features:
        camel_options['ProcessorFeatures'] = processor_features
    return camel_options
def params_to_launch_data(module, template_params):
    """Translate module params into boto3 launch-template data (PascalCase).

    Mutates template_params: expands 'tags' into tag_specifications for
    instances and volumes, and resolves the IAM instance profile.
    """
    tags = template_params.get('tags')
    if tags:
        boto3_tags = ansible_dict_to_boto3_tag_list(tags)
        template_params['tag_specifications'] = [
            dict(resource_type=resource, tags=boto3_tags)
            for resource in ('instance', 'volume')
        ]
        del template_params['tags']
    if module.params.get('iam_instance_profile'):
        template_params['iam_instance_profile'] = determine_iam_role(
            module, module.params['iam_instance_profile'])
    scrubbed = {key: value for key, value in template_params.items()
                if value is not None}
    return snake_dict_to_camel_dict(scrubbed, capitalize_first=True)
def construct_connection(module):
    """
    Construct a Connection from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: Connection
    """
    connection = dict(
        (k, module.params.get(k))
        for k in ('id', 'name', 'description', 'speed', 'high_availability',
                  'billing_term', 'customer_asn', 'customer_networks',
                  'primary_customer_router_ip', 'secondary_customer_router_ip',
                  'routing_type', 'physical_address', 'ike_version',
                  'primary_key', 'secondary_key', 'traffic_selectors',
                  'enable_bgp_password'))
    is_ike_v1 = connection.get('ike_version') == 'V1'
    # Build the IKE config under 'ikeV1' or 'ikeV2'.  The 'ike_'/'esp_'
    # prefixes are stripped (k[4:]) to produce the nested keys; 'ike_prf'
    # only applies to IKE v2, so it is replaced by None (and filtered out)
    # for v1.
    connection.update([('ikeV1' if is_ike_v1 else 'ikeV2',
                        dict(ike=dict((k[4:], module.params.get(k)) for k in (
                            'ike_encryption',
                            'ike_integrity',
                            'ike_prf' if not is_ike_v1 else None,
                            'ike_dh_group',
                        ) if k is not None),
                             esp=dict(
                                 (k[4:], module.params.get(k))
                                 for k in ('esp_encryption', 'esp_integrity',
                                           'esp_dh_group'))))])
    connection.update(
        dict(
            type='SITE_IPSEC_VPN',
            authType='PSK',
            # TODO(mtraynham): Remove id parsing once we only need to pass href
            location=dict(href=module.params.get('location_href')),
            nat=dict(enabled=module.params.get('nat_enabled'),
                     mappings=[
                         dict(native_cidr=nat_mapping)
                         for nat_mapping in module.params.get('nat_mappings')
                     ])))
    connection = snake_dict_to_camel_dict(connection)
    # Correct naming: the camel-case helper cannot produce the API's
    # IP/ASN/BGP capitalizations, so those keys are re-added by hand.
    connection.update(
        dict(primaryCustomerRouterIP=connection.pop('primaryCustomerRouterIp'),
             secondaryCustomerRouterIP=connection.pop(
                 'secondaryCustomerRouterIp'),
             customerASN=connection.pop('customerAsn'),
             enableBGPPassword=connection.pop('enableBgpPassword')))
    return connection
def construct_port(module):
    """
    Construct a Port from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: Port
    """
    scalar_fields = ('id', 'name', 'description', 'provider', 'speed',
                     'media_type', 'availability_domain', 'billing_term')
    port = {field: module.params.get(field) for field in scalar_fields}
    port['account'] = dict(href=module.params.get('account_href'))
    port['facility'] = dict(href=module.params.get('facility_href'))
    return snake_dict_to_camel_dict(port)
def set_api_params(module, module_params):
    """
    Sets non-None module parameters to those expected by the boto3 API.

    :param module: the AnsibleModule whose params are read
    :param module_params: names of the parameters to forward
    :return: dict of the selected params with PascalCase keys
    """
    api_params = dict()
    for param in module_params:
        module_param = module.params.get(param, None)
        # Explicit None check so legitimate falsy values (False, 0, '')
        # are still forwarded, as the docstring promises; a plain
        # truthiness test silently dropped them.
        if module_param is not None:
            api_params[param] = module_param
    return snake_dict_to_camel_dict(api_params, capitalize_first=True)
def create_option_group_options(client, module):
    """Add the requested options to an RDS option group.

    Always reports changed=True; in check mode the API call is skipped.
    """
    changed = True
    options = module.params.get('options')
    params = {
        'OptionGroupName': module.params.get('option_group_name'),
        'OptionsToInclude': snake_dict_to_camel_dict(options,
                                                     capitalize_first=True),
    }
    if module.params.get('apply_immediately'):
        params['ApplyImmediately'] = module.params.get('apply_immediately')

    if module.check_mode:
        return changed
    try:
        client.modify_option_group(aws_retry=True, **params)
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Unable to update Option Group.")

    return changed
def construct_connection(module):
    """
    Construct a Connection from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: pureport.api.client.Connection
    """
    connection = dict((k, module.params.get(k)) for k in (
        'id', 'name', 'description', 'speed', 'billing_term',
        'customer_networks', 'primary_ocid', 'secondary_ocid'
    ))
    # Oracle FastConnect connections are always created highly available.
    connection.update(dict(
        type='ORACLE_FAST_CONNECT',
        high_availability=True,
        location=get_object_link(module, '/locations', 'location_id', 'location_href'),
        cloud_region=get_object_link(module, '/cloudRegions', 'cloud_region_id', 'cloud_region_href'),
        nat=dict(
            enabled=module.params.get('nat_enabled'),
            mappings=[dict(native_cidr=nat_mapping)
                      for nat_mapping in module.params.get('nat_mappings')]
        )
    ))
    connection = snake_dict_to_camel_dict(connection)
    # Correct naming: the API expects '...BgpIP' capitalization, which the
    # camel-case helper would render as '...BgpIp', so the peering block
    # (and tags) are added after conversion with the keys spelled out.
    connection.update(dict(
        peering=dict(
            type='PRIVATE',
            primaryRemoteBgpIP=module.params.get('primary_remote_bgp_ip'),
            primaryPureportBgpIP=module.params.get('primary_pureport_bgp_ip'),
            secondaryRemoteBgpIP=module.params.get('secondary_remote_bgp_ip'),
            secondaryPureportBgpIP=module.params.get('secondary_pureport_bgp_ip'),
        ),
        tags=module.params.get('tags')
    ))
    return connection
def construct_connection(module):
    """
    Construct a Connection from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: pureport.api.client.Connection
    """
    params = module.params
    scalar_fields = ('id', 'name', 'description', 'speed',
                     'high_availability', 'billing_term', 'customer_networks',
                     'primary_pairing_key', 'secondary_pairing_key')
    connection = {field: params.get(field) for field in scalar_fields}
    nat_mappings = [dict(native_cidr=cidr) for cidr in params.get('nat_mappings')]
    connection.update(
        type='GOOGLE_CLOUD_INTERCONNECT',
        location=get_object_link(module, '/locations', 'location_id',
                                 'location_href'),
        nat=dict(enabled=params.get('nat_enabled'), mappings=nat_mappings),
    )
    connection = snake_dict_to_camel_dict(connection)
    # Tags go in after conversion so their keys are left untouched.
    connection['tags'] = params.get('tags')
    return connection
def construct_connection(module):
    """
    Construct a Connection from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: Connection
    """
    params = module.params
    scalar_fields = ('id', 'name', 'description', 'speed',
                     'high_availability', 'billing_term', 'customer_networks',
                     'primary_pairing_key', 'secondary_pairing_key')
    connection = {field: params.get(field) for field in scalar_fields}
    nat_mappings = [dict(native_cidr=cidr) for cidr in params.get('nat_mappings')]
    connection.update(
        type='GOOGLE_CLOUD_INTERCONNECT',
        # TODO(mtraynham): Remove id parsing once we only need to pass href
        location=dict(href=params.get('location_href')),
        nat=dict(enabled=params.get('nat_enabled'), mappings=nat_mappings),
    )
    return snake_dict_to_camel_dict(connection)
def make_request(self, endpoint, method, data=None, ignore_error=None):
    """Issue a Consul HTTP API request and return the snake_cased body.

    Returns None when the response status equals ignore_error; fails the
    module on any other status >= 400 or on a transport-level failure.
    """
    payload = None
    if data is not None:
        payload = json.dumps(snake_dict_to_camel_dict(data))
    endpoint_url = self.url + "/v1/" + endpoint
    headers = {
        "Content-Type": "application/json",
        "X-Consul-Token": self.token,
    }

    response, info = fetch_url(self.module, endpoint_url, data=payload,
                               headers=headers, method=method)
    status_code = info["status"]

    if ignore_error == status_code:
        return None
    elif status_code >= 400:
        self.module.fail_json(
            msg="API request failed",
            endpoint=endpoint_url,
            method=method,
            status=status_code,
            response=info["body"],
        )
    elif response is None:
        # fetch_url failed before producing a response body.
        self.module.fail_json(**info)

    body = json.loads(response.read())
    if isinstance(body, bool):
        return body
    if isinstance(body, list):
        return [camel_dict_to_snake_dict(entry) for entry in body]
    return camel_dict_to_snake_dict(body)
def main():
    """Entry point for the ECS service module: create, update or delete an
    ECS service according to the 'state' parameter."""
    argument_spec = dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False, default=[], type='list', elements='dict'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, default='', type='str', no_log=False),
        role=dict(required=False, default='', type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10),
        force_new_deployment=dict(required=False, default=False, type='bool'),
        force_deletion=dict(required=False, default=False, type='bool'),
        deployment_configuration=dict(required=False, default={}, type='dict'),
        placement_constraints=dict(
            required=False,
            default=[],
            type='list',
            elements='dict',
            options=dict(
                type=dict(type='str'),
                expression=dict(type='str')
            )
        ),
        placement_strategy=dict(
            required=False,
            default=[],
            type='list',
            elements='dict',
            options=dict(
                type=dict(type='str'),
                field=dict(type='str'),
            )
        ),
        health_check_grace_period_seconds=dict(required=False, type='int'),
        network_configuration=dict(required=False, type='dict', options=dict(
            subnets=dict(type='list', elements='str'),
            security_groups=dict(type='list', elements='str'),
            assign_public_ip=dict(type='bool')
        )),
        launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
        platform_version=dict(required=False, type='str'),
        service_registries=dict(required=False, type='list', default=[], elements='dict'),
        scheduling_strategy=dict(required=False, choices=['DAEMON', 'REPLICA']),
        propagate_tags=dict(required=False, choices=['TASK_DEFINITION', 'SERVICE']),
        tags=dict(required=False, type='dict'),
    )

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('state', 'present', ['task_definition']),
                                           ('launch_type', 'FARGATE', ['network_configuration'])],
                              required_together=[['load_balancers', 'role']])

    # REPLICA services need an explicit desired_count.
    if module.params['state'] == 'present' and module.params['scheduling_strategy'] == 'REPLICA':
        if module.params['desired_count'] is None:
            module.fail_json(msg='state is present, scheduling_strategy is REPLICA; missing desired_count')

    service_mgr = EcsServiceManager(module)
    if module.params['network_configuration']:
        network_configuration = service_mgr.format_network_configuration(module.params['network_configuration'])
    else:
        network_configuration = None

    deployment_configuration = map_complex_type(module.params['deployment_configuration'],
                                                DEPLOYMENT_CONFIGURATION_TYPE_MAP)

    deploymentConfiguration = snake_dict_to_camel_dict(deployment_configuration)
    serviceRegistries = list(map(snake_dict_to_camel_dict, module.params['service_registries']))

    try:
        existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
    except Exception as e:
        module.fail_json_aws(e,
                             msg="Exception describing service '{0}' in cluster '{1}'"
                             .format(module.params['name'], module.params['cluster']))

    results = dict(changed=False)

    if module.params['state'] == 'present':

        matching = False
        update = False

        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if module.params['force_new_deployment']:
                update = True
            elif service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = existing
            else:
                update = True

        if not matching:
            if not module.check_mode:

                role = module.params['role']
                clientToken = module.params['client_token']

                # Normalize containerPort to int once; the dicts are shared,
                # so a second cast pass (as the old code did) was redundant.
                loadBalancers = []
                for loadBalancer in module.params['load_balancers']:
                    if 'containerPort' in loadBalancer:
                        loadBalancer['containerPort'] = int(loadBalancer['containerPort'])
                    loadBalancers.append(loadBalancer)

                if update:
                    # Several service properties cannot be changed on an
                    # existing service; fail with a helpful error instead of
                    # sending an invalid update.
                    if module.params['scheduling_strategy']:
                        if (existing['schedulingStrategy']) != module.params['scheduling_strategy']:
                            module.fail_json(msg="It is not possible to update the scheduling strategy of an existing service")

                    if module.params['service_registries']:
                        if (existing['serviceRegistries'] or []) != serviceRegistries:
                            module.fail_json(msg="It is not possible to update the service registries of an existing service")

                    if (existing['loadBalancers'] or []) != loadBalancers:
                        module.fail_json(msg="It is not possible to update the load balancers of an existing service")

                    if module.params['propagate_tags'] and module.params['propagate_tags'] != existing['propagateTags']:
                        module.fail_json(msg="It is not currently supported to enable propagation tags of an existing service")

                    if module.params['tags'] and boto3_tag_list_to_ansible_dict(existing['tags']) != module.params['tags']:
                        module.fail_json(msg="It is not currently supported to change tags of an existing service")

                    # update required
                    response = service_mgr.update_service(module.params['name'],
                                                          module.params['cluster'],
                                                          module.params['task_definition'],
                                                          module.params['desired_count'],
                                                          deploymentConfiguration,
                                                          network_configuration,
                                                          module.params['health_check_grace_period_seconds'],
                                                          module.params['force_new_deployment'],
                                                          )
                else:
                    try:
                        response = service_mgr.create_service(module.params['name'],
                                                              module.params['cluster'],
                                                              module.params['task_definition'],
                                                              loadBalancers,
                                                              module.params['desired_count'],
                                                              clientToken,
                                                              role,
                                                              deploymentConfiguration,
                                                              module.params['placement_constraints'],
                                                              module.params['placement_strategy'],
                                                              module.params['health_check_grace_period_seconds'],
                                                              network_configuration,
                                                              serviceRegistries,
                                                              module.params['launch_type'],
                                                              module.params['scheduling_strategy'],
                                                              module.params['platform_version'],
                                                              module.params['tags'],
                                                              module.params['propagate_tags'],
                                                              )
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't create service")

                if response.get('tags', None):
                    response['tags'] = boto3_tag_list_to_ansible_dict(response['tags'])
                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(
                            module.params['name'],
                            module.params['cluster'],
                            module.params['force_deletion'],
                        )
                    except botocore.exceptions.ClientError as e:
                        module.fail_json_aws(e, msg="Couldn't delete service")
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            module.fail_json(msg="Service '" + module.params['name'] + " not found.")
            return
        # it exists, so we should delete it and mark changed.
        # return info about the cluster deleted
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
        # '==', not 'is': identity comparison on ints only worked by
        # accident via CPython's small-int cache.
        if i == repeat - 1:
            module.fail_json(
                msg="Service still not deleted after {0} tries of {1} seconds each."
                .format(repeat, delay)
            )
            return

    module.exit_json(**results)
def get_connection_profile(disc_config):
    """Build an OME discovery connection profile from per-protocol config.

    For each protocol present in disc_config, the user-supplied credentials
    are camel-cased and merged with protocol-specific defaults; the result
    is assembled into a DISCOVERY profile dict.
    """
    # Static defaults merged into the user-supplied credentials per protocol.
    # Commented-out keys document optional fields that are deliberately
    # not defaulted.
    proto_add_dict = {
        'wsman': {
            'certificateDetail': None,
            'isHttp': False,
            'keepAlive': True,
            # 'version': None
        },
        'redfish': {
            'certificateDetail': None,
            'isHttp': False,
            'keepAlive': True
        },
        'snmp': {
            # 'authenticationPassphrase': None,
            # 'authenticationProtocol': None,
            'enableV1V2': True,
            'enableV3': False,
            # 'localizationEngineID': None,
            # 'privacyPassphrase': None,
            # 'privacyProtocol': None,
            # 'securityName': None
        },
        'vmware': {
            'certificateDetail': None,
            'isHttp': False,
            'keepAlive': False
        },
        'ssh': {
            'useKey': False,
            'key': None,
            'knownHostKey': None,
            'passphrase': None
        },
        'ipmi': {
            'privilege': 2
        },
        'storage': {
            'certificateDetail': None,
            'isHttp': False,
            'keepAlive': True,
            # 'version': None
        }
    }
    proto_list = [
        'wsman', 'snmp', 'vmware', 'ssh', 'ipmi', 'redfish', 'storage'
    ]
    conn_profile = {
        "profileId": 0,
        "profileName": "",
        "profileDescription": "",
        "type": "DISCOVERY"
    }
    creds_dict = {}
    for p in proto_list:
        if disc_config.get(p):
            xproto = {
                "type": p.upper(),
                "authType": "Basic",
                "modified": False
            }
            xproto['credentials'] = snake_dict_to_camel_dict(disc_config[p])
            (xproto['credentials']).update(proto_add_dict.get(p, {}))
            creds_dict[p] = xproto
            # Special handling, duplicating wsman to redfish as in GUI.
            # NOTE(review): .copy() is shallow, so the redfish entry shares
            # the same 'credentials' dict object as the wsman entry.
            if p == 'wsman':
                rf = xproto.copy()
                rf['type'] = 'REDFISH'
                creds_dict['redfish'] = rf
    conn_profile['credentials'] = list(creds_dict.values())
    return conn_profile
def construct_connection(module):
    """
    Construct a Connection from the Ansible module arguments
    :param AnsibleModule module: the Ansible module
    :rtype: pureport.api.client.Connection
    """
    connection = dict(
        (k, module.params.get(k))
        for k in ('id', 'name', 'description', 'speed', 'high_availability',
                  'billing_term', 'customer_asn', 'customer_networks',
                  'primary_customer_router_ip', 'secondary_customer_router_ip',
                  'routing_type', 'physical_address', 'ike_version',
                  'primary_key', 'secondary_key', 'traffic_selectors',
                  'enable_bgp_password'))
    is_ike_v1 = connection.get('ike_version') == 'V1'
    # Build the IKE config under 'ikeV1' or 'ikeV2'.  The 'ike_'/'esp_'
    # prefixes are stripped (k[4:]) to form the nested keys; 'ike_prf'
    # only applies to IKE v2 and is skipped (None) for v1.
    connection.update([('ikeV1' if is_ike_v1 else 'ikeV2',
                        dict(ike=dict((k[4:], module.params.get(k)) for k in (
                            'ike_encryption',
                            'ike_integrity',
                            'ike_prf' if not is_ike_v1 else None,
                            'ike_dh_group',
                        ) if k is not None),
                             esp=dict(
                                 (k[4:], module.params.get(k))
                                 for k in ('esp_encryption', 'esp_integrity',
                                           'esp_dh_group'))))])
    # Version-specific validation and defaulting of the IKE/ESP algorithms.
    if 'ikeV1' in connection:
        if connection['ikeV1']['ike'][
                'encryption'] not in __IKE_V1_IKE_ENCRYPTION_ALGORITHMS:
            module.fail_json(msg='For IKE V1, \'ike_encryption\' must '
                                 'be one of %s' % __IKE_V1_IKE_ENCRYPTION_ALGORITHMS)
        # Default the IKE integrity algorithm when not supplied.
        if connection['ikeV1']['ike']['integrity'] is None:
            connection['ikeV1']['ike']['integrity'] = 'SHA256_HMAC'
        elif connection['ikeV1']['ike'][
                'integrity'] not in __IKE_V1_IKE_INTEGRITY_ALGORITHMS:
            module.fail_json(msg='For IKE V1, \'ike_integrity\' must '
                                 'be one of %s' % __IKE_V1_IKE_INTEGRITY_ALGORITHMS)
        # Combined-mode (AEAD-style) ESP ciphers carry their own integrity,
        # so a separate integrity value must not be sent.
        if connection['ikeV1']['esp'][
                'encryption'] in __NO_INTEGRITY_ALGORITHMS:
            del connection['ikeV1']['esp']['integrity']
    elif 'ikeV2' in connection:
        if connection['ikeV2']['ike'][
                'encryption'] in __NO_INTEGRITY_ALGORITHMS:
            # Combined-mode cipher: drop integrity, but a PRF is required.
            if connection['ikeV2']['ike']['integrity'] is not None:
                del connection['ikeV2']['ike']['integrity']
            if connection['ikeV2']['ike']['prf'] is None:
                connection['ikeV2']['ike']['prf'] = 'SHA_256'
        else:
            # Non-combined cipher: default integrity and drop any PRF.
            if connection['ikeV2']['ike']['integrity'] is None:
                connection['ikeV2']['ike']['integrity'] = 'SHA256_HMAC'
            if connection['ikeV2']['ike']['prf'] is not None:
                del connection['ikeV2']['ike']['prf']
    connection.update(
        dict(type='SITE_IPSEC_VPN',
             authType='PSK',
             location=get_object_link(module, '/locations', 'location_id',
                                      'location_href'),
             nat=dict(enabled=module.params.get('nat_enabled'),
                      mappings=[
                          dict(native_cidr=nat_mapping)
                          for nat_mapping in module.params.get('nat_mappings')
                      ])))
    connection = snake_dict_to_camel_dict(connection)
    # Correct naming: re-add keys whose IP/ASN/BGP capitalization the
    # camel-case helper cannot produce, plus the untouched tags.
    connection.update(
        dict(primaryCustomerRouterIP=connection.pop('primaryCustomerRouterIp'),
             secondaryCustomerRouterIP=connection.pop(
                 'secondaryCustomerRouterIp'),
             customerASN=connection.pop('customerAsn'),
             enableBGPPassword=connection.pop('enableBgpPassword'),
             tags=module.params.get('tags')))
    return connection
def create_response_header_policy(self, name, comment, cors_config,
                                  security_headers_config,
                                  custom_headers_config):
    """
    Create or update a CloudFront response headers policy.

    Builds a ResponseHeadersPolicyConfig from the snake_case inputs, then
    creates a new policy, or updates the existing policy with the same name.
    Exits the module with the resulting policy (snake_cased); in check mode
    it exits early with the would-be config.

    :param str name: policy name, used to look up an existing policy
    :param str comment: free-form comment stored on the policy
    :param dict cors_config: snake_case CORS configuration
    :param dict security_headers_config: snake_case security headers config
    :param dict custom_headers_config: snake_case custom headers config
    """
    cors_config = snake_dict_to_camel_dict(cors_config,
                                           capitalize_first=True)
    security_headers_config = snake_dict_to_camel_dict(
        security_headers_config, capitalize_first=True)
    # Little helper for turning xss_protection into XSSProtection and not
    # into XssProtection
    if 'XssProtection' in security_headers_config:
        security_headers_config[
            'XSSProtection'] = security_headers_config.pop('XssProtection')
    custom_headers_config = snake_dict_to_camel_dict(custom_headers_config,
                                                     capitalize_first=True)
    config = {
        'Name': name,
        'Comment': comment,
        'CorsConfig': self.insert_quantities(cors_config),
        'SecurityHeadersConfig': security_headers_config,
        'CustomHeadersConfig': self.insert_quantities(custom_headers_config)
    }
    # Drop empty/falsy sections so the API is not sent empty sub-configs.
    config = {k: v for k, v in config.items() if v}
    matching_policy = self.find_response_headers_policy(name)
    changed = False

    if self.check_mode:
        self.module.exit_json(
            changed=True,
            response_headers_policy=camel_dict_to_snake_dict(config))

    if matching_policy is None:
        try:
            result = self.client.create_response_headers_policy(
                ResponseHeadersPolicyConfig=config)
            changed = True
        except (ParamValidationError, ClientError, BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Error creating policy")
    else:
        policy_id = matching_policy['ResponseHeadersPolicy']['Id']
        etag = matching_policy['ETag']
        try:
            result = self.client.update_response_headers_policy(
                Id=policy_id,
                IfMatch=etag,
                ResponseHeadersPolicyConfig=config)

            changed_time = result['ResponseHeadersPolicy'][
                'LastModifiedTime']
            seconds = 3  # threshold for returned timestamp age
            seconds_ago = (datetime.datetime.now(changed_time.tzinfo) -
                           datetime.timedelta(0, seconds))

            # consider change made by this execution of the module if
            # returned timestamp was very recent
            if changed_time > seconds_ago:
                changed = True
        except (ParamValidationError, ClientError, BotoCoreError) as e:
            # BUGFIX: message was the garbled "Updating creating policy";
            # now parallels the "Error creating policy" message above.
            self.module.fail_json_aws(e, msg="Error updating policy")

    self.module.exit_json(changed=changed,
                          **camel_dict_to_snake_dict(result))
def create_launch_config(connection, module):
    """
    Create an autoscaling launch configuration with the requested name if one
    does not already exist, then exit the module with the (existing or newly
    created) configuration, snake_cased.

    :param connection: boto3 autoscaling client
    :param module: the Ansible module (AnsibleAWSModule-style, provides
        client()/fail_json_aws()/exit_json())
    """
    name = module.params.get('name')
    vpc_id = module.params.get('vpc_id')
    try:
        ec2_connection = module.client('ec2')
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to connect to AWS')
    try:
        security_groups = get_ec2_security_group_ids_from_names(
            module.params.get('security_groups'),
            ec2_connection,
            vpc_id=vpc_id,
            boto3=True)
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg='Failed to get Security Group IDs')
    except ValueError as e:
        module.fail_json(msg="Failed to get Security Group IDs",
                         exception=traceback.format_exc())

    user_data = module.params.get('user_data')
    user_data_path = module.params.get('user_data_path')
    volumes = module.params['volumes']
    instance_monitoring = module.params.get('instance_monitoring')
    assign_public_ip = module.params.get('assign_public_ip')
    instance_profile_name = module.params.get('instance_profile_name')
    ebs_optimized = module.params.get('ebs_optimized')
    classic_link_vpc_id = module.params.get('classic_link_vpc_id')
    classic_link_vpc_security_groups = module.params.get(
        'classic_link_vpc_security_groups')

    block_device_mapping = []

    # Params copied verbatim (stringified) into the boto3 request.
    # BUGFIX: 'instance_type' was listed twice in the original list.
    convert_list = [
        'image_id', 'instance_type', 'instance_id', 'placement_tenancy',
        'key_name', 'kernel_id', 'ramdisk_id', 'spot_price'
    ]

    # capitalize() first so e.g. 'image_id' -> 'Image_id' -> 'ImageId'
    # after snake_dict_to_camel_dict (which lowercases the first word).
    launch_config = (snake_dict_to_camel_dict(
        dict((k.capitalize(), str(v)) for k, v in module.params.items()
             if v is not None and k in convert_list)))

    if user_data_path:
        try:
            with open(user_data_path, 'r') as user_data_file:
                user_data = user_data_file.read()
        except IOError as e:
            module.fail_json(msg="Failed to open file for reading",
                             exception=traceback.format_exc())

    if volumes:
        for volume in volumes:
            if 'device_name' not in volume:
                module.fail_json(msg='Device name must be set for volume')
            # Minimum volume size is 1GiB. We'll use volume size explicitly
            # set to 0 to be a signal not to create this volume
            if 'volume_size' not in volume or int(volume['volume_size']) > 0:
                block_device_mapping.append(
                    create_block_device_meta(module, volume))

    try:
        launch_configs = connection.describe_launch_configurations(
            LaunchConfigurationNames=[name]).get('LaunchConfigurations')
    except (botocore.exceptions.ClientError,
            botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(
            e, msg="Failed to describe launch configuration by name")

    changed = False
    result = {}

    launch_config['LaunchConfigurationName'] = name

    if security_groups is not None:
        launch_config['SecurityGroups'] = security_groups
    if classic_link_vpc_id is not None:
        launch_config['ClassicLinkVPCId'] = classic_link_vpc_id
    if instance_monitoring is not None:
        launch_config['InstanceMonitoring'] = {'Enabled': instance_monitoring}
    if classic_link_vpc_security_groups is not None:
        launch_config[
            'ClassicLinkVPCSecurityGroups'] = classic_link_vpc_security_groups
    if block_device_mapping:
        launch_config['BlockDeviceMappings'] = block_device_mapping
    if instance_profile_name is not None:
        launch_config['IamInstanceProfile'] = instance_profile_name
    if assign_public_ip is not None:
        launch_config['AssociatePublicIpAddress'] = assign_public_ip
    if user_data is not None:
        launch_config['UserData'] = user_data
    if ebs_optimized is not None:
        launch_config['EbsOptimized'] = ebs_optimized

    # Only create when no launch configuration with this name exists yet;
    # an existing one is reported back unchanged.
    if len(launch_configs) == 0:
        try:
            connection.create_launch_configuration(**launch_config)
            launch_configs = connection.describe_launch_configurations(
                LaunchConfigurationNames=[name]).get('LaunchConfigurations')
            changed = True
            if launch_configs:
                launch_config = launch_configs[0]
        except (botocore.exceptions.ClientError,
                botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e,
                                 msg="Failed to create launch configuration")

    # Keys handled specially below are excluded from the raw copy.
    result = (dict((k, v) for k, v in launch_config.items() if k not in [
        'Connection', 'CreatedTime', 'InstanceMonitoring',
        'BlockDeviceMappings'
    ]))

    result['CreatedTime'] = to_text(launch_config.get('CreatedTime'))

    try:
        result['InstanceMonitoring'] = module.boolean(
            launch_config.get('InstanceMonitoring').get('Enabled'))
    except AttributeError:
        # InstanceMonitoring absent or not a dict
        result['InstanceMonitoring'] = False

    result['BlockDeviceMappings'] = []
    for block_device_mapping in launch_config.get('BlockDeviceMappings', []):
        result['BlockDeviceMappings'].append(
            dict(device_name=block_device_mapping.get('DeviceName'),
                 virtual_name=block_device_mapping.get('VirtualName')))
        if block_device_mapping.get('Ebs') is not None:
            result['BlockDeviceMappings'][-1]['ebs'] = dict(
                snapshot_id=block_device_mapping.get('Ebs').get('SnapshotId'),
                volume_size=block_device_mapping.get('Ebs').get('VolumeSize'))

    if user_data_path:
        # Otherwise, we dump binary to the user's terminal
        result['UserData'] = "hidden"

    return_object = {
        'Name': result.get('LaunchConfigurationName'),
        'CreatedTime': result.get('CreatedTime'),
        'ImageId': result.get('ImageId'),
        'Arn': result.get('LaunchConfigurationARN'),
        'SecurityGroups': result.get('SecurityGroups'),
        'InstanceType': result.get('InstanceType'),
        'Result': result
    }

    module.exit_json(changed=changed,
                     **camel_dict_to_snake_dict(return_object))
def start_or_cancel_instance_refresh(conn, module):
    """
    Start or cancel an ASG instance refresh, depending on module 'state'.

    Args:
        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
        module: AnsibleAWSModule object

    Returns:
        {
            "instance_refreshes": [
                    {
                        'auto_scaling_group_name': 'ansible-test-hermes-63642726-asg',
                        'instance_refresh_id': '6507a3e5-4950-4503-8978-e9f2636efc09',
                        'instances_to_update': 1,
                        'percentage_complete': 0,
                        "preferences": {
                            "instance_warmup": 60,
                            "min_healthy_percentage": 90,
                            "skip_matching": false
                        },
                        'start_time': '2021-02-04T03:39:40+00:00',
                        'status': 'Cancelling',
                        'status_reason': 'Replacing instances before cancelling.',
                    }
              ]
        }
    """
    asg_state = module.params.get('state')
    asg_name = module.params.get('name')
    preferences = module.params.get('preferences')

    args = {}
    args['AutoScalingGroupName'] = asg_name
    if asg_state == 'started':
        args['Strategy'] = module.params.get('strategy')
    if preferences:
        if asg_state == 'cancelled':
            module.fail_json(
                msg='can not pass preferences dict when canceling a refresh')
        _prefs = scrub_none_parameters(preferences)
        args['Preferences'] = snake_dict_to_camel_dict(_prefs,
                                                       capitalize_first=True)
    cmd_invocations = {
        'cancelled': conn.cancel_instance_refresh,
        'started': conn.start_instance_refresh,
    }
    try:
        if module.check_mode:
            # BUGFIX: the original used the truthy string '[]' as the
            # .get() default, so a missing 'InstanceRefreshes' key looked
            # like an ongoing refresh.
            ongoing_refreshes = conn.describe_instance_refreshes(
                AutoScalingGroupName=asg_name).get('InstanceRefreshes', [])
            if asg_state == 'started':
                if ongoing_refreshes:
                    module.exit_json(
                        changed=False,
                        msg=
                        'In check_mode - Instance Refresh is already in progress, can not start new instance refresh.'
                    )
                else:
                    module.exit_json(
                        changed=True,
                        msg=
                        'Would have started instance refresh if not in check mode.'
                    )
            elif asg_state == 'cancelled':
                # BUGFIX: the original indexed [0] before checking for an
                # empty list (IndexError on no refreshes, making the
                # nothing-to-cancel branch unreachable) and misspelled the
                # 'changed' keyword as 'chaned'.
                if not ongoing_refreshes:
                    module.exit_json(
                        changed=False,
                        msg=
                        'In check_mode - No active refresh found, nothing to cancel.'
                    )
                ongoing_refresh = ongoing_refreshes[0]
                if ongoing_refresh.get('Status', '') in ['Cancelling', 'Cancelled']:
                    module.exit_json(
                        changed=False,
                        msg=
                        'In check_mode - Instance Refresh already cancelled or is pending cancellation.'
                    )
                else:
                    module.exit_json(
                        changed=True,
                        msg=
                        'Would have cancelled instance refresh if not in check mode.'
                    )
        result = cmd_invocations[asg_state](aws_retry=True, **args)
        instance_refreshes = conn.describe_instance_refreshes(
            AutoScalingGroupName=asg_name,
            InstanceRefreshIds=[result['InstanceRefreshId']])
        result = dict(instance_refreshes=camel_dict_to_snake_dict(
            instance_refreshes['InstanceRefreshes'][0]))
        return module.exit_json(**result)
    except (BotoCoreError, ClientError) as e:
        # BUGFIX: replace('ed', '') produced 'cancell' for 'cancelled';
        # map states to their verb explicitly.
        verb = {'started': 'start', 'cancelled': 'cancel'}.get(
            asg_state, asg_state)
        module.fail_json_aws(e,
                             msg='Failed to {0} InstanceRefresh'.format(verb))