def main():
    """Entry point: manage CORS rules on an S3 bucket (present/absent)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            rules=dict(type='list'),
            state=dict(type='str', choices=['present', 'absent'], required=True),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='s3',
                        region=region, endpoint=ec2_url, **aws_connect_kwargs)

    # 'state' is restricted to these two choices, so a dispatch table is safe.
    handlers = {
        'present': create_or_update_bucket_cors,
        'absent': destroy_bucket_cors,
    }
    handlers[module.params.get('state')](client, module)
def main():
    """Entry point: report the AWS Lightsail regions visible to the caller."""
    argument_spec = ec2_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')
    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        try:
            client = boto3_conn(module, conn_type='client', resource='lightsail',
                                region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
            module.fail_json(msg='Failed while connecting to the lightsail service: %s' % e,
                             exception=traceback.format_exc())

        response = client.get_regions(
            includeAvailabilityZones=False
        )
        module.exit_json(changed=False, results=response)
    except Exception as e:
        # FIX: the original caught (ClientError, Exception) — ClientError is an
        # Exception subclass, so the tuple was redundant.  The dead
        # `client = None` pre-assignment is also removed.
        module.fail_json(msg=str(e), exception=traceback.format_exc())
def main():
    """Entry point: create/update or delete an IAM role."""
    role_spec = dict(
        name=dict(type='str', required=True),
        path=dict(type='str', default="/"),
        assume_role_policy_document=dict(type='json'),
        managed_policy=dict(type='list', aliases=['managed_policies']),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
        description=dict(type='str'),
        create_instance_profile=dict(type='bool', default=True),
        purge_policies=dict(type='bool', default=True),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(role_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[('state', 'present', ['assume_role_policy_document'])],
        supports_check_mode=True,
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = boto3_conn(module, conn_type='client', resource='iam',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    # Anything other than 'present' can only be 'absent' given the choices.
    if module.params.get("state") == 'present':
        create_or_update_role(connection, module)
    else:
        destroy_role(connection, module)
def main():
    """Entry point: gather facts about VPC endpoint services or endpoints."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            query=dict(choices=['services', 'endpoints'], required=True),
            filters=dict(default={}, type='dict'),
            vpc_endpoint_ids=dict(type='list'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required.')

    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="region must be specified")
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=str(e))

    # Route the requested query to its handler and emit whatever it returns.
    invocations = {
        'services': get_supported_services,
        'endpoints': get_endpoints,
    }
    module.exit_json(**invocations[module.params.get('query')](connection, module))
def main():
    """Entry point: assume an IAM role via STS using the given session options."""
    argument_spec = ec2_argument_spec()
    # FIX: role_arn/role_session_name were declared required=True *and*
    # default=None — 'required' and 'default' are contradictory in an Ansible
    # argument spec; the remaining required=False/default=None pairs were
    # no-ops.  Behavior and accepted parameters are unchanged.
    argument_spec.update(
        dict(
            role_arn=dict(required=True),
            role_session_name=dict(required=True),
            duration_seconds=dict(type='int'),
            external_id=dict(),
            policy=dict(),
            mfa_serial_number=dict(),
            mfa_token=dict(),
        )
    )
    module = AnsibleAWSModule(argument_spec=argument_spec)

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")
    connection = boto3_conn(module, conn_type='client', resource='sts',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)

    assume_role_policy(connection, module)
def main():
    """Entry point: ensure a WAF web ACL is present or absent."""
    acl_spec = dict(
        name=dict(required=True),
        default_action=dict(choices=['block', 'allow', 'count']),
        metric_name=dict(),
        state=dict(default='present', choices=['present', 'absent']),
        rules=dict(type='list'),
        purge_rules=dict(type='bool', default=False),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(acl_spec)
    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['state', 'present', ['default_action', 'rules']]])

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='waf',
                        region=region, endpoint=ec2_url, **aws_connect_kwargs)

    # Run the handler matching the requested state; both return (changed, acl).
    if module.params.get('state') == 'present':
        changed, results = ensure_web_acl_present(client, module)
    else:
        changed, results = ensure_web_acl_absent(client, module)

    module.exit_json(changed=changed, web_acl=camel_dict_to_snake_dict(results))
def main():
    """Entry point: create or remove a VPC endpoint."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id=dict(),
            service=dict(),
            policy=dict(type='json'),
            policy_file=dict(type='path'),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=320, required=False),
            route_table_ids=dict(type='list'),
            vpc_endpoint_id=dict(),
            client_token=dict(),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['policy', 'policy_file']],
        required_if=[
            ['state', 'present', ['vpc_id', 'service']],
            ['state', 'absent', ['vpc_endpoint_id']],
        ]
    )

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required for this module')

    state = module.params.get('state')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    except NameError as e:
        # Getting around the get_aws_connection_info boto reliance for region
        # FIX: `e.message` does not exist on Python 3 — use str(e).
        if "global name 'boto' is not defined" in str(e):
            module.params['region'] = botocore.session.get_session().get_config_variable('region')
            if not module.params['region']:
                module.fail_json(msg="Error - no region provided")
        else:
            # FIX: NameError carries no `.response`, so the original
            # **camel_dict_to_snake_dict(e.response) would itself raise.
            module.fail_json(msg="Can't retrieve connection information - " + str(e),
                             exception=traceback.format_exc())

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        ec2 = boto3_conn(module, conn_type='client', resource='ec2',
                         region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        # FIX: NoCredentialsError has no `.response` attribute either.
        module.fail_json(msg="Failed to connect to AWS due to wrong or missing credentials: %s" % str(e),
                         exception=traceback.format_exc())

    # Ensure resource is present
    if state == 'present':
        (changed, results) = setup_creation(ec2, module)
    else:
        (changed, results) = setup_removal(ec2, module)
    module.exit_json(changed=changed, result=results)
def main():
    """Entry point: return facts about WAF web ACLs, optionally filtered by name."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(name=dict(required=False)))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required.')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='waf',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - " + str(e))

    web_acls = list_web_acls(client, module)
    name = module.params['name']
    if name:
        # Narrow to ACLs matching the requested name; fail if none match.
        web_acls = [acl for acl in web_acls if acl['Name'] == name]
        if not web_acls:
            module.fail_json(msg="WAF named %s not found" % name)

    module.exit_json(wafs=[get_web_acl(client, module, acl['WebACLId'])
                           for acl in web_acls])
def main():
    """Entry point: manage the lifecycle of a Lightsail instance."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present',
                   choices=['present', 'absent', 'stopped', 'running', 'restarted']),
        zone=dict(type='str'),
        blueprint_id=dict(type='str'),
        bundle_id=dict(type='str'),
        key_pair_name=dict(type='str'),
        user_data=dict(type='str'),
        wait=dict(type='bool', default=True),
        # NOTE(review): no type='int' here, so the value may arrive as a string;
        # confirm core() coerces it before arithmetic.
        wait_timeout=dict(default=300),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')
    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    try:
        core(module)
    except Exception as e:
        # FIX: catching (ClientError, Exception) was redundant — ClientError is
        # an Exception subclass, so a single Exception handler is equivalent.
        module.fail_json(msg=str(e), exception=traceback.format_exc())
def main():
    """Entry point: gather facts about EC2 EBS snapshots."""
    selector_spec = dict(
        snapshot_ids=dict(default=[], type='list'),
        owner_ids=dict(default=[], type='list'),
        restorable_by_user_ids=dict(default=[], type='list'),
        filters=dict(default={}, type='dict'),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(selector_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
        ],
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")
    connection = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    list_ec2_snapshots(connection, module)
def main():
    """Entry point: gather facts about ECS services in a cluster."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        details=dict(type='bool', default=False),
        events=dict(type='bool', default=True),
        cluster=dict(),
        service=dict(type='list'),
    ))
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    task_mgr = EcsServiceManager(module)
    cluster = module.params['cluster']

    if module.params.get('details'):
        # Use the explicit service list when given, else everything in the cluster.
        services = module.params['service'] or task_mgr.list_services(cluster)['services']
        ecs_facts = dict(services=[], services_not_running=[])
        # describe_services accepts at most 10 services per call, hence the chunking.
        for chunk in chunks(services, 10):
            running, not_running = task_mgr.describe_services(cluster, chunk)
            ecs_facts['services'].extend(running)
            ecs_facts['services_not_running'].extend(not_running)
    else:
        ecs_facts = task_mgr.list_services(cluster)

    module.exit_json(changed=False, ansible_facts=ecs_facts, **ecs_facts)
def main():
    """Entry point: gather facts about EC2 NAT gateways."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default={}, type='dict'),
            nat_gateway_ids=dict(default=[], type='list'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore/boto3 is required.')

    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="region must be specified")
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=str(e))

    module.exit_json(result=get_nat_gateways(connection, module))
def main():
    """Entry point: manage VPC peering connections (create/accept/reject/delete)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id=dict(),
            peer_vpc_id=dict(),
            peer_region=dict(),
            peering_id=dict(),
            peer_owner_id=dict(),
            tags=dict(required=False, type='dict'),
            profile=dict(),
            state=dict(default='present', choices=['present', 'absent', 'accept', 'reject']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')

    state = module.params.get('state')
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - " + str(e))

    if state == 'absent':
        # NOTE: as in the original flow, the 'absent' path relies on
        # remove_peer_connection() to emit the module result itself.
        remove_peer_connection(client, module)
    else:
        if state == 'present':
            changed, results = create_peer_connection(client, module)
        else:
            # state is 'accept' or 'reject'
            changed, results = accept_reject(state, client, module)
        module.exit_json(changed=changed, peering_id=results)
def main():
    """Entry point: copy an AMI, optionally across regions and with encryption."""
    image_spec = dict(
        source_region=dict(required=True),
        source_image_id=dict(required=True),
        name=dict(required=True),
        description=dict(default=''),
        encrypted=dict(type='bool', required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False, required=False),
        tags=dict(type='dict'),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(image_spec)
    module = AnsibleModule(argument_spec=argument_spec)

    # NOTE(review): boto (v2) is checked first even though only boto3 is used
    # below — presumably a leftover from an older implementation; confirm.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    # TODO: Check botocore version
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    try:
        ec2 = boto3_conn(module, conn_type='client', resource='ec2',
                         region=region, endpoint=ec2_url, **aws_connect_params)
    except NoRegionError:
        module.fail_json(msg='AWS Region is required')

    copy_image(ec2, module)
def main():
    """
    Get list of S3 buckets
    :return:
    """
    # Ensure we have an empty dict
    result = {}

    # Including ec2 argument spec
    module = AnsibleModule(argument_spec=ec2_argument_spec(), supports_check_mode=True)

    # Verify Boto3 is used
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    # Set up connection
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=HAS_BOTO3)

    if not region:
        module.fail_json(msg="AWS region must be specified (like: us-east-1)")
    try:
        connection = boto3_conn(module, conn_type='client', resource='s3',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
        # FIX: the original used e.message (removed in Python 3) and expanded
        # e.response — neither attribute exists on NoCredentialsError or
        # ProfileNotFound, so the handler itself would have raised.
        module.fail_json(msg=str(e), exception=traceback.format_exc())

    # Gather results
    result['buckets'] = get_bucket_list(module, connection)

    # Send exit
    module.exit_json(msg="Retrieved s3 facts.", ansible_facts=result)
def main():
    """Entry point: manage Auto Scaling group lifecycle hooks."""
    hook_spec = dict(
        autoscaling_group_name=dict(required=True, type='str'),
        lifecycle_hook_name=dict(required=True, type='str'),
        transition=dict(type='str', choices=['autoscaling:EC2_INSTANCE_TERMINATING',
                                             'autoscaling:EC2_INSTANCE_LAUNCHING']),
        role_arn=dict(type='str'),
        notification_target_arn=dict(type='str'),
        notification_meta_data=dict(type='str'),
        heartbeat_timeout=dict(type='int'),
        default_result=dict(default='ABANDON', choices=['ABANDON', 'CONTINUE']),
        state=dict(default='present', choices=['present', 'absent']),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(hook_spec)

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=[['state', 'present', ['transition']]])

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = boto3_conn(module, conn_type='client', resource='autoscaling',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    # Each handler reports whether it modified the hook.
    state = module.params.get('state')
    changed = False
    if state == 'present':
        changed = create_lifecycle_hook(connection, module)
    elif state == 'absent':
        changed = delete_lifecycle_hook(connection, module)

    module.exit_json(changed=changed)
def __init__(self, **kwargs):
    """Construct the wrapped AnsibleModule.

    Keys listed in AnsibleAWSModule.default_settings are popped out of
    ``kwargs`` (caller value wins, class default otherwise) and stored on
    ``self.settings``; everything that remains is forwarded to the module
    class named in default_settings["module_class"].
    """
    local_settings = {}
    for key in AnsibleAWSModule.default_settings:
        try:
            # Pop so wrapper-level settings are not passed on to AnsibleModule.
            local_settings[key] = kwargs.pop(key)
        except KeyError:
            local_settings[key] = AnsibleAWSModule.default_settings[key]
    self.settings = local_settings

    if local_settings["default_args"]:
        # ec2_argument_spec contains the region so we use that; there's a patch coming which
        # will add it to aws_argument_spec so if that's accepted then later we should change
        # over
        argument_spec_full = ec2_argument_spec()
        try:
            argument_spec_full.update(kwargs["argument_spec"])
        except (TypeError, NameError):
            # No caller-supplied argument_spec (or it isn't a mapping);
            # fall back to the AWS baseline spec alone.
            pass
        kwargs["argument_spec"] = argument_spec_full

    self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)

    if local_settings["check_boto3"] and not HAS_BOTO3:
        self._module.fail_json(
            msg='Python modules "botocore" or "boto3" are missing, please install both')

    # Mirror the wrapped module's check_mode flag for convenience.
    self.check_mode = self._module.check_mode
def main():
    """Entry point: gather facts about EC2 virtual (VPN) gateways."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(type='dict', default=dict()),
            vpn_gateway_ids=dict(type='list', default=None),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='json and boto3 is required.')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - " + str(e))

    module.exit_json(result=list_virtual_gateways(connection, module))
def main():
    """Entry point: create, update or delete a DynamoDB table (boto2 API)."""
    table_spec = dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        hash_key_name=dict(required=True, type='str'),
        hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        range_key_name=dict(type='str'),
        range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        read_capacity=dict(default=1, type='int'),
        write_capacity=dict(default=1, type='int'),
        indexes=dict(default=[], type='list'),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(table_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    # Route to the handler matching the requested state.
    desired = module.params.get('state')
    if desired == 'present':
        create_or_update_dynamo_table(connection, module)
    elif desired == 'absent':
        delete_dynamo_table(connection, module)
def main():
    """
    Module action handler
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        id=dict(),
        name=dict(),
        tags=dict(type="dict", default={}),
        targets=dict(type="list", default=[]),
    ))

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = EFSConnection(module, region, **aws_connect_params)

    params = module.params
    file_systems_info = connection.get_file_systems(params.get('id'), params.get('name'))

    # Optionally narrow by tag subset.
    if params.get('tags'):
        file_systems_info = [fs for fs in file_systems_info
                             if has_tags(fs['tags'], params.get('tags'))]

    # Optionally narrow by mount-target attributes.
    wanted = params.get('targets')
    if wanted:
        wanted = [(item, prefix_to_attr(item)) for item in wanted]
        file_systems_info = [fs for fs in file_systems_info
                             if has_targets(fs['mount_targets'], wanted)]

    module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
def main():
    """Entry point: copy an EBS snapshot, optionally encrypting the copy."""
    snapshot_spec = dict(
        source_region=dict(required=True),
        source_snapshot_id=dict(required=True),
        description=dict(default=''),
        encrypted=dict(type='bool', default=False, required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False),
        tags=dict(type='dict'),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(snapshot_spec)
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='botocore and boto3 are required.')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="Region must be provided.")

    try:
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (NoCredentialsError, ProfileNotFound) as e:
        module.fail_json(msg="Can't authorize connection - %s" % to_native(e))

    copy_snapshot(module, client)
def main():
    """Entry point: gather facts about ELBv2 target groups."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            load_balancer_arn=dict(type='str'),
            target_group_arns=dict(type='list'),
            names=dict(type='list'),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        # FIX: mutually_exclusive takes a list of *sequences* of parameter
        # names.  The original passed a flat list of strings, which Ansible
        # iterates character-by-character, so the exclusivity of these three
        # selectors was never actually enforced.
        mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
        supports_check_mode=True,
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")
    connection = boto3_conn(module, conn_type='client', resource='elbv2',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    list_target_groups(connection, module)
def main():
    """Entry point: manage Auto Scaling scaling policies (boto2 API)."""
    policy_spec = dict(
        name=dict(required=True, type='str'),
        adjustment_type=dict(type='str',
                             choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
        asg_name=dict(required=True, type='str'),
        scaling_adjustment=dict(type='int'),
        min_adjustment_step=dict(type='int'),
        cooldown=dict(type='int'),
        state=dict(default='present', choices=['present', 'absent']),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(policy_spec)
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    # Dispatch table keyed by the requested state (restricted by choices above).
    actions = {
        'present': create_scaling_policy,
        'absent': delete_scaling_policy,
    }
    actions[module.params.get('state')](connection, module)
def main():
    """Entry point: list EC2 instances matching the given filters (boto2 API)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(filters=dict(default=None, type='dict')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    try:
        connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    list_ec2_instances(connection, module)
def main():
    """Entry point: create/update or remove an EC2 network ACL."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        vpc_id=dict(),
        name=dict(),
        nacl_id=dict(),
        subnets=dict(required=False, type='list', default=list()),
        tags=dict(required=False, type='dict'),
        ingress=dict(required=False, type='list', default=list()),
        egress=dict(required=False, type='list', default=list()),
        state=dict(default='present', choices=['present', 'absent']),
    ))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=[['name', 'nacl_id']],
                           required_if=[['state', 'present', ['vpc_id']]])

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - %s" % str(e))

    # Normalize state then pick the matching handler.
    state = module.params.get('state').lower()
    handler = setup_network_acl if state == "present" else remove_network_acl
    changed, results = handler(client, module)
    module.exit_json(changed=changed, nacl_id=results)
def main():
    """Entry point: manage an IAM user and its attached managed policies."""
    user_spec = dict(
        name=dict(required=True, type='str'),
        managed_policy=dict(default=[], type='list'),
        state=dict(choices=['present', 'absent'], required=True),
        purge_policy=dict(default=False, type='bool'),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(user_spec)

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = boto3_conn(module, conn_type='client', resource='iam',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    # 'state' is required and limited to these two choices.
    if module.params.get("state") == 'present':
        create_or_update_user(connection, module)
    else:
        destroy_user(connection, module)
def main():
    """Entry point: manage an AWS Direct Connect gateway."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(),
        amazon_asn=dict(),
        virtual_gateway_id=dict(),
        direct_connect_gateway_id=dict(),
        wait_timeout=dict(type='int', default=320),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[('state', 'present', ['name', 'amazon_asn']),
                     ('state', 'absent', ['direct_connect_gateway_id'])],
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='directconnect',
                        region=region, endpoint=ec2_url, **aws_connect_kwargs)

    if module.params.get('state') == 'present':
        changed, results = ensure_present(client, module)
    else:
        # The delete path only reports whether anything was removed.
        changed = ensure_absent(client, module)
        results = {}

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
def main():
    """Entry point: create/import or delete an EC2 key pair."""
    key_spec = dict(
        name=dict(required=True),
        key_material=dict(),
        force=dict(type='bool', default=True),
        state=dict(default='present', choices=['present', 'absent']),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(default=300),
    )
    argument_spec = ec2_argument_spec()
    argument_spec.update(key_spec)

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    ec2_client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    params = module.params
    if params.get('state') == 'absent':
        delete_key_pair(module, ec2_client, params['name'])
    else:
        # state == 'present' (the only other allowed choice)
        create_key_pair(module, ec2_client, params['name'],
                        params.get('key_material'), params.get('force'))
def main():
    """
    Main entry point.

    :return dict: ansible facts
    """
    argument_spec = ec2_argument_spec()
    # FIX: function_name and name were declared required=True together with
    # default=None — 'required' and 'default' are contradictory in an Ansible
    # argument spec.  The empty mutually_exclusive=[]/required_together=[]
    # constructor arguments were no-ops and are removed as well.
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            function_name=dict(required=True),
            name=dict(required=True, aliases=['alias_name']),
            function_version=dict(type='int', required=False, default=0, aliases=['version']),
            description=dict(required=False, default=None),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)
    results = lambda_alias(module, aws)

    module.exit_json(**camel_dict_to_snake_dict(results))
def main():
    """Entry point: list facts via boto3 when available, else fall back to boto2."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default=None, type='dict')
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if HAS_BOTO3:
        # Preferred boto3 path.
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        if region:
            connection = boto3_conn(module, conn_type='client', resource='ec2',
                                    region=region, endpoint=ec2_url, **aws_connect_params)
        else:
            module.fail_json(msg="region must be specified")
        list_ec2_snapshots_boto3(connection, module)
    else:
        # Legacy boto2 fallback.
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        if region:
            try:
                connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
            except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
                module.fail_json(msg=str(e))
        else:
            module.fail_json(msg="region must be specified")
        # NOTE(review): the boto3 branch lists snapshots
        # (list_ec2_snapshots_boto3) but this fallback calls list_eni() —
        # this looks like a copy/paste mismatch; confirm which helper this
        # module is meant to invoke.
        list_eni(connection, module)
def main():
    """Entry point: create or remove a managed NAT gateway.

    'present' creates (or, with if_exist_do_not_create, reuses) a NAT gateway
    in the given subnet; 'absent' removes the named gateway and may release
    its Elastic IP.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            subnet_id=dict(type='str'),
            eip_address=dict(type='str'),
            allocation_id=dict(type='str'),
            if_exist_do_not_create=dict(type='bool', default=False),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=320, required=False),
            release_eip=dict(type='bool', default=False),
            nat_gateway_id=dict(type='str'),
            client_token=dict(type='str'),
        ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['allocation_id', 'eip_address']],
        required_if=[['state', 'absent', ['nat_gateway_id']],
                     ['state', 'present', ['subnet_id']]])

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore/boto3 is required.')

    # Unpack the parameters the two handlers need.
    state = module.params.get('state').lower()
    check_mode = module.check_mode
    subnet_id = module.params.get('subnet_id')
    allocation_id = module.params.get('allocation_id')
    eip_address = module.params.get('eip_address')
    nat_gateway_id = module.params.get('nat_gateway_id')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    release_eip = module.params.get('release_eip')
    client_token = module.params.get('client_token')
    if_exist_do_not_create = module.params.get('if_exist_do_not_create')

    try:
        region, ec2_url, aws_connect_kwargs = (
            get_aws_connection_info(module, boto3=True)
        )
        client = (
            boto3_conn(module, conn_type='client', resource='ec2',
                       region=region, endpoint=ec2_url, **aws_connect_kwargs)
        )
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Boto3 Client Error - " + str(e.msg))

    changed = False
    err_msg = ''

    if state == 'present':
        # Both handlers return (success, changed, message, result-dict).
        success, changed, err_msg, results = (
            pre_create(
                client, subnet_id, allocation_id, eip_address,
                if_exist_do_not_create, wait, wait_timeout,
                client_token, check_mode=check_mode)
        )
    else:
        success, changed, err_msg, results = (
            remove(client, nat_gateway_id, wait, wait_timeout, release_eip,
                   check_mode=check_mode)
        )

    if not success:
        module.fail_json(msg=err_msg, success=success, changed=changed)
    else:
        module.exit_json(msg=err_msg, success=success, changed=changed, **results)
def main():
    """Entry point: manage IAM server certificates (upload, rename/move, delete)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(),
        cert=dict(),
        key=dict(no_log=True),
        cert_chain=dict(),
        new_name=dict(),
        path=dict(default='/'),
        new_path=dict(),
        dup_ok=dict(type='bool')
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        # Renaming/moving a certificate is exclusive with uploading new material.
        mutually_exclusive=[
            ['new_path', 'key'],
            ['new_path', 'cert'],
            ['new_path', 'cert_chain'],
            ['new_name', 'key'],
            ['new_name', 'cert'],
            ['new_name', 'cert_chain'],
        ],
    )

    if not HAS_BOTO:
        module.fail_json(msg="Boto is required for this module")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            # IAM is a global service; fall back to the default endpoint.
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    name = module.params.get('name')
    path = module.params.get('path')
    new_name = module.params.get('new_name')
    new_path = module.params.get('new_path')
    dup_ok = module.params.get('dup_ok')

    # Certificate material is only loaded for a fresh upload, not for renames.
    if state == 'present' and not new_name and not new_path:
        cert, key, cert_chain = load_data(cert=module.params.get('cert'),
                                          key=module.params.get('key'),
                                          cert_chain=module.params.get('cert_chain'))
    else:
        cert = key = cert_chain = None

    # Snapshot existing certificate names and bodies for duplicate detection.
    orig_cert_names = [ctb['server_certificate_name'] for ctb in
                       iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list]
    orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body
                        for thing in orig_cert_names]

    # A no-op rename/move is treated as no rename/move at all.
    if new_name == name:
        new_name = None
    if new_path == path:
        new_path = None

    changed = False
    try:
        cert_action(module, iam, name, path, new_name, new_path, state,
                    cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok)
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err), debug=[cert, key])
def main():
    """Entry point: invoke an AWS Lambda function and report its result.

    Builds the ``client.invoke`` parameters from the module arguments
    (invocation type, log capture, qualifier, payload), performs the call,
    decodes logs/output, and exits via ``module.exit_json``/``fail_json``.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(),
        function_arn=dict(),
        wait=dict(default=True, type='bool'),
        tail_log=dict(default=False, type='bool'),
        dry_run=dict(default=False, type='bool'),
        version_qualifier=dict(),
        payload=dict(default={}, type='dict'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['name', 'function_arn']],
    )

    # The original block checked HAS_BOTO3 twice with two different messages;
    # the second check was unreachable, so only the first is kept.
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    name = module.params.get('name')
    function_arn = module.params.get('function_arn')
    await_return = module.params.get('wait')
    dry_run = module.params.get('dry_run')
    tail_log = module.params.get('tail_log')
    version_qualifier = module.params.get('version_qualifier')
    payload = module.params.get('payload')

    if not (name or function_arn):
        module.fail_json(msg="Must provide either a function_arn or a name to invoke.")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3)
    if not region:
        module.fail_json(msg="The AWS region must be specified as an "
                             "environment variable or in the AWS credentials "
                             "profile.")

    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        module.fail_json(msg="Failure connecting boto3 to AWS: %s" % to_native(e),
                         exception=traceback.format_exc())

    invoke_params = {}

    if await_return:
        # await response
        invoke_params['InvocationType'] = 'RequestResponse'
    else:
        # fire and forget
        invoke_params['InvocationType'] = 'Event'
    if dry_run or module.check_mode:
        # dry_run overrides invocation type
        invoke_params['InvocationType'] = 'DryRun'

    if tail_log and await_return:
        invoke_params['LogType'] = 'Tail'
    elif tail_log and not await_return:
        module.fail_json(msg="The `tail_log` parameter is only available if "
                             "the invocation waits for the function to complete. "
                             "Set `wait` to true or turn off `tail_log`.")
    else:
        invoke_params['LogType'] = 'None'

    if version_qualifier:
        invoke_params['Qualifier'] = version_qualifier

    if payload:
        invoke_params['Payload'] = json.dumps(payload)

    if function_arn:
        invoke_params['FunctionName'] = function_arn
    elif name:
        invoke_params['FunctionName'] = name

    try:
        response = client.invoke(**invoke_params)
    except botocore.exceptions.ClientError as ce:
        if ce.response['Error']['Code'] == 'ResourceNotFoundException':
            module.fail_json(msg="Could not find Lambda to execute. Make sure "
                                 "the ARN is correct and your profile has "
                                 "permissions to execute this function.",
                             exception=traceback.format_exc())
        module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error",
                         exception=traceback.format_exc())
    except botocore.exceptions.ParamValidationError as ve:
        # BUG FIX: traceback.format_exc() accepts only limit/chain keywords;
        # the original passed the exception object (format_exc(ve)), which
        # raised a TypeError inside this error handler.
        module.fail_json(msg="Parameters to `invoke` failed to validate",
                         exception=traceback.format_exc())
    except Exception as e:
        module.fail_json(msg="Unexpected failure while invoking Lambda function",
                         exception=traceback.format_exc())

    results = {
        'logs': '',
        'status': response['StatusCode'],
        'output': '',
    }

    if response.get('LogResult'):
        try:
            # logs are base64 encoded in the API response
            results['logs'] = base64.b64decode(response.get('LogResult', ''))
        except Exception as e:
            module.fail_json(msg="Failed while decoding logs",
                             exception=traceback.format_exc())

    if invoke_params['InvocationType'] == 'RequestResponse':
        try:
            # Payload is a streaming body; decode the JSON the function returned.
            results['output'] = json.loads(response['Payload'].read().decode('utf8'))
        except Exception as e:
            module.fail_json(msg="Failed while decoding function return value",
                             exception=traceback.format_exc())

        if isinstance(results.get('output'), dict) and any(
                [results['output'].get('stackTrace'),
                 results['output'].get('errorMessage')]):
            # AWS sends back stack traces and error messages when a function failed
            # in a RequestResponse (synchronous) context.
            template = ("Function executed, but there was an error in the Lambda function. "
                        "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
            error_data = {
                # format the stacktrace sent back as an array into a multiline string
                'trace': '\n'.join(
                    [' '.join([
                        str(x) for x in line  # cast line numbers to strings
                    ]) for line in results.get('output', {}).get('stackTrace', [])]),
                'errmsg': results['output'].get('errorMessage'),
                'type': results['output'].get('errorType')
            }
            module.fail_json(msg=template.format(**error_data), result=results)

    module.exit_json(changed=True, result=results)
def main():
    """Entry point: manage a single Route 53 resource record set via boto2.

    Maps state=present/create -> CREATE (or UPSERT when overwriting),
    absent/delete -> DELETE, and get -> read-only lookup.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(aliases=['command'], choices=['present', 'absent', 'get', 'create', 'delete'], required=True),
        zone=dict(required=True),
        hosted_zone_id=dict(required=False, default=None),
        record=dict(required=True),
        ttl=dict(required=False, type='int', default=3600),
        type=dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'SOA'], required=True),
        alias=dict(required=False, type='bool'),
        alias_hosted_zone_id=dict(required=False),
        alias_evaluate_target_health=dict(required=False, type='bool', default=False),
        value=dict(required=False, type='list'),
        overwrite=dict(required=False, type='bool'),
        retry_interval=dict(required=False, default=500),
        private_zone=dict(required=False, type='bool', default=False),
        identifier=dict(required=False, default=None),
        weight=dict(required=False, type='int'),
        region=dict(required=False),
        health_check=dict(required=False),
        failover=dict(required=False, choices=['PRIMARY', 'SECONDARY']),
        vpc_id=dict(required=False),
        wait=dict(required=False, type='bool', default=False),
        wait_timeout=dict(required=False, type='int', default=300),
    ))

    # state=present, absent, create, delete THEN value is required
    required_if = [('state', 'present', ['value']), ('state', 'create', ['value'])]
    required_if.extend([('state', 'absent', ['value']), ('state', 'delete', ['value'])])

    # If alias is True then you must specify alias_hosted_zone as well
    required_together = [['alias', 'alias_hosted_zone_id']]

    # failover, region, and weight are mutually exclusive
    mutually_exclusive = [('failover', 'region', 'weight')]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_together=required_together,
                           required_if=required_if,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if distutils.version.StrictVersion(boto.__version__) < distutils.version.StrictVersion(MINIMUM_BOTO_VERSION):
        module.fail_json(msg='Found boto in version %s, but >= %s is required' % (boto.__version__, MINIMUM_BOTO_VERSION))

    # Collapse the five user-facing states onto the three internal commands.
    if module.params['state'] in ('present', 'create'):
        command_in = 'create'
    elif module.params['state'] in ('absent', 'delete'):
        command_in = 'delete'
    elif module.params['state'] == 'get':
        command_in = 'get'

    zone_in = module.params.get('zone').lower()
    hosted_zone_id_in = module.params.get('hosted_zone_id')
    ttl_in = module.params.get('ttl')
    record_in = module.params.get('record').lower()
    type_in = module.params.get('type')
    value_in = module.params.get('value') or []
    alias_in = module.params.get('alias')
    alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
    alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
    retry_interval_in = module.params.get('retry_interval')

    # Supplying a vpc_id implies the zone is private.
    if module.params['vpc_id'] is not None:
        private_zone_in = True
    else:
        private_zone_in = module.params.get('private_zone')

    identifier_in = module.params.get('identifier')
    weight_in = module.params.get('weight')
    region_in = module.params.get('region')
    health_check_in = module.params.get('health_check')
    failover_in = module.params.get('failover')
    vpc_id_in = module.params.get('vpc_id')
    wait_in = module.params.get('wait')
    wait_timeout_in = module.params.get('wait_timeout')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    # Route 53 names are fully qualified; normalise trailing dots.
    if zone_in[-1:] != '.':
        zone_in += "."

    if record_in[-1:] != '.':
        record_in += "."

    if command_in == 'create' or command_in == 'delete':
        if alias_in and len(value_in) != 1:
            module.fail_json(msg="parameter 'value' must contain a single dns name for alias records")
        if (weight_in is not None or region_in is not None or failover_in is not None) and identifier_in is None:
            module.fail_json(msg="If you specify failover, region or weight you must also specify identifier")
        if (weight_in is None and region_in is None and failover_in is None) and identifier_in is not None:
            module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region or failover.")

    # connect to the route53 endpoint
    try:
        conn = Route53Connection(**aws_connect_kwargs)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg=e.error_message)

    # Find the named zone ID
    zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in)

    # Verify that the requested zone is already defined in Route53
    if zone is None:
        errmsg = "Zone %s does not exist in Route53" % zone_in
        module.fail_json(msg=errmsg)

    record = {}
    found_record = False
    # Build the record set we want, then compare it against what exists.
    wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
                         identifier=identifier_in, weight=weight_in,
                         region=region_in, health_check=health_check_in,
                         failover=failover_in)
    for v in value_in:
        if alias_in:
            wanted_rset.set_alias(alias_hosted_zone_id_in, v, alias_evaluate_target_health_in)
        else:
            wanted_rset.add_value(v)

    # CAA records are compared order-insensitively.
    need_to_sort_records = (type_in == 'CAA')

    # Sort records for wanted_rset if necessary (keep original list)
    unsorted_records = wanted_rset.resource_records
    if need_to_sort_records:
        wanted_rset.resource_records = sorted(unsorted_records)

    sets = invoke_with_throttling_retries(conn.get_all_rrsets, zone.id, name=record_in,
                                          type=type_in, identifier=identifier_in)
    sets_iter = iter(sets)
    while True:
        try:
            # next() itself can hit API throttling while paginating, so retry it.
            rset = invoke_with_throttling_retries(next, sets_iter)
        except StopIteration:
            break
        # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
        # tripping of things like * and @.
        decoded_name = rset.name.replace(r'\052', '*')
        decoded_name = decoded_name.replace(r'\100', '@')
        # Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block
        rset.name = decoded_name

        if identifier_in is not None:
            identifier_in = str(identifier_in)

        if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
            if need_to_sort_records:
                # Sort records
                rset.resource_records = sorted(rset.resource_records)
            found_record = True
            record['zone'] = zone_in
            record['type'] = rset.type
            record['record'] = decoded_name
            record['ttl'] = rset.ttl
            if hosted_zone_id_in:
                record['hosted_zone_id'] = hosted_zone_id_in
            record['identifier'] = rset.identifier
            record['weight'] = rset.weight
            record['region'] = rset.region
            record['failover'] = rset.failover
            record['health_check'] = rset.health_check
            # NOTE(review): hosted_zone_id is assigned twice above/below —
            # redundant but harmless; kept as-is.
            if hosted_zone_id_in:
                record['hosted_zone_id'] = hosted_zone_id_in
            if rset.alias_dns_name:
                record['alias'] = True
                record['value'] = rset.alias_dns_name
                record['values'] = [rset.alias_dns_name]
                record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
                record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health
            else:
                record['alias'] = False
                record['value'] = ','.join(sorted(rset.resource_records))
                record['values'] = sorted(rset.resource_records)
            if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
                # Desired record already exists exactly as requested: no change.
                module.exit_json(changed=False)

        # We need to look only at the first rrset returned by the above call,
        # so break here. The returned elements begin with the one matching our
        # requested name, type, and identifier, if such an element exists,
        # followed by all others that come after it in alphabetical order.
        # Therefore, if the first set does not match, no subsequent set will
        # match either.
        break

    if command_in == 'get':
        if type_in == 'NS':
            ns = record.get('values', [])
        else:
            # Retrieve name servers associated to the zone.
            z = invoke_with_throttling_retries(conn.get_zone, zone_in)
            ns = invoke_with_throttling_retries(z.get_nameservers)

        module.exit_json(changed=False, set=record, nameservers=ns)

    if command_in == 'delete' and not found_record:
        module.exit_json(changed=False)

    changes = ResourceRecordSets(conn, zone.id)

    if command_in == 'create' or command_in == 'delete':
        if command_in == 'create' and found_record:
            if not module.params['overwrite']:
                module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
            command = 'UPSERT'
        else:
            command = command_in.upper()
        # Restore original order of records
        wanted_rset.resource_records = unsorted_records
        changes.add_change_record(command, wanted_rset)

    if not module.check_mode:
        try:
            invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in)
        except boto.route53.exception.DNSServerError as e:
            # Extract the human-readable message out of the XML error body.
            txt = e.body.split("<Message>")[1]
            txt = txt.split("</Message>")[0]
            if "but it already exists" in txt:
                module.exit_json(changed=False)
            else:
                module.fail_json(msg=txt)
        except TimeoutError:
            module.fail_json(msg='Timeout waiting for changes to replicate')

    module.exit_json(changed=True)
def main():
    """Entry point: allocate/associate (present) or disassociate/release (absent)
    an EC2 Elastic IP against an instance or ENI, via boto2."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        device_id=dict(required=False, aliases=['instance_id']),
        public_ip=dict(required=False, aliases=['ip']),
        state=dict(required=False, default='present', choices=['present', 'absent']),
        in_vpc=dict(required=False, type='bool', default=False),
        reuse_existing_ip_allowed=dict(required=False, type='bool', default=False),
        release_on_disassociation=dict(required=False, type='bool', default=False),
        allow_reassociation=dict(type='bool', default=False),
        wait_timeout=dict(default=300),
        private_ip_address=dict(required=False, default=None, type='str')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    ec2 = ec2_connect(module)
    device_id = module.params.get('device_id')
    instance_id = module.params.get('instance_id')
    public_ip = module.params.get('public_ip')
    private_ip_address = module.params.get('private_ip_address')
    state = module.params.get('state')
    in_vpc = module.params.get('in_vpc')
    domain = 'vpc' if in_vpc else None
    reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
    release_on_disassociation = module.params.get('release_on_disassociation')
    allow_reassociation = module.params.get('allow_reassociation')

    # Parameter checks
    if private_ip_address is not None and device_id is None:
        module.fail_json(msg="parameters are required together: ('device_id', 'private_ip_address')")

    # Legacy alias handling: instance_id is deprecated in favour of device_id.
    if instance_id:
        warnings = ["instance_id is no longer used, please use device_id going forward"]
        is_instance = True
        device_id = instance_id
    else:
        # Classify the device: 'i-' prefix = instance, 'eni-' prefix = ENI.
        # NOTE(review): when device_id is empty, is_instance stays unset —
        # safe only because it is used exclusively on device_id branches below.
        if device_id and device_id.startswith('i-'):
            is_instance = True
        elif device_id:
            if device_id.startswith('eni-') and not in_vpc:
                module.fail_json(msg="If you are specifying an ENI, in_vpc must be true")
            is_instance = False

    try:
        if device_id:
            # 'isinstance' here is a keyword of find_address, not the builtin.
            address = find_address(ec2, public_ip, device_id, isinstance=is_instance)
        else:
            address = find_address(ec2, public_ip, None)
        if state == 'present':
            if device_id:
                result = ensure_present(ec2, module, domain, address, private_ip_address,
                                        device_id, reuse_existing_ip_allowed,
                                        allow_reassociation, module.check_mode,
                                        isinstance=is_instance)
            else:
                # No device: just ensure an address is allocated.
                if address:
                    changed = False
                else:
                    address, changed = allocate_address(ec2, domain, reuse_existing_ip_allowed)
                result = {
                    'changed': changed,
                    'public_ip': address.public_ip,
                    'allocation_id': address.allocation_id
                }
        else:
            if device_id:
                disassociated = ensure_absent(ec2, domain, address, device_id,
                                              module.check_mode, isinstance=is_instance)
                if release_on_disassociation and disassociated['changed']:
                    released = release_address(ec2, address, module.check_mode)
                    result = {
                        'changed': True,
                        'disassociated': disassociated,
                        'released': released
                    }
                else:
                    result = {
                        'changed': disassociated['changed'],
                        'disassociated': disassociated,
                        'released': {'changed': False}
                    }
            else:
                # Address given without a device: release it outright.
                released = release_address(ec2, address, module.check_mode)
                result = {
                    'changed': released['changed'],
                    'disassociated': {'changed': False},
                    'released': released
                }
    except (boto.exception.EC2ResponseError, EIPException) as e:
        module.fail_json(msg=str(e))

    if instance_id:
        # Surface the deprecation warning only when the legacy alias was used.
        result['warnings'] = warnings
    module.exit_json(**result)
def main():
    """Entry point: create/update or delete a DynamoDB table.

    Uses boto2 (boto.dynamodb2) for table operations and, when ``tags`` is
    supplied, additionally opens boto3 dynamodb/sts clients for tagging.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        hash_key_name=dict(type='str'),
        hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        range_key_name=dict(type='str'),
        range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        read_capacity=dict(default=1, type='int'),
        write_capacity=dict(default=1, type='int'),
        indexes=dict(default=[], type='list'),
        tags=dict(type='dict'),
        wait_for_active_timeout=dict(default=60, type='int'),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    # Tagging is implemented through boto3 only.
    if not HAS_BOTO3 and module.params.get('tags'):
        module.fail_json(msg='boto3 required when using tags for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    if module.params.get('tags'):
        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            boto3_dynamodb = boto3_conn(module, conn_type='client', resource='dynamodb',
                                        region=region, endpoint=ec2_url, **aws_connect_kwargs)
            if not hasattr(boto3_dynamodb, 'tag_resource'):
                module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version')
            boto3_sts = boto3_conn(module, conn_type='client', resource='sts',
                                   region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except botocore.exceptions.NoCredentialsError as e:
            # BUG FIX: traceback.format_exc() takes only limit/chain keywords;
            # the original passed the exception object (format_exc(e)), which
            # raised a TypeError inside this error handler.
            module.fail_json(msg='cannot connect to AWS',
                             exception=traceback.format_exc())
    else:
        boto3_dynamodb = None
        boto3_sts = None

    state = module.params.get('state')
    if state == 'present':
        create_or_update_dynamo_table(connection, module, boto3_dynamodb, boto3_sts, region)
    elif state == 'absent':
        delete_dynamo_table(connection, module)
def main():
    """Entry point: create/delete an EC2 security group and reconcile its
    ingress/egress rules and tags against the requested state (boto3)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(),
        group_id=dict(),
        description=dict(),
        vpc_id=dict(),
        rules=dict(type='list'),
        rules_egress=dict(type='list'),
        state=dict(default='present', type='str', choices=['present', 'absent']),
        purge_rules=dict(default=True, required=False, type='bool'),
        purge_rules_egress=dict(default=True, required=False, type='bool'),
        tags=dict(required=False, type='dict', aliases=['resource_tags']),
        purge_tags=dict(default=True, required=False, type='bool')))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_one_of=[['name', 'group_id']],
        required_if=[['state', 'present', ['name']]],
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    name = module.params['name']
    group_id = module.params['group_id']
    description = module.params['description']
    vpc_id = module.params['vpc_id']
    # Normalise rule shorthand (port ranges, grouped sources) and drop duplicates.
    rules = deduplicate_rules_args(
        rules_expand_sources(rules_expand_ports(module.params['rules'])))
    rules_egress = deduplicate_rules_args(
        rules_expand_sources(rules_expand_ports(module.params['rules_egress'])))
    state = module.params.get('state')
    purge_rules = module.params['purge_rules']
    purge_rules_egress = module.params['purge_rules_egress']
    tags = module.params['tags']
    purge_tags = module.params['purge_tags']

    if state == 'present' and not description:
        module.fail_json(msg='Must provide description when state is present.')

    changed = False
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="The AWS region must be specified as an "
                             "environment variable or in the AWS credentials "
                             "profile.")
    client = boto3_conn(module, conn_type='client', resource='ec2',
                        endpoint=ec2_url, region=region, **aws_connect_params)
    group = None
    # groups maps both GroupId and GroupName to the group dict.
    groups = dict()
    security_groups = []
    # do get all security groups
    # find if the group is present
    try:
        response = get_security_groups_with_backoff(client)
        security_groups = response.get('SecurityGroups', [])
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Error in describe_security_groups: %s" % "Unable to locate credentials",
                         exception=traceback.format_exc())
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Error in describe_security_groups: %s" % e,
                         exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))

    for sg in security_groups:
        groups[sg['GroupId']] = sg
        groupName = sg['GroupName']
        if groupName in groups:
            # Prioritise groups from the current VPC
            # even if current VPC is EC2-Classic
            if groups[groupName].get('VpcId') == vpc_id:
                # Group saved already matches current VPC, change nothing
                pass
            elif vpc_id is None and groups[groupName].get('VpcId') is None:
                # We're in EC2 classic, and the group already saved is as well
                # No VPC groups can be used alongside EC2 classic groups
                pass
            else:
                # the current SG stored has no direct match, so we can replace it
                groups[groupName] = sg
        else:
            groups[groupName] = sg

        # Remember the group targeted by this run (by id, or by name+vpc).
        if group_id and sg['GroupId'] == group_id:
            group = sg
        elif groupName == name and (vpc_id is None or sg['VpcId'] == vpc_id):
            group = sg

    # Ensure requested group is absent
    if state == 'absent':
        if group:
            # found a match, delete it
            try:
                if not module.check_mode:
                    client.delete_security_group(GroupId=group['GroupId'])
            except botocore.exceptions.ClientError as e:
                module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e),
                                 exception=traceback.format_exc(),
                                 **camel_dict_to_snake_dict(e.response))
            else:
                group = None
                changed = True
        else:
            # no match found, no changes required
            pass

    # Ensure requested group is present
    elif state == 'present':
        if group:
            # existing group
            if group['Description'] != description:
                module.fail_json(msg="Group description does not match existing group. ec2_group does not support this case.")

        # if the group doesn't exist, create it now
        else:
            # no match found, create it
            if not module.check_mode:
                params = dict(GroupName=name, Description=description)
                if vpc_id:
                    params['VpcId'] = vpc_id
                group = client.create_security_group(**params)
                # When a group is created, an egress_rule ALLOW ALL
                # to 0.0.0.0/0 is added automatically but it's not
                # reflected in the object returned by the AWS API
                # call. We re-read the group for getting an updated object
                # amazon sometimes takes a couple seconds to update the security group so wait till it exists
                while True:
                    group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
                    if group.get('VpcId') and not group.get('IpPermissionsEgress'):
                        pass
                    else:
                        break
            changed = True

        if tags is not None:
            # Reconcile tags: compute additions/updates and deletions.
            current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', []))
            tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags)
            if tags_to_delete:
                try:
                    # NOTE(review): e.message is Python-2-only on ClientError;
                    # on Python 3 this handler would itself raise — confirm.
                    client.delete_tags(Resources=[group['GroupId']],
                                       Tags=[{'Key': tag} for tag in tags_to_delete])
                except botocore.exceptions.ClientError as e:
                    module.fail_json(msg=e.message, exception=traceback.format_exc(),
                                     **camel_dict_to_snake_dict(e.response))
                changed = True

            # Add/update tags
            if tags_need_modify:
                try:
                    client.create_tags(Resources=[group['GroupId']],
                                       Tags=ansible_dict_to_boto3_tag_list(tags_need_modify))
                except botocore.exceptions.ClientError as e:
                    module.fail_json(msg=e.message, exception=traceback.format_exc(),
                                     **camel_dict_to_snake_dict(e.response))
                changed = True

    else:
        module.fail_json(msg="Unsupported state requested: %s" % state)

    # create a lookup for all existing rules on the group
    ip_permission = []
    if group:
        # Manage ingress rules
        groupRules = {}
        add_rules_to_lookup(group['IpPermissions'], group['GroupId'], 'in', groupRules)
        # Now, go through all provided rules and ensure they are there.
        if rules is not None:
            for rule in rules:
                validate_rule(module, rule)
                group_id, ip, ipv6, target_group_created = get_target_from_rule(
                    module, client, rule, name, group, groups, vpc_id)
                if target_group_created:
                    changed = True
                if rule['proto'] in ('all', '-1', -1):
                    # "-1" means all protocols; ports are meaningless then.
                    rule['proto'] = -1
                    rule['from_port'] = None
                    rule['to_port'] = None

                if group_id:
                    rule_id = make_rule_key('in', rule, group['GroupId'], group_id)
                    if rule_id in groupRules:
                        # Rule already present: keep it out of the purge set.
                        del groupRules[rule_id]
                    else:
                        if not module.check_mode:
                            ip_permission = serialize_group_grant(group_id, rule)
                            if ip_permission:
                                ips = ip_permission
                                if vpc_id:
                                    [useridpair.update({'VpcId': vpc_id}) for useridpair in
                                     ip_permission.get('UserIdGroupPairs', [])]
                                try:
                                    client.authorize_security_group_ingress(
                                        GroupId=group['GroupId'], IpPermissions=[ips])
                                except botocore.exceptions.ClientError as e:
                                    module.fail_json(
                                        msg="Unable to authorize ingress for group %s security group '%s' - %s" % (group_id, group['GroupName'], e),
                                        exception=traceback.format_exc(),
                                        **camel_dict_to_snake_dict(e.response))
                        changed = True
                elif ip:
                    # Convert ip to list we can iterate over
                    if ip and not isinstance(ip, list):
                        ip = [ip]
                    changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ip,
                                                          ip_permission, module, rule, "ipv4")
                elif ipv6:
                    # Convert ip to list we can iterate over
                    if not isinstance(ipv6, list):
                        ipv6 = [ipv6]
                    # If rule already exists, don't later delete it
                    changed, ip_permission = authorize_ip("in", changed, client, group, groupRules, ipv6,
                                                          ip_permission, module, rule, "ipv6")

        # Finally, remove anything left in the groupRules -- these will be defunct rules
        if purge_rules:
            for (rule, grant) in groupRules.values():
                ip_permission = serialize_revoke(grant, rule)
                if not module.check_mode:
                    try:
                        client.revoke_security_group_ingress(
                            GroupId=group['GroupId'], IpPermissions=[ip_permission])
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(
                            msg="Unable to revoke ingress for security group '%s' - %s" % (group['GroupName'], e),
                            exception=traceback.format_exc(),
                            **camel_dict_to_snake_dict(e.response))
                changed = True

        # Manage egress rules
        groupRules = {}
        add_rules_to_lookup(group['IpPermissionsEgress'], group['GroupId'], 'out', groupRules)
        # Now, go through all provided rules and ensure they are there.
        if rules_egress is not None:
            for rule in rules_egress:
                validate_rule(module, rule)
                group_id, ip, ipv6, target_group_created = get_target_from_rule(
                    module, client, rule, name, group, groups, vpc_id)
                if target_group_created:
                    changed = True
                if rule['proto'] in ('all', '-1', -1):
                    rule['proto'] = -1
                    rule['from_port'] = None
                    rule['to_port'] = None

                if group_id:
                    rule_id = make_rule_key('out', rule, group['GroupId'], group_id)
                    if rule_id in groupRules:
                        del groupRules[rule_id]
                    else:
                        if not module.check_mode:
                            ip_permission = serialize_group_grant(group_id, rule)
                            if ip_permission:
                                ips = ip_permission
                                if vpc_id:
                                    [useridpair.update({'VpcId': vpc_id}) for useridpair in
                                     ip_permission.get('UserIdGroupPairs', [])]
                                try:
                                    client.authorize_security_group_egress(
                                        GroupId=group['GroupId'], IpPermissions=[ips])
                                except botocore.exceptions.ClientError as e:
                                    module.fail_json(
                                        msg="Unable to authorize egress for group %s security group '%s' - %s" % (group_id, group['GroupName'], e),
                                        exception=traceback.format_exc(),
                                        **camel_dict_to_snake_dict(e.response))
                        changed = True
                elif ip:
                    # Convert ip to list we can iterate over
                    if not isinstance(ip, list):
                        ip = [ip]
                    changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ip,
                                                          ip_permission, module, rule, "ipv4")
                elif ipv6:
                    # Convert ip to list we can iterate over
                    if not isinstance(ipv6, list):
                        ipv6 = [ipv6]
                    # If rule already exists, don't later delete it
                    changed, ip_permission = authorize_ip("out", changed, client, group, groupRules, ipv6,
                                                          ip_permission, module, rule, "ipv6")
        elif vpc_id is not None:
            # when no egress rules are specified and we're in a VPC,
            # we add in a default allow all out rule, which was the
            # default behavior before egress rules were added
            default_egress_rule = 'out--1-None-None-' + group['GroupId'] + '-0.0.0.0/0'
            if default_egress_rule not in groupRules:
                if not module.check_mode:
                    ip_permission = [{'IpProtocol': '-1',
                                      'IpRanges': [{'CidrIp': '0.0.0.0/0'}]
                                      }]
                    try:
                        client.authorize_security_group_egress(
                            GroupId=group['GroupId'], IpPermissions=ip_permission)
                    except botocore.exceptions.ClientError as e:
                        module.fail_json(
                            msg="Unable to authorize egress for ip %s security group '%s' - %s" % ('0.0.0.0/0', group['GroupName'], e),
                            exception=traceback.format_exc(),
                            **camel_dict_to_snake_dict(e.response))
                changed = True
            else:
                # make sure the default egress rule is not removed
                del groupRules[default_egress_rule]

        # Finally, remove anything left in the groupRules -- these will be defunct rules
        if purge_rules_egress and vpc_id is not None:
            for (rule, grant) in groupRules.values():
                # we shouldn't be revoking 0.0.0.0 egress
                if grant != '0.0.0.0/0':
                    ip_permission = serialize_revoke(grant, rule)
                    if not module.check_mode:
                        try:
                            client.revoke_security_group_egress(
                                GroupId=group['GroupId'], IpPermissions=[ip_permission])
                        except botocore.exceptions.ClientError as e:
                            module.fail_json(
                                msg="Unable to revoke egress for ip %s security group '%s' - %s" % (grant, group['GroupName'], e),
                                exception=traceback.format_exc(),
                                **camel_dict_to_snake_dict(e.response))
                    changed = True

    if group:
        # Re-read the group so the returned facts reflect all changes made above.
        security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0]
        security_group = camel_dict_to_snake_dict(security_group)
        security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', []),
                                                                tag_name_key_name='key',
                                                                tag_value_key_name='value')
        module.exit_json(changed=changed, **security_group)
    else:
        module.exit_json(changed=changed, group_id=None)
def main():
    """Entry point: ensure an EC2 VPN customer gateway exists (state=present)
    or is removed (state=absent), tagging it with the requested Name."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            bgp_asn=dict(required=False, type='int'),
            ip_address=dict(required=True),
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
        ))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_if=[('state', 'present', ['bgp_asn'])])

    # Both SDK layers are needed; fail early with a specific message for each.
    if not HAS_BOTOCORE:
        module.fail_json(msg='botocore is required.')
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    manager = Ec2CustomerGatewayManager(module)
    desired_name = module.params.get('name')

    existing = manager.describe_gateways(module.params['ip_address'])

    # describe_gateways returns a key of CustomerGateways where as create_gateway
    # returns a key of CustomerGateway. For consistency, change it here.
    existing['CustomerGateway'] = existing['CustomerGateways']

    results = dict(changed=False)

    if module.params['state'] == 'present':
        if existing['CustomerGateway']:
            # Gateway already exists: only the Name tag may need updating.
            results['gateway'] = existing
            if existing['CustomerGateway'][0]['Tags']:
                for tag in existing['CustomerGateway'][0]['Tags']:
                    if tag['Key'] != 'Name':
                        continue
                    if tag['Value'] != desired_name:
                        results['name'] = manager.tag_cgw_name(
                            results['gateway']['CustomerGateway'][0]['CustomerGatewayId'],
                            module.params['name'],
                        )
                        results['changed'] = True
        else:
            # No gateway at this IP: create and tag it (skipped in check mode).
            if not module.check_mode:
                results['gateway'] = manager.ensure_cgw_present(
                    module.params['bgp_asn'],
                    module.params['ip_address'],
                )
                results['name'] = manager.tag_cgw_name(
                    results['gateway']['CustomerGateway']['CustomerGatewayId'],
                    module.params['name'],
                )
            results['changed'] = True

    elif module.params['state'] == 'absent':
        if existing['CustomerGateway']:
            results['gateway'] = existing
            if not module.check_mode:
                results['gateway'] = manager.ensure_cgw_absent(
                    existing['CustomerGateway'][0]['CustomerGatewayId'])
            results['changed'] = True

    module.exit_json(**camel_dict_to_snake_dict(results))
def main():
    """Ansible entry point: create, modify, or delete an RDS DB parameter group (boto v1).

    state=present requires name/description/engine and applies any ``params``;
    state=absent deletes the group and forbids description/engine/params.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True),
            engine=dict(required=False, choices=VALID_ENGINES),
            description=dict(required=False),
            params=dict(required=False, aliases=['parameters'], type='dict'),
            immediate=dict(required=False, type='bool'),
        ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state')
    # RDS parameter group names are case-insensitive; normalize for lookups.
    group_name = module.params.get('name').lower()
    group_engine = module.params.get('engine')
    group_description = module.params.get('description')
    group_params = module.params.get('params') or {}
    immediate = module.params.get('immediate') or False

    # Manual cross-parameter validation (AnsibleModule required_if is not used here).
    if state == 'present':
        for required in ['name', 'description', 'engine']:
            if not module.params.get(required):
                module.fail_json(
                    msg=str("Parameter %s required for state='present'" % required))
    else:
        for not_allowed in ['description', 'engine', 'params']:
            if module.params.get(not_allowed):
                module.fail_json(
                    msg=str("Parameter %s not allowed for state='absent'" % not_allowed))

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str(
            "Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."
        ))

    try:
        conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg=e.error_message)

    # Remembered so a later NotModifiableError message can tell the user the
    # group itself was still created.
    group_was_added = False

    try:
        changed = False

        try:
            all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100)
            exists = len(all_groups) > 0
        except BotoServerError as e:
            # "Not found" is an expected outcome; any other server error is fatal.
            if e.error_code != 'DBParameterGroupNotFound':
                module.fail_json(msg=e.error_message)
            exists = False

        if state == 'absent':
            if exists:
                conn.delete_parameter_group(group_name)
                changed = True
        else:
            # NOTE: for state=present, ``changed`` becomes a dict of the
            # parameters that were actually modified (keyed by parameter
            # name), and that dict is what exit_json reports as ``changed``.
            changed = {}
            if not exists:
                new_group = conn.create_parameter_group(
                    group_name, engine=group_engine, description=group_description)
                group_was_added = True

            # If a "Marker" is present, this group has more attributes remaining to check. Get the next batch, but only
            # if there are parameters left to set.
            marker = None
            while len(group_params):
                next_group = conn.get_all_dbparameters(group_name, marker=marker)

                changed_params, group_params = modify_group(
                    next_group, group_params, immediate)
                changed.update(changed_params)

                if hasattr(next_group, 'Marker'):
                    marker = next_group.Marker
                else:
                    break

    except BotoServerError as e:
        module.fail_json(msg=e.error_message)

    except NotModifiableError as e:
        msg = e.error_message
        if group_was_added:
            msg = '%s The group "%s" was added first.' % (msg, group_name)
        module.fail_json(msg=msg)

    module.exit_json(changed=changed)
def main():
    """Register or deregister an EC2 instance with one or more classic ELBs."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['present', 'absent']},
        instance_id={'required': True},
        ec2_elbs={'default': None, 'required': False, 'type': 'list'},
        enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
        wait={'required': False, 'default': True, 'type': 'bool'},
        wait_timeout={'required': False, 'default': 0, 'type': 'int'},
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if not region:
        module.fail_json(
            msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ec2_elbs = module.params['ec2_elbs']
    wait = module.params['wait']
    enable_availability_zone = module.params['enable_availability_zone']
    timeout = module.params['wait_timeout']

    # Fix: 'ec2_elbs' always exists as a key in module.params because it has
    # a declared default, so the original membership test (`'ec2_elbs' not in
    # module.params`) could never fire. Check the value instead.
    if module.params['state'] == 'present' and ec2_elbs is None:
        module.fail_json(msg="ELBs are required for registration")

    instance_id = module.params['instance_id']
    elb_man = ElbManager(module, instance_id, ec2_elbs,
                         region=region, **aws_connect_params)

    # Fail fast if any named ELB does not exist.
    if ec2_elbs is not None:
        for elb in ec2_elbs:
            if not elb_man.exists(elb):
                module.fail_json(msg="ELB %s does not exist" % elb)

    if not module.check_mode:
        if module.params['state'] == 'present':
            elb_man.register(wait, enable_availability_zone, timeout)
        elif module.params['state'] == 'absent':
            elb_man.deregister(wait, timeout)

    # Expose the affected load balancer names as facts for later tasks.
    ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
    ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)

    module.exit_json(**ec2_facts_result)
def main():
    """Ansible entry point: ensure a Direct Connect link aggregation group is present or absent."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(),
        link_aggregation_group_id=dict(),
        num_connections=dict(type='int'),
        min_links=dict(type='int'),
        location=dict(),
        bandwidth=dict(),
        connection_id=dict(),
        delete_with_disassociation=dict(type='bool', default=False),
        force_delete=dict(type='bool', default=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=120),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=[('link_aggregation_group_id', 'name')],
        required_if=[('state', 'present', ('location', 'bandwidth'))],
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(
            msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")

    client = boto3_conn(module,
                        conn_type='client',
                        resource='directconnect',
                        region=region,
                        endpoint=ec2_url,
                        **aws_connect_kwargs)

    params = module.params
    state = params.get('state')
    response = {}
    try:
        if state == 'present':
            # Create/update path: returns the (possibly new) LAG id so the
            # final status can be fetched and reported.
            changed, lag_id = ensure_present(
                client,
                num_connections=params.get('num_connections'),
                lag_id=params.get('link_aggregation_group_id'),
                lag_name=params.get('name'),
                location=params.get('location'),
                bandwidth=params.get('bandwidth'),
                connection_id=params.get('connection_id'),
                min_links=params.get('min_links'),
                wait=params.get('wait'),
                wait_timeout=params.get('wait_timeout'))
            response = lag_status(client, lag_id)
        elif state == 'absent':
            changed = ensure_absent(
                client,
                lag_id=params.get('link_aggregation_group_id'),
                lag_name=params.get('name'),
                force_delete=params.get('force_delete'),
                delete_with_disassociation=params.get('delete_with_disassociation'),
                wait=params.get('wait'),
                wait_timeout=params.get('wait_timeout'))
    except DirectConnectError as e:
        # Include the original traceback/exception details when available.
        if e.last_traceback:
            module.fail_json(msg=e.msg,
                             exception=e.last_traceback,
                             **camel_dict_to_snake_dict(e.exception))
        else:
            module.fail_json(msg=e.msg)

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
def main():
    """Sync an Elastic Beanstalk environment's application environment variables to ``envval``.

    Variables present remotely but absent from ``envval`` are removed; exits
    with changed=False when the current settings already match.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            application=dict(type='str', required=True),
            environment=dict(type='str', required=True),
            envval=dict(type='dict', required=True),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    application = module.params['application']
    environment = module.params['environment']
    desired_envval = module.params['envval']

    # All values must be plain strings; the EB API does not coerce types.
    for v in desired_envval.values():
        if not isinstance(v, str):
            module.fail_json(msg="envval dict must be string")

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')
    if not HAS_BOTOCORE:
        module.fail_json(msg='botocore required for this module')

    # Connect to AWS
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        conn = boto3_conn(module, conn_type="client", resource="elasticbeanstalk",
                          region=region, **aws_connect_kwargs)
    except NoCredentialsError as ex:
        module.fail_json(msg=ex.message)

    # Check current envvals
    try:
        res = conn.describe_configuration_settings(
            ApplicationName=application,
            EnvironmentName=environment
        )
    except ClientError as ex:
        module.fail_json(msg=ex.response['Error']['Message'])

    current_envval = {
        str(x['OptionName']): str(x['Value'])
        for x in res['ConfigurationSettings'][0]['OptionSettings']
        if x['Namespace'] == "aws:elasticbeanstalk:application:environment"
    }

    # The desired envvals is same as current envvals
    if current_envval == desired_envval:
        module.exit_json(changed=False)

    # Build the settings to apply.
    option_settings = [
        {
            "Namespace": "aws:elasticbeanstalk:application:environment",
            "OptionName": x,
            "Value": desired_envval[x]
        }
        for x in desired_envval.keys()
    ]

    # Fix: the original guarded this with `revoke_envval != 0`, which compares
    # a list against an int and is therefore always True. Build the removal
    # list unconditionally — an empty list is valid for the API, whereas
    # passing None would fail boto3 parameter validation.
    revoke_envval = list(set(current_envval.keys()) - set(desired_envval.keys()))
    options_to_remove = [
        {
            "Namespace": "aws:elasticbeanstalk:application:environment",
            "OptionName": x
        }
        for x in revoke_envval
    ]

    # Update
    try:
        res = conn.update_environment(
            ApplicationName=application,
            EnvironmentName=environment,
            OptionSettings=option_settings,
            OptionsToRemove=options_to_remove
        )
    except ClientError as ex:
        module.fail_json(msg=ex.response['Error']['Message'])

    module.exit_json(changed=True)
def main():
    """Create, update, or delete an AWS CloudTrail trail and manage its logging state and tags."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
        name=dict(default='default'),
        enable_logging=dict(default=True, type='bool'),
        s3_bucket_name=dict(),
        s3_key_prefix=dict(),
        sns_topic_name=dict(),
        is_multi_region_trail=dict(default=False, type='bool'),
        enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']),
        include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']),
        cloudwatch_logs_role_arn=dict(),
        cloudwatch_logs_log_group_arn=dict(),
        kms_key_id=dict(),
        tags=dict(default={}, type='dict'),
    ))

    required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])]
    required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')]

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True,
                           required_together=required_together, required_if=required_if)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    # collect parameters: 'enabled'/'disabled' are aliases of present/absent
    if module.params['state'] in ('present', 'enabled'):
        state = 'present'
    elif module.params['state'] in ('absent', 'disabled'):
        state = 'absent'
    tags = module.params['tags']
    enable_logging = module.params['enable_logging']
    ct_params = dict(
        Name=module.params['name'],
        S3BucketName=module.params['s3_bucket_name'],
        IncludeGlobalServiceEvents=module.params['include_global_events'],
        IsMultiRegionTrail=module.params['is_multi_region_trail'],
    )

    if module.params['s3_key_prefix']:
        ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/')

    if module.params['sns_topic_name']:
        ct_params['SnsTopicName'] = module.params['sns_topic_name']

    if module.params['cloudwatch_logs_role_arn']:
        ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn']

    if module.params['cloudwatch_logs_log_group_arn']:
        ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn']

    if module.params['enable_log_file_validation'] is not None:
        ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation']

    if module.params['kms_key_id']:
        ct_params['KmsKeyId'] = module.params['kms_key_id']

    try:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='cloudtrail',
                            region=region, endpoint=ec2_url, **aws_connect_params)
    except ClientError as err:
        module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))

    results = dict(
        changed=False,
        exists=False
    )

    # Get existing trail facts
    trail = get_trail_facts(module, client, ct_params['Name'])

    # If the trail exists set the result exists variable
    if trail is not None:
        results['exists'] = True

    if state == 'absent' and results['exists']:
        # If Trail exists go ahead and delete
        results['changed'] = True
        results['exists'] = False
        results['trail'] = dict()
        if not module.check_mode:
            delete_trail(module, client, trail['TrailARN'])

    elif state == 'present' and results['exists']:
        # If Trail exists see if we need to update it
        do_update = False
        for key in ct_params:
            tkey = str(key)
            # boto3 has inconsistent parameter naming so we handle it here
            if key == 'EnableLogFileValidation':
                tkey = 'LogFileValidationEnabled'
            # We need to make an empty string equal None
            if ct_params.get(key) == '':
                val = None
            else:
                val = ct_params.get(key)
            if val != trail.get(tkey):
                do_update = True
                results['changed'] = True
                # If we are in check mode copy the changed values to the trail facts in result output to show what would change.
                if module.check_mode:
                    trail.update({tkey: ct_params.get(key)})

        if not module.check_mode and do_update:
            update_trail(module, client, ct_params)
            trail = get_trail_facts(module, client, ct_params['Name'])

        # Check if we need to start/stop logging
        if enable_logging and not trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = True
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='start')
        if not enable_logging and trail['IsLogging']:
            results['changed'] = True
            trail['IsLogging'] = False
            if not module.check_mode:
                set_logging(module, client, name=ct_params['Name'], action='stop')

        # Check if we need to update tags on resource
        tag_dry_run = False
        if module.check_mode:
            tag_dry_run = True
        tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'],
                                 curr_tags=trail['tags'], dry_run=tag_dry_run)
        if tags_changed:
            results['changed'] = True
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)

    elif state == 'present' and not results['exists']:
        # Trail doesn't exist just go create it
        results['changed'] = True
        if not module.check_mode:
            # If we aren't in check_mode then actually create it
            created_trail = create_trail(module, client, ct_params)
            # Apply tags
            tag_trail(module, client, tags=tags, trail_arn=created_trail['TrailARN'])
            # Get the trail status
            try:
                status_resp = client.get_trail_status(Name=created_trail['Name'])
            except ClientError as err:
                module.fail_json(msg=err.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(err.response))
            # Set the logging state for the trail to desired value
            if enable_logging and not status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='start')
            if not enable_logging and status_resp['IsLogging']:
                set_logging(module, client, name=ct_params['Name'], action='stop')
            # Get facts for newly created Trail
            trail = get_trail_facts(module, client, ct_params['Name'])

        # If we are in check mode create a fake return structure for the newly minted trail
        if module.check_mode:
            acct_id = '123456789012'
            try:
                sts_client = boto3_conn(module, conn_type='client', resource='sts',
                                        region=region, endpoint=ec2_url, **aws_connect_params)
                acct_id = sts_client.get_caller_identity()['Account']
            except ClientError:
                # Best effort only: fall back to the placeholder account id.
                pass
            trail = dict()
            trail.update(ct_params)
            if 'EnableLogFileValidation' not in ct_params:
                ct_params['EnableLogFileValidation'] = False
            # Fix: expose the flag under the key real trail facts use
            # ('LogFileValidationEnabled', mirroring the update-comparison
            # mapping above). The original assigned and then immediately
            # popped the same 'EnableLogFileValidation' key, dropping the
            # value from the check-mode output entirely.
            trail['LogFileValidationEnabled'] = ct_params['EnableLogFileValidation']
            trail.pop('EnableLogFileValidation')
            fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name']
            trail['HasCustomEventSelectors'] = False
            trail['HomeRegion'] = region
            trail['TrailARN'] = fake_arn
            trail['IsLogging'] = enable_logging
            trail['tags'] = tags
        # Populate trail facts in output
        results['trail'] = camel_dict_to_snake_dict(trail)

    module.exit_json(**results)
def main():
    """Ansible entry point for the aws_s3 (formerly s3) module.

    Dispatches on ``mode``: get, put, delete, create, geturl, getstr,
    delobj, list. Most branches terminate via module.exit_json/fail_json.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            bucket=dict(required=True),
            dest=dict(default=None, type='path'),
            encrypt=dict(default=True, type='bool'),
            expiry=dict(default=600, type='int', aliases=['expiration']),
            headers=dict(type='dict'),
            marker=dict(default=""),
            max_keys=dict(default=1000, type='int'),
            metadata=dict(type='dict'),
            mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
            object=dict(),
            permission=dict(type='list', default=['private']),
            version=dict(default=None),
            overwrite=dict(aliases=['force'], default='always'),
            prefix=dict(default=""),
            retries=dict(aliases=['retry'], type='int', default=0),
            s3_url=dict(aliases=['S3_URL']),
            rgw=dict(default='no', type='bool'),
            src=dict(),
            ignore_nonexistent_bucket=dict(default=False, type='bool')
        ),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[('mode', 'put', ('src',))],
    )

    # Emit a deprecation warning when invoked under the legacy 's3' name.
    if module._name == 's3':
        module.deprecate("The 's3' module is being renamed 'aws_s3'", version=2.7)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore required for this module')

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = module.params.get('expiry')
    dest = module.params.get('dest', '')
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    rgw = module.params.get('rgw')
    src = module.params.get('src')
    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')

    # Canned ACL names accepted by S3 for objects vs. buckets; anything in
    # 'permission' outside both lists is rejected below.
    object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read",
                         "bucket-owner-full-control"]
    bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]

    # Legacy boolean values for 'overwrite' are mapped onto always/never.
    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = module.params['object']
        # If there is a top level object, do nothing - if the object starts with /
        # remove the leading character to maintain compatibility with Ansible versions < 2.4
        if obj.startswith('/'):
            obj = obj[1:]

    # Bucket deletion does not require obj. Prevents ambiguity with delobj.
    if obj and mode == "delete":
        module.fail_json(msg='Parameter obj cannot be used with mode=delete')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)
    s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)

    # Skip existence validation when the caller opted out.
    validate = not ignore_nonexistent_bucket

    # separate types of ACLs
    bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
    object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
    error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
    if error_acl:
        module.fail_json(msg='Unknown permission specified: %s' % error_acl)

    # First, we check to see if the bucket exists, we get "bucket" returned.
    bucketrtn = bucket_check(module, s3, bucket, validate=validate)

    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
        module.fail_json(msg="Source bucket cannot be found.")

    # If our mode is a GET operation (download), go through the procedure as appropriate ...
    if mode == 'get':
        # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn is False:
            if version:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

        # If the destination path doesn't exist or overwrite is True, no need to do the md5sum ETag check, so just download.
        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
        if path_check(dest):
            # Determine if the remote and local object are identical
            if keysum_compare(module, dest, s3, bucket, obj, version=version):
                sum_matches = True
                if overwrite == 'always':
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
            else:
                sum_matches = False
                if overwrite in ('always', 'different'):
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
        else:
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
    if mode == 'put':

        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
        # these were separated into the variables bucket_acl and object_acl above

        # Lets check the src path.
        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist")

        # Lets check to see if bucket exists to get ground truth.
        # NOTE(review): keyrtn is only bound when bucketrtn is truthy; the
        # later `bucketrtn and not keyrtn` test relies on short-circuiting.
        if bucketrtn:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)

        # Lets check key state. Does it exist and if it does, compute the ETag md5sum.
        if bucketrtn and keyrtn:
            # Compare the local and remote object
            if keysum_compare(module, src, s3, bucket, obj):
                sum_matches = True
                if overwrite == 'always':
                    # only use valid object acls for the upload_s3file function
                    module.params['permission'] = object_acl
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                else:
                    get_download_url(module, s3, bucket, obj, expiry, changed=False)
            else:
                sum_matches = False
                if overwrite in ('always', 'different'):
                    # only use valid object acls for the upload_s3file function
                    module.params['permission'] = object_acl
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")

        # If neither exist (based on bucket existence), we can create both.
        if not bucketrtn:
            # only use valid bucket acls for create_bucket function
            module.params['permission'] = bucket_acl
            create_bucket(module, s3, bucket, location)
            # only use valid object acls for the upload_s3file function
            module.params['permission'] = object_acl
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

        # If bucket exists but key doesn't, just upload.
        if bucketrtn and not keyrtn:
            # only use valid object acls for the upload_s3file function
            module.params['permission'] = object_acl
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required")
        if bucket:
            deletertn = delete_key(module, s3, bucket, obj)
            if deletertn is True:
                module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            deletertn = delete_bucket(module, s3, bucket)
            if deletertn is True:
                module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Support for listing a set of keys
    if mode == 'list':
        exists = bucket_check(module, s3, bucket)

        # If the bucket does not exist then bail out
        if not exists:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)

        list_keys(module, s3, bucket, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':

        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
        # these were separated above into the variables bucket_acl and object_acl

        if bucket and not obj:
            if bucketrtn:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                # only use valid bucket acls when creating the bucket
                module.params['permission'] = bucket_acl
                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
        if bucket and obj:
            # A trailing slash marks the key as a "directory" placeholder.
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn:
                if key_check(module, s3, bucket, dirobj):
                    module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
                else:
                    # setting valid object acls for the create_dirkey function
                    module.params['permission'] = object_acl
                    create_dirkey(module, s3, bucket, dirobj, encrypt)
            else:
                # only use valid bucket acls for the create_bucket function
                module.params['permission'] = bucket_acl
                # NOTE(review): 'created' is assigned but never used here.
                created = create_bucket(module, s3, bucket, location)
                # only use valid object acls for the create_dirkey function
                module.params['permission'] = object_acl
                create_dirkey(module, s3, bucket, dirobj, encrypt)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if not bucket and not obj:
            module.fail_json(msg="Bucket and Object parameters must be set")

        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn:
            get_download_url(module, s3, bucket, obj, expiry)
        else:
            module.fail_json(msg="Key %s does not exist." % obj)

    if mode == 'getstr':
        if bucket and obj:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
            if keyrtn:
                download_s3str(module, s3, bucket, obj, version=version)
            elif version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

    module.exit_json(failed=False)
def main():
    """Ansible entry point: create, modify, or delete an RDS DB subnet group (boto v1)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True),
            description=dict(required=False),
            subnets=dict(required=False, type='list'),
        ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state')
    # Subnet group names are case-insensitive; normalize for lookups.
    group_name = module.params.get('name').lower()
    group_description = module.params.get('description')
    # Fix: 'subnets' is a list-typed parameter, so the empty fallback must be
    # a list (the original used `or {}`, a dict — .sort() and the list
    # comparison below would break on it).
    group_subnets = module.params.get('subnets') or []

    # Manual cross-parameter validation.
    if state == 'present':
        for required in ['name', 'description', 'subnets']:
            if not module.params.get(required):
                module.fail_json(
                    msg=str("Parameter %s required for state='present'" % required))
    else:
        for not_allowed in ['description', 'subnets']:
            if module.params.get(not_allowed):
                module.fail_json(
                    msg=str("Parameter %s not allowed for state='absent'" % not_allowed))

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str(
            "Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."
        ))

    try:
        conn = connect_to_aws(boto.rds, region, **aws_connect_kwargs)
    except BotoServerError as e:
        module.fail_json(msg=e.error_message)

    try:
        exists = False
        result = create_result(False)

        try:
            matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except BotoServerError as e:
            # "Not found" is an expected outcome; any other server error is fatal.
            if e.error_code != 'DBSubnetGroupNotFoundFault':
                module.fail_json(msg=e.error_message)

        if state == 'absent':
            if exists:
                conn.delete_db_subnet_group(group_name)
                result = create_result(True)
        else:
            if not exists:
                new_group = conn.create_db_subnet_group(
                    group_name, desc=group_description, subnet_ids=group_subnets)
                result = create_result(True, new_group)
            else:
                # Sort the subnet groups before we compare them
                matching_groups[0].subnet_ids.sort()
                group_subnets.sort()
                if (matching_groups[0].name != group_name or
                        matching_groups[0].description != group_description or
                        matching_groups[0].subnet_ids != group_subnets):
                    changed_group = conn.modify_db_subnet_group(
                        group_name, description=group_description, subnet_ids=group_subnets)
                    result = create_result(True, changed_group)
                else:
                    result = create_result(False, matching_groups[0])

    except BotoServerError as e:
        module.fail_json(msg=e.error_message)

    module.exit_json(**result)
def main():
    """Ansible entry point: create, update, or delete an Elastic Beanstalk application."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(app_name=dict(aliases=['name'], type='str', required=False),
             description=dict(),
             state=dict(choices=['present', 'absent'], default='present'),
             terminate_by_force=dict(type='bool', default=False, required=False)))
    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    app_name = module.params['app_name']
    description = module.params['description']
    state = module.params['state']
    terminate_by_force = module.params['terminate_by_force']

    # app_name is declared optional (it has an alias) but is in fact mandatory.
    if app_name is None:
        module.fail_json(msg='Module parameter "app_name" is required')

    result = {}
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    ebs = boto3_conn(module,
                     conn_type='client',
                     resource='elasticbeanstalk',
                     region=region,
                     endpoint=ec2_url,
                     **aws_connect_params)

    app = describe_app(ebs, app_name, module)

    if module.check_mode:
        # check_app() is expected to exit the module itself.
        check_app(ebs, app, module)
        module.fail_json(msg='ASSERTION FAILURE: check_app() should not return control.')

    if state == 'present':
        if app is None:
            # Application missing: create it, then re-read its facts.
            try:
                create_app = ebs.create_application(**filter_empty(
                    ApplicationName=app_name, Description=description))
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Could not create application")

            app = describe_app(ebs, app_name, module)
            result = dict(changed=True, app=app)
        elif app.get("Description", None) != description:
            # Application exists but its description differs: update it.
            try:
                if not description:
                    ebs.update_application(ApplicationName=app_name)
                else:
                    ebs.update_application(ApplicationName=app_name,
                                           Description=description)
            except (BotoCoreError, ClientError) as e:
                module.fail_json_aws(e, msg="Could not update application")

            app = describe_app(ebs, app_name, module)
            result = dict(changed=True, app=app)
        else:
            result = dict(changed=False, app=app)
    else:
        if app is None:
            result = dict(changed=False, output='Application not found', app={})
        else:
            try:
                if terminate_by_force:
                    # Running environments will be terminated before deleting the application
                    ebs.delete_application(ApplicationName=app_name,
                                           TerminateEnvByForce=terminate_by_force)
                else:
                    ebs.delete_application(ApplicationName=app_name)
                changed = True
            except BotoCoreError as e:
                module.fail_json_aws(e, msg="Cannot terminate app")
            except ClientError as e:
                # A delete already in flight is treated as "no change".
                if 'It is currently pending deletion.' not in e.response['Error']['Message']:
                    module.fail_json_aws(e, msg="Cannot terminate app")
                else:
                    changed = False

            result = dict(changed=changed, app=app)

    module.exit_json(**result)
def main():
    """Ansible entry point: create, update, or delete an Application Load Balancer."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(access_logs_enabled=dict(type='bool'),
             access_logs_s3_bucket=dict(type='str'),
             access_logs_s3_prefix=dict(type='str'),
             deletion_protection=dict(type='bool'),
             http2=dict(type='bool'),
             idle_timeout=dict(type='int'),
             listeners=dict(type='list',
                            elements='dict',
                            options=dict(Protocol=dict(type='str', required=True),
                                         Port=dict(type='int', required=True),
                                         SslPolicy=dict(type='str'),
                                         Certificates=dict(type='list'),
                                         DefaultActions=dict(type='list', required=True),
                                         Rules=dict(type='list'))),
             name=dict(required=True, type='str'),
             purge_listeners=dict(default=True, type='bool'),
             purge_tags=dict(default=True, type='bool'),
             subnets=dict(type='list'),
             security_groups=dict(type='list'),
             scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']),
             state=dict(choices=['present', 'absent'], type='str'),
             tags=dict(type='dict'),
             wait_timeout=dict(type='int'),
             wait=dict(default=False, type='bool'),
             purge_rules=dict(default=True, type='bool')))

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        required_if=[('state', 'present', ['subnets', 'security_groups'])],
        required_together=[[
            'access_logs_enabled', 'access_logs_s3_bucket', 'access_logs_s3_prefix'
        ]])

    # Quick check of listeners parameters: HTTPS listeners need both an SSL
    # policy and certificates, which the argument spec cannot express.
    listeners = module.params.get("listeners")
    if listeners is not None:
        https_listeners = [l for l in listeners if l.get('Protocol') == 'HTTPS']
        for https_listener in https_listeners:
            if https_listener.get('SslPolicy') is None:
                module.fail_json(
                    msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS")
            if https_listener.get('Certificates') is None:
                module.fail_json(
                    msg="'Certificates' is a required listener dict key when Protocol = HTTPS")

    connection = module.client('elbv2')
    connection_ec2 = module.client('ec2')

    state = module.params.get("state")

    elb = ApplicationLoadBalancer(connection, connection_ec2, module)

    if state == 'present':
        create_or_update_elb(elb)
    else:
        delete_elb(elb)
def main():
    """Create, attach, detach, delete or list EC2 EBS volumes (boto2-based).

    Dispatches on ``state``: 'list' returns volume facts, 'present' creates
    and optionally attaches/detaches a volume, 'absent' deletes it.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(instance=dict(),
             id=dict(),
             name=dict(),
             volume_size=dict(type='int'),
             volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
             iops=dict(type='int'),
             encrypted=dict(type='bool', default=False),
             kms_key_id=dict(),
             device_name=dict(),
             delete_on_termination=dict(type='bool', default=False),
             zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
             snapshot=dict(),
             state=dict(choices=['absent', 'present', 'list'], default='present'),
             tags=dict(type='dict', default={})))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    # NOTE: 'id' shadows the builtin; kept as-is to preserve the original code.
    id = module.params.get('id')
    name = module.params.get('name')
    instance = module.params.get('instance')
    volume_size = module.params.get('volume_size')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    device_name = module.params.get('device_name')
    zone = module.params.get('zone')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    tags = module.params.get('tags')

    # Ensure we have the zone or can get the zone (via the instance placement, below).
    if instance is None and zone is None and state == 'present':
        module.fail_json(msg="You must specify either instance or zone")

    # Set volume detach flag: the literal strings 'None'/'' request a detach
    # rather than an attach.
    if instance == 'None' or instance == '':
        instance = None
        detach_vol_flag = True
    else:
        detach_vol_flag = False

    # Set changed flag
    changed = False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    if state == 'list':
        returned_volumes = []
        vols = get_volumes(module, ec2)

        for v in vols:
            # NOTE(review): 'attachment' is assigned but never used here.
            attachment = v.attach_data
            returned_volumes.append(get_volume_info(v, state))

        # exit_json terminates the module run for state='list'.
        module.exit_json(changed=False, volumes=returned_volumes)

    if encrypted and not boto_supports_volume_encryption():
        module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")

    if kms_key_id is not None and not boto_supports_kms_key_id():
        module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")

    # Here we need to get the zone info for the instance. This covers situation where
    # instance is specified but zone isn't.
    # Useful for playbooks chaining instance launch with volume create + attach and where the
    # zone doesn't matter to the user.
    inst = None
    if instance:
        try:
            reservation = ec2.get_all_instances(instance_ids=instance)
        except BotoServerError as e:
            # NOTE(review): e.message is a Python-2-era attribute — confirm
            # this code path only runs under boto/py2.
            module.fail_json(msg=e.message)
        inst = reservation[0].instances[0]
        zone = inst.placement

        # Check if there is a volume already mounted there.
        if device_name:
            if device_name in inst.block_device_mapping:
                module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
                                 volume_id=inst.block_device_mapping[device_name].volume_id,
                                 device=device_name,
                                 changed=False)

    # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
    # without needing to pass an unused volume_size
    if not volume_size and not (id or name or snapshot):
        module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")

    if volume_size and id:
        module.fail_json(msg="Cannot specify volume_size together with id")

    if state == 'present':
        volume, changed = create_volume(module, ec2, zone)
        # Attach or detach depending on how 'instance' was specified.
        if detach_vol_flag:
            volume, changed = detach_volume(module, ec2, volume)
        elif inst is not None:
            volume, changed = attach_volume(module, ec2, volume, inst)

        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
        volume_info = get_volume_info(volume, state)

        # deleteOnTermination is not correctly reflected on attachment
        # immediately after attach; poll up to 8 times (5s apart) until it is.
        if module.params.get('delete_on_termination'):
            for attempt in range(0, 8):
                if volume_info['attachment_set'].get('deleteOnTermination') == 'true':
                    break
                time.sleep(5)
                volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
                volume_info = get_volume_info(volume, state)
        module.exit_json(changed=changed,
                         volume=volume_info,
                         device=volume_info['attachment_set']['device'],
                         volume_id=volume_info['id'],
                         volume_type=volume_info['type'])
    elif state == 'absent':
        delete_volume(module, ec2)
def main():
    """Create, modify or delete an ElastiCache subnet group.

    state=present requires name, description and subnets; state=absent
    forbids description and subnets. Uses the boto2 ElastiCache API.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(required=True),
        description=dict(required=False),
        subnets=dict(required=False, type='list'),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state')
    group_name = module.params.get('name').lower()
    group_description = module.params.get('description')
    # BUGFIX: 'subnets' is declared type='list'; default to an empty *list*
    # rather than an empty dict so downstream consumers always see a list.
    group_subnets = module.params.get('subnets') or []

    if state == 'present':
        # 'present' needs all three pieces of information to build the group.
        for required in ['name', 'description', 'subnets']:
            if not module.params.get(required):
                module.fail_json(msg=str("Parameter %s required for state='present'" % required))
    else:
        # 'absent' only deletes by name; extra parameters are rejected.
        for not_allowed in ['description', 'subnets']:
            if module.params.get(not_allowed):
                module.fail_json(msg=str("Parameter %s not allowed for state='absent'" % not_allowed))

    # Retrieve any AWS settings from the environment.
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if not region:
        module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))

    # Get an elasticache connection.
    try:
        conn = connect_to_region(region_name=region, **aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=e.message)

    try:
        changed = False
        exists = False

        # Probe for an existing group; a NotFound fault simply means it
        # doesn't exist, any other fault is a hard error.
        try:
            matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except BotoServerError as e:
            if e.error_code != 'CacheSubnetGroupNotFoundFault':
                module.fail_json(msg=e.error_message)

        if state == 'absent':
            if exists:
                conn.delete_cache_subnet_group(group_name)
                changed = True
        else:
            if not exists:
                conn.create_cache_subnet_group(group_name,
                                               cache_subnet_group_description=group_description,
                                               subnet_ids=group_subnets)
                changed = True
            else:
                conn.modify_cache_subnet_group(group_name,
                                               cache_subnet_group_description=group_description,
                                               subnet_ids=group_subnets)
                changed = True
    except BotoServerError as e:
        # Modifying a group with identical settings is a no-op, not an error.
        if e.error_message != 'No modifications were requested.':
            module.fail_json(msg=e.error_message)
        else:
            changed = False

    module.exit_json(changed=changed)
def main():
    """Register or deregister an ECS task definition.

    For state=present, either matches an explicitly requested revision or
    searches the family for an ACTIVE definition equivalent to the requested
    containers/volumes/role; registers a new revision when none matches.
    For state=absent, deregisters by ARN or by family:revision.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(state=dict(required=True, choices=['present', 'absent']),
             arn=dict(required=False, type='str'),
             family=dict(required=False, type='str'),
             revision=dict(required=False, type='int'),
             force_create=dict(required=False, default=False, type='bool'),
             containers=dict(required=False, type='list'),
             network_mode=dict(required=False, default='bridge', choices=['bridge', 'host', 'none', 'awsvpc'], type='str'),
             task_role_arn=dict(required=False, default='', type='str'),
             execution_role_arn=dict(required=False, default='', type='str'),
             volumes=dict(required=False, type='list'),
             launch_type=dict(required=False, choices=['EC2', 'FARGATE']),
             cpu=dict(),
             memory=dict(required=False, type='str')))

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              supports_check_mode=True,
                              required_if=[('launch_type', 'FARGATE', ['cpu', 'memory'])])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    task_to_describe = None
    task_mgr = EcsTaskManager(module)
    results = dict(changed=False)

    # launch_type and execution_role_arn each require a minimum botocore version.
    if module.params['launch_type']:
        if not module.botocore_at_least('1.8.4'):
            module.fail_json(msg='botocore needs to be version 1.8.4 or higher to use launch_type')

    if module.params['execution_role_arn']:
        if not module.botocore_at_least('1.10.44'):
            module.fail_json(msg='botocore needs to be version 1.10.44 or higher to use execution_role_arn')

    # ECS requires environment values to be strings; coerce them up front so
    # the later equality comparison against existing definitions is apples-to-apples.
    if module.params['containers']:
        for container in module.params['containers']:
            for environment in container.get('environment', []):
                environment['value'] = to_text(environment['value'])

    if module.params['state'] == 'present':
        if 'containers' not in module.params or not module.params['containers']:
            module.fail_json(msg="To use task definitions, a list of containers must be specified")

        if 'family' not in module.params or not module.params['family']:
            module.fail_json(msg="To use task definitions, a family must be specified")

        network_mode = module.params['network_mode']
        launch_type = module.params['launch_type']
        if launch_type == 'FARGATE' and network_mode != 'awsvpc':
            module.fail_json(msg="To use FARGATE launch type, network_mode must be awsvpc")

        family = module.params['family']
        existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])

        if 'revision' in module.params and module.params['revision']:
            # The definition specifies revision. We must guarantee that an active revision of that number will result from this.
            revision = int(module.params['revision'])

            # A revision has been explicitly specified. Attempt to locate a matching revision
            tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
            existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None

            if existing and existing['status'] != "ACTIVE":
                # We cannot reactivate an inactive revision
                module.fail_json(msg="A task in family '%s' already exists for revsion %d, but it is inactive" % (family, revision))
            elif not existing:
                # ECS assigns revisions sequentially, so a nonexistent revision
                # is only creatable if it is exactly the next number in line.
                if not existing_definitions_in_family and revision != 1:
                    module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
                elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
                    module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
                                     (revision, existing_definitions_in_family[-1]['revision'] + 1))
        else:
            existing = None

            def _right_has_values_of_left(left, right):
                # Loose dict equivalence: every key of 'left' must be matched in
                # 'right' (empty/falsy values match missing keys; lists compare
                # unordered), and 'right' must not carry extra truthy keys.

                # Make sure the values are equivalent for everything left has
                for k, v in left.items():
                    if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
                        # We don't care about list ordering because ECS can change things
                        if isinstance(v, list) and k in right:
                            left_list = v
                            right_list = right[k] or []

                            if len(left_list) != len(right_list):
                                return False

                            for list_val in left_list:
                                if list_val not in right_list:
                                    return False
                        else:
                            return False

                # Make sure right doesn't have anything that left doesn't
                for k, v in right.items():
                    if v and k not in left:
                        return False

                return True

            def _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, existing_task_definition):
                # Return existing_task_definition when it is ACTIVE and its
                # role/volumes/containers all match the request, else None.
                # NOTE(review): this closure reads 'td' from the enclosing loop
                # instead of its 'existing_task_definition' parameter for the
                # status/role/volume/container lookups — the two are the same
                # object at every call site below, but confirm before reusing.
                if td['status'] != "ACTIVE":
                    return None

                if requested_task_role_arn != td.get('taskRoleArn', ""):
                    return None

                existing_volumes = td.get('volumes', []) or []

                if len(requested_volumes) != len(existing_volumes):
                    # Nope.
                    return None

                if len(requested_volumes) > 0:
                    for requested_vol in requested_volumes:
                        found = False

                        for actual_vol in existing_volumes:
                            if _right_has_values_of_left(requested_vol, actual_vol):
                                found = True
                                break

                        if not found:
                            return None

                existing_containers = td.get('containerDefinitions', []) or []

                if len(requested_containers) != len(existing_containers):
                    # Nope.
                    return None

                for requested_container in requested_containers:
                    found = False

                    for actual_container in existing_containers:
                        if _right_has_values_of_left(requested_container, actual_container):
                            found = True
                            break

                    if not found:
                        return None

                return existing_task_definition

            # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
            for td in existing_definitions_in_family:
                requested_volumes = module.params['volumes'] or []
                requested_containers = module.params['containers'] or []
                requested_task_role_arn = module.params['task_role_arn']
                existing = _task_definition_matches(requested_volumes, requested_containers, requested_task_role_arn, td)

                if existing:
                    break

        if existing and not module.params.get('force_create'):
            # Awesome. Have an existing one. Nothing to do.
            results['taskdefinition'] = existing
        else:
            if not module.check_mode:
                # Doesn't exist. create it.
                volumes = module.params.get('volumes', []) or []
                results['taskdefinition'] = task_mgr.register_task(module.params['family'],
                                                                   module.params['task_role_arn'],
                                                                   module.params['execution_role_arn'],
                                                                   module.params['network_mode'],
                                                                   module.params['containers'],
                                                                   volumes,
                                                                   module.params['launch_type'],
                                                                   module.params['cpu'],
                                                                   module.params['memory'])
            results['changed'] = True

    elif module.params['state'] == 'absent':
        # When de-registering a task definition, we can specify the ARN OR the family and revision.
        # NOTE(review): this inner state check is redundant — the elif above
        # already guarantees state == 'absent'.
        if module.params['state'] == 'absent':
            if 'arn' in module.params and module.params['arn'] is not None:
                task_to_describe = module.params['arn']
            elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
                    module.params['revision'] is not None:
                task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
            else:
                module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")

            existing = task_mgr.describe_task(task_to_describe)

            if not existing:
                pass
            else:
                # It exists, so we should delete it and mark changed. Return info about the task definition deleted
                results['taskdefinition'] = existing
                if 'status' in existing and existing['status'] == "INACTIVE":
                    results['changed'] = False
                else:
                    if not module.check_mode:
                        task_mgr.deregister_task(task_to_describe)
                    results['changed'] = True

    module.exit_json(**results)
def main():
    """Register, update or deregister an AMI, dispatching on 'state'."""
    spec = ec2_argument_spec()
    spec.update(dict(
        instance_id=dict(),
        image_id=dict(),
        architecture=dict(default='x86_64'),
        kernel_id=dict(),
        virtualization_type=dict(default='hvm'),
        root_device_name=dict(),
        delete_snapshot=dict(default=False, type='bool'),
        name=dict(),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(default=900, type='int'),
        description=dict(default=''),
        no_reboot=dict(default=False, type='bool'),
        state=dict(default='present'),
        device_mapping=dict(type='list'),
        tags=dict(type='dict'),
        launch_permissions=dict(type='dict'),
        image_location=dict(),
        enhanced_networking=dict(type='bool'),
        billing_products=dict(type='list'),
        ramdisk_id=dict(),
        sriov_net_support=dict(),
        purge_tags=dict(type='bool', default=False),
    ))

    module = AnsibleAWSModule(argument_spec=spec,
                              required_if=[['state', 'absent', ['image_id']]])
    params = module.params

    # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by
    # the required_if for state=absent, so check manually instead
    if not (params['image_id'] or params['name']):
        module.fail_json(msg="one of the following is required: name, image_id")

    # Build an EC2 client; only a missing region is handled specially here.
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoRegionError:
        module.fail_json(msg=(
            "Region must be specified as a parameter in AWS_DEFAULT_REGION environment variable or in boto configuration file."
        ))

    state = params.get('state')
    if state == 'absent':
        deregister_image(module, connection)
    elif state == 'present':
        # An explicit image_id means "update the existing AMI in place".
        if params.get('image_id'):
            update_image(module, connection, params.get('image_id'))
        # A brand-new image needs a source: an instance or a device mapping.
        if not params.get('instance_id') and not params.get('device_mapping'):
            module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.")
        create_image(module, connection)
def main():
    """Gather facts about CloudFormation stacks and publish them as ansible_facts."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        stack_name=dict(),
        all_facts=dict(required=False, default=False, type='bool'),
        stack_policy=dict(required=False, default=False, type='bool'),
        stack_events=dict(required=False, default=False, type='bool'),
        stack_resources=dict(required=False, default=False, type='bool'),
        stack_template=dict(required=False, default=False, type='bool'),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    service_mgr = CloudFormationServiceManager(module)
    stack_facts = {}
    want_all = module.params.get('all_facts')

    for description in service_mgr.describe_stacks(module.params.get('stack_name')):
        name = description.get('StackName')
        facts = {'stack_description': description}

        # Flatten outputs, parameters and tags into plain dictionaries.
        if description:
            facts['stack_outputs'] = to_dict(description.get('Outputs'), 'OutputKey', 'OutputValue')
            facts['stack_parameters'] = to_dict(description.get('Parameters'), 'ParameterKey', 'ParameterValue')
            facts['stack_tags'] = boto3_tag_list_to_ansible_dict(description.get('Tags'))

        # Normalize the camelCase API description to snake_case.
        facts['stack_description'] = camel_dict_to_snake_dict(facts['stack_description'])

        # Optional, more expensive lookups gated by their flags (or all_facts).
        if want_all or module.params.get('stack_resources'):
            facts['stack_resource_list'] = service_mgr.list_stack_resources(name)
            facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), 'LogicalResourceId', 'PhysicalResourceId')
        if want_all or module.params.get('stack_template'):
            facts['stack_template'] = service_mgr.get_template(name)
        if want_all or module.params.get('stack_policy'):
            facts['stack_policy'] = service_mgr.get_stack_policy(name)
        if want_all or module.params.get('stack_events'):
            facts['stack_events'] = service_mgr.describe_stack_events(name)

        stack_facts[name] = facts

    module.exit_json(**{'ansible_facts': {'cloudformation': stack_facts}, 'changed': False})
def main():
    """Ensure an EC2 VPC subnet is present or absent."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(az=dict(default=None, required=False),
             cidr=dict(default=None, required=True),
             state=dict(default='present', choices=['present', 'absent']),
             tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
             vpc_id=dict(default=None, required=True),
             map_public=dict(default=False, required=False, type='bool')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")

    vpc_id = module.params.get('vpc_id')
    tags = module.params.get('tags')
    cidr = module.params.get('cidr')
    az = module.params.get('az')
    state = module.params.get('state')
    map_public = module.params.get('map_public')

    try:
        if state == 'present':
            result = ensure_subnet_present(connection, module, vpc_id, cidr, az, tags,
                                           map_public, check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_subnet_absent(connection, module, vpc_id, cidr,
                                          check_mode=module.check_mode)
    except ClientError as e:
        # BUGFIX: botocore's ClientError has no '.message' attribute on
        # Python 3 (it was a Python-2 Exception convenience); use str(e)
        # for a portable error description.
        module.fail_json(msg=str(e), exception=traceback.format_exc(),
                         **camel_dict_to_snake_dict(e.response))

    module.exit_json(**result)
def main():
    """Gather facts about CloudFront distributions, invalidations and
    origin access identities, keyed by the relevant CloudFront id/alias.

    With no detail option selected, falls back to a summary of all resources.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        distribution_id=dict(required=False, type='str'),
        invalidation_id=dict(required=False, type='str'),
        origin_access_identity_id=dict(required=False, type='str'),
        domain_name_alias=dict(required=False, type='str'),
        all_lists=dict(required=False, default=False, type='bool'),
        distribution=dict(required=False, default=False, type='bool'),
        distribution_config=dict(required=False, default=False, type='bool'),
        origin_access_identity=dict(required=False, default=False, type='bool'),
        origin_access_identity_config=dict(required=False, default=False, type='bool'),
        invalidation=dict(required=False, default=False, type='bool'),
        streaming_distribution=dict(required=False, default=False, type='bool'),
        streaming_distribution_config=dict(required=False, default=False, type='bool'),
        list_origin_access_identities=dict(required=False, default=False, type='bool'),
        list_distributions=dict(required=False, default=False, type='bool'),
        list_distributions_by_web_acl_id=dict(required=False, default=False, type='bool'),
        list_invalidations=dict(required=False, default=False, type='bool'),
        list_streaming_distributions=dict(required=False, default=False, type='bool'),
        summary=dict(required=False, default=False, type='bool')
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    service_mgr = CloudFrontServiceManager(module)

    distribution_id = module.params.get('distribution_id')
    invalidation_id = module.params.get('invalidation_id')
    origin_access_identity_id = module.params.get('origin_access_identity_id')
    # NOTE(review): 'web_acl_id' is not declared in argument_spec above, so
    # this always yields None and list_distributions_by_web_acl_id can never
    # pass its validation below — confirm against the module's documented options.
    web_acl_id = module.params.get('web_acl_id')
    domain_name_alias = module.params.get('domain_name_alias')
    all_lists = module.params.get('all_lists')
    distribution = module.params.get('distribution')
    distribution_config = module.params.get('distribution_config')
    origin_access_identity = module.params.get('origin_access_identity')
    origin_access_identity_config = module.params.get('origin_access_identity_config')
    invalidation = module.params.get('invalidation')
    streaming_distribution = module.params.get('streaming_distribution')
    streaming_distribution_config = module.params.get('streaming_distribution_config')
    list_origin_access_identities = module.params.get('list_origin_access_identities')
    list_distributions = module.params.get('list_distributions')
    list_distributions_by_web_acl_id = module.params.get('list_distributions_by_web_acl_id')
    list_invalidations = module.params.get('list_invalidations')
    list_streaming_distributions = module.params.get('list_streaming_distributions')
    summary = module.params.get('summary')

    aliases = []
    result = {'cloudfront': {}}
    facts = {}

    # These detail options all operate on a single distribution, so a
    # distribution id (or a domain alias to resolve one from) is mandatory.
    require_distribution_id = (distribution or distribution_config or invalidation or streaming_distribution or
                               streaming_distribution_config or list_invalidations)

    # set default to summary if no option specified
    summary = summary or not (distribution or distribution_config or origin_access_identity or origin_access_identity_config or invalidation or
                              streaming_distribution or streaming_distribution_config or list_origin_access_identities or
                              list_distributions_by_web_acl_id or list_invalidations or list_streaming_distributions or list_distributions)

    # validations
    if require_distribution_id and distribution_id is None and domain_name_alias is None:
        module.fail_json(msg='Error distribution_id or domain_name_alias have not been specified.')
    if (invalidation and invalidation_id is None):
        module.fail_json(msg='Error invalidation_id has not been specified.')
    if (origin_access_identity or origin_access_identity_config) and origin_access_identity_id is None:
        module.fail_json(msg='Error origin_access_identity_id has not been specified.')
    if list_distributions_by_web_acl_id and web_acl_id is None:
        module.fail_json(msg='Error web_acl_id has not been specified.')

    # get distribution id from domain name alias
    if require_distribution_id and distribution_id is None:
        distribution_id = service_mgr.get_distribution_id_from_domain_name(domain_name_alias)
        if not distribution_id:
            module.fail_json(msg='Error unable to source a distribution id from domain_name_alias')

    # set appropriate cloudfront id: seed 'facts' with one empty dict per id
    # (distribution id, each of its aliases, and optionally the invalidation id).
    if distribution_id and not list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
        if invalidation_id:
            facts.update({invalidation_id: {}})
    elif distribution_id and list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
    elif origin_access_identity_id:
        facts = {origin_access_identity_id: {}}
    elif web_acl_id:
        facts = {web_acl_id: {}}

    # get details based on options
    if distribution:
        facts_to_set = service_mgr.get_distribution(distribution_id)
    if distribution_config:
        facts_to_set = service_mgr.get_distribution_config(distribution_id)
    if origin_access_identity:
        facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity(origin_access_identity_id))
    if origin_access_identity_config:
        facts[origin_access_identity_id].update(service_mgr.get_origin_access_identity_config(origin_access_identity_id))
    if invalidation:
        facts_to_set = service_mgr.get_invalidation(distribution_id, invalidation_id)
        facts[invalidation_id].update(facts_to_set)
    if streaming_distribution:
        facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
    if streaming_distribution_config:
        facts_to_set = service_mgr.get_streaming_distribution_config(distribution_id)
    if list_invalidations:
        facts_to_set = {'invalidations': service_mgr.list_invalidations(distribution_id)}
    # 'facts_to_set' in vars() tests whether ANY branch above bound the local;
    # only then are the gathered facts fanned out under the id and its aliases.
    if 'facts_to_set' in vars():
        facts = set_facts_for_distribution_id_and_alias(facts_to_set, facts, distribution_id, aliases)

    # get list based on options
    if all_lists or list_origin_access_identities:
        facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
    if all_lists or list_distributions:
        facts['distributions'] = service_mgr.list_distributions()
    if all_lists or list_streaming_distributions:
        facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
    if list_distributions_by_web_acl_id:
        facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
    if list_invalidations:
        facts['invalidations'] = service_mgr.list_invalidations(distribution_id)

    # default summary option
    if summary:
        facts['summary'] = service_mgr.summary()

    result['changed'] = False
    result['cloudfront'].update(facts)

    module.exit_json(msg="Retrieved cloudfront facts.", ansible_facts=result)
def main():
    """Manage an ECS cluster: create it, delete it, or wait until at least
    one container instance has registered (state=has_instances)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'has_instances']),
        name=dict(required=True, type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10)
    ))
    required_together = (['state', 'name'])
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_together=required_together)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    cluster_mgr = EcsClusterManager(module)
    try:
        existing = cluster_mgr.describe_cluster(module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))

    results = dict(changed=False)
    if module.params['state'] == 'present':
        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            results['cluster'] = existing
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
            results['changed'] = True

    # delete the cluster
    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            results['cluster'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    cluster_mgr.delete_cluster(module.params['name'])
                results['changed'] = True
    elif module.params['state'] == 'has_instances':
        if not existing:
            module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
            return

        # Poll up to 'repeat' times, 'delay' seconds apart, until at least one
        # container instance has registered with the cluster.
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        count = 0
        for i in range(repeat):
            existing = cluster_mgr.describe_cluster(module.params['name'])
            count = existing['registeredContainerInstancesCount']
            if count > 0:
                results['changed'] = True
                break
            time.sleep(delay)
        # BUGFIX: compare integers with '==', not 'is' — identity comparison
        # of ints relies on CPython's small-int cache and fails for
        # repeat - 1 >= 257.
        if count == 0 and i == repeat - 1:
            module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
            return

    module.exit_json(**results)
def main():
    """Push a local directory tree to an S3 bucket (one-way sync)."""
    spec = ec2_argument_spec()
    spec.update(dict(
        mode=dict(choices=['push'], default='push'),
        file_change_strategy=dict(choices=['force', 'date_size', 'checksum'], default='date_size'),
        bucket=dict(required=True),
        key_prefix=dict(required=False, default=''),
        file_root=dict(required=True, type='path'),
        permission=dict(required=False,
                        choices=['private', 'public-read', 'public-read-write', 'authenticated-read',
                                 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
        retries=dict(required=False),
        mime_map=dict(required=False, type='dict'),
        exclude=dict(required=False, default=".*"),
        include=dict(required=False, default="*"),
        cache_control=dict(required=False, default=''),
        # future options: encoding, metadata, storage_class, retries
    ))

    module = AnsibleModule(argument_spec=spec,)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    result = {}
    params = module.params
    mode = params['mode']

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="Region must be specified")
    s3 = boto3_conn(module, conn_type='client', resource='s3',
                    region=region, endpoint=ec2_url, **aws_connect_kwargs)

    if mode == 'push':
        try:
            # Pipeline: enumerate files -> attach mimetypes -> map to S3 keys
            # -> compute local etags -> filter by change strategy -> upload.
            result['filelist_initial'] = gather_files(params['file_root'],
                                                      exclude=params['exclude'],
                                                      include=params['include'])
            result['filelist_typed'] = determine_mimetypes(result['filelist_initial'],
                                                           params.get('mime_map'))
            result['filelist_s3'] = calculate_s3_path(result['filelist_typed'],
                                                      params['key_prefix'])
            result['filelist_local_etag'] = calculate_local_etag(result['filelist_s3'])
            result['filelist_actionable'] = filter_list(s3, params['bucket'],
                                                        result['filelist_local_etag'],
                                                        params['file_change_strategy'])
            result['uploads'] = upload_files(s3, params['bucket'],
                                             result['filelist_actionable'], params)

            # mark changed if we actually upload something.
            if result.get('uploads'):
                result['changed'] = True
            # result.update(filelist=actionable_filelist)
        except botocore.exceptions.ClientError as err:
            error_msg = boto_exception(err)
            module.fail_json(msg=error_msg, exception=traceback.format_exc(),
                             **camel_dict_to_snake_dict(err.response))

    module.exit_json(**result)
def main():
    """Entry point for the iam_policy module (boto 2).

    Attaches, updates or removes an inline IAM policy on a user, group
    or role. The policy document comes either from a file on disk
    (``policy_document``) or from inline JSON (``policy_json``); the two
    options are mutually exclusive.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        iam_type=dict(default=None, required=True, choices=['user', 'group', 'role']),
        state=dict(default=None, required=True, choices=['present', 'absent']),
        iam_name=dict(default=None, required=False),
        policy_name=dict(default=None, required=True),
        policy_document=dict(default=None, required=False),
        policy_json=dict(type='json', default=None, required=False),
        skip_duplicates=dict(type='bool', default=True, required=False)
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    # NOTE: the original code computed state.lower() and then immediately
    # overwrote it with the raw value; the dead store is removed. choices
    # validation already restricts state/iam_type to known values.
    iam_type = module.params.get('iam_type').lower()
    state = module.params.get('state')
    name = module.params.get('iam_name')
    policy_name = module.params.get('policy_name')
    skip = module.params.get('skip_duplicates')

    if module.params.get('policy_document') is not None and module.params.get('policy_json') is not None:
        module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')

    if module.params.get('policy_document') is not None:
        # Round-trip through json to normalize the document read from disk.
        # The `with` block closes the file; the old explicit close() was
        # redundant.
        with open(module.params.get('policy_document'), 'r') as json_data:
            pdoc = json.dumps(json.load(json_data))
    elif module.params.get('policy_json') is not None:
        pdoc = module.params.get('policy_json')
        # if its a string, assume it is already JSON
        if not isinstance(pdoc, string_types):
            try:
                pdoc = json.dumps(pdoc)
            except Exception as e:
                module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
    else:
        pdoc = None

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    changed = False

    # Dispatch to the per-entity handler; each returns (changed, name, policies)
    # (groups additionally return a message).
    if iam_type == 'user':
        changed, user_name, current_policies = user_action(module, iam, name,
                                                           policy_name, skip, pdoc, state)
        module.exit_json(changed=changed, user_name=name, policies=current_policies)
    elif iam_type == 'role':
        changed, role_name, current_policies = role_action(module, iam, name,
                                                           policy_name, skip, pdoc, state)
        module.exit_json(changed=changed, role_name=name, policies=current_policies)
    elif iam_type == 'group':
        changed, group_name, current_policies, msg = group_action(module, iam, name,
                                                                  policy_name, skip, pdoc, state)
        module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
def main():
    """Entry point for the redshift cluster module.

    Validates the snapshot-related parameters for ``delete`` (a case the
    basic ``required_if`` mechanism cannot express), then dispatches to
    the create/facts/delete/modify handler for ``command`` and exits with
    the resulting cluster description.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
            identifier=dict(required=True),
            node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
                                    'ds2.8xlarge', 'dc1.large', 'dc2.large',
                                    'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge',
                                    'dw2.large', 'dw2.8xlarge'], required=False),
            username=dict(required=False),
            password=dict(no_log=True, required=False),
            db_name=dict(required=False),
            cluster_type=dict(choices=['multi-node', 'single-node'], default='single-node'),
            cluster_security_groups=dict(aliases=['security_groups'], type='list'),
            vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
            skip_final_cluster_snapshot=dict(aliases=['skip_final_snapshot'],
                                             type='bool', default=False),
            final_cluster_snapshot_identifier=dict(aliases=['final_snapshot_id'], required=False),
            cluster_subnet_group_name=dict(aliases=['subnet']),
            availability_zone=dict(aliases=['aws_zone', 'zone']),
            preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
            cluster_parameter_group_name=dict(aliases=['param_group_name']),
            automated_snapshot_retention_period=dict(aliases=['retention_period'], type='int'),
            port=dict(type='int'),
            cluster_version=dict(aliases=['version'], choices=['1.0']),
            allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
            number_of_nodes=dict(type='int'),
            publicly_accessible=dict(type='bool', default=False),
            encrypted=dict(type='bool', default=False),
            elastic_ip=dict(required=False),
            new_cluster_identifier=dict(aliases=['new_identifier']),
            enhanced_vpc_routing=dict(type='bool', default=False),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=300),
        ))

    required_if = [('command', 'delete', ['skip_final_cluster_snapshot']),
                   ('command', 'create', ['node_type', 'username', 'password'])]

    module = AnsibleAWSModule(argument_spec=argument_spec,
                              required_if=required_if)

    command = module.params.get('command')

    # can't use module basic required_if check for this case: the snapshot
    # identifier is only mandatory when the skip flag is explicitly False.
    if (command == 'delete'
            and module.params.get('skip_final_cluster_snapshot') is False
            and module.params.get('final_cluster_snapshot_identifier') is None):
        module.fail_json(
            msg="Need to specify final_cluster_snapshot_identifier if skip_final_cluster_snapshot is False"
        )

    conn = module.client('redshift')

    # `choices` guarantees command is one of these four keys.
    handlers = {
        'create': create_cluster,
        'facts': describe_cluster,
        'delete': delete_cluster,
        'modify': modify_cluster,
    }
    changed, cluster = handlers[command](module, conn)

    module.exit_json(changed=changed, cluster=cluster)
def main():
    """
    Main entry point.

    Invokes an arbitrary boto3 client method on an arbitrary AWS service,
    optionally converting parameter-name case and merging ``tags`` /
    ``filters`` into the call, then exits with the snake_cased response.

    :return dict: ansible facts
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            # required=True makes a default meaningless, so none is given.
            service=dict(required=True, aliases=['service_name']),
            method=dict(required=True, aliases=['method_name', 'action']),
            params=dict(type='dict', required=False, default={}, aliases=['method_params']),
            convert_param_case=dict(required=False, default=None, choices=['camel', 'Pascal']),
            # BUGFIX: previously choices=["yes", "no"] with default=True —
            # the string "no" is truthy, so conversion could never be turned
            # off. type='bool' coerces yes/no/true/false correctly.
            convert_to_integer=dict(type='bool', required=False, default=True),
            filters=dict(default={}, type='dict'),
            tags=dict(default={}, type='dict'),
        ))
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False,
                           mutually_exclusive=[],
                           required_together=[])

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    try:
        region, endpoint, aws_connect_kwargs = get_aws_connection_info(
            module, boto3=True)
        aws_connect_kwargs.update(
            dict(region=region,
                 endpoint=endpoint,
                 conn_type='client',
                 resource=module.params['service']))
        client = boto3_conn(module, **aws_connect_kwargs)
    except (ClientError, ParamValidationError, MissingParametersError) as e:
        module.fail_json(msg="Can't authorize connection - {0}".format(e))
    except EndpointConnectionError as e:
        module.fail_json(msg="Connection Error - {0}".format(e))

    service_method = getattr(client, module.params['method'])

    # Pick the converter applied to parameter/tag/filter key names.
    if module.params['convert_param_case'] == 'camel':
        key_case = cc
    elif module.params['convert_param_case'] == 'Pascal':
        key_case = pc
    else:
        key_case = as_is

    params = fix_input(module.params['params'],
                       module.params['convert_to_integer'], key_case)
    if not isinstance(params, dict):
        params = dict()

    if module.params['tags']:
        tags_key = key_case('tags')
        params[tags_key] = ansible_dict_to_boto3_tag_list(
            module.params['tags'])
    if module.params['filters']:
        filters_key = key_case('filters')
        params[filters_key] = ansible_dict_to_boto3_filter_list(
            module.params['filters'])

    try:
        response = service_method(**params)
        meta_data = response.pop('ResponseMetadata')
        response['boto3'] = boto3.__version__
        # A 2xx HTTP status means the remote call took effect.
        if str(meta_data['HTTPStatusCode']).startswith('2'):
            response['changed'] = True
    except (ClientError, ParamValidationError, MissingParametersError) as e:
        module.fail_json(msg="Client error - {0}".format(e))

    module.exit_json(
        **camel_dict_to_snake_dict(fix_return(response, convert_tags=True)))
def main():
    """ elasticache ansible module """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=True, choices=['present', 'absent', 'rebooted']),
            name=dict(required=True),
            engine=dict(default='memcached'),
            cache_engine_version=dict(default=""),
            node_type=dict(default='cache.t2.small'),
            num_nodes=dict(default=1, type='int'),
            # alias for compat with the original PR 1950
            cache_parameter_group=dict(default="", aliases=['parameter_group']),
            cache_port=dict(type='int'),
            cache_subnet_group=dict(default=""),
            cache_security_groups=dict(default=[], type='list'),
            security_group_ids=dict(default=[], type='list'),
            zone=dict(),
            wait=dict(default=True, type='bool'),
            hard_modify=dict(type='bool')))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    opts = module.params
    state = opts['state']

    # A cluster lives either in a subnet group (VPC) or behind classic
    # cache security groups — never both.
    if opts['cache_subnet_group'] and opts['cache_security_groups']:
        module.fail_json(
            msg="Can't specify both cache_subnet_group and cache_security_groups")

    if state == 'present' and not opts['num_nodes']:
        module.fail_json(
            msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")

    manager = ElastiCacheManager(
        module, opts['name'], opts['engine'], opts['cache_engine_version'],
        opts['node_type'], opts['num_nodes'], opts['cache_port'],
        opts['cache_parameter_group'], opts['cache_subnet_group'],
        opts['cache_security_groups'], opts['security_group_ids'],
        opts['zone'], opts['wait'], opts['hard_modify'], region,
        **aws_connect_kwargs)

    # `choices` guarantees state is one of these three keys.
    actions = {
        'present': manager.ensure_present,
        'absent': manager.ensure_absent,
        'rebooted': manager.ensure_rebooted,
    }
    actions[state]()

    module.exit_json(changed=manager.changed, elasticache=manager.get_info())