#!/usr/bin/env python import boto3 from nebula_sdk import Interface, Dynamic as D relay = Interface() session_token = None try: session_token = relay.get(D.aws.connection.sessionToken) except: pass sess = boto3.Session(aws_access_key_id=relay.get(D.aws.connection.accessKeyID), aws_secret_access_key=relay.get( D.aws.connection.secretAccessKey), aws_session_token=session_token) s3 = sess.client('s3') bucketName = relay.get(D.bucketName) key = relay.get(D.key) sourceContent = relay.get(D.sourceContent) tmpFile = open(key, "w") tmpFile.write(sourceContent) try: response = s3_client.upload_file(key, bucketName, key) print("Upload response {}".format(response)) except Exception as e:
#!/usr/bin/env python
# Looks up the status of a Route 53 Domains operation and publishes it
# on the `operation_detail_status` output for later workflow steps.
import boto3
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

# The session token is optional: it only exists for temporary
# (assumed-role) connections, so a failed lookup is ignored.
session_token = None
try:
    session_token = relay.get(D.aws.connection.sessionToken)
except:
    pass

aws_session = boto3.Session(
    aws_access_key_id=relay.get(D.aws.connection.accessKeyID),
    aws_secret_access_key=relay.get(D.aws.connection.secretAccessKey),
    region_name=relay.get(D.aws.region),
    aws_session_token=session_token,
)
client = aws_session.client('route53domains')

detail = client.get_operation_detail(OperationId=relay.get(D.operationId))
status = detail['Status']

print('Domain status: {}'.format(status))
relay.outputs.set('operation_detail_status', status)
#!/usr/bin/env python
# Partitions S3 buckets by encryption state. A bucket whose encryption
# configuration is None is unencrypted; those bucket names are published
# on the `buckets` output for remediation in a subsequent step.
#
# Fix: the original compared with `== None`; identity comparison
# (`is None`) is the correct idiom and is safe against objects that
# override `__eq__`.
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

encryptionConfigurations = relay.get(D.encryptionConfigurations)

# None means no server-side encryption configuration is present.
to_modify = [b for b, cfg in encryptionConfigurations.items() if cfg is None]
to_do_nothing = [
    b for b, cfg in encryptionConfigurations.items() if cfg is not None
]

print("\nFound {} bucket(s) that are encrypted:".format(len(to_do_nothing)))
print(*to_do_nothing, sep="\n")
print("\nFound {} bucket(s) that are NOT encrypted:".format(len(to_modify)))
print(*to_modify, sep="\n")
print(
    '\nSetting output variable `buckets` with list of {} bucket(s) that are NOT encrypted.'
    .format(len(to_modify)))
relay.outputs.set('buckets', to_modify)
# Description: Example script that takes a list of Azure Load Balancers and
# separates the empty ones (no backend address pools) from the rest.
# Inputs:
#   - loadBalancers - list of Azure Load Balancers
# Outputs:
#   - resourceIDs - list of Azure Virtual Machine resource IDs to be terminated in the subsequent step
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

load_balancers = relay.get(D.loadBalancers)

# "Empty" == no backend address pools attached to the load balancer.
to_terminate = [
    lb['id'] for lb in load_balancers if not lb['backend_address_pools']
]
to_keep = [lb['id'] for lb in load_balancers if lb['backend_address_pools']]

print('\nFound {} Load Balancers that are NOT empty:'.format(len(to_keep)))
print(*to_keep, sep="\n")
print('\nFound {} Load Balancers that are empty:'.format(len(to_terminate)))
print(*to_terminate, sep="\n")
print('\nSetting output `resourceIDs` to list of {0} load balancer resource ids to terminate.'.format(len(to_terminate)))
#!/usr/bin/env python import boto3 from nebula_sdk import Interface, Dynamic as D relay = Interface() session_token = None try: session_token = relay.get(D.aws.connection.sessionToken) except: pass sess = boto3.Session(aws_access_key_id=relay.get(D.aws.connection.accessKeyID), aws_secret_access_key=relay.get( D.aws.connection.secretAccessKey), region_name=relay.get(D.aws.region), aws_session_token=session_token) route53domains = sess.client('route53domains') domainName = relay.get(D.domainName) durationInYears = int(relay.get(D.durationInYears)) autoRenew = bool(relay.get(D.autoRenew)) firstName = relay.get(D.firstName) lastName = relay.get(D.lastName) contactType = relay.get(D.contactType) organizationName = relay.get(D.organizationName) addressLine1 = relay.get(D.addressLine1) addressLine2 = relay.get(D.addressLine2) city = relay.get(D.city)
#!/usr/bin/env python import boto3 from nebula_sdk import Interface, Dynamic as D relay = Interface() session_token = None try: session_token = relay.get(D.aws.connection.sessionToken) except: pass sess = boto3.Session(aws_access_key_id=relay.get(D.aws.connection.accessKeyID), aws_secret_access_key=relay.get( D.aws.connection.secretAccessKey), aws_session_token=session_token) s3 = sess.client('s3') # region = relay.get(D.aws.region) bucketName = relay.get(D.bucketName) redirectAllRequestsToHostName = relay.get(D.redirectAllRequestsToHostName) print("Redirect all requests value {}".format(redirectAllRequestsToHostName)) if redirectAllRequestsToHostName: try: # bucket_website = s3.BucketWebsite(bucketName) websiteConfiguration = { 'RedirectAllRequestsTo': {
#!/usr/bin/env python
# Validates user-supplied domain-registration parameters and raises on the
# first invalid value, before any AWS call is made.
#
# Fix: the original quantifiers contained spaces — `{1, 63}` and `{2, 6}` —
# which Python's `re` treats as LITERAL text rather than repetition counts,
# so no real domain name could ever match and every domainName was rejected.
import re
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

# The session token is optional; ignore lookup failures.
session_token = None
try:
    session_token = relay.get(D.aws.connection.sessionToken)
except Exception:
    pass

# Hostname labels: 1-63 alphanumeric/hyphen chars, not starting or ending
# with a hyphen; TLD of 2-6 letters.
DOMAIN_PATTERN = r"^((?!-)[A-Z0-9-]{1,63}(?<!-)\.)+[A-Z]{2,6}$"

domainName = relay.get(D.domainName)
if domainName and re.fullmatch(DOMAIN_PATTERN, domainName,
                               re.IGNORECASE) is None:
    raise Exception("Invalid domainName")

# Must be a string of digits only.
durationInYears = relay.get(D.durationInYears)
if durationInYears and re.fullmatch(r"\d+", durationInYears) is None:
    raise Exception("Invalid durationInYears")

# Must be exactly "True" or "False".
autoRenew = relay.get(D.autoRenew)
if autoRenew and re.fullmatch(r"(True|False)", autoRenew) is None:
    raise Exception("Invalid autoRenew")

# Word characters only.
firstName = relay.get(D.firstName)
if firstName and re.fullmatch(r"\w+", firstName) is None:
    raise Exception("Invalid firstName")
# Description: Retrieves a list of EBS volumes from the Relay Interface,
# keeps only the ones with no attachments, and sets the output variable
# `volumeIDs` to those volume ids for termination in the subsequent step.
# Inputs:
#   - volumes - list of EBS volumes
# Outputs:
#   - volumeids - list of EBS volume ids to be terminated in the subsequent step
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

to_terminate = []

# A volume with an empty Attachments list is not in use by any instance.
unattached = (v for v in relay.get(D.volumes) if len(v['Attachments']) == 0)

for vol in unattached:
    try:
        to_terminate.append(vol['VolumeId'])
        print('Terminating EBS volume {0} with no attachments'.format(
            vol['VolumeId']))
    except Exception as e:
        print(
            'EBS volume {0} not considered for termination because of a processing error: {1}'
            .format(vol['VolumeId'], e))

if not to_terminate:
    print('No volumes to terminate! Exiting.')
    exit()
else:
    relay.outputs.set('volumeIDs', to_terminate)
# Description: Filters Azure Disks down to the unattached ones and sets the
# output variable `resourceIDs` to their resource IDs so a subsequent step
# can terminate them.
# Inputs:
#   - disks - list of Azure Disks
# Outputs:
#   - resourceIDs - list of Azure Disk resource IDs to be terminated in the subsequent step
import logging
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

to_terminate = []

# Only disks Azure reports as 'Unattached' are candidates for termination.
unattached = (d for d in relay.get(D.disks) if d['disk_state'] == 'Unattached')

for disk in unattached:
    try:
        to_terminate.append(disk['id'])
        print('Adding Azure Disk {0} with no attachments to termination list'.format(disk['name']))
    except Exception as e:
        print('Azure Disk {0} not considered for termination because of a processing error: {1}'.format(disk['name'], e))

# Publish the collected resource ids (or exit if there is nothing to do).
if not to_terminate:
    print('No volumes to terminate! Exiting.')
    exit()
else:
    print('Setting output `resourceIDs` to list of {0} disks'.format(len(to_terminate)))
    relay.outputs.set('resourceIDs', to_terminate)
# to the list of Azure Virtual Machines resource IDs that are untagged. # Inputs: # - virtualMachines - list of Azure Virtual Machines # Outputs: # - resourceIDs - list of Azure Virtual Machine resource IDs to be terminated in the subsequent step from nebula_sdk import Interface, Dynamic as D relay = Interface() to_terminate = [] to_keep = [] # Filtering Azure virtual machines with no tags print('Looking for all Virtual Machines with no tags') vms = relay.get(D.virtualMachines) # Queries for `virtual_machines` parameter from Relay for vm in vms: if 'tags' in vm.keys(): to_keep.append(vm['id']) continue else: try: to_terminate.append(vm['id']) except Exception as e: print('\nAzure Virtual Machine {0} not considered for termination because of a processing error: {1}'.format(vm['name'], e)) print('\nFound {} Virtual machines (with tags) not considered for termination:'.format(len(to_keep))) print(*[vm_id for vm_id in to_keep], sep = "\n") if len(to_terminate) == 0: print('\nNo Virtual Machines to terminate! Exiting.')
#!/usr/bin/env python
# Pauses the workflow for `sleepTime` seconds.
import time
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

# Best-effort read of the optional session token; it is not used below,
# but the lookup (and its silent failure) is preserved as-is.
session_token = None
try:
    session_token = relay.get(D.aws.connection.sessionToken)
except:
    pass

time.sleep(relay.get(D.sleepTime))
#!/usr/bin/env python
# Assumes an IAM role via STS and publishes the temporary credentials on
# the `connection` output so subsequent AWS steps can use them.
#
# Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
# narrowed to `except Exception`.
import boto3
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

sess = boto3.Session(
    aws_access_key_id=relay.get(D.aws.connection.accessKeyID),
    aws_secret_access_key=relay.get(D.aws.connection.secretAccessKey),
    region_name=relay.get(D.aws.region),
)
sts = sess.client('sts')

# Both parameters are required; bail out if either lookup fails.
try:
    roleARN = relay.get(D.roleARN)
    roleSessionName = relay.get(D.roleSessionName)
except Exception:
    print('Requires both roleARN and roleSessionName to be defined. Exiting.')
    exit(1)

print('Creating credentials...\n')
response = sts.assume_role(RoleArn=roleARN, RoleSessionName=roleSessionName)

# Re-shape the STS response into the connection dict the workflow expects.
credentials = response['Credentials']
connection = {
    'accessKeyID': credentials['AccessKeyId'],
    'secretAccessKey': credentials['SecretAccessKey'],
    'sessionToken': credentials['SessionToken'],
}

print('\nAdding temporary credentials to the outputs `connection`')
relay.outputs.set('connection', connection)
#!/usr/bin/env python import boto3 from nebula_sdk import Interface, Dynamic as D relay = Interface() session_token = None try: session_token = relay.get(D.aws.connection.sessionToken) except: pass sess = boto3.Session(aws_access_key_id=relay.get(D.aws.connection.accessKeyID), aws_secret_access_key=relay.get( D.aws.connection.secretAccessKey), aws_session_token=session_token) s3 = sess.client('s3') bucketName = relay.get(D.bucketName) currentACL = s3.get_bucket_acl(Bucket=bucketName) print("Current ACL {}".format(currentACL)) acl = relay.get(D.acl) grantFullControl = relay.get(D.grantFullControl) grantRead = relay.get(D.grantRead) grantReadACP = relay.get(D.grantReadACP) grantWrite = relay.get(D.grantWrite) grantWriteACP = relay.get(D.grantWriteACP)
# File: filter-loadbalancers.py
# Description: Retrieves a list of Load Balancers and Target Groups from the
# Relay Interface (in the form of parameters) and filters out the load
# balancers that are empty — i.e. no target group with registered targets
# points at them.
# Inputs:
#   - loadbalancers - list of ELB v2 load balancers
#   - targetgroups - list of target groups
#   - targets - map of target-group ARN to its registered targets
# Outputs:
#   - loadbalancerARNs - list of empty ELBv2 load balancer ARNs to be deleted
#
# Fix: the original called relay.get(D.targetgroups) inside the outer loop,
# re-querying the interface once per load balancer; the result is
# loop-invariant, so it is fetched once up front.
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

loadbalancer_arns = list(
    map(lambda i: i['LoadBalancerArn'], relay.get(D.loadbalancers)))
targets = relay.get(D.targets)
target_groups = relay.get(D.targetgroups)

to_terminate = []
to_keep = []

# Only 1 Load Balancer can be associated per Target Group - https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-limits.html
for arn in loadbalancer_arns:
    terminate = True
    for group in target_groups:
        # A load balancer is "kept" if any of its target groups has at
        # least one registered target.
        if arn in group['LoadBalancerArns'] and len(
                targets[group['TargetGroupArn']]) != 0:
            terminate = False
            to_keep.append(arn)
    if terminate:
        to_terminate.append(arn)
#!/usr/bin/env python
# Serializes the workflow-supplied policy document to JSON and applies it
# to the given S3 bucket.
import boto3
import json
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

# The session token is optional: it only exists for temporary
# (assumed-role) connections, so a failed lookup is ignored.
session_token = None
try:
    session_token = relay.get(D.aws.connection.sessionToken)
except:
    pass

aws_session = boto3.Session(
    aws_access_key_id=relay.get(D.aws.connection.accessKeyID),
    aws_secret_access_key=relay.get(D.aws.connection.secretAccessKey),
    aws_session_token=session_token,
)
s3 = aws_session.client('s3')

bucketName = relay.get(D.bucketName)
policy = json.dumps(relay.get(D.policy))

try:
    response = s3.put_bucket_policy(Bucket=bucketName, Policy=policy)
    print("Added policy for bucket {}".format(bucketName))
except Exception as e:
    print(e)
# Description: Filters running EC2 instances down to the untagged ones and
# collects their IDs so the next step can stop them.
# Inputs:
# - instances - List of instances to evaluate
# Outputs:
# - instanceIDs - list of instance IDs to stop in the next step
#
# Fix: in Python 3 `filter()` returns a lazy iterator with no `len()`, so
# the original `len(instances)` raised TypeError before any work was done;
# the filter result is now materialized into a list.
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

to_stop = []
to_keep = []

# Only running instances are considered.
instances = list(
    filter(lambda i: i['State']['Name'] == 'running', relay.get(D.instances)))

if len(instances) == 0:
    print("No instances found!")
    exit(1)

for instance in instances:
    try:
        # Tags is None for an untagged instance.
        if instance['Tags'] is None:
            to_stop.append(instance['InstanceId'])
        else:
            to_keep.append(instance['InstanceId'])
    except Exception as e:
        print(
            '\nEC2 instance {0} not considered for termination because of a processing error: {1}'
            .format(instance['InstanceId'], e))
#!/usr/bin/env python
# Splits S3 buckets into those whose ACL grants READ_ACP to the global
# AuthenticatedUsers group (effectively public) and those that do not.
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

AUTH_USERS_URI = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"

bucketACLs = relay.get(D.bucketACLs)

to_modify = []
to_do_nothing = []

for name, grants in bucketACLs.items():
    # Public if any grant targets the AuthenticatedUsers group with a
    # permission containing READ_ACP.
    is_public = any(
        grant['Grantee']['Type'] == "Group"
        and grant['Grantee']['URI'] == AUTH_USERS_URI
        and "READ_ACP" in str(grant['Permission'])
        for grant in grants)
    if is_public:
        to_modify.append(name)
    else:
        to_do_nothing.append(name)

print("\nFound {} bucket(s) that DON'T have public READ permissions:".format(len(to_do_nothing)))
print(*to_do_nothing, sep="\n")
print("\nFound {} bucket(s) that have public READ permissions:".format(len(to_modify)))
print(*to_modify, sep="\n")
#!/usr/bin/env python import boto3 from nebula_sdk import Interface, Dynamic as D relay = Interface() session_token = None try: session_token = relay.get(D.aws.connection.sessionToken) except: pass sess = boto3.Session(aws_access_key_id=relay.get(D.aws.connection.accessKeyID), aws_secret_access_key=relay.get( D.aws.connection.secretAccessKey), aws_session_token=session_token) s3 = sess.client('s3') bucketName = relay.get(D.bucketName) targetBucket = relay.get(D.targetBucket) targetPrefix = relay.get(D.targetPrefix) get_response = s3.get_bucket_logging(Bucket=bucketName) print("Bucket get info 1 {}".format(get_response)) try: bucketLoggingStatus = { 'LoggingEnabled': { 'TargetBucket': targetBucket,
# GCP instance-reaper helpers: termination-date / lifetime label handling.
# (Script chunk — get_label and the rest of the module continue beyond
# this view.)
import datetime
import re
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

# The `MINUTES_TO_WAIT` global variable is the number of minutes to wait for
# a termination_date label to appear for the GCP instance.
MINUTES_TO_WAIT = 4

# The Indefinite lifetime constant
INDEFINITE = 'indefinite'

# Tag names (user-configurable)
TERMINATION_DATE_LABEL = relay.get(D.terminationDateLabel)
LIFETIME_LABEL = relay.get(D.lifetimeLabel)


def get_label(gcp_instance, label_name):
    """
    :param gcp_instance: a description of a GCP instance.
    :param label_name: A string of the key name you are searching for.

    This method returns None if the GCP instance currently has no tags
    or if the label is not found. If the tag is found, it returns the label
    value.
    """
    # A missing labels mapping means there is nothing to search.
    if gcp_instance['labels'] is None:
        return None
# and filters the key pairs that are unused. It then sets the output # variable `keypairs` to the list of key pairs that are unused. # Inputs: # - keyPairs - List of keyPairs to evaluate # - instances - List of instances to compare against # Outputs: # - keyPairNames - list of key pair names from nebula_sdk import Interface, Dynamic as D relay = Interface() to_delete = [] to_keep = [] all_keyPairs = list(map(lambda i: i['KeyName'], relay.get(D.keyPairs))) used_keyPairs = list(map(lambda i: i['KeyName'], relay.get(D.instances))) for key in all_keyPairs: if key in used_keyPairs: to_keep.append(key) else: to_delete.append(key) print('\nFound {} used key pairs:'.format(len(to_keep))) print(*[key for key in to_keep], sep="\n") print('\nFound {} unused key pairs:'.format(len(to_delete))) print(*[key for key in to_delete], sep="\n") print(
#!/usr/bin/env python
# Collects disks with no "users" entry (i.e. attached to nothing) and
# publishes them on the `disks` output for termination.
from nebula_sdk import Interface, Dynamic as D

relay = Interface()

if __name__ == '__main__':
    # A disk is unattached when its description carries no "users" key.
    to_terminate = [
        d for d in relay.get(D.disks) if "users" not in d.keys()
    ]
    print('Found {} disks that are unattached'.format(len(to_terminate)))
    print('Setting output `disks` to list of {} disks to terminate'.format(len(to_terminate)))
    relay.outputs.set('disks', to_terminate)
# EC2 instance-reaper helpers: termination-date / lifetime tag handling.
# (Script chunk — get_tag's loop body and the rest of the module continue
# beyond this view.)
from nebula_sdk import Interface, Dynamic as D

ni = Interface()

# The `MINUTES_TO_WAIT` global variable is the number of minutes to wait for
# a termination_date tag to appear for the EC2 instance. Please note that the
# AWS Lambdas are limited to a 5 minute maximum for their total run time.
MINUTES_TO_WAIT = 4

# The Indefinite lifetime constant
INDEFINITE = 'indefinite'

# Tag names (user-configurable)
TERMINATION_DATE_TAG = ni.get(D.terminationDateTag)
LIFETIME_TAG = ni.get(D.lifetimeTag)


def get_tag(ec2_instance, tag_name):
    """
    :param ec2_instance: a boto3 resource representing an Amazon EC2 Instance.
    :param tag_name: A string of the key name you are searching for.

    This method returns None if the ec2 instance currently has no tags
    or if the tag is not found. If the tag is found, it returns the tag value.
    """
    # An untagged instance reports Tags as None.
    if ec2_instance['Tags'] is None:
        return None
    for tag in ec2_instance['Tags']:
# Description: This is an example script that you can author or modify that retrieves # a list of Azure Network Interfaces and filters the ones that are unused e.g. # no VM configuration. # Inputs: # - networkInterfaces - list of Azure NICs # Outputs: # - resourceIDs - list of Azure Virtual Machine resource IDs to be terminated in the subsequent step from nebula_sdk import Interface, Dynamic as D relay = Interface() to_terminate = [] to_keep = [] nics = relay.get(D.networkInterfaces) for nic in nics: if 'virtual_machine' in nic.keys(): to_keep.append(nic['id']) else: to_terminate.append(nic['id']) continue print('\nFound {} Network Interfaces that are used:'.format(len(to_keep))) print(*[i for i in to_keep], sep="\n") print('\nFound {} Network Interfaces that are NOT used:'.format( len(to_terminate))) print(*[i for i in to_terminate], sep="\n") print(