def lambda_handler(event, context):
    """Delete every SageMaker endpoint and notebook instance in all regions.

    Credentials and the region list are pulled out of *event* by the shared
    ``common`` helpers.  Deletion is best-effort: a failure on one resource
    is printed and the sweep continues, instead of the original behaviour
    where the first failing delete aborted the rest of the region.

    Returns *event* unchanged so the handler can be chained.
    """
    aws_access_key_id = common.get_access_key(event)
    aws_secret_access_key = common.get_secret_access_key(event)
    aws_session_token = common.get_session_token(event)

    for region in common.get_regions(event):
        region_name = region['RegionName']
        try:
            sagemaker = boto3.client(
                'sagemaker',
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                region_name=region_name,
                aws_session_token=aws_session_token)

            for endpoint in sagemaker.list_endpoints()['Endpoints']:
                endpoint_name = endpoint['EndpointName']
                try:
                    sagemaker.delete_endpoint(EndpointName=endpoint_name)
                except Exception as e:
                    # One bad endpoint must not abort the region's sweep.
                    print(e)

            for instance in sagemaker.list_notebook_instances()['NotebookInstances']:
                instance_name = instance['NotebookInstanceName']
                try:
                    # NOTE(review): delete fails while a notebook is still
                    # InService — logged and skipped rather than aborting.
                    sagemaker.delete_notebook_instance(
                        NotebookInstanceName=instance_name)
                except Exception as e:
                    print(e)
        except Exception as e:
            # Region-level failures (e.g. region not enabled) are logged.
            print(e)
    return event
def lambda_handler(event, context):
    """Terminate every running or stopped EC2 instance in all regions.

    Credentials and the region list come from *event* via the shared
    ``common`` helpers.  Fixes over the original:

    * ``describe_instances`` is paginated — a single call can silently
      truncate large fleets.
    * Instance IDs are collected and terminated in one batched
      ``terminate_instances`` call per region instead of one API call
      per instance.

    Returns *event* unchanged so the handler can be chained.
    """
    aws_access_key_id = common.get_access_key(event)
    aws_secret_access_key = common.get_secret_access_key(event)
    aws_session_token = common.get_session_token(event)

    for region in common.get_regions(event):
        region_name = region['RegionName']
        ec2client = boto3.client(
            'ec2',
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=region_name,
            aws_session_token=aws_session_token)

        to_terminate = []
        for page in ec2client.get_paginator('describe_instances').paginate():
            for reservation in page['Reservations']:
                for instance in reservation['Instances']:
                    instance_id = instance['InstanceId']
                    # Only running/stopped instances are targeted, matching
                    # the original behaviour.
                    if instance['State']['Name'] in ('running', 'stopped'):
                        print('Terminating instance ' + instance_id)
                        to_terminate.append(instance_id)

        if to_terminate:
            ec2client.terminate_instances(InstanceIds=to_terminate)
    return event
def lambda_handler(event, context):
    """Delete all RDS/Aurora DB clusters and DB subnet groups in all regions.

    Fixes over the original:

    * Skips the ``default`` DB subnet group, which cannot be deleted and
      whose error previously aborted every remaining region.
    * Each delete is wrapped in its own try/except so one failure (e.g. a
      cluster that still has member instances, or a subnet group still in
      use by a deleting cluster) does not stop the sweep.

    Returns *event* unchanged so the handler can be chained.
    """
    aws_access_key_id = common.get_access_key(event)
    aws_secret_access_key = common.get_secret_access_key(event)
    aws_session_token = common.get_session_token(event)

    for region in common.get_regions(event):
        region_name = region['RegionName']
        rds = boto3.client(
            'rds',
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=region_name,
            aws_session_token=aws_session_token)

        for cluster in rds.describe_db_clusters()['DBClusters']:
            cluster_id = cluster['DBClusterIdentifier']
            try:
                rds.delete_db_cluster(DBClusterIdentifier=cluster_id,
                                      SkipFinalSnapshot=True)
            except Exception as e:
                # e.g. cluster still has member instances or is already
                # being deleted — log and continue.
                print(e)

        for subnet_group in rds.describe_db_subnet_groups()['DBSubnetGroups']:
            group_name = subnet_group['DBSubnetGroupName']
            if group_name == 'default':
                # The default DB subnet group cannot be deleted.
                continue
            try:
                rds.delete_db_subnet_group(DBSubnetGroupName=group_name)
            except Exception as e:
                print(e)
    return event
def lambda_handler(event, context):
    """Delete every EBS volume and account-owned EBS snapshot in all regions.

    Fixes over the original:

    * ``describe_volumes`` / ``describe_snapshots`` are paginated so large
      accounts are fully covered.
    * Each delete has its own try/except: an in-use (attached) volume
      cannot be deleted, and that error previously aborted the whole sweep.

    Returns *event* unchanged so the handler can be chained.
    """
    aws_access_key_id = common.get_access_key(event)
    aws_secret_access_key = common.get_secret_access_key(event)
    aws_session_token = common.get_session_token(event)

    # Snapshot listing must be scoped to this account, otherwise public
    # snapshots owned by others would be returned as well.
    sts = boto3.client(
        'sts',
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token)
    account_id = sts.get_caller_identity()['Account']

    for region in common.get_regions(event):
        region_name = region['RegionName']
        ec2client = boto3.client(
            'ec2',
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=region_name,
            aws_session_token=aws_session_token)

        for page in ec2client.get_paginator('describe_volumes').paginate():
            for volume in page['Volumes']:
                print('Deleting EBS (' + volume['VolumeId'] + ')')
                try:
                    ec2client.delete_volume(VolumeId=volume['VolumeId'],
                                            DryRun=False)
                except Exception as e:
                    # Attached volumes cannot be deleted — log and continue.
                    print(e)

        snapshot_pages = ec2client.get_paginator(
            'describe_snapshots').paginate(OwnerIds=[account_id])
        for page in snapshot_pages:
            for snapshot in page['Snapshots']:
                print('Deleting Snapshot (' + snapshot['SnapshotId'] + ')')
                try:
                    ec2client.delete_snapshot(
                        SnapshotId=snapshot['SnapshotId'], DryRun=False)
                except Exception as e:
                    print(e)
    return event
def lambda_handler(event, context):
    """Delete all IAM users, groups, and non-AWS roles in the account.

    Fixes over the original, each of which previously raised and aborted
    the entire cleanup:

    * ``delete_login_profile`` raises ``NoSuchEntityException`` for users
      without console access — now tolerated.
    * ``delete_user`` fails while attached managed policies or inline
      policies remain — both are now removed first.
    * ``delete_group`` fails while inline group policies remain — now
      deleted before the group.

    Service-linked roles (``AWSServiceRoleFor*``), AWS-prefixed roles and
    ``AdminAccessRole`` are preserved, as in the original.

    Returns *event* unchanged so the handler can be chained.
    """
    aws_access_key_id = common.get_access_key(event)
    aws_secret_access_key = common.get_secret_access_key(event)
    aws_session_token = common.get_session_token(event)
    iam = boto3.client(
        'iam',
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token)

    # --- Users ----------------------------------------------------------
    for user in iam.list_users()['Users']:
        user_name = user['UserName']
        for group in iam.list_groups_for_user(UserName=user_name)['Groups']:
            iam.remove_user_from_group(GroupName=group['GroupName'],
                                       UserName=user_name)
        for key in iam.list_access_keys(
                UserName=user_name)['AccessKeyMetadata']:
            iam.delete_access_key(UserName=user_name,
                                  AccessKeyId=key['AccessKeyId'])
        # delete_user refuses while any policy is still bound to the user.
        for policy in iam.list_attached_user_policies(
                UserName=user_name)['AttachedPolicies']:
            iam.detach_user_policy(UserName=user_name,
                                   PolicyArn=policy['PolicyArn'])
        for policy_name in iam.list_user_policies(
                UserName=user_name)['PolicyNames']:
            iam.delete_user_policy(UserName=user_name,
                                   PolicyName=policy_name)
        try:
            iam.delete_login_profile(UserName=user_name)
        except iam.exceptions.NoSuchEntityException:
            # API-only users have no console login profile.
            pass
        iam.delete_user(UserName=user_name)
        print("Deleting User (" + user_name + ")")

    # --- Groups ---------------------------------------------------------
    for group in iam.list_groups()['Groups']:
        group_name = group['GroupName']
        for policy in iam.list_attached_group_policies(
                GroupName=group_name)['AttachedPolicies']:
            iam.detach_group_policy(GroupName=group_name,
                                    PolicyArn=policy['PolicyArn'])
        # delete_group also refuses while inline policies remain.
        for policy_name in iam.list_group_policies(
                GroupName=group_name)['PolicyNames']:
            iam.delete_group_policy(GroupName=group_name,
                                    PolicyName=policy_name)
        iam.delete_group(GroupName=group_name)

    # --- Roles ----------------------------------------------------------
    for role in iam.list_roles()['Roles']:
        role_name = role['RoleName']
        # Preserve service-linked/AWS-managed roles and the harness's
        # AdminAccessRole (same exclusions as the original).
        if role_name.startswith('AWS') or role_name == 'AdminAccessRole':
            continue
        for policy in iam.list_attached_role_policies(
                RoleName=role_name)['AttachedPolicies']:
            iam.detach_role_policy(RoleName=role_name,
                                   PolicyArn=policy['PolicyArn'])
        for profile in iam.list_instance_profiles_for_role(
                RoleName=role_name)['InstanceProfiles']:
            iam.remove_role_from_instance_profile(
                InstanceProfileName=profile['InstanceProfileName'],
                RoleName=role_name)
            iam.delete_instance_profile(
                InstanceProfileName=profile['InstanceProfileName'])
        for policy_name in iam.list_role_policies(
                RoleName=role_name)['PolicyNames']:
            iam.delete_role_policy(RoleName=role_name,
                                   PolicyName=policy_name)
        iam.delete_role(RoleName=role_name)
    return event
def lambda_handler(event, context):
    """Stop every in-service SageMaker notebook instance in all regions.

    Fixes over the original:

    * The unused STS ``get_caller_identity`` call (dead ``accountId``
      local) is removed.
    * Only ``InService`` notebooks are listed (``StatusEquals`` filter):
      stopping a notebook in any other state raises, and that exception
      previously fell through to the region-level handler, skipping the
      region's remaining notebooks.
    * Each stop has its own try/except so one failure cannot abort the
      rest of the region.

    Returns *event* unchanged so the handler can be chained.
    """
    aws_access_key_id = common.get_access_key(event)
    aws_secret_access_key = common.get_secret_access_key(event)
    aws_session_token = common.get_session_token(event)

    for region in common.get_regions(event):
        region_name = region['RegionName']
        try:
            sagemaker = boto3.client(
                'sagemaker',
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                region_name=region_name,
                aws_session_token=aws_session_token)
            print(region_name)

            instances = sagemaker.list_notebook_instances(
                StatusEquals='InService')
            for instance in instances['NotebookInstances']:
                instance_name = instance['NotebookInstanceName']
                try:
                    sagemaker.stop_notebook_instance(
                        NotebookInstanceName=instance_name)
                    print("Stopping (SageMaker) :" + instance_name)
                except Exception as e:
                    print(e)
        except Exception as e:
            # Region-level failures (e.g. region not enabled) are logged.
            print(e)
    return event
def _wait_for_athena_query(client, query_execution_id, timeout=30.0):
    """Poll Athena until the query leaves QUEUED/RUNNING or *timeout* expires.

    Returns the final state string ('SUCCEEDED', 'FAILED', 'CANCELLED'),
    or 'TIMEOUT' when the deadline passes first.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = client.get_query_execution(
            QueryExecutionId=query_execution_id)['QueryExecution']['Status']
        if status['State'] not in ('QUEUED', 'RUNNING'):
            return status['State']
        time.sleep(1)
    return 'TIMEOUT'


def lambda_handler(event, context):
    """Drop every Athena table and database in all regions.

    Runs ``show databases`` / ``show tables`` utility queries, downloads
    their ``<QueryExecutionId>.txt`` result files from S3, and issues a
    ``drop`` statement for each table and database found.

    Fix over the original: the hard-coded ``time.sleep(2)`` between
    starting a query and downloading its result file was a race — if the
    query had not finished, the file did not exist yet and the download
    raised.  Each metadata query is now polled to completion via
    ``_wait_for_athena_query``.

    NOTE(review): the S3 output bucket name is a fresh UUID that is never
    created here — presumably provisioned elsewhere; confirm against the
    deployment.

    Returns *event* unchanged so the handler can be chained.
    """
    aws_access_key_id = common.get_access_key(event)
    aws_secret_access_key = common.get_secret_access_key(event)
    aws_session_token = common.get_session_token(event)

    for region in common.get_regions(event):
        try:
            region_name = region['RegionName']
            client = boto3.client(
                'athena',
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                region_name=region_name,
                aws_session_token=aws_session_token)

            s3_bucket = str(uuid.uuid4())
            # One shared result configuration for every query in the region.
            result_config = {
                'OutputLocation': 's3://' + s3_bucket,
                'EncryptionConfiguration': {
                    'EncryptionOption': 'SSE_S3',
                    'KmsKey': 'string'
                }
            }

            response = client.start_query_execution(
                QueryString='show databases;',
                ResultConfiguration=result_config)
            query_id = response['QueryExecutionId']
            _wait_for_athena_query(client, query_id)
            databases_names_file = query_id + ".txt"

            s3 = boto3.resource(
                's3',
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
                region_name=region_name,
                aws_session_token=aws_session_token)
            s3.meta.client.download_file(s3_bucket, databases_names_file,
                                         "/tmp/" + databases_names_file)
            with open("/tmp/" + databases_names_file, 'r') as myfile:
                databases = myfile.read().splitlines()

            for database in databases:
                response = client.start_query_execution(
                    QueryString='show tables',
                    QueryExecutionContext={'Database': database},
                    ResultConfiguration=result_config)
                query_id = response['QueryExecutionId']
                _wait_for_athena_query(client, query_id)
                tables_names_file = query_id + ".txt"
                s3.meta.client.download_file(s3_bucket, tables_names_file,
                                             "/tmp/" + tables_names_file)
                with open("/tmp/" + tables_names_file, 'r') as myfile:
                    tables = myfile.read().splitlines()

                for table in tables:
                    client.start_query_execution(
                        QueryString='drop table ' + table,
                        QueryExecutionContext={'Database': database},
                        ResultConfiguration=result_config)
                    print('Deleting table (' + table + ')')

                client.start_query_execution(
                    QueryString='drop database ' + database,
                    ResultConfiguration=result_config)
                print('Deleting database (' + database + ')')
        except EndpointConnectionError:
            print("Endpoint Connection Error - OK")
    return event