def set_concurrency(name, concurrency, preview):
    if concurrency:
        if preview:
            stderr('\npreview: concurrency:', concurrency)
        else:
            client('lambda').put_function_concurrency(FunctionName=name, ReservedConcurrentExecutions=concurrency)
            stderr('\nconcurrency:', concurrency)

def ensure_instance_profile_has_role(name, role_name, preview):
    stderr('\nensure instance profile has role:')
    profiles = [profile
                for page in client('iam').get_paginator('list_instance_profiles').paginate()
                for profile in page['InstanceProfiles']
                if profile['InstanceProfileName'] == name]
    if 0 == len(profiles):
        if preview:
            stderr(' preview: created:', name)
            profile = None
        else:
            profile = client('iam').create_instance_profile(InstanceProfileName=name)['InstanceProfile']
            stderr(' created:', name)
    elif 1 == len(profiles):
        if preview:
            stderr(' preview: exists:', name)
        else:
            stderr(' exists:', name)
        profile = profiles[0]
    else:
        assert False, profiles
    if profile:
        roles = [role['RoleName'] for role in profile['Roles']]
        if role_name not in roles:
            client('iam').add_role_to_instance_profile(InstanceProfileName=name, RoleName=role_name)

def ensure_trigger_api(name, arn_lambda, metadata, preview):
    for trigger in metadata['trigger']:
        if trigger.split()[0] == 'api':
            if preview:
                stderr('\npreview: ensure triggers api')
            else:
                stderr('\nensure triggers api')
                try:
                    rest_api_id = aws.api.api_id(name)
                except AssertionError:
                    rest_api_id = client('apigateway').create_rest_api(name=name,
                                                                       binaryMediaTypes=['*/*'],
                                                                       endpointConfiguration={'types': ['REGIONAL']})['id']
                parent_id = aws.api.resource_id(rest_api_id, '/')
                resource_id = aws.api.resource_id(rest_api_id, '/{proxy+}')
                if not resource_id:
                    resource_id = client('apigateway').create_resource(restApiId=rest_api_id,
                                                                       parentId=parent_id,
                                                                       pathPart='{proxy+}')['id']
                api = client('lambda').meta.service_model.api_version
                uri = f"arn:aws:apigateway:{aws.region()}:lambda:path/{api}/functions/"
                uri += f'arn:aws:lambda:{aws.region()}:{aws.account()}:function:{name}/invocations'
                for id in [parent_id, resource_id]:
                    try:
                        client('apigateway').put_method(restApiId=rest_api_id,
                                                        resourceId=id,
                                                        httpMethod='ANY',
                                                        authorizationType='NONE')
                    except client('apigateway').exceptions.ConflictException:
                        pass
                    else:
                        client('apigateway').put_integration(restApiId=rest_api_id,
                                                             resourceId=id,
                                                             httpMethod='ANY',
                                                             type="AWS_PROXY",
                                                             integrationHttpMethod='POST',
                                                             uri=uri)
                client('apigateway').create_deployment(restApiId=rest_api_id, stageName=stage_name)
                arn = f"arn:aws:execute-api:{aws.region()}:{aws.account()}:{rest_api_id}/*/*/*"
                ensure_permission(name, 'apigateway.amazonaws.com', arn)
            break

def ensure_trigger_sqs(name, arn_lambda, metadata, preview):
    triggers = []
    for trigger in metadata['trigger']:
        if trigger.split()[0] == 'sqs':
            kind, queue_name, *attrs = trigger.split()
            triggers.append([queue_name, attrs])
    if triggers:
        stderr('\nensure triggers sqs:')
        for queue_name, attrs in triggers:
            ensure_attrs = {k: int(v) if v.isdigit() else v
                            for a in attrs
                            for k, v in [a.split('=')]}
            for k, v in trigger_sqs_attr_shortcuts.items():
                if k in ensure_attrs:
                    ensure_attrs[v] = ensure_attrs.pop(k)
            if 'StartingPosition' in ensure_attrs:
                ensure_attrs['StartingPosition'] = ensure_attrs['StartingPosition'].upper()
            if preview:
                stderr(' preview:', queue_name)
            else:
                stream_arn = aws.dynamodb.stream_arn(queue_name)
                try:
                    client('lambda').create_event_source_mapping(EventSourceArn=stream_arn,
                                                                 FunctionName=name,
                                                                 Enabled=True,
                                                                 **ensure_attrs)
                    stderr('', queue_name)
                except client('lambda').exceptions.ResourceConflictException as e:
                    *_, kind, uuid = e.args[0].split()
                    resp = client('lambda').get_event_source_mapping(UUID=uuid)
                    for k, v in ensure_attrs.items():
                        if k != 'StartingPosition':
                            assert resp[k] == v, [resp[k], v]
                    stderr('', queue_name)

def ensure_infra_log_group(name, preview):
    name = f'/aws/lambda/{name}'
    stderr('ensure infra logs:')
    try:
        if preview:
            stderr(' preview:', name)
        else:
            client('logs').create_log_group(logGroupName=name)
            stderr('', name)
    except client('logs').exceptions.ResourceAlreadyExistsException:
        stderr('', name)

def ensure_permission(name, principal, arn):
    not_found = client('lambda').exceptions.ResourceNotFoundException
    try:
        res = json.loads(retry(client('lambda').get_policy, not_found)(FunctionName=name)['Policy'])
    except not_found:
        statements = []
    else:
        statements = [x['Sid'] for x in res['Statement']]
    id = principal.replace('.', '-')
    if id not in statements:
        client('lambda').add_permission(FunctionName=name,
                                        StatementId=id,
                                        Action='lambda:InvokeFunction',
                                        Principal=principal,
                                        SourceArn=arn)

def rm_table(name, print_fn=stderr):
    in_use = client('dynamodb').exceptions.ResourceInUseException
    not_found = client('dynamodb').exceptions.ResourceNotFoundException
    try:
        retry(client('dynamodb').delete_table, in_use, not_found)(TableName=name)
    except in_use as e:
        assert str(e).endswith(f'Table is being deleted: {name}'), e
    except not_found:
        pass
    else:
        print_fn('dynamodb deleted:', name)

def ensure_key_allows_role(arn_key, arn_role, preview):
    if not preview:
        resp = aws.client('kms').get_key_policy(KeyId=arn_key, PolicyName='default')
        policy = json.loads(resp['Policy'])
        # ensure that every Statement.Principal.AWS is a list, it can be either a
        # string or a list of strings.
        for statement in policy['Statement']:
            if statement.get('Principal', {}).get('AWS'):
                if isinstance(statement['Principal']['AWS'], str):
                    statement['Principal']['AWS'] = [statement['Principal']['AWS']]
        # remove invalid principals from all statements, these are caused by the
        # deletion of an iam role referenced by this policy, which transforms the
        # principal from something like "arn:..." to "AIEKFJ...".
        for statement in policy['Statement']:
            if statement.get('Principal', {}).get('AWS'):
                for arn in statement['Principal']['AWS'].copy():
                    if not arn.startswith('arn:'):
                        statement['Principal']['AWS'].remove(arn)
        # ensure that the "allow use of key" Statement contains our role's arn
        for statement in policy['Statement']:
            if statement['Sid'] == 'Allow use of the key':
                if arn_role not in statement['Principal']['AWS']:
                    statement['Principal']['AWS'].append(arn_role)
                break
        # if an "allow use of key" Statement didn't exist, create it
        else:
            policy['Statement'].append({
                "Sid": "Allow use of the key",
                "Effect": "Allow",
                "Principal": {"AWS": [arn_role]},
                "Action": ["kms:Encrypt",
                           "kms:Decrypt",
                           "kms:ReEncrypt*",
                           "kms:GenerateDataKey*",
                           "kms:DescribeKey"],
                "Resource": "*"
            })
        try:
            retry(aws.client('kms').put_key_policy, silent=True)(KeyId=arn_key,
                                                                 Policy=json.dumps(policy),
                                                                 PolicyName='default')
        except aws.client('kms').exceptions.MalformedPolicyDocumentException:
            stderr(f'fatal: failed to put to key: {arn_key}, policy:\n' + json.dumps(policy, indent=2))
            raise

def ensure_key(name, arn_user, arn_role, preview):
    stderr('\nensure kms key:')
    if preview:
        stderr(' preview: kms:', name)
    else:
        keys = [x for x in all_keys() if x['AliasArn'].endswith(f':alias/lambda/{name}')]
        if 0 == len(keys):
            arn_root = ':'.join(arn_user.split(':')[:-1]) + ':root'
            policy = """
            {"Version": "2012-10-17",
             "Statement": [{"Sid": "Enable IAM User Permissions",
                            "Effect": "Allow",
                            "Principal": {"AWS": ["%(arn_user)s", "%(arn_root)s"]},
                            "Action": "kms:*",
                            "Resource": "*"},
                           {"Sid": "Allow use of the key",
                            "Effect": "Allow",
                            "Principal": {"AWS": ["%(arn_user)s", "%(arn_role)s", "%(arn_root)s"]},
                            "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
                            "Resource": "*"},
                           {"Sid": "Allow attachment of persistent resources",
                            "Effect": "Allow",
                            "Principal": {"AWS": ["%(arn_user)s", "%(arn_role)s", "%(arn_root)s"]},
                            "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"],
                            "Resource": "*",
                            "Condition": {"Bool": {"kms:GrantIsForAWSResource": true}}}]}
            """ % {'arn_role': arn_role,
                   'arn_user': arn_user,
                   'arn_root': arn_root}
            _key_id = retry(aws.client('kms').create_key, silent=True)(Policy=policy,
                                                                       Description=name)['KeyMetadata']['KeyId']
            aws.client('kms').create_alias(AliasName=f'alias/lambda/{name}', TargetKeyId=_key_id)
            keys = [x for x in all_keys() if x['AliasArn'].endswith(f':alias/lambda/{name}')]
            assert len(keys) == 1
            stderr('', keys[0]['AliasArn'])
            return key_id(keys[0])
        elif 1 == len(keys):
            stderr('', keys[0]['AliasArn'])
            return key_id(keys[0])
        else:
            # keys are dicts, so join their alias arns rather than the dicts themselves
            stderr('fatal: found more than 1 key for:', name, '\n' + '\n'.join(k['AliasArn'] for k in keys))
            sys.exit(1)

def ensure_allows(name, allows, preview):
    if allows:
        stderr('\nensure allows:')
        for allow in allows:
            action, resource = allow.split()
            if preview:
                stderr(' preview:', allow)
            else:
                stderr('', allow)
                policy = f'''{{"Version": "2012-10-17", "Statement": [{{"Effect": "Allow", "Action": "{action}", "Resource": "{resource}"}}]}}'''
                client('iam').put_role_policy(RoleName=name,
                                              PolicyName=_policy_name(allow),
                                              PolicyDocument=policy)

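# Each allow is a single "<action> <resource>" pair split on whitespace, one inline
# policy per pair. A minimal usage sketch (hypothetical role and bucket names):
#   ensure_allows('my-role', ['s3:GetObject arn:aws:s3:::my-bucket/*'], preview=False)
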
def apis(name=None):
    for page in client('apigateway').get_paginator('get_rest_apis').paginate():
        for item in page['items']:
            if not name or item['name'] == name:
                yield (item['name'],
                       item['id'],
                       ','.join(item['endpointConfiguration']['types']),
                       item['createdDate'])

def ensure_bucket(name, acl='private', versioning=False, noencrypt=False, print_fn=stderr, preview=False):
    if preview:
        print_fn(' preview:', name)
    else:
        try:
            client('s3').create_bucket(
                ACL=acl,
                Bucket=name,
                CreateBucketConfiguration={'LocationConstraint': aws.region()},
            )
        except client('s3').exceptions.BucketAlreadyOwnedByYou:
            print_fn('', name)
        else:
            print_fn('', name)
        if acl == 'private':
            client('s3').put_public_access_block(
                Bucket=name,
                PublicAccessBlockConfiguration={'BlockPublicAcls': True,
                                                'IgnorePublicAcls': True,
                                                'BlockPublicPolicy': True,
                                                'RestrictPublicBuckets': True},
            )
        if versioning:
            resource('s3').BucketVersioning(name).enable()
        else:
            resource('s3').BucketVersioning(name).suspend()
        if noencrypt:
            client('s3').delete_bucket_encryption(Bucket=name)
        else:
            client('s3').put_bucket_encryption(
                Bucket=name,
                ServerSideEncryptionConfiguration={
                    'Rules': [{'ApplyServerSideEncryptionByDefault': {'SSEAlgorithm': 'AES256'}}]
                },
            )

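# A minimal usage sketch (hypothetical bucket name): creates the bucket in the
# current region, blocks public access for private ACLs, enables versioning,
# and applies AES256 default encryption.
#   ensure_bucket('my-scratch-bucket', versioning=True)
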
def most_recent_streams(group_name, max_age_seconds=60 * 60 * 24):
    streams = []
    for stream in aws.client('logs').describe_log_streams(logGroupName=group_name,
                                                          orderBy='LastEventTime',
                                                          descending=True)['logStreams']:
        if time.time() - (stream['lastEventTimestamp'] // 1000) < max_age_seconds:
            streams.append(stream['logStreamName'])
    return streams

def rm_extra_allows(name, allows, preview):
    to_remove = []
    try:
        role_policies = [policy
                         for page in client('iam').get_paginator('list_role_policies').paginate(RoleName=name)
                         for policy in page['PolicyNames']]
    except client('iam').exceptions.NoSuchEntityException:
        pass
    else:
        for policy in role_policies:
            if policy not in [_policy_name(x) for x in allows]:
                to_remove.append(policy)
    if to_remove:
        stderr('\nremove extra allows:')
        for policy in to_remove:
            if preview:
                stderr(' preview:', policy)
            else:
                stderr('', policy)
                client('iam').delete_role_policy(RoleName=name, PolicyName=policy)

def rm_extra_policies(name, policies, preview):
    to_remove = []
    try:
        attached_role_policies = [policy
                                  for page in client('iam').get_paginator('list_attached_role_policies').paginate(RoleName=name)
                                  for policy in page['AttachedPolicies']]
    except client('iam').exceptions.NoSuchEntityException:
        pass
    else:
        for policy in attached_role_policies:
            if policy['PolicyName'] not in policies:
                to_remove.append(policy)
    if to_remove:
        stderr('\nremove extra policies:')
        for policy in to_remove:
            if preview:
                stderr(' preview:', policy['PolicyName'])
            else:
                stderr('', policy['PolicyName'])
                client('iam').detach_role_policy(RoleName=name, PolicyArn=policy["PolicyArn"])

def ensure_infra_sns(snss, preview):
    not_found = client('sns').exceptions.NotFoundException
    if snss:
        stderr('\nensure infra sns:')
        for sns in snss:
            name, *attrs = sns.split()
            if preview:
                stderr(' preview:', name)
            else:
                attrs = {k: int(v) if v.isdigit() else v
                         for attr in attrs
                         for k, v in [attr.split('=')]}
                try:
                    sns_attrs = client('sns').get_topic_attributes(TopicArn=aws.sns.arn(name))['Attributes']
                except not_found:
                    # create with just the topic name, not the full "name attr=val" spec string
                    client('sns').create_topic(Name=name)
                    stderr('', name)
                else:
                    for k, v in attrs.items():
                        assert sns_attrs[k] == v, f'sns attr mismatch {k} {v} != {sns_attrs[k]}'
                    stderr('', name)

def ensure_role(name, principal, preview):
    stderr('\nensure role:')
    if preview:
        stderr(' preview:', name)
    else:
        role_path = f'/{principal}/{name}-path/'
        roles = [role
                 for page in client('iam').get_paginator('list_roles').paginate(PathPrefix=role_path)
                 for role in page['Roles']]
        if 0 == len(roles):
            stderr('', name)
            policy = '''{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Principal": {"Service": "%s.amazonaws.com"}, "Action": "sts:AssumeRole"}]}''' % principal
            client('iam').create_role(Path=role_path, RoleName=name, AssumeRolePolicyDocument=policy)
        elif 1 == len(roles):
            stderr('', name)
        else:
            stderr(' error: there is more than 1 role under path:', role_path)
            for role in roles:
                stderr('', role)
            sys.exit(1)

def ensure_infra_sqs(sqss, preview):
    assert False, 'use dotted dict and move to aws'
    not_found = client('sqs').exceptions.QueueDoesNotExist
    if sqss:
        stderr('\nensure infra sqs:')
        for sqs in sqss:
            name, *attrs = sqs.split()
            if preview:
                stderr(' preview:', name)
            else:
                attrs = {k: int(v) if v.isdigit() else v
                         for attr in attrs
                         for k, v in [attr.split('=')]}
                try:
                    # look up and create by queue name, not the full "name attr=val" spec string
                    queue_url = client('sqs').get_queue_url(QueueName=name)['QueueUrl']
                except not_found:
                    client('sqs').create_queue(QueueName=name, Attributes=attrs)
                    stderr('', name)
                else:
                    queue_attrs = client('sqs').get_queue_attributes(QueueUrl=queue_url)['Attributes']
                    for k, v in attrs.items():
                        assert queue_attrs[k] == v, f'sqs attr mismatch {k} {v} != {queue_attrs[k]}'
                    stderr('', name)

def ensure_trigger_cloudwatch(name, arn_lambda, metadata, preview):
    triggers = []
    for trigger in metadata['trigger']:
        if trigger.split()[0] == 'cloudwatch':
            kind, schedule = trigger.split(None, 1)
            triggers.append(schedule)
    if triggers:
        stderr('\nensure triggers cloudwatch:')
        assert len(triggers) == 1, f'only 1 cloudwatch schedule is currently supported: {triggers}'
        for schedule in triggers:
            if preview:
                stderr(' preview:', schedule)
            else:
                arn_rule = client('events').put_rule(Name=name, ScheduleExpression=schedule)['RuleArn']
                ensure_permission(name, 'events.amazonaws.com', arn_rule)
                targets = retry(client('events').list_targets_by_rule)(Rule=name)['Targets']
                assert all(t['Arn'] == arn_lambda for t in targets), f'there are unknown targets in cloudwatch rule: {name}'
                if len(targets) == 0:
                    stderr('', schedule)
                    client('events').put_targets(Rule=name, Targets=[{'Id': '1', 'Arn': arn_lambda}])
                elif len(targets) == 1:
                    assert targets[0]['Arn'] == arn_lambda, f'cloudwatch target mismatch: {arn_lambda} {targets[0]}'
                    stderr('', schedule)
                elif len(targets) > 1:
                    stderr(' removing:', schedule)
                    targets = sorted(targets, key=lambda x: x['Id'])
                    client('events').remove_targets(Rule=name, Ids=[t['Id'] for t in targets[1:]])

                    def ensure_only_one_target():
                        targets = client('events').list_targets_by_rule(Rule=name)['Targets']
                        assert len(targets) == 1, f'more than one target found for cloudwatch rule: {name} {schedule} {targets}'

                    retry(ensure_only_one_target)()

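# Example trigger string (hypothetical), split once on whitespace into kind and
# schedule, so the schedule expression may itself contain spaces:
#   metadata = {'trigger': ['cloudwatch rate(5 minutes)']}
#   ensure_trigger_cloudwatch('my-function', arn_lambda, metadata, preview=False)
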
def ensure_trigger_sns(name, arn_lambda, metadata, preview):
    triggers = []
    for trigger in metadata['trigger']:
        if trigger.split()[0] == 'sns':
            kind, sns_name, *_ = trigger.split()
            triggers.append(sns_name)
    if triggers:
        stderr('\nensure triggers sns:')
        for sns_name in triggers:
            if preview:
                stderr(' preview:', sns_name)
            else:
                arn_sns = aws.sns.arn(sns_name)
                subs = (sub
                        for page in client('sns').get_paginator('list_subscriptions_by_topic').paginate(TopicArn=arn_sns)
                        for sub in page['Subscriptions'])
                for sub in subs:
                    if sub['Endpoint'] == arn_lambda:
                        stderr('', sns_name)
                        break
                else:
                    client('sns').subscribe(TopicArn=arn_sns, Protocol='lambda', Endpoint=arn_lambda)
                    ensure_permission(name, 'sns.amazonaws.com', arn_sns)
                    stderr('', sns_name)

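# Example trigger string (hypothetical topic name), parsed above as "sns <topic-name>";
# the lambda is subscribed to the topic if no matching subscription exists yet:
#   metadata = {'trigger': ['sns my-topic']}
#   ensure_trigger_sns('my-function', arn_lambda, metadata, preview=False)
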
def ensure_policies(name, policies, preview):
    if policies:
        stderr('\nensure policies:')
        if preview:
            all_policies = []
        else:
            all_policies = [policy
                            for page in client('iam').get_paginator('list_policies').paginate()
                            for policy in page['Policies']]
        for policy in policies:
            if preview:
                stderr(' preview:', policy)
            else:
                matched_policies = [x for x in all_policies if x['Arn'].split('/')[-1] == policy]
                if 0 == len(matched_policies):
                    stderr("fatal: didn't find any policy:", policy)
                    sys.exit(1)
                elif 1 == len(matched_policies):
                    client('iam').attach_role_policy(RoleName=name, PolicyArn=matched_policies[0]["Arn"])
                    stderr('', policy)
                else:
                    stderr('fatal: found more than 1 policy:', policy)
                    for p in matched_policies:
                        stderr(p['Arn'])
                    sys.exit(1)

def rm_instance_profile(name):
    try:
        client('iam').get_instance_profile(InstanceProfileName=name)
    except client('iam').exceptions.NoSuchEntityException:
        return
    else:
        for role in client('iam').get_instance_profile(InstanceProfileName=name)['InstanceProfile']['Roles']:
            rm_role(role['RoleName'])
        client('iam').delete_instance_profile(InstanceProfileName=name)
        stderr(' deleted instance profile:', name)

def main():
    try:
        ec2 = aws.client('ec2')
        hostname, instance_id = start_instance(ec2)
        wait_ssh(hostname, user=ssh_user)
        connect(hostname)
        run(['sudo', 'pacman', '--noconfirm', '-Syu'])
        run(['sudo', 'pacman', '--noconfirm', '-Sy', 'base-devel', 'git', 'noto-fonts', 'python-pip', 'subversion'])
        run(['sudo', 'pip', 'install', 'awscli'])
        run(['sudo', 'reboot'], allow_error=True)
        wait_ssh(hostname, user=ssh_user)
        run(["svn", "export",
             "https://github.com/" + os.path.join(REPO_OWNER, REPO_NAME, "tags", REPO_TAG),
             WORKING_COPY])
        run(['makepkg', '-s', '--noconfirm'], cwd="/home/arch/ungoogled-chromium-archlinux")
        run(['/bin/sh', '-c', 'aws s3 cp --no-progress ungoogled-chromium-*.pkg.tar.zst s3://ethant-build-scratch/'],
            cwd="/home/arch/ungoogled-chromium-archlinux")
    finally:
        ec2.terminate_instances(InstanceIds=[instance_id])

def encrypt(key_id, text):
    text = bytes(text, 'utf-8')
    text = aws.client('kms').encrypt(KeyId=key_id, Plaintext=text)['CiphertextBlob']
    return base64.b64encode(text).decode('utf-8')

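# A minimal usage sketch (hypothetical names) combining ensure_key and encrypt:
# ensure_key returns the key id for the function's alias, which encrypt then uses.
#   key = ensure_key('my-function', arn_user, arn_role, preview=False)
#   ciphertext = encrypt(key, 'secret value')
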
def all_keys():
    return [alias
            for page in aws.client('kms').get_paginator('list_aliases').paginate()
            for alias in page['Aliases']]

def resource_id(rest_api_id, path):
    for page in client('apigateway').get_paginator('get_resources').paginate(restApiId=rest_api_id):
        for item in page['items']:
            if item['path'] == path:
                return item['id']

def stream_arn(name):
    not_found = client('dynamodb').exceptions.ResourceNotFoundException
    return retry(client('dynamodb').describe_table, not_found)(TableName=name)['Table']['LatestStreamArn']

def tail(group_name, follow=False, timestamps=False, exit_after=None):
    stderr('group:', group_name)
    if follow:
        tokens = {}
        # when starting to follow, dont page all history, just grab the last
        # few entries and then start following
        limit = 3
        while True:
            try:
                stream_names = most_recent_streams(group_name)
            except (IndexError, aws.client('logs').exceptions.ResourceNotFoundException):
                pass
            else:
                for stream_name in stream_names:
                    kw: Dict[str, Any] = {}
                    token = tokens.get(stream_name)
                    if token:
                        kw['nextToken'] = token
                    if limit != 0:
                        kw['limit'] = limit
                        limit = 0
                    resp = aws.client('logs').get_log_events(logGroupName=group_name,
                                                             logStreamName=stream_name,
                                                             **kw)
                    if resp['events']:
                        tokens[stream_name] = resp['nextForwardToken']
                    for log in resp['events']:
                        if log['message'].split()[0] not in ['START', 'END', 'REPORT']:
                            if timestamps:
                                print(datetime.datetime.fromtimestamp(log['timestamp'] / 1000),
                                      log['message'].replace('\t', ' ').strip(),
                                      flush=True)
                            else:
                                print(log['message'].replace('\t', ' ').strip(), flush=True)
                        if exit_after and exit_after in log['message']:
                            sys.exit(0)
            time.sleep(1)
    else:
        try:
            stream_names = most_recent_streams(group_name)
        except IndexError:
            stderr('no logs available')
            sys.exit(1)
        else:
            for stream_name in stream_names:
                stderr('group:', group_name, 'stream:', stream_name)
                logs = aws.client('logs').get_log_events(logGroupName=group_name,
                                                         logStreamName=stream_name)['events']
                for log in logs:
                    if log['message'].split()[0] not in ['START', 'END', 'REPORT']:
                        if timestamps:
                            print(datetime.datetime.fromtimestamp(log['timestamp'] / 1000),
                                  log['message'].replace('\t', ' ').strip(),
                                  flush=True)
                        else:
                            print(log['message'].replace('\t', ' ').strip(), flush=True)

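# A minimal usage sketch (hypothetical group name and marker): follow a lambda's
# log group with timestamps and exit once a marker string appears in a message.
#   tail('/aws/lambda/my-function', follow=True, timestamps=True, exit_after='done')
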
def ensure_table(name, *attrs, preview=False, yes=False, print_fn=stderr):
    # grab some exception shortcuts
    table_exists = client('dynamodb').exceptions.ResourceInUseException
    not_found = client('dynamodb').exceptions.ResourceNotFoundException
    client_error = client('dynamodb').exceptions.ClientError
    # start ensure_attrs with columns
    columns = [attr for attr in attrs if '=' not in attr]
    ensure_attrs = dicts.to_dotted({
        'AttributeDefinitions': [{'AttributeName': attr_name, 'AttributeType': attr_type.upper()}
                                 for column in columns
                                 for attr_name, attr_type, _ in [column.split(':')]],
        'KeySchema': [{'AttributeName': attr_name, 'KeyType': key_type.upper()}
                      for column in columns
                      for attr_name, _, key_type in [column.split(':')]],
    })
    # update ensure_attrs with the rest of the passed attributes
    ensure_attrs.update({k: int(v) if v.isdigit() else v
                         for attr in attrs
                         if '=' in attr
                         for k, v in [attr.split('=')]})
    # resolve any attribute shortcuts
    for k, v in table_attr_shortcuts.items():
        if k in ensure_attrs:
            ensure_attrs[v] = ensure_attrs.pop(k)
    # allow lower case for stream view type
    if 'StreamSpecification.StreamViewType' in ensure_attrs:
        ensure_attrs['StreamSpecification.StreamEnabled'] = True
        ensure_attrs['StreamSpecification.StreamViewType'] = ensure_attrs['StreamSpecification.StreamViewType'].upper()
    # check provisioning and set billing type
    read = ensure_attrs.get('ProvisionedThroughput.ReadCapacityUnits')
    write = ensure_attrs.get('ProvisionedThroughput.WriteCapacityUnits')
    assert (not read and not write) or (read and write), 'both read and write must be provisioned, or neither for on-demand'
    ensure_attrs['BillingMode'] = 'PROVISIONED' if read else 'PAY_PER_REQUEST'
    # print and prompt
    print_fn()
    print_fn('TableName:', name)
    for k, v in ensure_attrs.items():
        print_fn(f' {k}: {v}')
    print_fn()
    # fetch existing table attrs
    try:
        existing_attrs = dicts.to_dotted(client('dynamodb').describe_table(TableName=name)['Table'])
    # create table
    except not_found:
        if preview:
            print_fn(' preview: created:', name)
        else:
            if not yes:
                print_fn('\nproceed? y/n ')
                assert sh.getch() == 'y'
            retry(client('dynamodb').create_table, table_exists, client_error)(TableName=name, **dicts.from_dotted(ensure_attrs))
            print_fn(' created:', name)
    # check and maybe update existing table
    else:
        if preview:
            print_fn(' preview: exists:', name)
        else:
            print_fn(' exists:', name)
        # join tags into existing attributes
        existing_attrs.update(dicts.to_dotted({
            'Tags': [tag
                     for page in retry(client('dynamodb').get_paginator('list_tags_of_resource').paginate)(ResourceArn=arn(name))
                     for tag in page['Tags']]
        }))
        # remap existing attributes to the same schema as table attributes
        existing_attrs = {k.replace('BillingModeSummary.', ''): v for k, v in existing_attrs.items()}
        # check every attribute
        needs_update = False
        for k, v in ensure_attrs.items():
            if v != existing_attrs.get(k):
                needs_update = True
                if preview:
                    print_fn(f' preview: {k}: {existing_attrs.get(k)} -> {v}')
                else:
                    print_fn(f' {k}: {existing_attrs.get(k)} -> {v}')
                assert k.split('.')[0] != 'KeySchema', 'KeySchema cannot be updated on existing tables'
        # collect tags to remove
        tags_to_remove = []
        for tag in dicts.from_dotted(existing_attrs).get('Tags', []):
            if tag['Key'] not in [t['Key'] for t in dicts.from_dotted(ensure_attrs).get('Tags', [])]:
                tags_to_remove.append(tag['Key'])
                if preview:
                    print_fn(f' preview: untag: {tag["Key"]}')
                else:
                    print_fn(f' untag: {tag["Key"]}')
        if not preview:
            # prompt if updates
            if (needs_update or tags_to_remove) and not yes:
                print_fn('\nproceed? y/n ')
                assert sh.getch() == 'y'
            # update if needed
            if needs_update:
                # update tags
                ensure_attrs = dicts.from_dotted(ensure_attrs)
                if 'Tags' in ensure_attrs:
                    for tag in ensure_attrs['Tags']:
                        tag['Value'] = str(tag['Value'])
                    client('dynamodb').tag_resource(ResourceArn=arn(name), Tags=ensure_attrs['Tags'])
                    del ensure_attrs['Tags']
                # update table. note: KeySchema cannot be updated
                del ensure_attrs['KeySchema']
                client('dynamodb').update_table(TableName=name, **ensure_attrs)
            # remove unused tags if needed
            if tags_to_remove:
                client('dynamodb').untag_resource(ResourceArn=arn(name), TagKeys=tags_to_remove)

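# A minimal usage sketch (hypothetical table and attrs). Columns use the
# "name:type:keytype" form parsed above; everything else is a dotted "k=v" attr:
#   ensure_table('my-table',
#                'userid:s:hash',
#                'timestamp:n:range',
#                'ProvisionedThroughput.ReadCapacityUnits=5',
#                'ProvisionedThroughput.WriteCapacityUnits=5')
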
def rm_bucket(name, print_fn=stderr):
    try:
        for page in client('s3').get_paginator('list_objects_v2').paginate(Bucket=name):
            keys = [key['Key'] for key in page.get('Contents', [])]
            if keys:
                client('s3').delete_objects(Bucket=name, Delete={'Objects': [{'Key': key} for key in keys]})
                for key in keys:
                    print_fn(f'deleted object: s3://{name}/{key}')
        for page in client('s3').get_paginator('list_object_versions').paginate(Bucket=name):
            keys = page.get('Versions', [])
            if keys:
                client('s3').delete_objects(Bucket=name,
                                            Delete={'Objects': [{'Key': key['Key'], 'VersionId': key['VersionId']} for key in keys]})
                for key in keys:
                    print_fn(f'deleted version: s3://{name}/{key["Key"]} {key["VersionId"]}')
            keys = page.get('DeleteMarkers', [])
            if keys:
                client('s3').delete_objects(Bucket=name,
                                            Delete={'Objects': [{'Key': key['Key'], 'VersionId': key['VersionId']} for key in keys]})
                for key in keys:
                    print_fn(f'deleted version: s3://{name}/{key["Key"]} {key["VersionId"]}')
        client('s3').delete_bucket(Bucket=name)
        print_fn(f'deleted bucket: s3://{name}')
    except client('s3').exceptions.NoSuchBucket:
        pass