def run_terminate_cron_lambda(name, settings):
    """Terminate a cron-triggered lambda.

    Removes the CloudWatch Events rule targets, the invoke permission, the
    rule itself, and finally the lambda function. All steps are best-effort
    (ignore_error=True) so a partially-deleted setup can be re-terminated.

    :param name: deploy folder name under '<template_path>/lambda/'
    :param settings: dict containing the function 'NAME'
    """
    function_name = settings['NAME']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################

    print_session('terminate lambda: %s' % function_name)

    print_message('unlink event and lambda')

    # Fixed: previously a hard-coded '["1"]' was passed to remove-targets,
    # which only removed a target with id "1". Enumerate the rule's actual
    # targets (as the other cron-terminate variants in this file do) so every
    # attached target is removed.
    cmd = ['events', 'list-targets-by-rule', '--rule', function_name + 'CronRule']
    result = aws_cli.run(cmd, ignore_error=True)
    if type(result) is dict:
        target_list = result['Targets']
    else:
        target_list = list()
    ids_list = ['"%s"' % target['Id'] for target in target_list]
    # remove-targets expects a JSON array of target id strings.
    ids_list = '[%s]' % ','.join(ids_list)

    cmd = ['events', 'remove-targets', '--rule', function_name + 'CronRule', '--ids', ids_list]
    aws_cli.run(cmd, ignore_error=True)

    print_message('remove event permission')

    cmd = ['lambda', 'remove-permission', '--function-name', function_name,
           '--statement-id', function_name + 'StatementId']
    aws_cli.run(cmd, ignore_error=True)

    print_message('delete cron event')

    cmd = ['events', 'delete-rule', '--name', function_name + 'CronRule']
    aws_cli.run(cmd, ignore_error=True)

    print_message('delete lambda function')

    cmd = ['lambda', 'delete-function', '--function-name', function_name]
    aws_cli.run(cmd, cwd=deploy_folder, ignore_error=True)
def _build(deploy_folder, function_name, phase, settings, template_name, template_path):
    """Package the lambda's deploy folder into deploy.zip.

    Returns a list of tag strings recording the current git revisions of the
    johanna repository and of the template repository.
    """
    rev_cmd = ['git', 'rev-parse', 'HEAD']
    hash_johanna = subprocess.Popen(rev_cmd, stdout=subprocess.PIPE).communicate()[0]
    hash_template = subprocess.Popen(rev_cmd, stdout=subprocess.PIPE, cwd=template_path).communicate()[0]

    print_session('packaging lambda: %s' % function_name)

    print_message('cleanup generated files')

    # Drop everything git does not track so the zip contains a clean tree.
    subprocess.Popen(['git', 'clean', '-d', '-f', '-x'], cwd=deploy_folder).communicate()

    requirements_path = '%s/requirements.txt' % deploy_folder
    if os.path.exists(requirements_path):
        print_message('install dependencies')
        subprocess.Popen(['pip3', 'install', '-r', requirements_path, '-t', deploy_folder]).communicate()

    settings_path = '%s/settings_local_sample.py' % deploy_folder
    if os.path.exists(settings_path):
        print_message('create environment values')

        lines = read_file(settings_path)
        # PHASE first, then every key from settings, substituted into the
        # sample file to produce settings_local.py.
        options = [['PHASE', phase]] + [[kk, settings[kk]] for kk in settings]
        for key, value in options:
            lines = re_sub_lines(lines, '^(%s) .*' % key, '\\1 = \'%s\'' % value)
        write_file('%s/settings_local.py' % deploy_folder, lines)

    print_message('zip files')

    subprocess.Popen(['zip', '-r', 'deploy.zip', '.'], cwd=deploy_folder).communicate()

    return [
        'git_hash_johanna=%s' % hash_johanna.decode('utf-8').strip(),
        'git_hash_%s=%s' % (template_name, hash_template.decode('utf-8').strip())
    ]
def run_create_default_lambda(name, settings):
    """Create a default lambda function from '<template_path>/lambda/<name>'.

    Cleans generated files, installs dependencies, renders settings_local.py
    from the sample, zips the folder and calls 'lambda create-function'.

    :param name: deploy folder name under '<template_path>/lambda/'
    :param settings: dict with 'NAME', 'DESCRIPTION' and environment values
    """
    description = settings['DESCRIPTION']
    function_name = settings['NAME']
    phase = env['common']['PHASE']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################

    print_session('create lambda: %s' % function_name)

    gitignore_path = '%s/.gitignore' % deploy_folder
    if os.path.exists(gitignore_path):
        ll = read_file(gitignore_path)
        print_message('cleanup generated files')
        subprocess.Popen(' '.join(['rm', '-rf'] + ll), shell=True, cwd=deploy_folder).communicate()

    requirements_path = '%s/requirements.txt' % deploy_folder
    if os.path.exists(requirements_path):
        # Fixed: 'install dependencies' used to be printed twice — once
        # unconditionally before this check and once here. Keep only the
        # message that reflects actual work.
        print_message('install dependencies')
        # NOTE(review): this variant uses 'pip' while _build uses 'pip3' —
        # confirm which interpreter's pip is intended.
        cmd = ['pip', 'install', '-r', requirements_path, '-t', deploy_folder]
        subprocess.Popen(cmd).communicate()

    settings_path = '%s/settings_local_sample.py' % deploy_folder
    if os.path.exists(settings_path):
        print_message('create environment values')

        lines = read_file(settings_path)
        option_list = list()
        option_list.append(['PHASE', phase])
        for key in settings:
            value = settings[key]
            option_list.append([key, value])
        for oo in option_list:
            lines = re_sub_lines(lines, '^(%s) .*' % oo[0], '\\1 = \'%s\'' % oo[1])
        write_file('%s/settings_local.py' % deploy_folder, lines)

    print_message('zip files')

    cmd = ['zip', '-r', 'deploy.zip', '.']
    subprocess.Popen(cmd, cwd=deploy_folder).communicate()

    print_message('create lambda function')

    role_arn = aws_cli.get_role_arn('aws-lambda-default-role')

    cmd = [
        'lambda', 'create-function',
        '--function-name', function_name,
        '--description', description,
        '--zip-file', 'fileb://deploy.zip',
        '--role', role_arn,
        '--handler', 'lambda.handler',
        '--runtime', 'python3.6',
        '--timeout', '120'
    ]
    aws_cli.run(cmd, cwd=deploy_folder)
def run_terminate_acm_certificate(arn):
    """Delete the ACM certificate identified by *arn*."""
    print_session('terminate acm certificate')

    aws_cli = AWSCli()
    aws_cli.run(['acm', 'delete-certificate', '--certificate-arn', arn])
def _manual_backup():
    """Dump the MySQL/Aurora RDS data into the template's mysql_data.sql."""
    aws_cli = AWSCli()

    ################################################################################
    print_session('dump mysql data')

    engine = env['rds']['ENGINE']
    # Only MySQL-compatible engines can be dumped this way.
    if engine not in ('mysql', 'aurora'):
        print('not supported:', engine)
        raise Exception()

    ################################################################################
    print_message('get database address')

    if env['common']['PHASE'] != 'dv':
        host = aws_cli.get_rds_address(read_replica=True)
    else:
        # In 'dv', the operator chooses between the Vagrant VM database and
        # the RDS read replica; keep asking until a yes/no answer is given.
        while True:
            answer = input('Do you use a database of Vagrant VM? (yes/no): ').lower()
            if answer in ('yes', 'no'):
                break
        if answer == 'yes':
            host = 'dv-database.hbsmith.io'
        else:
            host = aws_cli.get_rds_address(read_replica=True)

    db_name = env['rds']['DATABASE']
    db_password = env['rds']['USER_PASSWORD']
    db_user = env['rds']['USER_NAME']

    template_name = env['template']['NAME']
    dump_path = 'template/%s/rds/mysql_data.sql' % template_name

    _mysql_dump(host, db_user, db_password, db_name, dump_path)
def _manual_backup(): aws_cli = AWSCli() ################################################################################ print_session('dump mysql data') engine = env['rds']['ENGINE'] if engine not in ('mysql', 'aurora'): print('not supported:', engine) raise Exception() ################################################################################ print_message('get database address') if env['common']['PHASE'] != 'dv': host = aws_cli.get_rds_address(read_replica=True) else: while True: answer = input('Do you use a database of Vagrant VM? (yes/no): ') if answer.lower() == 'no': host = aws_cli.get_rds_address(read_replica=True) break if answer.lower() == 'yes': host = 'dv-database.hbsmith.io' break database = env['rds']['DATABASE'] password = env['rds']['USER_PASSWORD'] user = env['rds']['USER_NAME'] template_name = env['template']['NAME'] filename_path = 'template/%s/rds/mysql_data.sql' % template_name _mysql_dump(host, user, password, database, filename_path)
def _update(aws_cli, deploy_folder, function_name, build_info, queue_arn):
    """Update an existing SQS-triggered lambda.

    Uploads deploy.zip, refreshes the function's tags, then recreates the SQS
    event-source mapping (delete old mappings, wait, create the new one).

    :param aws_cli: AWSCli instance for the target region
    :param deploy_folder: folder containing deploy.zip
    :param build_info: list of 'key=value' tag strings
    :param queue_arn: ARN of the SQS queue feeding this function
    """
    print_session('update lambda: %s' % function_name)

    cmd = ['lambda', 'update-function-code',
           '--function-name', function_name,
           '--zip-file', 'fileb://deploy.zip']
    result = aws_cli.run(cmd, cwd=deploy_folder)
    function_arn = result['FunctionArn']

    print_message('update lambda tags')

    cmd = ['lambda', 'tag-resource',
           '--resource', function_arn,
           '--tags', ','.join(build_info)]
    aws_cli.run(cmd, cwd=deploy_folder)

    # Fixed: removed a stray no-op string literal
    # ('aws lambda --function-name sachiel_send_email') that sat here as
    # dead code.

    print_message('update sqs event source for %s' % function_name)

    cmd = ['lambda', 'list-event-source-mappings', '--function-name', function_name]
    mappings = aws_cli.run(cmd)['EventSourceMappings']
    for mapping in mappings:
        cmd = ['lambda', 'delete-event-source-mapping', '--uuid', mapping['UUID']]
        aws_cli.run(cmd)

    # Mapping deletion is asynchronous; creating a new mapping too early can
    # conflict with the one still being deleted.
    print_message('wait two minutes until deletion is complete')
    time.sleep(120)

    cmd = ['lambda', 'create-event-source-mapping',
           '--event-source-arn', queue_arn,
           '--function-name', function_name]
    aws_cli.run(cmd)
def _create(aws_cli, deploy_folder, description, function_name, build_info, queue_arn):
    """Create an SQS-triggered lambda function and wire up its event source."""
    print_session('create lambda: %s' % function_name)

    role_arn = aws_cli.get_role_arn('aws-lambda-sqs-role')

    cmd = ['lambda', 'create-function']
    cmd += ['--function-name', function_name]
    cmd += ['--description', description]
    cmd += ['--zip-file', 'fileb://deploy.zip']
    cmd += ['--role', role_arn]
    cmd += ['--handler', 'lambda.handler']
    cmd += ['--runtime', 'python3.6']
    cmd += ['--tags', ','.join(build_info)]
    cmd += ['--timeout', '120']
    aws_cli.run(cmd, cwd=deploy_folder)

    print_message('give event permission')

    cmd = ['lambda', 'add-permission']
    cmd += ['--function-name', function_name]
    cmd += ['--statement-id', function_name + 'StatementId']
    cmd += ['--action', 'lambda:InvokeFunction']
    cmd += ['--principal', 'events.amazonaws.com']
    cmd += ['--source-arn', queue_arn]
    aws_cli.run(cmd)

    print_message('create sqs event source for %s' % function_name)

    cmd = ['lambda', 'create-event-source-mapping']
    cmd += ['--event-source-arn', queue_arn]
    cmd += ['--function-name', function_name]
    aws_cli.run(cmd)
def run_terminate_default_lambda(function_name, settings):
    """Delete a lambda function in the region given by settings."""
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('delete lambda function')

    # Best-effort delete: errors (e.g. already deleted) are ignored.
    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                ignore_error=True)
def find_host_zone_id(host_zone_name):
    """Return the Route53 hosted-zone id for *host_zone_name*.

    Raises Exception when the lookup does not yield exactly one zone.
    """
    print_session('find_host_zone_id')

    aws_cli = AWSCli()
    rr = aws_cli.run(['route53', 'list-hosted-zones-by-name',
                      '--dns-name', host_zone_name])

    zones = rr['HostedZones']
    if len(zones) != 1:
        raise Exception('wrong host zone')

    return zones[0]['Id']
def run_create_acm_certificate(domain_name, additional_names, validation_method):
    """Request an ACM certificate for *domain_name*, optionally with SANs."""
    print_session('create acm certificate')

    aws_cli = AWSCli()

    cmd = ['acm', 'request-certificate', '--domain-name', domain_name]
    if additional_names:
        # Subject alternative names are passed space-separated.
        cmd += ['--subject-alternative-names', ' '.join(additional_names)]
    cmd += ['--validation-method', validation_method]

    aws_cli.run(cmd)
def run_terminate_s3_script_bucket_lifecycle(name):
    """Remove the lifecycle configuration from the script S3 bucket."""
    aws_cli = AWSCli()
    # NOTE(review): reads the module-level 'args' (presumably an argparse
    # result) instead of taking a settings parameter like the sibling
    # terminate functions — confirm this is intentional.
    bucket_name = args.bucket_name

    ################################################################################
    print_session('terminate %s' % name)

    ################################################################################
    print_message('delete life cycle')

    # Best-effort: ignore errors (e.g. no lifecycle configured).
    cmd = ['s3api', 'delete-bucket-lifecycle', '--bucket', bucket_name]
    aws_cli.run(cmd, ignore_error=True)
def run_terminate_cron_lambda(name, settings):
    """Terminate a cron lambda: remove the rule's targets and the invoke
    permission, delete the CloudWatch Events rule, then the function."""
    function_name = settings['NAME']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('unlink event and lambda')

    rule_name = function_name + 'CronRule'
    result = aws_cli.run(['events', 'list-targets-by-rule', '--rule', rule_name],
                         ignore_error=True)
    # On error the result is not a dict; treat that as "no targets".
    targets = result['Targets'] if type(result) is dict else list()
    # remove-targets expects a JSON array of target id strings.
    ids_json = '[%s]' % ','.join('"%s"' % tt['Id'] for tt in targets)

    aws_cli.run(['events', 'remove-targets', '--rule', rule_name, '--ids', ids_json],
                ignore_error=True)

    print_message('remove event permission')

    aws_cli.run(['lambda', 'remove-permission',
                 '--function-name', function_name,
                 '--statement-id', function_name + 'StatementId'],
                ignore_error=True)

    print_message('delete cron event')

    aws_cli.run(['events', 'delete-rule', '--name', rule_name], ignore_error=True)

    print_message('delete lambda function')

    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                cwd=deploy_folder, ignore_error=True)
def run_terminate_sns_lambda(name, settings):
    """Terminate an SNS-triggered lambda: unsubscribe it from every SNS topic
    referenced by its resource policy, then delete the function."""
    function_name = settings['NAME']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    # The function's policy lists which SNS topics may invoke it; use it to
    # discover the subscriptions that must be removed.
    policy_result = aws_cli.run(['lambda', 'get-policy', '--function-name', function_name],
                                ignore_error=True)
    if policy_result:
        policy_doc = json.loads(policy_result['Policy'])

        for statement in policy_doc['Statement']:
            print_message('remove subscription')

            topic_arn = statement['Condition']['ArnLike']['AWS:SourceArn']
            # The topic may live in another region: arn:aws:sns:<region>:...
            region = topic_arn.split(':')[3]

            subs_result = AWSCli(region).run(
                ['sns', 'list-subscriptions-by-topic', '--topic-arn', topic_arn],
                ignore_error=True)
            if not subs_result:
                continue

            for sub in subs_result['Subscriptions']:
                if sub['Protocol'] != 'lambda':
                    continue
                AWSCli(region).run(
                    ['sns', 'unsubscribe', '--subscription-arn', sub['SubscriptionArn']],
                    ignore_error=True)

    print_message('delete lambda function')

    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                cwd=deploy_folder, ignore_error=True)
def run_terminate_default_lambda(name, settings):
    """Delete the lambda function named by settings['NAME']."""
    function_name = settings['NAME']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('delete lambda function')

    # Best-effort delete: errors (e.g. already deleted) are ignored.
    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                cwd=deploy_folder, ignore_error=True)
def run_terminate_sns_lambda(name, settings):
    """Terminate an SNS-triggered lambda: unsubscribe it from every SNS topic
    referenced by its resource policy, then delete the function."""
    function_name = settings['NAME']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    # The function's policy lists which SNS topics may invoke it; use it to
    # discover the subscriptions that must be removed.
    cmd = ['lambda', 'get-policy', '--function-name', function_name]
    result = aws_cli.run(cmd, ignore_error=True)
    if result:
        policy = result['Policy']
        policy = json.loads(policy)

        statement_list = policy['Statement']
        for statement in statement_list:
            print_message('remove subscription')

            arn_like = statement['Condition']['ArnLike']
            source_arn = arn_like['AWS:SourceArn']
            # The topic may live in another region: arn:aws:sns:<region>:...
            sns_region = source_arn.split(':')[3]

            cmd = ['sns', 'list-subscriptions-by-topic', '--topic-arn', source_arn]
            result = AWSCli(sns_region).run(cmd, ignore_error=True)
            if not result:
                continue

            subscription_list = result['Subscriptions']
            for subscription in subscription_list:
                # Only lambda-protocol subscriptions belong to this function.
                if subscription['Protocol'] != 'lambda':
                    continue

                subscription_arn = subscription['SubscriptionArn']
                cmd = ['sns', 'unsubscribe', '--subscription-arn', subscription_arn]
                AWSCli(sns_region).run(cmd, ignore_error=True)

    print_message('delete lambda function')

    cmd = ['lambda', 'delete-function', '--function-name', function_name]
    aws_cli.run(cmd, cwd=deploy_folder, ignore_error=True)
def run_terminate_cron_lambda(name, settings):
    """Terminate a cron lambda: remove the rule's targets and the invoke
    permission, delete the CloudWatch Events rule, then the function."""
    function_name = settings['NAME']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('unlink event and lambda')

    cmd = ['events', 'list-targets-by-rule', '--rule', function_name + 'CronRule']
    result = aws_cli.run(cmd, ignore_error=True)
    # Presumably run() returns non-dict output when the rule is missing —
    # hence the type check; treat that case as "no targets".
    if type(result) is dict:
        target_list = result['Targets']
    else:
        target_list = list()
    ids_list = []
    for target in target_list:
        target_id = '"%s"' % target['Id']
        ids_list.append(target_id)
    # remove-targets expects a JSON array of target id strings.
    ids_list = '[%s]' % ','.join(ids_list)

    cmd = ['events', 'remove-targets', '--rule', function_name + 'CronRule', '--ids', ids_list]
    aws_cli.run(cmd, ignore_error=True)

    print_message('remove event permission')

    cmd = ['lambda', 'remove-permission', '--function-name', function_name,
           '--statement-id', function_name + 'StatementId']
    aws_cli.run(cmd, ignore_error=True)

    print_message('delete cron event')

    cmd = ['events', 'delete-rule', '--name', function_name + 'CronRule']
    aws_cli.run(cmd, ignore_error=True)

    print_message('delete lambda function')

    cmd = ['lambda', 'delete-function', '--function-name', function_name]
    aws_cli.run(cmd, cwd=deploy_folder, ignore_error=True)
def run_terminate_default_lambda(name, settings):
    """Delete the lambda function named by settings['NAME']."""
    function_name = settings['NAME']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('delete lambda function')

    # Best-effort delete: errors are ignored.
    cmd = ['lambda', 'delete-function', '--function-name', function_name]
    aws_cli.run(cmd, cwd=deploy_folder, ignore_error=True)
def run_terminate_sns_lambda(function_name, settings):
    """Terminate an SNS-triggered lambda in the region given by settings:
    drop every SNS subscription found in its resource policy, then delete
    the function."""
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])

    ################################################################################
    print_session(f'terminate lambda: {function_name}')

    policy_result = aws_cli.run(['lambda', 'get-policy', '--function-name', function_name],
                                ignore_error=True)
    if policy_result:
        policy_doc = json.loads(policy_result['Policy'])

        for statement in policy_doc['Statement']:
            print_message('remove subscription')

            topic_arn = statement['Condition']['ArnLike']['AWS:SourceArn']
            # The topic may live in another region: arn:aws:sns:<region>:...
            region = topic_arn.split(':')[3]

            subs_result = AWSCli(region).run(
                ['sns', 'list-subscriptions-by-topic', '--topic-arn', topic_arn],
                ignore_error=True)
            if not subs_result:
                continue

            for sub in subs_result['Subscriptions']:
                if sub['Protocol'] != 'lambda':
                    continue
                AWSCli(region).run(
                    ['sns', 'unsubscribe', '--subscription-arn', sub['SubscriptionArn']],
                    ignore_error=True)

    print_message('delete lambda function')

    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                ignore_error=True)
def run_terminate_cron_lambda(function_name, settings):
    """Terminate a cron lambda (region from settings): remove rule targets
    and the invoke permission, delete the rule, then the function."""
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('unlink event and lambda')

    rule_name = function_name + 'CronRule'
    rr = aws_cli.run(['events', 'list-targets-by-rule', '--rule', rule_name],
                     ignore_error=True)
    # On error the result is not a dict; treat that as "no targets".
    targets = rr['Targets'] if type(rr) is dict else list()
    # remove-targets expects a JSON array of target id strings.
    ids_json = '[%s]' % ','.join('"%s"' % tt['Id'] for tt in targets)

    aws_cli.run(['events', 'remove-targets', '--rule', rule_name, '--ids', ids_json],
                ignore_error=True)

    print_message('remove event permission')

    aws_cli.run(['lambda', 'remove-permission',
                 '--function-name', function_name,
                 '--statement-id', function_name + 'StatementId'],
                ignore_error=True)

    print_message('delete cron event')

    aws_cli.run(['events', 'delete-rule', '--name', rule_name], ignore_error=True)

    print_message('delete lambda function')

    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                ignore_error=True)
def run_terminate_sqs_lambda(function_name, settings):
    """Terminate an SQS-triggered lambda: delete its event-source mappings,
    then the function itself."""
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('delete event sources for %s' % function_name)

    rr = aws_cli.run(['lambda', 'list-event-source-mappings',
                      '--function-name', function_name])
    for mapping in rr['EventSourceMappings']:
        aws_cli.run(['lambda', 'delete-event-source-mapping', '--uuid', mapping['UUID']])

    print_message('delete lambda function')

    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                ignore_error=True)
def run_terminate_event_lambda(function_name, settings):
    """Terminate an event-rule lambda: detach the rule target, delete the
    rule, then delete the function."""
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])
    event_name = settings['EVENT_NAME']

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('unlink event and lambda')

    # The target id matches the event name here.
    aws_cli.run(['events', 'remove-targets', '--rule', event_name, '--ids', event_name],
                ignore_error=True)
    aws_cli.run(['events', 'delete-rule', '--name', event_name], ignore_error=True)

    print_message('delete lambda function')

    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                ignore_error=True)
def run_terminate_sqs_lambda(name, settings):
    """Terminate an SQS-triggered lambda: delete all of its event-source
    mappings, then delete the function."""
    function_name = settings['NAME']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('delete event sources for %s' % function_name)

    rr = aws_cli.run(['lambda', 'list-event-source-mappings',
                      '--function-name', function_name])
    for mapping in rr['EventSourceMappings']:
        aws_cli.run(['lambda', 'delete-event-source-mapping', '--uuid', mapping['UUID']])

    print_message('delete lambda function')

    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                cwd=deploy_folder, ignore_error=True)
def is_exist_record_set(host_zone_name, name):
    """Return True when hosted zone *host_zone_name* already contains a
    record set named *name*; raises when the zone lookup is ambiguous."""
    print_session('is_exist_record_set')

    aws_cli = AWSCli()

    rr = aws_cli.run(['route53', 'list-hosted-zones-by-name',
                      '--dns-name', host_zone_name])
    zones = rr['HostedZones']
    if len(zones) != 1:
        raise Exception('wrong host zone')
    hosted_zone_id = zones[0]['Id']

    rr = aws_cli.run(['route53', 'list-resource-record-sets',
                      '--hosted-zone-id', hosted_zone_id])
    for record in rr['ResourceRecordSets']:
        if record['Name'] == name:
            print_session('exist record set(%s)' % name)
            return True

    return False
def run_terminate_s3_bucket(name, settings):
    """Tear down an S3 bucket's lifecycle, web hosting and policy settings,
    then restore the full public-access block. All steps are best-effort."""
    aws_cli = AWSCli()
    bucket_name = settings['BUCKET_NAME']

    ################################################################################
    print_session('terminate %s' % name)

    ################################################################################
    print_message('delete life cycle')

    aws_cli.run(['s3api', 'delete-bucket-lifecycle', '--bucket', bucket_name],
                ignore_error=True)

    ################################################################################
    print_message('delete web hosting')

    aws_cli.run(['s3api', 'delete-bucket-website', '--bucket', bucket_name],
                ignore_error=True)

    ################################################################################
    print_message('delete policy')

    aws_cli.run(['s3api', 'delete-bucket-policy', '--bucket', bucket_name],
                ignore_error=True)

    ################################################################################
    print_message('restore public access block')

    block_config = {
        "BlockPublicAcls": True,
        "IgnorePublicAcls": True,
        "BlockPublicPolicy": True,
        "RestrictPublicBuckets": True
    }
    cmd = ['s3api', 'put-public-access-block', '--bucket', bucket_name]
    cmd += ['--public-access-block-configuration', json.dumps(block_config)]
    aws_cli.run(cmd, ignore_error=True)
def main(settings):
    """Provision the full AWS environment for one service.

    In order: the EB application (key pair import + application with a
    version lifecycle rule), the RDS VPC (two private subnets, DB subnet
    group, route table, security group), the EB VPC (two private + two
    public subnets, internet gateway, one NAT gateway, route tables,
    security groups), an optional ElastiCache cache subnet group, a VPC
    peering connection between the two VPCs with routes both ways, and a
    network interface for every EB environment that declares a fixed
    private IP.

    Exits early via sys.exit(0) when either VPC already exists — the
    whole sequence is create-only, not reconciling.
    """
    aws_availability_zone_1 = settings['AWS_AVAILABILITY_ZONE_1']
    aws_availability_zone_2 = settings['AWS_AVAILABILITY_ZONE_2']
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])
    rds_engine = env['rds']['ENGINE']
    rds_subnet_name = env['rds']['DB_SUBNET_NAME']
    service_name = env['common'].get('SERVICE_NAME', '')
    name_prefix = '%s_' % service_name if service_name else ''
    cidr_vpc = aws_cli.cidr_vpc
    cidr_subnet = aws_cli.cidr_subnet

    print_message('get vpc id')

    # Abort when anything already exists: this function only creates.
    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()
    if rds_vpc_id or eb_vpc_id:
        print_message('VPC already exists')
        print('RDS: %s \n' % rds_vpc_id)
        print('EB: %s \n' % eb_vpc_id)
        print_session('finish python code')
        sys.exit(0)

    ################################################################################
    #
    # EB Application
    #
    ################################################################################
    print_session('create eb application')

    ################################################################################
    print_message('import key pair')

    cmd = ['ec2', 'import-key-pair']
    cmd += ['--key-name', env['common']['AWS_KEY_PAIR_NAME']]
    cmd += ['--public-key-material', env['common']['AWS_KEY_PAIR_MATERIAL']]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create application')

    eb_service_role_arn = aws_cli.get_iam_role('aws-elasticbeanstalk-service-role')['Role']['Arn']

    # Keep at most 100 application versions and delete their S3 sources.
    config_format = '%s=%s'
    eb_max_count_rule = list()
    eb_max_count_rule.append(config_format % ('DeleteSourceFromS3', 'true'))
    eb_max_count_rule.append(config_format % ('Enabled', 'true'))
    eb_max_count_rule.append(config_format % ('MaxCount', 100))

    cmd = ['elasticbeanstalk', 'create-application']
    cmd += ['--application-name', env['elasticbeanstalk']['APPLICATION_NAME']]
    cmd += ['--resource-lifecycle-config',
            'ServiceRole=%s,VersionLifecycleConfig={MaxCountRule={%s}}' % (
                eb_service_role_arn, ','.join(eb_max_count_rule))]
    aws_cli.run(cmd)

    ################################################################################
    #
    # RDS
    #
    ################################################################################
    print_session('rds')

    ################################################################################
    print_message('create vpc')

    cmd = ['ec2', 'create-vpc']
    cmd += ['--cidr-block', cidr_vpc['rds']]
    result = aws_cli.run(cmd)
    rds_vpc_id = result['Vpc']['VpcId']
    aws_cli.set_name_tag(rds_vpc_id, '%srds' % name_prefix)

    ################################################################################
    print_message('create subnet')

    # Two private subnets in different AZs (RDS subnet groups need >= 2 AZs).
    rds_subnet_id = dict()

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', rds_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['rds']['private_1']]
    cmd += ['--availability-zone', aws_availability_zone_1]
    result = aws_cli.run(cmd)
    rds_subnet_id['private_1'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(rds_subnet_id['private_1'], '%srds_private_1' % name_prefix)

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', rds_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['rds']['private_2']]
    cmd += ['--availability-zone', aws_availability_zone_2]
    result = aws_cli.run(cmd)
    rds_subnet_id['private_2'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(rds_subnet_id['private_2'], '%srds_private_2' % name_prefix)

    ################################################################################
    print_message('create db subnet group')

    cmd = ['rds', 'create-db-subnet-group']
    cmd += ['--db-subnet-group-name', rds_subnet_name]
    cmd += ['--db-subnet-group-description', rds_subnet_name]
    cmd += ['--subnet-ids', rds_subnet_id['private_1'], rds_subnet_id['private_2']]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create ' + 'route table')
    # [FYI] PyCharm inspects 'create route table' as SQL query.

    rds_route_table_id = dict()

    cmd = ['ec2', 'create-route-table']
    cmd += ['--vpc-id', rds_vpc_id]
    result = aws_cli.run(cmd)
    rds_route_table_id['private'] = result['RouteTable']['RouteTableId']
    aws_cli.set_name_tag(rds_route_table_id['private'], '%srds_private' % name_prefix)

    ################################################################################
    print_message('associate route table')

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', rds_subnet_id['private_1']]
    cmd += ['--route-table-id', rds_route_table_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', rds_subnet_id['private_2']]
    cmd += ['--route-table-id', rds_route_table_id['private']]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create security group')

    rds_security_group_id = dict()

    cmd = ['ec2', 'create-security-group']
    cmd += ['--group-name', '%srds' % name_prefix]
    cmd += ['--description', '%srds' % name_prefix]
    cmd += ['--vpc-id', rds_vpc_id]
    result = aws_cli.run(cmd)
    rds_security_group_id['private'] = result['GroupId']

    ################################################################################
    print_message('authorize security group ingress')

    # All traffic within the group itself.
    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', rds_security_group_id['private']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', rds_security_group_id['private']]
    aws_cli.run(cmd)

    # DB port from the EB VPC CIDR; 5432 for aurora-postgresql,
    # otherwise 3306 (presumably MySQL-compatible engines).
    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', rds_security_group_id['private']]
    cmd += ['--protocol', 'tcp']
    if rds_engine == 'aurora-postgresql':
        cmd += ['--port', '5432']
    else:
        cmd += ['--port', '3306']
    cmd += ['--cidr', cidr_vpc['eb']]
    aws_cli.run(cmd)

    ################################################################################
    #
    # EB
    #
    ################################################################################
    print_session('eb')

    ################################################################################
    print_message('create vpc')

    cmd = ['ec2', 'create-vpc']
    cmd += ['--cidr-block', cidr_vpc['eb']]
    result = aws_cli.run(cmd)
    eb_vpc_id = result['Vpc']['VpcId']
    aws_cli.set_name_tag(eb_vpc_id, '%seb' % name_prefix)

    ################################################################################
    print_message('create subnet')

    # Two private + two public subnets, spread over the two AZs.
    eb_subnet_id = dict()

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', eb_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['eb']['private_1']]
    cmd += ['--availability-zone', aws_availability_zone_1]
    result = aws_cli.run(cmd)
    eb_subnet_id['private_1'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(eb_subnet_id['private_1'], '%seb_private_1' % name_prefix)

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', eb_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['eb']['private_2']]
    cmd += ['--availability-zone', aws_availability_zone_2]
    result = aws_cli.run(cmd)
    eb_subnet_id['private_2'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(eb_subnet_id['private_2'], '%seb_private_2' % name_prefix)

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', eb_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['eb']['public_1']]
    cmd += ['--availability-zone', aws_availability_zone_1]
    result = aws_cli.run(cmd)
    eb_subnet_id['public_1'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(eb_subnet_id['public_1'], '%seb_public_1' % name_prefix)

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', eb_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['eb']['public_2']]
    cmd += ['--availability-zone', aws_availability_zone_2]
    result = aws_cli.run(cmd)
    eb_subnet_id['public_2'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(eb_subnet_id['public_2'], '%seb_public_2' % name_prefix)

    ################################################################################
    print_message('create internet gateway')

    cmd = ['ec2', 'create-internet-gateway']
    result = aws_cli.run(cmd)
    internet_gateway_id = result['InternetGateway']['InternetGatewayId']
    aws_cli.set_name_tag(internet_gateway_id, '%seb' % name_prefix)

    ################################################################################
    print_message('attach internet gateway')

    cmd = ['ec2', 'attach-internet-gateway']
    cmd += ['--internet-gateway-id', internet_gateway_id]
    cmd += ['--vpc-id', eb_vpc_id]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create eip')
    # We use only one NAT gateway at subnet 'public_1'

    cmd = ['ec2', 'allocate-address']
    cmd += ['--domain', 'vpc']
    result = aws_cli.run(cmd)
    eb_eip_id = result['AllocationId']
    aws_cli.set_name_tag(eb_eip_id, '%snat' % name_prefix)

    ################################################################################
    print_message('create nat gateway')
    # We use only one NAT gateway at subnet 'public_1'

    cmd = ['ec2', 'create-nat-gateway']
    cmd += ['--subnet-id', eb_subnet_id['public_1']]
    cmd += ['--allocation-id', eb_eip_id]
    result = aws_cli.run(cmd)
    eb_nat_gateway_id = result['NatGateway']['NatGatewayId']
    aws_cli.set_name_tag(eb_nat_gateway_id, '%seb' % name_prefix)

    ################################################################################
    print_message('wait create nat gateway')

    # The NAT gateway must be available before routes through it work.
    aws_cli.wait_create_nat_gateway(eb_vpc_id)

    ################################################################################
    print_message('create ' + 'route table')
    # [FYI] PyCharm inspects 'create route table' as SQL query.

    eb_route_table_id = dict()

    cmd = ['ec2', 'create-route-table']
    cmd += ['--vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    eb_route_table_id['private'] = result['RouteTable']['RouteTableId']
    aws_cli.set_name_tag(eb_route_table_id['private'], '%seb_private' % name_prefix)

    cmd = ['ec2', 'create-route-table']
    cmd += ['--vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    eb_route_table_id['public'] = result['RouteTable']['RouteTableId']
    aws_cli.set_name_tag(eb_route_table_id['public'], '%seb_public' % name_prefix)

    ################################################################################
    print_message('associate route table')

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', eb_subnet_id['private_1']]
    cmd += ['--route-table-id', eb_route_table_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', eb_subnet_id['private_2']]
    cmd += ['--route-table-id', eb_route_table_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', eb_subnet_id['public_1']]
    cmd += ['--route-table-id', eb_route_table_id['public']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', eb_subnet_id['public_2']]
    cmd += ['--route-table-id', eb_route_table_id['public']]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create route')

    # Public subnets reach the internet via the internet gateway,
    # private subnets via the NAT gateway.
    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['public']]
    cmd += ['--destination-cidr-block', '0.0.0.0/0']
    cmd += ['--gateway-id', internet_gateway_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['private']]
    cmd += ['--destination-cidr-block', '0.0.0.0/0']
    cmd += ['--nat-gateway-id', eb_nat_gateway_id]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create security group')

    eb_security_group_id = dict()

    cmd = ['ec2', 'create-security-group']
    cmd += ['--group-name', '%seb_private' % name_prefix]
    cmd += ['--description', '%seb_private' % name_prefix]
    cmd += ['--vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    eb_security_group_id['private'] = result['GroupId']

    cmd = ['ec2', 'create-security-group']
    cmd += ['--group-name', '%seb_public' % name_prefix]
    cmd += ['--description', '%seb_public' % name_prefix]
    cmd += ['--vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    eb_security_group_id['public'] = result['GroupId']

    ################################################################################
    print_message('authorize security group ingress')

    # All traffic between the private and public groups, in every combination.
    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['private']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', eb_security_group_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['private']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', eb_security_group_id['public']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['public']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', eb_security_group_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['public']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', eb_security_group_id['public']]
    aws_cli.run(cmd)

    # SSH only from within the EB VPC; HTTP from anywhere.
    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['public']]
    cmd += ['--protocol', 'tcp']
    cmd += ['--port', '22']
    cmd += ['--cidr', cidr_vpc['eb']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['public']]
    cmd += ['--protocol', 'tcp']
    cmd += ['--port', '80']
    cmd += ['--cidr', '0.0.0.0/0']
    aws_cli.run(cmd)

    ################################################################################
    #
    # ElastiCache
    #
    ################################################################################
    print_session('elasticache')

    ################################################################################
    # Optional: only when the config declares an 'elasticache' section.
    if env.get('elasticache'):
        elasticache_subnet_name = env['elasticache']['CACHE_SUBNET_NAME']

        print_message('create cache subnet group')

        cmd = ['elasticache', 'create-cache-subnet-group']
        cmd += ['--cache-subnet-group-name', elasticache_subnet_name]
        cmd += ['--cache-subnet-group-description', elasticache_subnet_name]
        cmd += ['--subnet-ids', eb_subnet_id['private_1'], eb_subnet_id['private_2']]
        aws_cli.run(cmd)

    ################################################################################
    #
    # vpc peering connection
    #
    ################################################################################
    print_session('vpc peering connection')

    ################################################################################
    print_message('create vpc peering connection')

    cmd = ['ec2', 'create-vpc-peering-connection']
    cmd += ['--vpc-id', rds_vpc_id]
    cmd += ['--peer-vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    peering_connection_id = result['VpcPeeringConnection']['VpcPeeringConnectionId']
    aws_cli.set_name_tag(peering_connection_id, '%s' % service_name)

    # Same-account peering: accept immediately.
    cmd = ['ec2', 'accept-vpc-peering-connection']
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create route: rds -> eb')

    # Route every EB subnet CIDR through the peering connection.
    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', rds_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['eb']['private_1']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', rds_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['eb']['private_2']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', rds_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['eb']['public_1']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', rds_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['eb']['public_2']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create route: eb -> rds')

    # And the reverse: both EB route tables reach the RDS private subnets.
    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['rds']['private_1']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['rds']['private_2']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['public']]
    cmd += ['--destination-cidr-block', cidr_subnet['rds']['private_1']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['public']]
    cmd += ['--destination-cidr-block', cidr_subnet['rds']['private_2']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    ################################################################################
    #
    # Network Interface
    #
    ################################################################################
    print_session('network interface')

    ################################################################################
    # Pre-create an ENI with a fixed private IP for each EB environment
    # that declares one (so the address survives environment rebuilds —
    # presumably; confirm against the EB environment creation code).
    environment_list = env['elasticbeanstalk']['ENVIRONMENTS']
    for environment in environment_list:
        cname = environment['CNAME']
        private_ip = environment.get('PRIVATE_IP')
        if cname and private_ip:
            print_message('create network interface for %s' % cname)

            cmd = ['ec2', 'create-network-interface']
            cmd += ['--subnet-id', eb_subnet_id['private_1']]
            cmd += ['--description', cname]
            cmd += ['--private-ip-address', private_ip]
            cmd += ['--groups', eb_security_group_id['private']]
            result = aws_cli.run(cmd)
            network_interface_id = result['NetworkInterface']['NetworkInterfaceId']
            aws_cli.set_name_tag(network_interface_id, '%snat' % name_prefix)
def run_create_lambda_sns(name, settings):
    """Create or update an SNS-triggered Lambda function.

    Verifies every configured topic exists, packages the function via
    the shared _build() helper, then either updates the existing
    function's code and tags or creates it and subscribes it to each
    topic (with the matching invoke permission).
    """
    aws_cli = AWSCli()

    description = settings['DESCRIPTION']
    function_name = settings['NAME']
    phase = env['common']['PHASE']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################

    # Resolve all topics up front so a missing topic fails before any
    # packaging work is done.  Each entry is '<region>/<topic_name>'.
    topic_arn_list = list()
    for sns_topic_name in settings['SNS_TOPICS_NAMES']:
        print_message('check topic exists: %s' % sns_topic_name)

        region, topic_name = sns_topic_name.split('/')
        topic_arn = AWSCli(region).get_topic_arn(topic_name)
        if not topic_arn:
            # BUG FIX: this previously read settings['SNS_TOPIC_NAME'], a key
            # that does not exist (the config key is SNS_TOPICS_NAMES), so a
            # missing topic raised KeyError instead of printing which topic
            # was missing.
            print('sns topic: "%s" is not exists in %s' % (sns_topic_name, region))
            raise Exception()
        topic_arn_list.append(topic_arn)

    ################################################################################

    # Package the function (git clean, pip deps, settings_local.py,
    # deploy.zip) via the shared helper instead of duplicating it inline;
    # it returns the git-hash tag list.
    tags = _build(deploy_folder, function_name, phase, settings, template_name, template_path)

    print_message('create lambda function')

    role_arn = aws_cli.get_role_arn('aws-lambda-default-role')

    ################################################################################
    print_message('check previous version')

    need_update = False
    cmd = ['lambda', 'list-functions']
    result = aws_cli.run(cmd)
    for ff in result['Functions']:
        if function_name == ff['FunctionName']:
            need_update = True
            break

    ################################################################################
    if need_update:
        print_session('update lambda: %s' % function_name)

        cmd = ['lambda', 'update-function-code',
               '--function-name', function_name,
               '--zip-file', 'fileb://deploy.zip']
        result = aws_cli.run(cmd, cwd=deploy_folder)
        function_arn = result['FunctionArn']

        print_message('update lambda tags')

        cmd = ['lambda', 'tag-resource',
               '--resource', function_arn,
               '--tags', ','.join(tags)]
        aws_cli.run(cmd, cwd=deploy_folder)
        return

    ################################################################################
    print_session('create lambda: %s' % function_name)

    cmd = ['lambda', 'create-function',
           '--function-name', function_name,
           '--description', description,
           '--zip-file', 'fileb://deploy.zip',
           '--role', role_arn,
           '--handler', 'lambda.handler',
           '--runtime', 'python3.6',
           '--tags', ','.join(tags),
           '--timeout', '120']
    result = aws_cli.run(cmd, cwd=deploy_folder)
    function_arn = result['FunctionArn']

    for topic_arn in topic_arn_list:
        print_message('create subscription')

        # The region is the 4th field of the ARN; subscribe in that region.
        topic_region = topic_arn.split(':')[3]
        cmd = ['sns', 'subscribe',
               '--topic-arn', topic_arn,
               '--protocol', 'lambda',
               '--notification-endpoint', function_arn]
        AWSCli(topic_region).run(cmd)

        print_message('Add permission to lambda')

        # Let SNS (this topic only) invoke the function.
        statement_id = '%s_%s_Permission' % (function_name, topic_region)
        cmd = ['lambda', 'add-permission',
               '--function-name', function_name,
               '--statement-id', statement_id,
               '--action', 'lambda:InvokeFunction',
               '--principal', 'sns.amazonaws.com',
               '--source-arn', topic_arn]
        aws_cli.run(cmd)

        print_message('update tag with subscription info')

        cmd = ['lambda', 'tag-resource',
               '--resource', function_arn,
               '--tags', ','.join(tags)]
        aws_cli.run(cmd, cwd=deploy_folder)
#!/usr/bin/env python3 import datetime import os.path import subprocess import sys from env import env from run_common import AWSCli from run_common import check_template_availability from run_common import print_message from run_common import print_session aws_cli = AWSCli() print_session('alter database') check_template_availability() engine = env['rds']['ENGINE'] if engine not in ('mysql', 'aurora'): print('not supported:', engine) raise Exception() print_message('get database address') if env['common']['PHASE'] != 'dv': db_host = aws_cli.get_rds_address() else: while True: answer = input('Do you use a database of Vagrant VM? (yes/no): ') if answer.lower() == 'no':
    # NOTE(review): this chunk starts mid-definition — the `def` line that
    # introduces `region` and `name` (presumably
    # run_terminate_cw_dashboard(name, settings), the function called below)
    # is outside this view.
    aws_cli = AWSCli(region)
    dashboard_name = '%s_%s' % (name, region)

    print_message('terminate cloudwatch dashboard: %s' % dashboard_name)

    cmd = ['cloudwatch', 'delete-dashboards']
    cmd += ['--dashboard-names', dashboard_name]
    aws_cli.run(cmd)


################################################################################
#
# start
#
################################################################################
print_session('terminate cloudwatch dashboard')

cw = env.get('cloudwatch', dict())

# With an explicit dashboard name on the command line, terminate only the
# matching configured dashboard and warn when the name is unknown;
# otherwise iterate over every configured dashboard (truncated below).
if len(args) == 2:
    target_cw_dashboard_name = args[1]
    target_cw_dashboard_name_exists = False
    for cw_dashboard_env in cw.get('DASHBOARDS', list()):
        if cw_dashboard_env['NAME'] == target_cw_dashboard_name:
            target_cw_dashboard_name_exists = True
            run_terminate_cw_dashboard(cw_dashboard_env['NAME'], cw_dashboard_env)
    if not target_cw_dashboard_name_exists:
        print('"%s" is not exists in config.json' % target_cw_dashboard_name)
else:
    for cw_dashboard_env in cw.get('DASHBOARDS', list()):
        # NOTE(review): chunk truncated here — the loop body continues
        # beyond this view of the file.
def run_create_lambda_default(name, settings):
    """Create or update a plain (trigger-less) Lambda function.

    Packages the function's template folder via the shared _build()
    helper, then updates the existing function's code and tags, or
    creates the function when it does not exist yet.
    """
    aws_cli = AWSCli()

    description = settings['DESCRIPTION']
    function_name = settings['NAME']
    phase = env['common']['PHASE']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    ################################################################################

    # Package the function (git clean, pip deps, settings_local.py,
    # deploy.zip) via the shared _build() helper instead of duplicating
    # that code inline; it returns the git-hash tag list.
    tags = _build(deploy_folder, function_name, phase, settings, template_name, template_path)

    print_message('create lambda function')

    role_arn = aws_cli.get_role_arn('aws-lambda-default-role')

    ################################################################################
    print_message('check previous version')

    need_update = False
    cmd = ['lambda', 'list-functions']
    result = aws_cli.run(cmd)
    for ff in result['Functions']:
        if function_name == ff['FunctionName']:
            need_update = True
            break

    ################################################################################
    if need_update:
        print_session('update lambda: %s' % function_name)

        cmd = ['lambda', 'update-function-code',
               '--function-name', function_name,
               '--zip-file', 'fileb://deploy.zip']
        result = aws_cli.run(cmd, cwd=deploy_folder)
        function_arn = result['FunctionArn']

        print_message('update lambda tags')

        cmd = ['lambda', 'tag-resource',
               '--resource', function_arn,
               '--tags', ','.join(tags)]
        aws_cli.run(cmd, cwd=deploy_folder)
        return

    ################################################################################
    print_session('create lambda: %s' % function_name)

    cmd = ['lambda', 'create-function',
           '--function-name', function_name,
           '--description', description,
           '--zip-file', 'fileb://deploy.zip',
           '--role', role_arn,
           '--handler', 'lambda.handler',
           '--runtime', 'python3.6',
           '--tags', ','.join(tags),
           '--timeout', '120']
    aws_cli.run(cmd, cwd=deploy_folder)
if __name__ == "__main__": from run_common import parse_args parse_args() aws_cli = AWSCli() aws_default_region = aws_cli.env['AWS_DEFAULT_REGION'] eb_application_name = env['elasticbeanstalk']['APPLICATION_NAME'] ################################################################################ # # start # ################################################################################ print_session('terminate nova') ################################################################################ print_message('terminate nova') elapsed_time = 0 while True: cmd = ['elasticbeanstalk', 'describe-environments'] cmd += ['--application-name', eb_application_name] result = aws_cli.run(cmd) count = 0 for r in result['Environments']: if not r['EnvironmentName'].startswith('nova'): continue
def main(settings):
    """Tear down the project's AWS stack: network interfaces, VPC peering,
    security groups, routes, NAT gateways, EIPs, subnet groups, internet
    gateways, subnets, both VPCs, the EB application and the key pair.

    The steps run in strict dependency order (e.g. detach before delete,
    disassociate route tables before deleting them, delete NAT gateways
    before releasing EIPs, VPCs last). Every AWS call uses
    ``ignore_error=True`` so the teardown is best-effort and idempotent.

    :param settings: dict-like; only ``AWS_DEFAULT_REGION`` is read here.
    """
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])

    rds_subnet_name = env['rds']['DB_SUBNET_NAME']

    # Optional service-name prefix used in security-group names ('<svc>_eb_*').
    service_name = env['common'].get('SERVICE_NAME', '')
    name_prefix = '%s_' % service_name if service_name else ''

    ################################################################################
    # Block until dependent managed services are fully terminated; their ENIs
    # and security-group references would otherwise make the deletes below fail.
    print_message('wait terminate rds')

    aws_cli.wait_terminate_rds()

    ################################################################################
    print_message('wait terminate elasticache')

    aws_cli.wait_terminate_elasticache()

    ################################################################################
    print_message('wait terminate eb')

    aws_cli.wait_terminate_eb()

    ################################################################################
    print_message('get vpc id')

    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

    ################################################################################
    print_message('delete network interface')

    cmd = ['ec2', 'describe-network-interfaces']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['NetworkInterfaces']:
        # Only touch ENIs that belong to one of our two VPCs.
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue

        network_interface_id = r['NetworkInterfaceId']

        # An attached ENI must be detached before it can be deleted.
        if 'Attachment' in r:
            attachment_id = r['Attachment']['AttachmentId']
            cmd = ['ec2', 'detach-network-interface']
            cmd += ['--attachment-id', attachment_id]
            aws_cli.run(cmd, ignore_error=True)

        cmd = ['ec2', 'delete-network-interface']
        cmd += ['--network-interface-id', network_interface_id]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete vpc peering connection')

    cmd = ['ec2', 'describe-vpc-peering-connections']
    result = aws_cli.run(cmd, ignore_error=True)
    for vpc_peer in result['VpcPeeringConnections']:
        # Only the rds->eb peering created by this project is removed.
        if vpc_peer['RequesterVpcInfo']['VpcId'] == rds_vpc_id and vpc_peer['AccepterVpcInfo']['VpcId'] == eb_vpc_id:
            peering_connection_id = vpc_peer['VpcPeeringConnectionId']
            # NOTE(review): typo "connnection" in the log message below is in
            # the original; left byte-identical (runtime string).
            print('delete vpc connnection (id: %s)' % peering_connection_id)
            cmd = ['ec2', 'delete-vpc-peering-connection']
            cmd += ['--vpc-peering-connection-id', peering_connection_id]
            aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    # The eb_private/eb_public groups reference each other, so the mutual
    # ingress rules must be revoked before the groups themselves can be deleted.
    print_message('revoke security group ingress')

    security_group_id_1 = None
    security_group_id_2 = None
    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['SecurityGroups']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        if r['GroupName'] == '%seb_private' % name_prefix:
            security_group_id_1 = r['GroupId']
        if r['GroupName'] == '%seb_public' % name_prefix:
            security_group_id_2 = r['GroupId']

    if security_group_id_1 and security_group_id_2:
        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', security_group_id_1]
        cmd += ['--protocol', 'all']
        cmd += ['--source-group', security_group_id_2]
        aws_cli.run(cmd, ignore_error=True)

        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', security_group_id_2]
        cmd += ['--protocol', 'all']
        cmd += ['--source-group', security_group_id_1]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete security group')

    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['SecurityGroups']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        # The 'default' group cannot be deleted; it goes away with the VPC.
        if r['GroupName'] == 'default':
            continue
        print('delete security group (id: %s)' % r['GroupId'])
        cmd = ['ec2', 'delete-security-group']
        cmd += ['--group-id', r['GroupId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete route')

    cmd = ['ec2', 'describe-route-tables']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['RouteTables']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        for route in r['Routes']:
            # NOTE(review): routes without a 'DestinationCidrBlock' key (e.g.
            # IPv6/prefix-list routes) would raise KeyError here — confirm
            # whether such routes can occur in these VPCs.
            if route['DestinationCidrBlock'] == '0.0.0.0/0':
                print('delete route (route table id: %s)' % r['RouteTableId'])
                cmd = ['ec2', 'delete-route']
                cmd += ['--route-table-id', r['RouteTableId']]
                cmd += ['--destination-cidr-block', '0.0.0.0/0']
                aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('disassociate route table')

    cmd = ['ec2', 'describe-route-tables']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['RouteTables']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        for association in r['Associations']:
            # The main association cannot be removed explicitly.
            if association['Main']:
                continue
            print('disassociate route table (route table id: %s, route table association id: %s)'
                  % (r['RouteTableId'], association['RouteTableAssociationId']))
            cmd = ['ec2', 'disassociate-route-table']
            cmd += ['--association-id', association['RouteTableAssociationId']]
            aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete route table')

    cmd = ['ec2', 'describe-route-tables']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['RouteTables']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        # Only tables with no remaining associations (main tables keep one)
        # are deletable at this point.
        if len(r['Associations']) != 0:
            continue
        print('delete route table (route table id: %s)' % r['RouteTableId'])
        cmd = ['ec2', 'delete-route-table']
        cmd += ['--route-table-id', r['RouteTableId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete nat gateway')

    cmd = ['ec2', 'describe-nat-gateways']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['NatGateways']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        print('delete nat gateway (nat gateway id: %s)' % r['NatGatewayId'])
        cmd = ['ec2', 'delete-nat-gateway']
        cmd += ['--nat-gateway-id', r['NatGatewayId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    # NAT gateway deletion is asynchronous; its EIP stays associated until the
    # gateway is fully gone, so wait before releasing addresses below.
    print_message('wait delete nat gateway')

    aws_cli.wait_delete_nat_gateway(eb_vpc_id=eb_vpc_id)

    ################################################################################
    print_message('release eip')

    cmd = ['ec2', 'describe-addresses']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['Addresses']:
        # Skip addresses that are still associated with something.
        if 'AssociationId' in r:
            continue
        print('release address (address id: %s)' % r['AllocationId'])
        cmd = ['ec2', 'release-address']
        cmd += ['--allocation-id', r['AllocationId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    # The elasticache section of env is optional.
    if env.get('elasticache'):
        elasticache_subnet_name = env['elasticache']['CACHE_SUBNET_NAME']

        print_message('delete cache subnet group')

        cmd = ['elasticache', 'delete-cache-subnet-group']
        cmd += ['--cache-subnet-group-name', elasticache_subnet_name]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete db subnet group')

    cmd = ['rds', 'delete-db-subnet-group']
    cmd += ['--db-subnet-group-name', rds_subnet_name]
    aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('detach internet gateway')

    cmd = ['ec2', 'describe-internet-gateways']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['InternetGateways']:
        # Only IGWs attached exactly once, to the EB VPC, are ours.
        if len(r['Attachments']) != 1:
            continue
        if r['Attachments'][0]['VpcId'] != eb_vpc_id:
            continue
        print('detach internet gateway (internet gateway id: %s)' % r['InternetGatewayId'])
        cmd = ['ec2', 'detach-internet-gateway']
        cmd += ['--internet-gateway-id', r['InternetGatewayId']]
        cmd += ['--vpc-id', r['Attachments'][0]['VpcId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete internet gateway')

    cmd = ['ec2', 'describe-internet-gateways']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['InternetGateways']:
        # Only fully detached IGWs can be deleted.
        if len(r['Attachments']) != 0:
            continue
        print('delete internet gateway (internet gateway id: %s)' % r['InternetGatewayId'])
        cmd = ['ec2', 'delete-internet-gateway']
        cmd += ['--internet-gateway-id', r['InternetGatewayId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete subnet')

    cmd = ['ec2', 'describe-subnets']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['Subnets']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        print('delete subnet (subnet id: %s)' % r['SubnetId'])
        cmd = ['ec2', 'delete-subnet']
        cmd += ['--subnet-id', r['SubnetId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete vpc')

    # Retry until both VPCs are actually gone; delete-vpc fails while any
    # dependency still lingers, so re-query after each attempt.
    while rds_vpc_id or eb_vpc_id:
        if rds_vpc_id:
            print('delete vpc (vpc id: %s)' % rds_vpc_id)
            cmd = ['ec2', 'delete-vpc']
            cmd += ['--vpc-id', rds_vpc_id]
            aws_cli.run(cmd, ignore_error=True)

        if eb_vpc_id:
            print('delete vpc (vpc id: %s)' % eb_vpc_id)
            cmd = ['ec2', 'delete-vpc']
            cmd += ['--vpc-id', eb_vpc_id]
            aws_cli.run(cmd, ignore_error=True)

        rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

    ################################################################################
    #
    # EB Application
    #
    ################################################################################
    print_session('terminate eb application')

    ################################################################################
    print_message('delete application')

    cmd = ['elasticbeanstalk', 'delete-application']
    cmd += ['--application-name', env['elasticbeanstalk']['APPLICATION_NAME']]
    aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete key pair')

    cmd = ['ec2', 'delete-key-pair']
    cmd += ['--key-name', env['common']['AWS_KEY_PAIR_NAME']]
    aws_cli.run(cmd, ignore_error=True)
def run_create_eb_django(name, settings):
    """Build and deploy a Django app as a new Elastic Beanstalk environment.

    Steps: clone the app repo, render configuration templates
    (.ebextensions, my.cnf, settings_local.py), zip and upload the bundle to
    S3, create an application version and a timestamped environment, wait
    until it is Green/Ready, close the temporary SSH ingress, then swap the
    CNAME from the previous environment (blue/green) if one existed.

    :param name: environment base name; also the template sub-directory and
        the directory the git repo is cloned into.
    :param settings: per-environment dict (ASG sizes, CNAME, GIT_URL,
        SSL_CERTIFICATE_ID, SUBNET_TYPE, ...); all keys are also written
        into the app's settings_local.py.
    :raises Exception: on missing VPC, unknown subnet type, failed clone,
        previous environment not Ready, or a 30-minute creation timeout.
    """
    aws_cli = AWSCli()

    aws_asg_max_value = settings['AWS_ASG_MAX_VALUE']
    aws_asg_min_value = settings['AWS_ASG_MIN_VALUE']
    aws_default_region = env['aws']['AWS_DEFAULT_REGION']
    aws_eb_notification_email = settings['AWS_EB_NOTIFICATION_EMAIL']
    cname = settings['CNAME']
    debug = env['common']['DEBUG']
    eb_application_name = env['elasticbeanstalk']['APPLICATION_NAME']
    git_url = settings['GIT_URL']
    key_pair_name = env['common']['AWS_KEY_PAIR_NAME']
    phase = env['common']['PHASE']
    ssl_certificate_id = settings['SSL_CERTIFICATE_ID']
    subnet_type = settings['SUBNET_TYPE']
    template_name = env['template']['NAME']
    service_name = env['common'].get('SERVICE_NAME', '')
    name_prefix = '%s_' % service_name if service_name else ''

    cidr_subnet = aws_cli.cidr_subnet

    # Timestamp makes the zip name, environment name and (on collision) the
    # CNAME unique per deployment.
    str_timestamp = str(int(time.time()))

    zip_filename = '%s-%s.zip' % (name, str_timestamp)

    eb_environment_name = '%s-%s' % (name, str_timestamp)
    eb_environment_name_old = None

    template_path = 'template/%s' % template_name
    environment_path = '%s/elasticbeanstalk/%s' % (template_path, name)
    etc_config_path = '%s/configuration/etc' % environment_path
    app_config_path = '%s/%s' % (etc_config_path, name)

    # Capture git hashes of johanna and the template repo for tagging.
    git_rev = ['git', 'rev-parse', 'HEAD']
    git_hash_johanna = subprocess.Popen(git_rev, stdout=subprocess.PIPE).communicate()[0]
    git_hash_template = subprocess.Popen(git_rev, stdout=subprocess.PIPE, cwd=template_path).communicate()[0]

    ################################################################################
    print_session('create %s' % name)

    ################################################################################
    print_message('get vpc id')

    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

    if not eb_vpc_id:
        print('ERROR!!! No VPC found')
        raise Exception()

    ################################################################################
    # Resolve the two subnet ids for the requested subnet type by matching
    # CIDR blocks against the project's planned addressing.
    print_message('get subnet id')

    subnet_id_1 = None
    subnet_id_2 = None
    cmd = ['ec2', 'describe-subnets']
    result = aws_cli.run(cmd)
    for r in result['Subnets']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['public_1']:
                subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['public_2']:
                subnet_id_2 = r['SubnetId']
        elif 'private' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['private_1']:
                subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_2']:
                subnet_id_2 = r['SubnetId']
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('get security group id')

    security_group_id = None
    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd)
    for r in result['SecurityGroups']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['GroupName'] == '%seb_public' % name_prefix:
                security_group_id = r['GroupId']
                break
        elif 'private' == subnet_type:
            if r['GroupName'] == '%seb_private' % name_prefix:
                security_group_id = r['GroupId']
                break
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('get database address')

    db_address = aws_cli.get_rds_address()

    ################################################################################
    # Render all configuration files from their *_sample templates.
    print_message('configuration %s' % name)

    with open('%s/configuration/phase' % environment_path, 'w') as f:
        f.write(phase)
        # NOTE(review): f.close() is redundant inside a `with` block (the
        # context manager closes the file); harmless but unnecessary.
        f.close()

    lines = read_file('%s/.ebextensions/%s.config.sample' % (environment_path, name))
    lines = re_sub_lines(lines, 'AWS_ASG_MAX_VALUE', aws_asg_max_value)
    lines = re_sub_lines(lines, 'AWS_ASG_MIN_VALUE', aws_asg_min_value)
    lines = re_sub_lines(lines, 'AWS_EB_NOTIFICATION_EMAIL', aws_eb_notification_email)
    lines = re_sub_lines(lines, 'SSL_CERTIFICATE_ID', ssl_certificate_id)
    write_file('%s/.ebextensions/%s.config' % (environment_path, name), lines)

    # MySQL client config pointing at the RDS instance.
    lines = read_file('%s/my_sample.cnf' % app_config_path)
    lines = re_sub_lines(lines, '^(host).*', '\\1 = %s' % db_address)
    lines = re_sub_lines(lines, '^(user).*', '\\1 = %s' % env['rds']['USER_NAME'])
    lines = re_sub_lines(lines, '^(password).*', '\\1 = %s' % env['rds']['USER_PASSWORD'])
    write_file('%s/my.cnf' % app_config_path, lines)

    lines = read_file('%s/collectd_sample.conf' % etc_config_path)
    write_file('%s/collectd.conf' % etc_config_path, lines)

    # settings_local.py: DEBUG plus PHASE and every key from `settings`.
    lines = read_file('%s/settings_local_sample.py' % app_config_path)
    lines = re_sub_lines(lines, '^(DEBUG).*', '\\1 = %s' % debug)
    option_list = list()
    option_list.append(['PHASE', phase])
    for key in settings:
        value = settings[key]
        option_list.append([key, value])
    for oo in option_list:
        lines = re_sub_lines(lines, '^(%s) .*' % oo[0], '\\1 = \'%s\'' % oo[1])
    write_file('%s/settings_local.py' % app_config_path, lines)

    ################################################################################
    print_message('git clone')

    # Fresh shallow clone; branch = phase except for 'dv' (default branch).
    subprocess.Popen(['rm', '-rf', './%s' % name], cwd=environment_path).communicate()
    if phase == 'dv':
        git_command = ['git', 'clone', '--depth=1', git_url]
    else:
        git_command = ['git', 'clone', '--depth=1', '-b', phase, git_url]
    subprocess.Popen(git_command, cwd=environment_path).communicate()
    if not os.path.exists('%s/%s' % (environment_path, name)):
        raise Exception()

    git_hash_app = subprocess.Popen(git_rev, stdout=subprocess.PIPE,
                                    cwd='%s/%s' % (environment_path, name)).communicate()[0]

    # Strip git metadata so it is not shipped in the bundle.
    subprocess.Popen(['rm', '-rf', './%s/.git' % name], cwd=environment_path).communicate()
    subprocess.Popen(['rm', '-rf', './%s/.gitignore' % name], cwd=environment_path).communicate()

    ################################################################################
    # Blue/green: if an environment already serves this CNAME, remember it and
    # deploy under a timestamped CNAME, swapping at the end.
    print_message('check previous version')

    cmd = ['elasticbeanstalk', 'describe-environments']
    cmd += ['--application-name', eb_application_name]
    result = aws_cli.run(cmd)
    for r in result['Environments']:
        if 'CNAME' not in r:
            continue
        if r['CNAME'] == '%s.%s.elasticbeanstalk.com' % (cname, aws_default_region):
            if r['Status'] == 'Terminated':
                continue
            elif r['Status'] != 'Ready':
                print('previous version is not ready.')
                raise Exception()
            eb_environment_name_old = r['EnvironmentName']
            cname += '-%s' % str_timestamp
            break

    ################################################################################
    print_message('create storage location')

    cmd = ['elasticbeanstalk', 'create-storage-location']
    result = aws_cli.run(cmd)
    s3_bucket = result['S3Bucket']
    s3_zip_filename = '/'.join(['s3://' + s3_bucket, eb_application_name, zip_filename])

    ################################################################################
    print_message('create application version')

    cmd = ['zip', '-r', zip_filename, '.', '.ebextensions']
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=environment_path).communicate()

    cmd = ['s3', 'cp', zip_filename, s3_zip_filename]
    aws_cli.run(cmd, cwd=environment_path)

    # The local zip is no longer needed once uploaded.
    cmd = ['rm', '-rf', zip_filename]
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=environment_path).communicate()

    cmd = ['elasticbeanstalk', 'create-application-version']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--source-bundle', 'S3Bucket="%s",S3Key="%s/%s"' % (s3_bucket, eb_application_name, zip_filename)]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=environment_path)

    ################################################################################
    print_message('create environment %s' % name)

    option_settings = list()

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'EC2KeyName'
    oo['Value'] = key_pair_name
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'InstanceType'
    oo['Value'] = 't2.nano'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'IamInstanceProfile'
    oo['Value'] = 'aws-elasticbeanstalk-ec2-role'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'SecurityGroups'
    oo['Value'] = security_group_id
    option_settings.append(oo)

    # Private-subnet environments get no public IPs and an internal ELB.
    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'AssociatePublicIpAddress'
    oo['Value'] = 'true'
    if 'private' == subnet_type:
        oo['Value'] = 'false'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'ELBScheme'
    # NOTE(review): '...' looks like a placeholder for the public-scheme
    # value — confirm against the EB aws:ec2:vpc namespace docs ('public'
    # is the documented default).
    oo['Value'] = '...'
    if 'private' == subnet_type:
        oo['Value'] = 'internal'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'ELBSubnets'
    oo['Value'] = ','.join([subnet_id_1, subnet_id_2])
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'Subnets'
    oo['Value'] = ','.join([subnet_id_1, subnet_id_2])
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'VPCId'
    oo['Value'] = eb_vpc_id
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:environment'
    oo['OptionName'] = 'EnvironmentType'
    oo['Value'] = 'LoadBalanced'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:environment'
    oo['OptionName'] = 'ServiceRole'
    oo['Value'] = 'aws-elasticbeanstalk-service-role'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:healthreporting:system'
    oo['OptionName'] = 'SystemType'
    oo['Value'] = 'enhanced'
    option_settings.append(oo)

    # Enhanced-health CloudWatch config: publish the listed metrics at
    # 60-second granularity.
    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:healthreporting:system'
    oo['OptionName'] = 'ConfigDocument'
    cw_env = dict()
    cw_env['ApplicationRequestsTotal'] = 60
    cw_env['ApplicationRequests2xx'] = 60
    cw_env['ApplicationRequests3xx'] = 60
    cw_env['ApplicationRequests4xx'] = 60
    cw_env['ApplicationRequests5xx'] = 60
    cw_instance = dict()
    cw_instance['RootFilesystemUtil'] = 60
    cw_instance['InstanceHealth'] = 60
    cw_instance['CPUIdle'] = 60
    cw = dict()
    cw['Environment'] = cw_env
    cw['Instance'] = cw_instance
    cfg_doc = dict()
    cfg_doc['CloudWatchMetrics'] = cw
    cfg_doc['Version'] = 1
    oo['Value'] = json.dumps(cfg_doc)
    option_settings.append(oo)

    # The CLI takes the option settings as one JSON string.
    option_settings = json.dumps(option_settings)

    tag0 = 'Key=git_hash_johanna,Value=%s' % git_hash_johanna.decode('utf-8').strip()
    tag1 = 'Key=git_hash_%s,Value=%s' % (template_name, git_hash_template.decode('utf-8').strip())
    tag2 = 'Key=git_hash_%s,Value=%s' % (name, git_hash_app.decode('utf-8').strip())

    cmd = ['elasticbeanstalk', 'create-environment']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--cname-prefix', cname]
    cmd += ['--environment-name', eb_environment_name]
    cmd += ['--option-settings', option_settings]
    cmd += ['--solution-stack-name', '64bit Amazon Linux 2017.09 v2.6.5 running Python 3.6']
    cmd += ['--tags', tag0, tag1, tag2]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=environment_path)

    # Poll every 5 s until the environment is Green/Ready; give up after
    # 30 minutes.
    elapsed_time = 0
    while True:
        cmd = ['elasticbeanstalk', 'describe-environments']
        cmd += ['--application-name', eb_application_name]
        cmd += ['--environment-name', eb_environment_name]
        result = aws_cli.run(cmd)

        ee = result['Environments'][0]
        print(json.dumps(ee, sort_keys=True, indent=4))
        if ee.get('Health', '') == 'Green' and ee.get('Status', '') == 'Ready':
            break

        print('creating... (elapsed time: \'%d\' seconds)' % elapsed_time)
        time.sleep(5)
        elapsed_time += 5

        if elapsed_time > 60 * 30:
            raise Exception()

    # Remove the local clone now that the bundle is deployed.
    subprocess.Popen(['rm', '-rf', './%s' % name], cwd=environment_path).communicate()

    ################################################################################
    # Close the port-22 world-open ingress on the environment's own groups.
    print_message('revoke security group ingress')

    cmd = ['ec2', 'describe-security-groups']
    cmd += ['--filters', 'Name=tag-key,Values=Name,Name=tag-value,Values=%s' % eb_environment_name]
    result = aws_cli.run(cmd)
    for ss in result['SecurityGroups']:
        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', ss['GroupId']]
        cmd += ['--protocol', 'tcp']
        cmd += ['--port', '22']
        cmd += ['--cidr', '0.0.0.0/0']
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('swap CNAME if the previous version exists')

    if eb_environment_name_old:
        cmd = ['elasticbeanstalk', 'swap-environment-cnames']
        cmd += ['--source-environment-name', eb_environment_name_old]
        cmd += ['--destination-environment-name', eb_environment_name]
        aws_cli.run(cmd)
# Top-level prelude of the create-rds script: read RDS settings from env,
# verify prerequisites, and locate the VPCs.
# NOTE(review): `aws_cli`, `env`, `check_template_availability` and
# `create_iam_for_rds` are defined elsewhere in this file/project — this
# fragment assumes they are already in scope.
engine = env['rds']['ENGINE']
engine_version = env['rds']['ENGINE_VERSION']
license_model = env['rds']['LICENSE_MODEL']
# Fixed set of MySQL/MariaDB log types to export to CloudWatch, serialized
# as the JSON list the AWS CLI expects.
logs_export_to_cloudwatch = json.dumps(['error', 'general', 'audit', 'slowquery'])
master_user_name = env['rds']['USER_NAME']
master_user_password = env['rds']['USER_PASSWORD']
monitoring_interval = env['rds']['MONITORING_INTERVAL']

cidr_subnet = aws_cli.cidr_subnet

################################################################################
#
# start
#
################################################################################
print_session('create rds')

check_template_availability()

create_iam_for_rds()

################################################################################
print_message('get vpc id')

rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

# Both VPCs must already exist before the RDS instance can be placed.
if not rds_vpc_id or not eb_vpc_id:
    print('ERROR!!! No VPC found')
    raise Exception()

################################################################################
def run_create_eb_cron_job(name, settings):
    """Deploy one cron-job Elastic Beanstalk environment (blue/green style).

    Clones the app from git, injects phase/settings into its config files,
    zips and uploads it as a new application version, creates a fresh EB
    environment named ``<name>-<timestamp>``, waits until it is Green/Ready,
    then optionally attaches a fixed ENI and swaps the CNAME from the
    previous environment.

    :param name: logical service name (also the git checkout folder name)
    :param settings: per-service dict from ``env`` (region, CNAME, git url, ...)
    :raises Exception: on missing VPC, unknown subnet type, failed clone,
        a previous version that is not Ready, or a 30-minute creation timeout.
    """
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])
    aws_asg_max_value = settings['AWS_ASG_MAX_VALUE']
    aws_asg_min_value = settings['AWS_ASG_MIN_VALUE']
    aws_default_region = settings['AWS_DEFAULT_REGION']
    aws_eb_notification_email = settings['AWS_EB_NOTIFICATION_EMAIL']
    cname = settings['CNAME']
    debug = env['common']['DEBUG']
    eb_application_name = env['elasticbeanstalk']['APPLICATION_NAME']
    git_url = settings['GIT_URL']
    key_pair_name = env['common']['AWS_KEY_PAIR_NAME']
    phase = env['common']['PHASE']
    subnet_type = settings['SUBNET_TYPE']
    template_name = env['template']['NAME']

    service_name = env['common'].get('SERVICE_NAME', '')
    name_prefix = '%s_' % service_name if service_name else ''

    # BUG FIX: the original used hasattr(settings, 'PRIVATE_IP'), but
    # `settings` is a dict, so hasattr() was always False and the optional
    # PRIVATE_IP setting was silently ignored (the ENI-attach block below
    # could never run). Use a membership test instead.
    if 'PRIVATE_IP' in settings:
        private_ip = settings['PRIVATE_IP']
    else:
        private_ip = None

    cidr_subnet = aws_cli.cidr_subnet

    # Timestamp makes every deploy's artifact/environment name unique.
    str_timestamp = str(int(time.time()))

    zip_filename = '%s-%s.zip' % (name, str_timestamp)

    eb_environment_name = '%s-%s' % (name, str_timestamp)
    eb_environment_name_old = None

    template_path = 'template/%s' % template_name
    environment_path = '%s/elasticbeanstalk/%s' % (template_path, name)

    # Record the git revisions of this repo and of the template repo so they
    # can be attached to the environment as tags.
    git_rev = ['git', 'rev-parse', 'HEAD']
    git_hash_johanna = subprocess.Popen(git_rev, stdout=subprocess.PIPE).communicate()[0]
    git_hash_template = subprocess.Popen(git_rev, stdout=subprocess.PIPE,
                                         cwd=template_path).communicate()[0]

    ################################################################################
    print_session('create %s' % name)

    ################################################################################
    print_message('get vpc id')

    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

    if not eb_vpc_id:
        print('ERROR!!! No VPC found')
        raise Exception()

    ################################################################################
    print_message('get subnet id')

    subnet_id_1 = None
    subnet_id_2 = None
    cmd = ['ec2', 'describe-subnets']
    result = aws_cli.run(cmd)
    for r in result['Subnets']:
        if r['VpcId'] != eb_vpc_id:
            continue
        # Match subnets by CIDR block against the configured layout.
        if 'public' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['public_1']:
                subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['public_2']:
                subnet_id_2 = r['SubnetId']
        elif 'private' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['private_1']:
                subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_2']:
                subnet_id_2 = r['SubnetId']
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('get security group id')

    security_group_id = None
    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd)
    for r in result['SecurityGroups']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['GroupName'] == '%seb_public' % name_prefix:
                security_group_id = r['GroupId']
                break
        elif 'private' == subnet_type:
            if r['GroupName'] == '%seb_private' % name_prefix:
                security_group_id = r['GroupId']
                break
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('configuration %s' % name)

    # Record the deploy phase for the application to read at runtime.
    with open('%s/configuration/phase' % environment_path, 'w') as f:
        f.write(phase)

    # Render the .ebextensions config from its sample by substituting the
    # autoscaling bounds and notification address.
    lines = read_file('%s/.ebextensions/%s.config.sample' % (environment_path, name))
    lines = re_sub_lines(lines, 'AWS_ASG_MIN_VALUE', aws_asg_min_value)
    lines = re_sub_lines(lines, 'AWS_ASG_MAX_VALUE', aws_asg_max_value)
    lines = re_sub_lines(lines, 'AWS_EB_NOTIFICATION_EMAIL', aws_eb_notification_email)
    write_file('%s/.ebextensions/%s.config' % (environment_path, name), lines)

    ################################################################################
    print_message('git clone')

    subprocess.Popen(['rm', '-rf', './%s' % name], cwd=environment_path).communicate()

    # 'dv' deploys track the default branch; other phases track their
    # same-named branch.
    if phase == 'dv':
        git_command = ['git', 'clone', '--depth=1', git_url]
    else:
        git_command = ['git', 'clone', '--depth=1', '-b', phase, git_url]
    subprocess.Popen(git_command, cwd=environment_path).communicate()

    if not os.path.exists('%s/%s' % (environment_path, name)):
        raise Exception()

    git_hash_app = subprocess.Popen(git_rev, stdout=subprocess.PIPE,
                                    cwd='%s/%s' % (environment_path, name)).communicate()[0]

    # Strip git metadata so it is not shipped inside the bundle.
    subprocess.Popen(['rm', '-rf', './%s/.git' % name], cwd=environment_path).communicate()
    subprocess.Popen(['rm', '-rf', './%s/.gitignore' % name], cwd=environment_path).communicate()

    ################################################################################
    # Render settings_local.py for every configured location: force DEBUG and
    # overwrite any assignment whose name matches a settings key.
    for ss in settings['SETTINGS_LOCAL_PATH']:
        lines = read_file('%s/%s/settings_local_sample.py' % (environment_path, ss))
        lines = re_sub_lines(lines, '^(DEBUG).*', '\\1 = %s' % debug)
        option_list = list()
        option_list.append(['PHASE', phase])
        for key in settings:
            value = settings[key]
            option_list.append([key, value])
        for oo in option_list:
            lines = re_sub_lines(lines, '^(%s) .*' % oo[0], '\\1 = \'%s\'' % oo[1])
        write_file('%s/%s/settings_local.py' % (environment_path, ss), lines)

    ################################################################################
    print_message('check previous version')

    cmd = ['elasticbeanstalk', 'describe-environments']
    cmd += ['--application-name', eb_application_name]
    result = aws_cli.run(cmd)
    for r in result['Environments']:
        if 'CNAME' not in r:
            continue
        if r['CNAME'] == '%s.%s.elasticbeanstalk.com' % (cname, aws_default_region):
            if r['Status'] == 'Terminated':
                continue
            elif r['Status'] != 'Ready':
                print('previous version is not ready.')
                raise Exception()
            # A live previous environment owns the CNAME: deploy under a
            # timestamped CNAME now and swap after the new one is healthy.
            eb_environment_name_old = r['EnvironmentName']
            cname += '-%s' % str_timestamp
            break

    ################################################################################
    print_message('create storage location')

    cmd = ['elasticbeanstalk', 'create-storage-location']
    result = aws_cli.run(cmd)

    s3_bucket = result['S3Bucket']
    s3_zip_filename = '/'.join(['s3://' + s3_bucket, eb_application_name, zip_filename])

    ################################################################################
    print_message('create application version')

    cmd = ['zip', '-r', zip_filename, '.', '.ebextensions']
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                     cwd=environment_path).communicate()

    cmd = ['s3', 'cp', zip_filename, s3_zip_filename]
    aws_cli.run(cmd, cwd=environment_path)

    cmd = ['rm', '-rf', zip_filename]
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                     cwd=environment_path).communicate()

    cmd = ['elasticbeanstalk', 'create-application-version']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--source-bundle', 'S3Bucket="%s",S3Key="%s/%s"' % (s3_bucket, eb_application_name, zip_filename)]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=environment_path)

    ################################################################################
    print_message('create environment %s' % name)

    def _opt(namespace, option_name, value):
        # Build one EB option-setting entry in the create-environment format.
        return {'Namespace': namespace, 'OptionName': option_name, 'Value': value}

    # Enhanced-health ConfigDocument: publish instance metrics every 60s.
    cw_instance = dict()
    cw_instance['RootFilesystemUtil'] = 60
    cw_instance['InstanceHealth'] = 60
    cw_instance['CPUIdle'] = 60
    cfg_doc = {'CloudWatchMetrics': {'Instance': cw_instance}, 'Version': 1}

    option_settings = list()
    option_settings.append(_opt('aws:autoscaling:launchconfiguration',
                                'EC2KeyName', key_pair_name))
    option_settings.append(_opt('aws:autoscaling:launchconfiguration',
                                'InstanceType', 't2.micro'))
    option_settings.append(_opt('aws:autoscaling:launchconfiguration',
                                'IamInstanceProfile', 'aws-elasticbeanstalk-ec2-role'))
    option_settings.append(_opt('aws:autoscaling:launchconfiguration',
                                'SecurityGroups', security_group_id))
    option_settings.append(_opt('aws:ec2:vpc', 'AssociatePublicIpAddress',
                                'false' if 'private' == subnet_type else 'true'))
    # NOTE(review): '...' looks like a leftover placeholder for the public
    # ELB scheme (cf. the spring deploy which uses 'public') — confirm.
    option_settings.append(_opt('aws:ec2:vpc', 'ELBScheme',
                                'internal' if 'private' == subnet_type else '...'))
    option_settings.append(_opt('aws:ec2:vpc', 'ELBSubnets',
                                ','.join([subnet_id_1, subnet_id_2])))
    option_settings.append(_opt('aws:ec2:vpc', 'Subnets',
                                ','.join([subnet_id_1, subnet_id_2])))
    option_settings.append(_opt('aws:ec2:vpc', 'VPCId', eb_vpc_id))
    option_settings.append(_opt('aws:elasticbeanstalk:environment',
                                'EnvironmentType', 'LoadBalanced'))
    option_settings.append(_opt('aws:elasticbeanstalk:environment',
                                'ServiceRole', 'aws-elasticbeanstalk-service-role'))
    option_settings.append(_opt('aws:elasticbeanstalk:healthreporting:system',
                                'SystemType', 'enhanced'))
    option_settings.append(_opt('aws:elasticbeanstalk:healthreporting:system',
                                'ConfigDocument', json.dumps(cfg_doc)))
    option_settings.append(_opt('aws:elasticbeanstalk:cloudwatch:logs',
                                'StreamLogs', 'true'))
    option_settings.append(_opt('aws:elasticbeanstalk:cloudwatch:logs',
                                'DeleteOnTerminate', 'true'))
    option_settings.append(_opt('aws:elasticbeanstalk:cloudwatch:logs',
                                'RetentionInDays', '3'))
    option_settings = json.dumps(option_settings)

    tag0 = 'Key=git_hash_johanna,Value=%s' % git_hash_johanna.decode('utf-8').strip()
    tag1 = 'Key=git_hash_%s,Value=%s' % (template_name, git_hash_template.decode('utf-8').strip())
    tag2 = 'Key=git_hash_%s,Value=%s' % (name, git_hash_app.decode('utf-8').strip())

    cmd = ['elasticbeanstalk', 'create-environment']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--cname-prefix', cname]
    cmd += ['--environment-name', eb_environment_name]
    cmd += ['--option-settings', option_settings]
    cmd += ['--solution-stack-name', '64bit Amazon Linux 2018.03 v2.7.1 running Python 3.6']
    cmd += ['--tags', tag0, tag1, tag2]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=environment_path)

    # Poll until the environment is healthy, giving up after 30 minutes.
    elapsed_time = 0
    while True:
        cmd = ['elasticbeanstalk', 'describe-environments']
        cmd += ['--application-name', eb_application_name]
        cmd += ['--environment-name', eb_environment_name]
        result = aws_cli.run(cmd)

        ee = result['Environments'][0]
        print(json.dumps(ee, sort_keys=True, indent=4))
        if ee.get('Health', '') == 'Green' and ee.get('Status', '') == 'Ready':
            break

        print('creating... (elapsed time: \'%d\' seconds)' % elapsed_time)
        time.sleep(5)
        elapsed_time += 5

        if elapsed_time > 60 * 30:
            raise Exception()

    subprocess.Popen(['rm', '-rf', './%s' % name], cwd=environment_path).communicate()

    ################################################################################
    print_message('revoke security group ingress')

    # Close the SSH port EB opens on the environment's security groups.
    cmd = ['ec2', 'describe-security-groups']
    cmd += ['--filters', 'Name=tag-key,Values=Name,Name=tag-value,Values=%s' % eb_environment_name]
    result = aws_cli.run(cmd)

    for ss in result['SecurityGroups']:
        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', ss['GroupId']]
        cmd += ['--protocol', 'tcp']
        cmd += ['--port', '22']
        cmd += ['--cidr', '0.0.0.0/0']
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    if private_ip is not None:
        print_message('attach network interface')

        # Move the fixed ENI to the new instance: keep detaching it from its
        # current owner until it is free, then attach it as device index 1.
        elapsed_time = 0
        while True:
            cmd = ['ec2', 'describe-network-interfaces']
            cmd += ['--filters', 'Name=private-ip-address,Values=%s' % private_ip]
            result = aws_cli.run(cmd)

            network_interface_id = result['NetworkInterfaces'][0]['NetworkInterfaceId']

            if 'Attachment' not in result['NetworkInterfaces'][0]:
                cmd = ['ec2', 'describe-instances']
                cmd += ['--filters', 'Name=tag-key,Values=Name,Name=tag-value,Values=%s' % eb_environment_name]
                result = aws_cli.run(cmd)

                instance_id = result['Reservations'][0]['Instances'][0]['InstanceId']

                cmd = ['ec2', 'attach-network-interface']
                cmd += ['--network-interface-id', network_interface_id]
                cmd += ['--instance-id', instance_id]
                cmd += ['--device-index', '1']
                aws_cli.run(cmd)
                break

            attachment_id = result['NetworkInterfaces'][0]['Attachment']['AttachmentId']

            cmd = ['ec2', 'detach-network-interface']
            cmd += ['--attachment-id', attachment_id]
            aws_cli.run(cmd, ignore_error=True)

            print('detaching network interface... (elapsed time: \'%d\' seconds)' % elapsed_time)
            time.sleep(5)
            elapsed_time += 5

    ################################################################################
    print_message('swap CNAME if the previous version exists')

    if eb_environment_name_old:
        cmd = ['elasticbeanstalk', 'swap-environment-cnames']
        cmd += ['--source-environment-name', eb_environment_name_old]
        cmd += ['--destination-environment-name', eb_environment_name]
        aws_cli.run(cmd)
dw['properties']['title'] = title dashboard_body = json.dumps(dashboard_body) cmd = ['cloudwatch', 'put-dashboard'] cmd += ['--dashboard-name', dashboard_name] cmd += ['--dashboard-body', dashboard_body] aws_cli.run(cmd) ################################################################################ # # start # ################################################################################ print_session('create cloudwatch dashboard') check_template_availability() cw = env.get('cloudwatch', dict()) target_cw_dashboard_name = None region = None check_exists = False if len(args) > 1: target_cw_dashboard_name = args[1] if len(args) > 2: region = args[2] for cw_dashboard_env in cw.get('DASHBOARDS', list()):
redrive_policy = dict() redrive_policy['deadLetterTargetArn'] = dead_letter_queue_arn redrive_policy['maxReceiveCount'] = receive_count attr = dict() if dead_letter_queue_arn is not None: attr['RedrivePolicy'] = json.dumps(redrive_policy) attr['DelaySeconds'] = delay_seconds attr['MessageRetentionPeriod'] = retention attr['ReceiveMessageWaitTimeSeconds'] = receive_message_wait_time_seconds attr['VisibilityTimeout'] = timeout cmd = ['sqs', 'create-queue'] cmd += ['--queue-name', name] cmd += ['--attributes', json.dumps(attr)] result = aws_cli.run(cmd) print('create :', result['QueueUrl']) ################################################################################ # # start # ################################################################################ print_session('create sqs') sqs = env['sqs'] for sqs_env in sqs: run_create_queue(sqs_env['NAME'], sqs_env)
def terminate_iam():
    ################################################################################
    #
    # IAM
    #
    ################################################################################
    # Tear down the default Elastic Beanstalk IAM resources:
    # aws-elasticbeanstalk-service-role and aws-elasticbeanstalk-ec2-role
    # (with its instance profile). Every call uses ignore_error=True so the
    # teardown is idempotent and continues past already-deleted resources.
    # The order matters: policies must be detached before a role can be
    # deleted, and the role must be removed from its instance profile before
    # either can be deleted.
    print_session('terminate iam')

    aws_cli = AWSCli()

    ################################################################################
    print_message('terminate iam: aws-elasticbeanstalk-service-role')

    # Detach both managed policies, then delete the service role.
    cmd = ['iam', 'detach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-service-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService']
    aws_cli.run(cmd, ignore_error=True)

    cmd = ['iam', 'detach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-service-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth']
    aws_cli.run(cmd, ignore_error=True)

    cmd = ['iam', 'delete-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-service-role']
    aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('terminate iam: aws-elasticbeanstalk-ec2-role')

    # Delete the inline policy first, then detach the three managed tier
    # policies.
    cmd = ['iam', 'delete-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-name', 'aws-elasticbeanstalk-ec2-policy']
    aws_cli.run(cmd, ignore_error=True)

    cmd = ['iam', 'detach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/AWSElasticBeanstalkWorkerTier']
    aws_cli.run(cmd, ignore_error=True)

    cmd = ['iam', 'detach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/AWSElasticBeanstalkMulticontainerDocker']
    aws_cli.run(cmd, ignore_error=True)

    cmd = ['iam', 'detach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier']
    aws_cli.run(cmd, ignore_error=True)

    # Unlink the role from its instance profile, then delete role and
    # profile (both are named aws-elasticbeanstalk-ec2-role).
    cmd = ['iam', 'remove-role-from-instance-profile']
    cmd += ['--instance-profile-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    aws_cli.run(cmd, ignore_error=True)

    cmd = ['iam', 'delete-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    aws_cli.run(cmd, ignore_error=True)

    cmd = ['iam', 'delete-instance-profile']
    cmd += ['--instance-profile-name', 'aws-elasticbeanstalk-ec2-role']
    aws_cli.run(cmd, ignore_error=True)
cmd += ['--policy-document', 'file://aws_iam/aws-elasticbeanstalk-ec2-policy.json'] aws_cli.run(cmd) ################################################################################ print_message('create iam: aws-elasticbeanstalk-service-role') cmd = ['iam', 'create-role'] cmd += ['--role-name', 'aws-elasticbeanstalk-service-role'] cmd += ['--assume-role-policy-document', 'file://aws_iam/aws-elasticbeanstalk-service-role.json'] aws_cli.run(cmd) cmd = ['iam', 'attach-role-policy'] cmd += ['--role-name', 'aws-elasticbeanstalk-service-role'] cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth'] aws_cli.run(cmd) cmd = ['iam', 'attach-role-policy'] cmd += ['--role-name', 'aws-elasticbeanstalk-service-role'] cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService'] aws_cli.run(cmd) ################################################################################ # # start # ################################################################################ print_session('create iam') create_iam()
aws_cli.run(cmd) sleep_required = True if sleep_required: print_message( 'wait two minutes to let iam role and policy propagated to all regions...' ) time.sleep(120) ################################################################################ # # start # ################################################################################ print_session('create lambda') ################################################################################ check_template_availability() create_iam_for_lambda() lambdas_list = env['lambda'] if len(args) == 2: target_lambda_name = args[1] target_lambda_name_exists = False for lambda_env in lambdas_list: if lambda_env['NAME'] == target_lambda_name: target_lambda_name_exists = True if lambda_env['TYPE'] == 'default': run_create_lambda_default(lambda_env['NAME'], lambda_env)
def run_create_eb_spring(name, settings):
    """Deploy one Spring (Tomcat) Elastic Beanstalk environment, blue/green style.

    Clones the app, renders its .ebextensions/logstash/properties configs,
    builds a WAR with maven, uploads it as a new application version,
    creates an EB environment named ``<name>-<timestamp>``, waits until it
    is Green/Ready, then swaps the CNAME from the previous environment.

    :param name: logical service name (also the git checkout folder name)
    :param settings: per-service dict from ``env`` (CNAME, git url, ...)
    :raises Exception: on missing VPC, unknown subnet type, failed clone,
        a previous version that is not Ready, or a 30-minute creation timeout.
    """
    aws_cli = AWSCli()

    aws_asg_max_value = settings['AWS_ASG_MAX_VALUE']
    aws_asg_min_value = settings['AWS_ASG_MIN_VALUE']
    aws_default_region = env['aws']['AWS_DEFAULT_REGION']
    cname = settings['CNAME']
    db_conn_str_suffix = settings.get('DB_CONNECTION_STR_SUFFIX', '')
    eb_application_name = env['elasticbeanstalk']['APPLICATION_NAME']
    git_url = settings['GIT_URL']
    instance_type = settings.get('INSTANCE_TYPE', 't2.medium')
    key_pair_name = env['common']['AWS_KEY_PAIR_NAME']
    phase = env['common']['PHASE']
    service_name = env['common'].get('SERVICE_NAME', '')
    subnet_type = settings['SUBNET_TYPE']

    name_prefix = '%s_' % service_name if service_name else ''

    cidr_subnet = aws_cli.cidr_subnet

    # Timestamp makes every deploy's artifact/environment name unique.
    str_timestamp = str(int(time.time()))

    war_filename = '%s-%s.war' % (name, str_timestamp)

    eb_environment_name = '%s-%s' % (name, str_timestamp)
    eb_environment_name_old = None

    template_folder = 'template/%s' % name
    target_folder = 'template/%s/target' % name
    ebextensions_folder = 'template/%s/_provisioning/.ebextensions' % name
    configuration_folder = 'template/%s/_provisioning/configuration' % name
    properties_file = 'template/%s/%s' % (name, settings['PROPERTIES_FILE'])

    git_rev = ['git', 'rev-parse', 'HEAD']
    git_hash_johanna = subprocess.Popen(git_rev, stdout=subprocess.PIPE).communicate()[0]

    ################################################################################
    print_session('create %s' % name)

    ################################################################################
    print_message('get vpc id')

    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

    if not eb_vpc_id:
        print('ERROR!!! No VPC found')
        raise Exception()

    ################################################################################
    print_message('get subnet id')

    # The ELB and the EC2 instances may live in different subnets: with
    # 'public' the ELB goes into the public subnets while instances stay
    # private; with 'private' both share the private subnets.
    elb_subnet_id_1 = None
    elb_subnet_id_2 = None
    ec2_subnet_id_1 = None
    ec2_subnet_id_2 = None
    cmd = ['ec2', 'describe-subnets']
    result = aws_cli.run(cmd)
    for r in result['Subnets']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['public_1']:
                elb_subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['public_2']:
                elb_subnet_id_2 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_1']:
                ec2_subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_2']:
                ec2_subnet_id_2 = r['SubnetId']
        elif 'private' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['private_1']:
                elb_subnet_id_1 = ec2_subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_2']:
                elb_subnet_id_2 = ec2_subnet_id_2 = r['SubnetId']
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('get security group id')

    # NOTE(review): both branches match '<prefix>eb_private'. That is
    # consistent with instances always sitting in private subnets here, but
    # it makes the two branches identical — confirm the 'public' branch was
    # not meant to use a different group.
    security_group_id = None
    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd)
    for r in result['SecurityGroups']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['GroupName'] == '%seb_private' % name_prefix:
                security_group_id = r['GroupId']
                break
        elif 'private' == subnet_type:
            if r['GroupName'] == '%seb_private' % name_prefix:
                security_group_id = r['GroupId']
                break
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('get database address')

    db_address = aws_cli.get_rds_address()

    ################################################################################
    print_message('get cache address')

    cache_address = aws_cli.get_elasticache_address()

    ################################################################################
    print_message('git clone')

    subprocess.Popen(['mkdir', '-p', 'template']).communicate()
    subprocess.Popen(['rm', '-rf', '%s/' % name], cwd='template').communicate()

    # Branch override via GIT_BRANCH_APP; default branch is used for 'dv'.
    branch = aws_cli.env.get('GIT_BRANCH_APP', phase)
    git_command = ['git', 'clone', '--depth=1']
    if branch != 'dv':
        git_command += ['-b', branch]
    git_command += [git_url]
    subprocess.Popen(git_command, cwd='template').communicate()

    if not os.path.exists('%s' % template_folder):
        raise Exception()

    git_hash_app = subprocess.Popen(git_rev, stdout=subprocess.PIPE,
                                    cwd=template_folder).communicate()[0]

    # Strip git metadata so it is not shipped inside the bundle.
    subprocess.Popen(['rm', '-rf', '.git'], cwd=template_folder).communicate()
    subprocess.Popen(['rm', '-rf', '.gitignore'], cwd=template_folder).communicate()

    ################################################################################
    print_message('configuration %s' % name)

    # Record the deploy phase for the application to read at runtime.
    with open('%s/phase' % configuration_folder, 'w') as f:
        f.write(phase)
        f.close()  # redundant inside `with`, kept as-is

    lines = read_file('%s/etc/logstash/conf.d/logstash_sample.conf' % configuration_folder)
    write_file('%s/etc/logstash/conf.d/logstash.conf' % configuration_folder, lines)

    # Render the .ebextensions config with the autoscaling bounds.
    lines = read_file('%s/%s.config.sample' % (ebextensions_folder, name))
    lines = re_sub_lines(lines, 'AWS_ASG_MAX_VALUE', aws_asg_max_value)
    lines = re_sub_lines(lines, 'AWS_ASG_MIN_VALUE', aws_asg_min_value)
    write_file('%s/%s.config' % (ebextensions_folder, name), lines)

    # Render the Spring properties file from its -sample twin: inject DB,
    # cache and any key=value pairs that appear in settings.
    sample_file = properties_file.replace('.properties', '-sample.properties')
    lines = read_file(sample_file)
    option_list = list()
    option_list.append(['jdbc.url', 'jdbc:mysql://%s%s' % (db_address, db_conn_str_suffix)])
    option_list.append(['jdbc.username', env['rds']['USER_NAME']])
    option_list.append(['jdbc.password', env['rds']['USER_PASSWORD']])
    option_list.append(['redis.host', cache_address])
    for key in settings:
        value = settings[key]
        option_list.append([key, value])
    for oo in option_list:
        lines = re_sub_lines(lines, '^(%s)=.*' % oo[0], '\\1=%s' % oo[1])
    write_file(properties_file, lines)

    ################################################################################
    print_message('check previous version')

    cmd = ['elasticbeanstalk', 'describe-environments']
    cmd += ['--application-name', eb_application_name]
    result = aws_cli.run(cmd)
    for r in result['Environments']:
        if 'CNAME' not in r:
            continue
        if r['CNAME'] == '%s.%s.elasticbeanstalk.com' % (cname, aws_default_region):
            if r['Status'] == 'Terminated':
                continue
            elif r['Status'] != 'Ready':
                print('previous version is not ready.')
                raise Exception()
            # A live previous environment owns the CNAME: deploy under a
            # timestamped CNAME now and swap after the new one is healthy.
            eb_environment_name_old = r['EnvironmentName']
            cname += '-%s' % str_timestamp
            break

    ################################################################################
    print_message('build artifact')

    # 'exec:exec' runs the extra build step for non-dv phases — presumably a
    # pre-package hook defined in the app's pom; confirm against the app repo.
    build_command = ['mvn']
    if phase != 'dv':
        build_command += ['exec:exec']
    build_command += ['package']
    print_message('build %s: %s' % (name, ' '.join(build_command)))
    subprocess.Popen(build_command, cwd=template_folder).communicate()

    ################################################################################
    print_message('create storage location')

    cmd = ['elasticbeanstalk', 'create-storage-location']
    result = aws_cli.run(cmd)

    s3_bucket = result['S3Bucket']
    s3_war_filename = '/'.join(['s3://' + s3_bucket, eb_application_name, war_filename])

    ################################################################################
    print_message('create application version')

    # Rename the maven output to a unique, timestamped artifact name.
    cmd = ['mv', 'ROOT.war', war_filename]
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                     cwd=target_folder).communicate()

    cmd = ['s3', 'cp', war_filename, s3_war_filename]
    aws_cli.run(cmd, cwd=target_folder)

    cmd = ['rm', '-rf', war_filename]
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                     cwd=target_folder).communicate()

    cmd = ['elasticbeanstalk', 'create-application-version']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--source-bundle', 'S3Bucket="%s",S3Key="%s/%s"' % (s3_bucket, eb_application_name, war_filename)]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=template_folder)

    ################################################################################
    print_message('create environment %s' % name)

    # Build the EB option-settings list (serialized to JSON below).
    option_settings = list()

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'EC2KeyName'
    oo['Value'] = key_pair_name
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'InstanceType'
    oo['Value'] = instance_type
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'IamInstanceProfile'
    oo['Value'] = 'aws-elasticbeanstalk-ec2-role'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'SecurityGroups'
    oo['Value'] = security_group_id
    option_settings.append(oo)

    # Instances are always in private subnets, so never give them public IPs.
    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'AssociatePublicIpAddress'
    oo['Value'] = 'false'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'ELBScheme'
    oo['Value'] = 'public'
    if 'private' == subnet_type:
        oo['Value'] = 'internal'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'ELBSubnets'
    oo['Value'] = ','.join([elb_subnet_id_1, elb_subnet_id_2])
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'Subnets'
    oo['Value'] = ','.join([ec2_subnet_id_1, ec2_subnet_id_2])
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'VPCId'
    oo['Value'] = eb_vpc_id
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:environment'
    oo['OptionName'] = 'EnvironmentType'
    oo['Value'] = 'LoadBalanced'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:environment'
    oo['OptionName'] = 'ServiceRole'
    oo['Value'] = 'aws-elasticbeanstalk-service-role'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:healthreporting:system'
    oo['OptionName'] = 'SystemType'
    oo['Value'] = 'enhanced'
    option_settings.append(oo)

    # Enhanced-health ConfigDocument: publish request and instance metrics
    # every 60 seconds.
    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:healthreporting:system'
    oo['OptionName'] = 'ConfigDocument'
    cw_env = dict()
    cw_env['ApplicationRequestsTotal'] = 60
    cw_env['ApplicationRequests2xx'] = 60
    cw_env['ApplicationRequests3xx'] = 60
    cw_env['ApplicationRequests4xx'] = 60
    cw_env['ApplicationRequests5xx'] = 60
    cw_instance = dict()
    cw_instance['RootFilesystemUtil'] = 60
    cw_instance['InstanceHealth'] = 60
    cw_instance['CPUIdle'] = 60
    cw = dict()
    cw['Environment'] = cw_env
    cw['Instance'] = cw_instance
    cfg_doc = dict()
    cfg_doc['CloudWatchMetrics'] = cw
    cfg_doc['Version'] = 1
    oo['Value'] = json.dumps(cfg_doc)
    option_settings.append(oo)

    option_settings = json.dumps(option_settings)

    # Tag the environment with the deploy-repo and app-repo git revisions.
    tag0 = 'Key=git_hash_johanna,Value=%s' % git_hash_johanna.decode('utf-8').strip()
    tag2 = 'Key=git_hash_%s,Value=%s' % (name, git_hash_app.decode('utf-8').strip())

    cmd = ['elasticbeanstalk', 'create-environment']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--cname-prefix', cname]
    cmd += ['--environment-name', eb_environment_name]
    cmd += ['--option-settings', option_settings]
    cmd += ['--solution-stack-name', '64bit Amazon Linux 2018.03 v3.0.1 running Tomcat 8.5 Java 8']
    cmd += ['--tags', tag0, tag2]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=template_folder)

    # Poll until the environment is healthy, giving up after 30 minutes.
    elapsed_time = 0
    while True:
        cmd = ['elasticbeanstalk', 'describe-environments']
        cmd += ['--application-name', eb_application_name]
        cmd += ['--environment-name', eb_environment_name]
        result = aws_cli.run(cmd)

        ee = result['Environments'][0]
        print(json.dumps(ee, sort_keys=True, indent=4))
        if ee.get('Health', '') == 'Green' and ee.get('Status', '') == 'Ready':
            break

        print('creating... (elapsed time: \'%d\' seconds)' % elapsed_time)
        time.sleep(5)
        elapsed_time += 5

        if elapsed_time > 60 * 30:
            raise Exception()

    subprocess.Popen(['rm', '-rf', '%s/' % name], cwd='template').communicate()

    ################################################################################
    print_message('revoke security group ingress')

    # Close the SSH port EB opens on the environment's security groups.
    cmd = ['ec2', 'describe-security-groups']
    cmd += ['--filters', 'Name=tag-key,Values=Name,Name=tag-value,Values=%s' % eb_environment_name]
    result = aws_cli.run(cmd)

    for ss in result['SecurityGroups']:
        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', ss['GroupId']]
        cmd += ['--protocol', 'tcp']
        cmd += ['--port', '22']
        cmd += ['--cidr', '0.0.0.0/0']
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('swap CNAME if the previous version exists')

    if eb_environment_name_old:
        cmd = ['elasticbeanstalk', 'swap-environment-cnames']
        cmd += ['--source-environment-name', eb_environment_name_old]
        cmd += ['--destination-environment-name', eb_environment_name]
        aws_cli.run(cmd)
#!/usr/bin/env python3 import re import subprocess from subprocess import PIPE from env import env from run_common import AWSCli from run_common import print_message from run_common import print_session aws_cli = AWSCli() print_session('dump mysql schema') engine = env['rds']['ENGINE'] if engine != 'mysql': print('not supported:', engine) raise Exception() print_message('get database address') db_host = aws_cli.get_rds_address(read_replica=True) db_user = env['rds']['USER_NAME'] db_password = env['rds']['USER_PASSWORD'] database = env['rds']['DATABASE'] print_message('dump schema') cmd = ['mysqldump'] cmd += ['-h' + db_host] cmd += ['-u' + db_user]
print('wrong old timestamp (too small)') return False if old_timestamp + max_age_seconds > timestamp: print('skip this time') return False return True ################################################################################ # # start # ################################################################################ print_session('terminate old environment version') timestamp = int(time.time()) max_age_seconds = 60 * 60 * 24 * 3 ################################################################################ print_message('terminate old environment version (current timestamp: %d)' % timestamp) eb_application_name = env['elasticbeanstalk']['APPLICATION_NAME'] for vpc_env in env['vpc']: aws_cli = AWSCli(vpc_env['AWS_DEFAULT_REGION']) aws_default_region = vpc_env['AWS_DEFAULT_REGION'] cmd = ['elasticbeanstalk', 'describe-application-versions'] cmd += ['--application-name', eb_application_name]
'--subscription-arn', subscription_arn] AWSCli(sns_region).run(cmd, ignore_error=True) print_message('delete lambda function') cmd = ['lambda', 'delete-function', '--function-name', function_name] aws_cli.run(cmd, cwd=deploy_folder, ignore_error=True) ################################################################################ # # start # ################################################################################ print_session('terminate lambda') lambdas_list = env.get('lambda', list()) if len(args) == 2: target_lambda_name = args[1] target_lambda_name_exists = False for lambda_env in lambdas_list: if lambda_env['NAME'] == target_lambda_name: target_lambda_name_exists = True if lambda_env['TYPE'] == 'default': run_terminate_default_lambda(lambda_env['NAME'], lambda_env) break if lambda_env['TYPE'] == 'cron': run_terminate_cron_lambda(lambda_env['NAME'], lambda_env) break if lambda_env['TYPE'] == 'sns':
title = dw['properties']['title'] if title.startswith('SQS: dv-'): title = title.replace('SQS: dv-', 'SQS: PHASE-') if title.startswith('SQS: qa-'): title = title.replace('SQS: qa-', 'SQS: PHASE-') if title.startswith('SQS: op-'): title = title.replace('SQS: op-', 'SQS: PHASE-') dw['properties']['title'] = title template_name = env['template']['NAME'] filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name) with open(filename_path, 'w') as ff: json.dump(dashboard_body, ff, sort_keys=True, indent=2) ################################################################################ # # start # ################################################################################ print_session('export cloudwatch dashboard') cw = env['cloudwatch'] cw_dashboards = cw['DASHBOARDS'] for cd in cw_dashboards: if cd['TYPE'] == 'sqs,lambda': run_export_cloudwatch_dashboard_sqs_lambda(cd['NAME'], cd) else: run_export_cloudwatch_dashboard(cd['NAME'], cd)
aws_cli.run(cmd, ignore_error=True) ################################################################################ print_message('delete key pair') cmd = ['ec2', 'delete-key-pair'] cmd += ['--key-name', env['common']['AWS_KEY_PAIR_NAME']] aws_cli.run(cmd, ignore_error=True) ################################################################################ # # start # ################################################################################ print_session('terminate vpc') region = None check_exists = False if len(args) > 1: region = args[1] for vpc_env in env['vpc']: if region and vpc_env.get('AWS_DEFAULT_REGION') != region: continue check_exists = True main(vpc_env)
def run_terminate_sns_topic(name, settings):
    """Delete the SNS topic *name* in the region given by *settings*.

    The topic ARN is looked up first; if the topic does not exist the
    function returns silently, so termination is idempotent.

    :param name: SNS topic name from config.
    :param settings: config dict; only 'AWS_DEFAULT_REGION' is read here.
    """
    region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(region)

    ################################################################################
    print_message('terminate sns topic: "%s" in %s' % (name, region))

    topic_arn = aws_cli.get_topic_arn(name)
    if not topic_arn:
        # topic already gone — nothing to do
        return

    cmd = ['sns', 'delete-topic']
    cmd += ['--topic-arn', topic_arn]
    aws_cli.run(cmd)


# Backward-compatible alias: the function was originally published under a
# misspelled name ("tpoic"); keep the old name so external callers don't break.
run_terminate_sns_tpoic = run_terminate_sns_topic

################################################################################
#
# start
#
################################################################################
print_session('terminate sns')

sns_list = env.get('sns', list())

for sns_env in sns_list:
    # only 'topic'-type entries are handled here
    if sns_env['TYPE'] == 'topic':
        run_terminate_sns_topic(sns_env['NAME'], sns_env)
# NOTE(review): fragment — the enclosing backup/upload function starts before
# this view; 'ee' is presumably an os.environ copy for the billing AWS
# account (TODO confirm), and 'filename'/'yyyymmdd'/'cwd' are bound there.
    ee['AWS_SECRET_ACCESS_KEY'] = BILLING_AWS_SECRET_ACCESS_KEY
    # upload to s3://<bucket>/<PHASE>-<yyyymmdd>/<filename>
    s3_filename = '/'.join(
        ['s3://' + AWS_S3_BACKUP_BUCKET, PHASE + '-' + yyyymmdd, filename])
    cmd = ['aws', 's3', 'cp', filename, s3_filename]
    subprocess.Popen(cmd, cwd=cwd, env=ee, stdout=PIPE).communicate()


################################################################################
#
# start
#
################################################################################
print_session('mysqldump data')

################################################################################

if __name__ == "__main__":
    from run_common import parse_args

    args = parse_args()
    # NOTE(review): the existence check looks for 'my.cnf' but the error
    # message says 'my.conf' — message looks like a typo; confirm before changing.
    if len(args) != 2 \
            or not os.path.exists(args[1] + '/my.cnf') \
            or not os.path.exists(args[1] + '/settings_local.py'):
        print('input the path of \'my.conf\' and \'settings_local.py\'')
        sys.exit()
    _auto_hourly_backup(args[1])
# truncated: the alternate branch continues past this view
else:
# NOTE(review): this chunk appears to fuse two scripts — a bare __main__
# guard that only parses arguments, followed by the create-sns script proper.
if __name__ == "__main__":
    from run_common import parse_args

    parse_args()


def run_create_sns_topic(name, settings):
    """Create the SNS topic *name* in the region given by *settings* and
    print the resulting TopicArn.

    :param name: SNS topic name from config.
    :param settings: config dict; only 'AWS_DEFAULT_REGION' is read here.
    """
    region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(region)

    ################################################################################
    print_message('create sns topic: %s' % name)

    cmd = ['sns', 'create-topic']
    cmd += ['--name', name]
    result = aws_cli.run(cmd)
    print('created:', result['TopicArn'])


################################################################################
#
# start
#
################################################################################
print_session('create sns')

sns_list = env.get('sns', list())

for sns_env in sns_list:
    # only 'topic'-type entries are handled here
    if sns_env['TYPE'] == 'topic':
        run_create_sns_topic(sns_env['NAME'], sns_env)
# NOTE(review): fragment — run_terminate_cw_alarm's body starts before this
# view; 'phase', 'name', 'region', 'aws_cli' are bound there.
    sqs_name = settings['QUEUE_NAME']
    # alarm name layout: <phase>-<name>_<region>_<queue>_<metric>
    alarm_name = '%s-%s_%s_%s_%s' % (phase, name, region, sqs_name, settings['METRIC_NAME'])
    print_message('terminate cloudwatch alarm: %s' % alarm_name)

    cmd = ['cloudwatch', 'delete-alarms']
    cmd += ['--alarm-names', alarm_name]
    # best-effort: the alarm may already be gone
    aws_cli.run(cmd, ignore_error=True)


################################################################################
#
# start
#
################################################################################
print_session('terminate cloudwatch alarm')

cw = env.get('cloudwatch', dict())

if len(args) == 2:
    # an alarm name was given on the command line: terminate only that one
    target_cw_alarm_name = args[1]
    target_cw_alarm_name_exists = False
    for cw_alarm_env in cw.get('ALARMS', list()):
        if cw_alarm_env['NAME'] == target_cw_alarm_name:
            target_cw_alarm_name_exists = True
            run_terminate_cw_alarm(cw_alarm_env['NAME'], cw_alarm_env)
    if not target_cw_alarm_name_exists:
        print('"%s" is not exists in config.json' % target_cw_alarm_name)
else:
    # truncated: the terminate-all loop continues past this view
    for cw_alarm_env in cw.get('ALARMS', list()):
# NOTE(review): fragment — the enclosing VPC-creation function starts before
# this view; 'eb_subnet_id', 'cname', 'private_ip', 'eb_security_group_id',
# 'name_prefix', 'aws_cli' are bound there.
    cmd = ['ec2', 'create-network-interface']
    cmd += ['--subnet-id', eb_subnet_id['private_1']]
    cmd += ['--description', cname]
    cmd += ['--private-ip-address', private_ip]
    cmd += ['--groups', eb_security_group_id['private']]
    result = aws_cli.run(cmd)
    network_interface_id = result['NetworkInterface']['NetworkInterfaceId']
    # tag the new ENI with a Name so it is identifiable in the console
    aws_cli.set_name_tag(network_interface_id, '%snat' % name_prefix)


################################################################################
#
# start
#
################################################################################
print_session('create vpc')

region = None
check_exists = False

if len(args) > 1:
    region = args[1]

for vpc_env in env['vpc']:
    # when a region argument is given, skip VPC configs for other regions
    if region and vpc_env.get('AWS_DEFAULT_REGION') != region:
        continue
    check_exists = True
    main(vpc_env)
# NOTE(review): fragment — the dashboard-building function starts before
# this view; 'widgets', 'dashboard_name', 'aws_cli' are bound there.
    })

    cmd = ['cloudwatch', 'put-dashboard']
    cmd += ['--dashboard-name', dashboard_name]
    # dashboard body is the JSON document of widgets accumulated above
    cmd += ['--dashboard-body', json.dumps({
        'widgets': widgets
    })]
    aws_cli.run(cmd)


################################################################################
#
# start
#
################################################################################
print_session('create cloudwatch dashboard')

reset_template_dir()

cw = env.get('cloudwatch', dict())

target_cw_dashboard_name = None
region = None
check_exists = False

if len(args) > 1:
    target_cw_dashboard_name = args[1]

if len(args) > 2:
    region = args[2]

# truncated: the loop body continues past this view
for cw_dashboard_env in cw.get('DASHBOARDS', list()):
# NOTE(review): fragment — the enclosing s3-terminate function starts before
# this view; 'settings' and 'aws_cli' are bound there.
    print_message('invalidate cache from cloudfront')

    cf_dist_id = settings.get('CLOUDFRONT_DIST_ID', '')
    # only invalidate when a CloudFront distribution id is configured
    if len(cf_dist_id) > 0:
        path_list = list(settings['INVALIDATE_PATHS'])
        cmd = ['cloudfront', 'create-invalidation', '--distribution-id', cf_dist_id,
               '--paths', ' '.join(path_list)]
        invalidate_result = aws_cli.run(cmd)
        print(invalidate_result)


################################################################################
#
# start
#
################################################################################
print_session('terminate s3')

s3_list = env.get('s3', list())

if len(args) == 2:
    # an s3 config name was given on the command line: terminate only that one
    target_s3_name = args[1]
    target_s3_name_exists = False
    for s3_env in s3_list:
        if s3_env['NAME'] == target_s3_name:
            target_s3_name_exists = True
            if s3_env['TYPE'] == 'angular-app':
                run_terminate_s3_webapp(s3_env['NAME'], s3_env)
                break
    if not target_s3_name_exists:
        print('"%s" is not exists in config.json' % target_s3_name)
else:
    # truncated: the terminate-all loop continues past this view
    for s3_env in s3_list:
# NOTE(review): fragment — the lambda IAM-role creation helper starts before
# this view; 'cmd', 'policy_name', 'aws_cli' are bound there.
    cmd += ['--policy-name', policy_name]
    cmd += ['--policy-document', 'file://aws_iam/aws-lambda-default-policy.json']
    aws_cli.run(cmd)
    sleep_required = True

    if sleep_required:
        # IAM changes propagate asynchronously across regions
        print_message('wait two minutes to let iam role and policy propagated to all regions...')
        time.sleep(120)


################################################################################
#
# start
#
################################################################################
print_session('create lambda')

################################################################################

check_template_availability()

create_iam_for_lambda()

lambdas_list = env['lambda']

if len(args) == 2:
    # a lambda name was given on the command line: create only that one
    target_lambda_name = args[1]
    target_lambda_name_exists = False
    for lambda_env in lambdas_list:
        if lambda_env['NAME'] == target_lambda_name:
            target_lambda_name_exists = True
            # truncated: further TYPE branches continue past this view
            if lambda_env['TYPE'] == 'default':
                run_create_lambda_default(lambda_env['NAME'], lambda_env)
# NOTE(review): fragment — the alarm-creation function starts before this
# view; 'cmd', 'dimension_list', 'aws_cli' are bound there.
    cmd += ['--dimensions', ' '.join(dimension_list)]
    cmd += ['--evaluation-periods', settings['EVALUATION_PERIODS']]
    cmd += ['--metric-name', settings['METRIC_NAME']]
    cmd += ['--namespace', settings['NAMESPACE']]
    cmd += ['--period', settings['PERIOD']]
    cmd += ['--statistic', settings['STATISTIC']]
    cmd += ['--threshold', settings['THRESHOLD']]
    aws_cli.run(cmd)


################################################################################
#
# start
#
################################################################################
print_session('create cloudwatch alarm')

cw = env.get('cloudwatch', dict())

target_cw_alarm_name = None
region = None
check_exists = False

if len(args) > 1:
    target_cw_alarm_name = args[1]

if len(args) > 2:
    region = args[2]

# truncated: the loop body continues past this view
for cw_alarm_env in cw.get('ALARMS', list()):
    # when an alarm name argument is given, skip the others
    if target_cw_alarm_name and cw_alarm_env['NAME'] != target_cw_alarm_name:
        continue
def create_iam():
    """Create the IAM instance profile, roles, and policies that Elastic
    Beanstalk environments use (aws-elasticbeanstalk-ec2-role and
    aws-elasticbeanstalk-service-role).

    NOTE(review): truncated — the final create-role call's argument list
    continues past this view.
    """
    ################################################################################
    #
    # IAM
    #
    ################################################################################
    print_session('create iam')

    aws_cli = AWSCli()

    ################################################################################
    print_message('create iam: aws-elasticbeanstalk-ec2-role')

    # instance profile + role of the same name, then link them together
    cmd = ['iam', 'create-instance-profile']
    cmd += ['--instance-profile-name', 'aws-elasticbeanstalk-ec2-role']
    aws_cli.run(cmd)

    cmd = ['iam', 'create-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--assume-role-policy-document', 'file://aws_iam/aws-elasticbeanstalk-ec2-role.json']
    aws_cli.run(cmd)

    cmd = ['iam', 'add-role-to-instance-profile']
    cmd += ['--instance-profile-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    aws_cli.run(cmd)

    # attach the three AWS-managed Beanstalk tier policies to the ec2 role
    cmd = ['iam', 'attach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier']
    aws_cli.run(cmd)

    cmd = ['iam', 'attach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/AWSElasticBeanstalkMulticontainerDocker']
    aws_cli.run(cmd)

    cmd = ['iam', 'attach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/AWSElasticBeanstalkWorkerTier']
    aws_cli.run(cmd)

    # plus a project-local inline policy from aws_iam/
    cmd = ['iam', 'put-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-name', 'aws-elasticbeanstalk-ec2-policy']
    cmd += ['--policy-document', 'file://aws_iam/aws-elasticbeanstalk-ec2-policy.json']
    aws_cli.run(cmd)

    ################################################################################
    print_message('create iam: aws-elasticbeanstalk-service-role')

    cmd = ['iam', 'create-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-service-role']
    cmd += ['--assume-role-policy-document',
# NOTE(review): fragment — continuation of the preceding create-role call for
# aws-elasticbeanstalk-service-role; 'cmd' and 'aws_cli' are bound there.
            'file://aws_iam/aws-elasticbeanstalk-service-role.json']
    aws_cli.run(cmd)

    # attach the AWS-managed service-role policies
    cmd = ['iam', 'attach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-service-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth']
    aws_cli.run(cmd)

    cmd = ['iam', 'attach-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-service-role']
    cmd += ['--policy-arn', 'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService']
    aws_cli.run(cmd)
# NOTE(review): fragment — the enclosing backup/upload function starts before
# this view; 'ee' is presumably an os.environ copy for the billing AWS
# account (TODO confirm), and 'filename'/'yyyymmdd'/'cwd' are bound there.
    ee['AWS_DEFAULT_REGION'] = AWS_DEFAULT_REGION
    ee['AWS_SECRET_ACCESS_KEY'] = BILLING_AWS_SECRET_ACCESS_KEY
    # upload to s3://<bucket>/<PHASE>-<yyyymmdd>/<filename>
    s3_filename = '/'.join(['s3://' + AWS_S3_BACKUP_BUCKET, PHASE + '-' + yyyymmdd, filename])
    cmd = ['aws', 's3', 'cp', filename, s3_filename]
    subprocess.Popen(cmd, cwd=cwd, env=ee, stdout=PIPE).communicate()


################################################################################
#
# start
#
################################################################################
print_session('mysqldump data')

################################################################################

if __name__ == "__main__":
    from run_common import parse_args

    args = parse_args()
    # NOTE(review): the existence check looks for 'my_replica.cnf' but the
    # error message says 'my.conf' — message looks stale; confirm before changing.
    if len(args) != 2 \
            or not os.path.exists(args[1] + '/my_replica.cnf') \
            or not os.path.exists(args[1] + '/settings_local.py'):
        print('input the path of \'my.conf\' and \'settings_local.py\'')
        sys.exit()
    _auto_hourly_backup(args[1])
# truncated: the alternate branch continues past this view
else: