def run_create_cloudwatch_alarm_rds(name, settings):
    """Create or update a CloudWatch alarm on an RDS metric and wire it to an SNS topic.

    Raises Exception when the configured SNS topic does not exist in the region.
    """
    region = settings['AWS_DEFAULT_REGION']
    cli = AWSCli(region)

    alarm_name = '%s-%s_%s_%s' % (env['common']['PHASE'], name, region, settings['METRIC_NAME'])
    print_message('create or update cloudwatch alarm: %s' % alarm_name)

    topic_arn = cli.get_topic_arn(settings['SNS_TOPIC_NAME'])
    if not topic_arn:
        print('sns topic: "%s" is not exists in %s' % (settings['SNS_TOPIC_NAME'], region))
        raise Exception()

    dimensions = []
    if settings['DIMENSIONS'] == 'DBClusterIdentifier':
        dimensions.append('Name=DBClusterIdentifier,Value=%s' % env['rds']['DB_CLUSTER_ID'])

    cmd = ['cloudwatch', 'put-metric-alarm',
           '--alarm-actions', topic_arn,
           '--alarm-description', settings['DESCRIPTION'],
           '--alarm-name', alarm_name,
           '--comparison-operator', settings['COMPARISON_OPERATOR'],
           '--datapoints-to-alarm', settings['DATAPOINTS_TO_ALARM'],
           '--dimensions', ' '.join(dimensions),
           '--evaluation-periods', settings['EVALUATION_PERIODS'],
           '--metric-name', settings['METRIC_NAME'],
           '--namespace', settings['NAMESPACE'],
           '--period', settings['PERIOD'],
           '--statistic', settings['STATISTIC'],
           '--threshold', settings['THRESHOLD']]
    cli.run(cmd)
def create_route53(name, type, dns_name, alias_hostzon_id, hosted_zone_id=None, ttl=300):
    """Create a Route53 record set via change-resource-record-sets.

    name: record name; type: record type (e.g. 'A', 'CNAME').
    alias_hostzon_id: when truthy, an alias record pointing at dns_name is created;
        otherwise a plain record with value dns_name.
    hosted_zone_id: optional; when given, passed as --hosted-zone-id (the CLI
        requires it — BUG FIX: the original command omitted it entirely).
    ttl: TTL for non-alias records (required by the Route53 API).
    Returns the parsed CLI result.
    """
    if alias_hostzon_id:
        rrs = {
            "Name": name,
            "Type": type,
            "AliasTarget": {
                "HostedZoneId": alias_hostzon_id,
                "DNSName": dns_name,
                "EvaluateTargetHealth": False
            }
        }
    else:
        # BUG FIX: the change-batch key is "ResourceRecords" (plural), and
        # non-alias records must carry a TTL — the original request was rejected
        # by the Route53 API.
        rrs = {
            "Name": name,
            "Type": type,
            "TTL": ttl,
            "ResourceRecords": [{
                "Value": dns_name
            }]
        }

    dd = dict()
    dd['Changes'] = [{"Action": "CREATE", "ResourceRecordSet": rrs}]

    aws_cli = AWSCli()
    cmd = ['route53', 'change-resource-record-sets']
    if hosted_zone_id:
        cmd += ['--hosted-zone-id', hosted_zone_id]
    cmd += ['--change-batch', json.dumps(dd)]
    rr = aws_cli.run(cmd)
    return rr
def run_terminate_acm_certificate(arn):
    """Delete the ACM certificate identified by *arn*."""
    print_session('terminate acm certificate')
    AWSCli().run(['acm', 'delete-certificate', '--certificate-arn', arn])
def _manual_backup():
    """Dump the MySQL/Aurora database to the template's mysql_data.sql file.

    In the 'dv' phase the user is asked interactively whether to dump the
    Vagrant VM database instead of the RDS read replica.
    """
    aws_cli = AWSCli()

    ################################################################################
    print_session('dump mysql data')

    engine = env['rds']['ENGINE']
    if engine not in ('mysql', 'aurora'):
        print('not supported:', engine)
        raise Exception()

    ################################################################################
    print_message('get database address')

    if env['common']['PHASE'] != 'dv':
        host = aws_cli.get_rds_address(read_replica=True)
    else:
        # keep asking until we get a definite yes/no
        while True:
            answer = input('Do you use a database of Vagrant VM? (yes/no): ').lower()
            if answer == 'yes':
                host = 'dv-database.hbsmith.io'
                break
            if answer == 'no':
                host = aws_cli.get_rds_address(read_replica=True)
                break

    rds_env = env['rds']
    filename_path = 'template/%s/rds/mysql_data.sql' % env['template']['NAME']
    _mysql_dump(host, rds_env['USER_NAME'], rds_env['USER_PASSWORD'], rds_env['DATABASE'], filename_path)
def _put_policy_replication_bucket(replication_bucket_name, origin_bucket_account_id):
    """Attach a bucket policy to the replication bucket that lets the origin
    account replicate objects into it.

    NOTE(review): credentials come from the module-level `args`
    (replication_aws_access_key / replication_aws_secret_key) — confirm it is
    always parsed before this is called.
    """
    aws_cli = AWSCli(aws_access_key=args.replication_aws_access_key,
                     aws_secret_access_key=args.replication_aws_secret_key)

    policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Sid": "1",
            "Effect": "Allow",
            "Principal": {
                "AWS": f"arn:aws:iam::{origin_bucket_account_id}:root"
            },
            "Action": [
                "s3:GetBucketVersioning",
                "s3:PutBucketVersioning",
                "s3:ReplicateObject",
                "s3:ReplicateDelete",
                "s3:ObjectOwnerOverrideToBucketOwner"
            ],
            "Resource": [
                f"arn:aws:s3:::{replication_bucket_name}",
                f"arn:aws:s3:::{replication_bucket_name}/*"
            ]
        }]
    }

    aws_cli.run(['s3api', 'put-bucket-policy',
                 '--bucket', replication_bucket_name,
                 '--policy', json.dumps(policy)])
def terminate_iam_for_appstream():
    """Detach the AppStream service policies and delete both AppStream IAM roles.

    All calls use ignore_error=True so the teardown is idempotent.
    """
    aws_cli = AWSCli()
    print_message('delete iam role')

    # for both roles the policy arn ends with the role name itself
    for role_name in ('AmazonAppStreamServiceAccess',
                      'ApplicationAutoScalingForAmazonAppStreamAccess'):
        cc = ['iam', 'detach-role-policy',
              '--role-name', role_name,
              '--policy-arn', 'arn:aws:iam::aws:policy/service-role/%s' % role_name]
        aws_cli.run(cc, ignore_error=True)

        cc = ['iam', 'delete-role', '--role-name', role_name]
        aws_cli.run(cc, ignore_error=True)
def associate_fleet(stack_name, fleet_name, fleet_region):
    """Associate an AppStream fleet with a stack; returns the CLI result."""
    cmd = ['appstream', 'associate-fleet',
           '--fleet-name', fleet_name,
           '--stack-name', stack_name]
    return AWSCli(fleet_region).run(cmd)
def run_create_s3_script_file_lifecycle(args):
    """Put a lifecycle configuration on the script bucket: expire objects under
    '_trash/' (current and noncurrent) after args.expire_days, and abort stale
    multipart uploads after 7 days."""
    expire_days = int(args.expire_days)

    print_message('set life cycle rule')

    rule = {
        "Expiration": {
            "Days": expire_days
        },
        "ID": "script_file_manage_rule",
        "Filter": {
            "Prefix": "_trash/"
        },
        "Status": "Enabled",
        "NoncurrentVersionExpiration": {
            "NoncurrentDays": expire_days
        },
        "AbortIncompleteMultipartUpload": {
            "DaysAfterInitiation": 7
        }
    }

    cmd = ['s3api', 'put-bucket-lifecycle-configuration',
           '--bucket', args.bucket_name,
           '--lifecycle-configuration', json.dumps({"Rules": [rule]})]
    AWSCli().run(cmd)
def wait_rds_available():
    """Poll describe-db-instances until every RDS instance is 'available'.

    Raises Exception after more than 1200 seconds of waiting.
    """
    aws_cli = AWSCli()
    elapsed = 0

    while True:
        rr = aws_cli.run(['rds', 'describe-db-instances'])

        if all(ii['DBInstanceStatus'] == 'available' for ii in rr['DBInstances']):
            return

        if elapsed > 1200:
            raise Exception('timeout: wait rds available')

        sleep(5)
        print("wait rds available... (elapsed time: '%d' seconds)" % elapsed)
        elapsed += 5
def create_route53(name, host_zone_name, type, domain_name, alias=None, ttl=300):
    """Create a Route53 record set in the zone matching host_zone_name.

    alias: optional dict with 'TYPE' ('cloudfront' resolves domain_name to the
        distribution's DomainName) and 'HOSTED_ZONE_ID' for the alias target.
    ttl: TTL for non-alias records (required by the Route53 API).
    Returns the parsed CLI result.

    NOTE(review): another create_route53 with a different signature exists in
    this module; in Python the later definition shadows the earlier one.
    """
    if alias:
        if alias['TYPE'] == 'cloudfront':
            rr = find_cloudfront(domain_name)
            domain_name = rr['DomainName']
        rrs = {
            "Name": name,
            "Type": type,
            "AliasTarget": {
                "HostedZoneId": alias['HOSTED_ZONE_ID'],
                "DNSName": domain_name,
                "EvaluateTargetHealth": False
            }
        }
    else:
        # BUG FIX: the change-batch key is "ResourceRecords" (plural) and
        # non-alias records must carry a TTL; the original payload was rejected
        # by the Route53 API.
        rrs = {
            "Name": name,
            "Type": type,
            "TTL": ttl,
            "ResourceRecords": [{
                "Value": domain_name
            }]
        }

    dd = dict()
    dd['Changes'] = [{"Action": "CREATE", "ResourceRecordSet": rrs}]

    aws_cli = AWSCli()
    cmd = ['route53', 'change-resource-record-sets']
    cmd += ['--hosted-zone-id', find_host_zone_id(host_zone_name)]
    cmd += ['--change-batch', json.dumps(dd)]
    rr = aws_cli.run(cmd)
    return rr
def run_create_cloudwatch_alarm_sqs(name, settings):
    """Create or update a CloudWatch alarm on an SQS queue metric.

    Raises Exception when the configured SNS topic does not exist in the region.
    """
    region = settings['AWS_DEFAULT_REGION']
    queue_name = settings['QUEUE_NAME']
    cli = AWSCli(region)

    alarm_name = '%s-%s_%s_%s_%s' % (env['common']['PHASE'], name, region,
                                     queue_name, settings['METRIC_NAME'])
    print_message('create or update cloudwatch alarm: %s' % alarm_name)

    topic_arn = cli.get_topic_arn(settings['SNS_TOPIC_NAME'])
    if not topic_arn:
        print('sns topic: "%s" is not exists in %s' % (settings['SNS_TOPIC_NAME'], region))
        raise Exception()

    dimensions = []
    if settings['DIMENSIONS'] == 'QueueName':
        dimensions.append('Name=QueueName,Value=%s' % queue_name)

    cmd = ['cloudwatch', 'put-metric-alarm',
           '--alarm-actions', topic_arn,
           '--alarm-description', settings['DESCRIPTION'],
           '--alarm-name', alarm_name,
           '--comparison-operator', settings['COMPARISON_OPERATOR'],
           '--datapoints-to-alarm', settings['DATAPOINTS_TO_ALARM'],
           '--dimensions', ' '.join(dimensions),
           '--evaluation-periods', settings['EVALUATION_PERIODS'],
           '--metric-name', settings['METRIC_NAME'],
           '--namespace', settings['NAMESPACE'],
           '--period', settings['PERIOD'],
           '--statistic', settings['STATISTIC'],
           '--threshold', settings['THRESHOLD']]
    cli.run(cmd)
def run_create_cloudwatch_alarm_sns(name, settings):
    """Create or update a CloudWatch alarm with explicit dimension pairs
    (settings['DIMENSIONS'] is a list of {'name', 'value'} dicts)."""
    region = settings['AWS_DEFAULT_REGION']
    cli = AWSCli(region)

    alarm_name = '%s-%s_%s_%s' % (env['common']['PHASE'], name, region, settings['METRIC_NAME'])
    print_message('create or update cloudwatch alarm: %s' % alarm_name)

    topic_arn = cli.get_topic_arn(settings['SNS_TOPIC_NAME'])
    if not topic_arn:
        print('sns topic: "%s" is not exists in %s' % (settings['SNS_TOPIC_NAME'], region))
        raise Exception()

    dimensions = [{'Name': ii['name'], 'Value': ii['value']} for ii in settings['DIMENSIONS']]

    cmd = ['cloudwatch', 'put-metric-alarm',
           '--alarm-actions', topic_arn,
           '--alarm-description', settings['DESCRIPTION'],
           '--alarm-name', alarm_name,
           '--comparison-operator', settings['COMPARISON_OPERATOR'],
           '--datapoints-to-alarm', settings['DATAPOINTS_TO_ALARM'],
           '--dimensions', json.dumps(dimensions),
           '--evaluation-periods', settings['EVALUATION_PERIODS'],
           '--metric-name', settings['METRIC_NAME'],
           '--namespace', settings['NAMESPACE'],
           '--period', settings['PERIOD'],
           '--statistic', settings['STATISTIC'],
           '--threshold', settings['THRESHOLD']]
    cli.run(cmd)
def get_build_id(name):
    """Return the most recent build id for the given CodeBuild project.

    Raises Exception with a clear message when the project has no builds
    (previously this raised a bare IndexError on rr['ids'][0]).
    """
    aws_cli = AWSCli()
    cmd = ['codebuild', 'list-builds-for-project']
    cmd += ['--project-name', name]
    rr = aws_cli.run(cmd)

    ids = rr.get('ids')
    if not ids:
        raise Exception('no builds found for project: %s' % name)
    return ids[0]
def disassociate_fleet(fleet_name, stack_name, fleet_region):
    """Disassociate an AppStream fleet from a stack.

    Returns True when the CLI call yielded a truthy result, False otherwise
    (errors are ignored).
    """
    cmd = ['appstream', 'disassociate-fleet',
           '--fleet-name', fleet_name,
           '--stack-name', stack_name]
    result = AWSCli(fleet_region).run(cmd, ignore_error=True)
    return bool(result)
def find_cloudfront(domain_name):
    """Return the first CloudFront distribution whose aliases include
    domain_name, or None when no distribution matches.

    BUG FIX: guards against an account with zero distributions, where
    DistributionList has no 'Items' key (previously raised KeyError).
    """
    aws_cli = AWSCli()
    rr = aws_cli.run(['cloudfront', 'list-distributions'])

    for vv in rr['DistributionList'].get('Items', []):
        aliases = vv['Aliases']
        if 'Items' in aliases and domain_name in aliases['Items']:
            return vv
    return None
def run_create_cloudwatch_alarm_lambda(name, settings):
    """Create or update a metric-math alarm on a lambda's success rate
    (100 - 100 * errors / invocations over 15-minute windows)."""
    alarm_region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(alarm_region)
    alarm_name = '%s-%s_%s_%s' % (env['common']['PHASE'], name, alarm_region, 'NotSuccessIn5Min')

    topic_arn = aws_cli.get_topic_arn(settings['SNS_TOPIC_NAME'])
    if not topic_arn:
        print('sns topic: "%s" is not exists in %s' % (settings['SNS_TOPIC_NAME'], alarm_region))
        raise Exception()

    dimensions = [{"Name": "FunctionName", "Value": name},
                  {"Name": "Resource", "Value": name}]

    def _sum_metric(metric_id, metric_name):
        # 15-minute Sum over the lambda metric; not plotted (ReturnData False),
        # only feeds the 'availability' expression.
        return {
            'Id': metric_id,
            'MetricStat': {
                'Metric': {
                    'Dimensions': dimensions,
                    'MetricName': metric_name,
                    'Namespace': 'AWS/Lambda',
                },
                'Period': 60 * 15,
                'Stat': 'Sum',
            },
            'ReturnData': False,
        }

    metrics = [
        {
            'Id': 'availability',
            'Expression': '100 - 100 * errors / invocations',
            'Label': 'Success rate (%)',
            'ReturnData': True,
        },
        _sum_metric('errors', 'Errors'),
        _sum_metric('invocations', 'Invocations'),
    ]

    cmd = ['cloudwatch', 'put-metric-alarm',
           '--alarm-actions', topic_arn,
           '--alarm-description', settings['DESCRIPTION'],
           '--alarm-name', alarm_name,
           '--comparison-operator', settings['COMPARISON_OPERATOR'],
           '--datapoints-to-alarm', settings['DATAPOINTS_TO_ALARM'],
           '--evaluation-periods', settings['EVALUATION_PERIODS'],
           '--metrics', json.dumps(metrics),
           '--threshold', settings['THRESHOLD']]
    aws_cli.run(cmd)
def run_export_cloudwatch_dashboard(name, settings):
    """Export a CloudWatch dashboard into a template JSON file, replacing
    environment-specific dimension values with placeholder tokens
    (e.g. AUTO_SCALING_GROUP_NAME, DB_CLUSTER_IDENTIFIER).

    NOTE(review): a function with this same name is defined again later in this
    module; the later definition shadows this one — confirm which is intended.
    """
    region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(region)
    dashboard_name = '%s_%s' % (name, region)
    print_message('export cloudwatch dashboard: %s' % dashboard_name)

    cmd = ['cloudwatch', 'get-dashboard']
    cmd += ['--dashboard-name', dashboard_name]
    result = aws_cli.run(cmd)

    service_type = settings['TYPE']
    dashboard_body = json.loads(result['DashboardBody'])
    for dw in dashboard_body['widgets']:
        pm = dw['properties'].get('metrics')
        if not pm:
            # BUG FIX: was `return`, which aborted the whole export (no template
            # file written) as soon as a widget without metrics (e.g. a text
            # widget) was encountered; skip the widget instead.
            continue
        pm = pm[:1]
        prev = ''
        current_index = 0
        if len(pm) < 1:
            # BUG FIX: was `return` — same early-abort defect as above.
            continue
        for dimension in pm[0]:
            # each placeholder replaces the value that follows its dimension name
            if service_type == 'elasticbeanstalk':
                if prev == 'AutoScalingGroupName':
                    pm[0][current_index] = 'AUTO_SCALING_GROUP_NAME'
                if prev == 'EnvironmentName':
                    pm[0][current_index] = 'ENVIRONMENT_NAME'
                if prev == 'InstanceId':
                    pm[0][current_index] = 'INSTANCE_ID'
                if prev == 'LoadBalancerName':
                    pm[0][current_index] = 'LOAD_BALANCER_NAME'
                if prev == 'LoadBalancer':
                    pm[0][current_index] = 'LOAD_BALANCER'
                if prev == 'TargetGroup':
                    pm[0][current_index] = 'TARGET_GROUP'
                # labels like '<name>-1234567890' carry the environment name
                if type(dimension) == dict and 'label' in dimension \
                        and re.match(r'^%s-[0-9]{10}$' % name, dimension['label']):
                    dimension['label'] = 'ENVIRONMENT_NAME'
            if service_type == 'rds/aurora':
                if prev == 'Role':
                    pm[0][current_index] = 'ROLE'
                if prev == 'DBClusterIdentifier':
                    pm[0][current_index] = 'DB_CLUSTER_IDENTIFIER'
                if prev == 'DbClusterIdentifier':
                    pm[0][current_index] = 'DB_CLUSTER_IDENTIFIER'
            prev = dimension
            current_index += 1
        dw['properties']['metrics'] = pm

    template_name = env['template']['NAME']
    filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name)
    with open(filename_path, 'w') as ff:
        json.dump(dashboard_body, ff, sort_keys=True, indent=2)
def start_codebuild(name, phase=None):
    """Start a CodeBuild build for the project; phase (if given) is passed as
    --source-version. Returns the new build id."""
    cmd = ['codebuild', 'start-build', '--project-name', name]
    if phase:
        cmd += ['--source-version', phase]
    result = AWSCli().run(cmd)
    return result['build']['id']
def run_create_cw_dashboard_rds_aurora(name, settings):
    """Create or update the RDS Aurora CloudWatch dashboard from its template,
    expanding DB_CLUSTER_IDENTIFIER with the configured cluster id and ROLE
    with WRITER/READER."""
    if not env.get('rds'):
        print_message('No RDS settings in config.json')
        return
    if env['rds'].get('ENGINE') != 'aurora':
        print_message('Only RDS Aurora supported')
        # BUG FIX: there was no `return` here, so the function fell through and
        # created the dashboard even for non-aurora engines.
        return

    dashboard_region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(dashboard_region)
    cluster_id = env['rds']['DB_CLUSTER_ID']
    instance_role_list = ['WRITER', 'READER']

    dashboard_name = '%s_%s' % (name, dashboard_region)
    print_message('create or update cloudwatch dashboard: %s' % dashboard_name)

    template_name = env['template']['NAME']
    filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name)
    with open(filename_path, 'r') as ff:
        dashboard_body = json.load(ff)

    for dw in dashboard_body['widgets']:
        pm = dw['properties']['metrics']

        # widgets whose first metric has no 'Role' dimension are per-cluster only
        cluster_id_only = True
        for dimension in pm[0]:
            if dimension == 'Role':
                cluster_id_only = False

        template = json.dumps(pm[0])
        new_metrics_list = list()
        if cluster_id_only:
            new_metric = template.replace('DB_CLUSTER_IDENTIFIER', cluster_id)
            new_metrics_list.append(json.loads(new_metric))
        else:
            # one metric line per writer/reader role
            for ir in instance_role_list:
                new_metric = template.replace('DB_CLUSTER_IDENTIFIER', cluster_id)
                new_metric = new_metric.replace('ROLE', ir)
                new_metrics_list.append(json.loads(new_metric))
        dw['properties']['metrics'] = new_metrics_list

    cmd = ['cloudwatch', 'put-dashboard']
    cmd += ['--dashboard-name', dashboard_name]
    cmd += ['--dashboard-body', json.dumps(dashboard_body)]
    aws_cli.run(cmd)
def run_terminate_cw_dashboard(name, settings):
    """Delete the CloudWatch dashboard named '<name>_<region>'.

    NOTE(review): another run_terminate_cw_dashboard exists in this module;
    the definition appearing later in the file shadows the earlier one.
    """
    region = settings['AWS_DEFAULT_REGION']
    dashboard_name = f'{name}_{region}'
    print_message(f'terminate cloudwatch dashboard: {dashboard_name}')
    AWSCli(region).run(['cloudwatch', 'delete-dashboards',
                        '--dashboard-names', dashboard_name])
def run_terminate_default_lambda(function_name, settings):
    """Delete the given lambda function; errors are ignored so the teardown
    is idempotent."""
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])

    ################################################################################
    print_session('terminate lambda: %s' % function_name)

    print_message('delete lambda function')
    aws_cli.run(['lambda', 'delete-function', '--function-name', function_name],
                ignore_error=True)
def run_terminate_cw_dashboard(name, settings):
    """Delete the CloudWatch dashboard named '<name>_<region>'.

    NOTE(review): duplicate of another run_terminate_cw_dashboard in this
    module — the later definition wins; confirm which is intended.
    """
    region = settings['AWS_DEFAULT_REGION']
    cli = AWSCli(region)

    dashboard_name = '%s_%s' % (name, region)
    print_message('terminate cloudwatch dashboard: %s' % dashboard_name)

    cli.run(['cloudwatch', 'delete-dashboards', '--dashboard-names', dashboard_name])
def run_terminate_cloudwatch_dashboard(name, settings):
    """Delete the CloudWatch dashboard named '<name>_<region>'."""
    region = settings['AWS_DEFAULT_REGION']
    dashboard_name = '%s_%s' % (name, region)

    print_message('terminate cloudwatch dashboard: %s' % dashboard_name)

    cmd = ['cloudwatch', 'delete-dashboards', '--dashboard-names', dashboard_name]
    AWSCli(region).run(cmd)
def run_terminate_github_codebuild(name, settings):
    """Tear down a GitHub-backed CodeBuild project: its webhook, cron rules,
    the project itself, its SSM parameters under /CodeBuild/<name>, and the
    associated IAM role."""
    print_message('delete github codebuild %s' % name)
    print_message('delete github codebuild(webhook) %s' % name)

    cli = AWSCli(settings.get('AWS_DEFAULT_REGION'))
    cli.run(['codebuild', 'delete-webhook', '--project-name', name], ignore_error=True)

    for cron in settings.get('CRON', list()):
        rule_name = '%sCronRuleSourceBy%s' % (name, cron['SOURCE_VERSION'].title())
        terminate_cron_event(cli, rule_name)

    print_message('delete github codebuild(project) %s' % name)
    cli.run(['codebuild', 'delete-project', '--name', name], ignore_error=True)

    print_message('delete github codebuild(environment variable) %s' % name)
    result = cli.run(['ssm', 'get-parameters-by-path', '--path', '/CodeBuild/%s' % name])
    for pp in result.get('Parameters', []):
        cli.run(['ssm', 'delete-parameter', '--name', pp['Name']], ignore_error=True)

    terminate_iam_for_codebuild(name.replace('_', '-'))
def find_host_zone_id(host_zone_name):
    """Return the id of the hosted zone matching host_zone_name.

    Raises Exception unless exactly one zone matches.
    """
    print_session('find_host_zone_id')
    aws_cli = AWSCli()

    cmd = ['route53', 'list-hosted-zones-by-name', '--dns-name', host_zone_name]
    rr = aws_cli.run(cmd)

    zones = rr['HostedZones']
    if len(zones) != 1:
        raise Exception('wrong host zone')
    return zones[0]['Id']
def delete_cloudfront(id):
    """Delete a CloudFront distribution.

    Fetches the distribution first because delete-distribution requires the
    current ETag as its If-Match value. (Parameter shadows the builtin `id`;
    kept for backward compatibility with keyword callers.)
    """
    aws_cli = AWSCli()

    rr = aws_cli.run(['cloudfront', 'get-distribution', '--id', id])

    aws_cli.run(['cloudfront', 'delete-distribution',
                 '--id', id,
                 '--if-match', rr['ETag']])
def run_create_sns_topic(name, settings):
    """Create an SNS topic in the configured region and print its ARN."""
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])

    ################################################################################
    print_message('create sns topic: %s' % name)

    result = aws_cli.run(['sns', 'create-topic', '--name', name])
    print('created:', result['TopicArn'])
def run_terminate_cloudwatch_alarm(name, settings):
    """Delete the CloudWatch alarm for this service/metric pair (errors ignored)."""
    region = settings['AWS_DEFAULT_REGION']

    ################################################################################
    alarm_name = '%s-%s_%s_%s' % (env['common']['PHASE'], name, region, settings['METRIC_NAME'])
    print_message('terminate cloudwatch alarm: %s' % alarm_name)

    AWSCli(region).run(['cloudwatch', 'delete-alarms', '--alarm-names', alarm_name],
                       ignore_error=True)
def run_create_acm_certificate(domain_name, additional_names, validation_method):
    """Request an ACM certificate for domain_name, with optional
    subject-alternative names, using the given validation method."""
    print_session('create acm certificate')

    cmd = ['acm', 'request-certificate', '--domain-name', domain_name]
    if additional_names:
        cmd += ['--subject-alternative-names', ' '.join(additional_names)]
    cmd += ['--validation-method', validation_method]
    AWSCli().run(cmd)
def is_exist_cname(cname_list):
    """Return True if any CNAME in cname_list is already used as an alias by an
    existing CloudFront distribution, else False.

    BUG FIX: guards against an account with zero distributions, where
    DistributionList has no 'Items' key (previously raised KeyError).
    """
    aws_cli = AWSCli()
    rr = aws_cli.run(['cloudfront', 'list-distributions'])

    for vv in rr['DistributionList'].get('Items', []):
        aliases = vv['Aliases'].get('Items', [])
        for cc in cname_list:
            if cc in aliases:
                print_message('exist cname(%s)' % cc)
                return True
    return False
def run_terminate_s3_script_bucket_lifecycle(name):
    """Remove the lifecycle configuration from the script bucket.

    NOTE(review): the bucket comes from the module-level `args.bucket_name`,
    not from `name` (which is only used in the session banner) — confirm this
    is intended.
    """
    aws_cli = AWSCli()
    bucket_name = args.bucket_name

    ################################################################################
    print_session('terminate %s' % name)

    ################################################################################
    print_message('delete life cycle')

    aws_cli.run(['s3api', 'delete-bucket-lifecycle', '--bucket', bucket_name],
                ignore_error=True)
def create_image_builder(name, subnet_ids, security_group_id, image_name):
    """Create an AppStream image builder in the given VPC, without default
    internet access, on a stream.standard.medium instance."""
    vpc_config = 'SubnetIds=%s,SecurityGroupIds=%s' % (subnet_ids, security_group_id)

    cmd = ['appstream', 'create-image-builder',
           '--name', name,
           '--instance-type', 'stream.standard.medium',
           '--image-name', image_name,
           '--vpc-config', vpc_config,
           '--no-enable-default-internet-access']
    AWSCli().run(cmd)
def run_terminate_sns_tpoic(name, settings):
    """Delete the SNS topic `name` in the configured region; no-op when the
    topic does not exist.

    NOTE(review): 'tpoic' is a typo for 'topic' — the name is kept because
    callers elsewhere may reference it.
    """
    region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(region)

    ################################################################################
    print_message('terminate sns topic: "%s" in %s' % (name, region))

    topic_arn = aws_cli.get_topic_arn(name)
    if not topic_arn:
        return

    aws_cli.run(['sns', 'delete-topic', '--topic-arn', topic_arn])
def run_export_cloudwatch_dashboard_sqs_lambda(name, settings):
    """Export an SQS/Lambda CloudWatch dashboard into a template JSON file,
    replacing the phase prefixes ('dv-', 'qa-', 'op-') in queue names and
    widget titles with the 'PHASE-' placeholder."""
    region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(region)
    dashboard_name = '%s_%s' % (name, region)
    print_message('export cloudwatch dashboard: %s' % dashboard_name)

    cmd = ['cloudwatch', 'get-dashboard']
    cmd += ['--dashboard-name', dashboard_name]
    result = aws_cli.run(cmd)

    dashboard_body = json.loads(result['DashboardBody'])
    for dw in dashboard_body['widgets']:
        pm = dw['properties']['metrics']
        first_pm = pm[:1]
        prev = ''
        current_index = 0
        if len(first_pm) < 1:
            # BUG FIX: was `return`, which aborted the whole export (no template
            # file written) as soon as a widget with an empty metrics list was
            # encountered; skip the widget instead.
            continue
        for dimension in first_pm[0]:
            if prev == 'QueueName':
                # the value following the 'QueueName' dimension is the queue name
                queue_name = first_pm[0][current_index]
                for prefix in ('dv-', 'qa-', 'op-'):
                    if queue_name.startswith(prefix):
                        queue_name = queue_name.replace(prefix, 'PHASE-')
                first_pm[0][current_index] = queue_name
            prev = dimension
            current_index += 1
        dw['properties']['metrics'] = pm

        title = dw['properties']['title']
        for prefix in ('SQS: dv-', 'SQS: qa-', 'SQS: op-'):
            if title.startswith(prefix):
                title = title.replace(prefix, 'SQS: PHASE-')
        dw['properties']['title'] = title

    template_name = env['template']['NAME']
    filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name)
    with open(filename_path, 'w') as ff:
        json.dump(dashboard_body, ff, sort_keys=True, indent=2)
def run_terminate_cw_alarm(name, settings):
    """Delete the CloudWatch alarm for this service/metric pair; SQS-type
    alarms include the queue name in the alarm name (errors ignored)."""
    phase = env['common']['PHASE']
    region = settings['AWS_DEFAULT_REGION']

    ################################################################################
    if settings['TYPE'] == 'sqs':
        alarm_name = '%s-%s_%s_%s_%s' % (phase, name, region,
                                         settings['QUEUE_NAME'], settings['METRIC_NAME'])
    else:
        alarm_name = '%s-%s_%s_%s' % (phase, name, region, settings['METRIC_NAME'])

    print_message('terminate cloudwatch alarm: %s' % alarm_name)

    AWSCli(region).run(['cloudwatch', 'delete-alarms', '--alarm-names', alarm_name],
                       ignore_error=True)
def run_export_cloudwatch_dashboard(name, settings):
    """Export a CloudWatch dashboard into a template JSON file, replacing
    environment-specific dimension values with placeholder tokens.

    NOTE(review): a function with this same name is defined earlier in this
    module; this later definition shadows it — confirm which is intended.
    """
    region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(region)
    dashboard_name = '%s_%s' % (name, region)
    print_message('export cloudwatch dashboard: %s' % dashboard_name)

    cmd = ['cloudwatch', 'get-dashboard']
    cmd += ['--dashboard-name', dashboard_name]
    result = aws_cli.run(cmd)

    service_type = settings['TYPE']
    dashboard_body = json.loads(result['DashboardBody'])
    for dw in dashboard_body['widgets']:
        # BUG FIX: widgets without a 'metrics' property (e.g. text widgets)
        # raised KeyError, and an empty metrics list triggered a `return` that
        # aborted the whole export; skip such widgets instead.
        pm = dw['properties'].get('metrics')
        if not pm:
            continue
        pm = pm[:1]
        prev = ''
        current_index = 0
        for dimension in pm[0]:
            # each placeholder replaces the value that follows its dimension name
            if prev == 'InstanceId' and service_type == 'elasticbeanstalk':
                pm[0][current_index] = 'INSTANCE_ID'
            if prev == 'AutoScalingGroupName' and service_type == 'elasticbeanstalk':
                pm[0][current_index] = 'AUTO_SCALING_GROUP_NAME'
            if prev == 'LoadBalancerName' and service_type == 'elasticbeanstalk':
                pm[0][current_index] = 'LOAD_BALANCER_NAME'
            if prev == 'EnvironmentName' and service_type == 'elasticbeanstalk':
                pm[0][current_index] = 'ENVIRONMENT_NAME'
            if prev == 'Role' and service_type == 'rds/aurora':
                pm[0][current_index] = 'ROLE'
            if prev == 'DBClusterIdentifier' and service_type == 'rds/aurora':
                pm[0][current_index] = 'DB_CLUSTER_IDENTIFIER'
            if prev == 'DbClusterIdentifier' and service_type == 'rds/aurora':
                pm[0][current_index] = 'DB_CLUSTER_IDENTIFIER'
            prev = dimension
            current_index += 1
        dw['properties']['metrics'] = pm

    template_name = env['template']['NAME']
    filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name)
    with open(filename_path, 'w') as ff:
        json.dump(dashboard_body, ff, sort_keys=True, indent=2)
def run_create_cw_dashboard_sqs_lambda(name, settings):
    """Create or update an SQS/Lambda dashboard from its template file,
    expanding the 'PHASE-' placeholder into the current phase prefix."""
    phase = env['common']['PHASE']
    dashboard_region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(dashboard_region)

    dashboard_name = '%s_%s' % (name, dashboard_region)
    print_message('create or update cloudwatch dashboard: %s' % dashboard_name)

    filename_path = 'template/%s/cloudwatch/%s.json' % (env['template']['NAME'], dashboard_name)
    with open(filename_path, 'r') as ff:
        dashboard_body = json.load(ff)

    for dw in dashboard_body['widgets']:
        pm = dw['properties']['metrics']
        for idx, metric in enumerate(pm):
            # round-trip through JSON to substitute the placeholder everywhere
            pm[idx] = json.loads(json.dumps(metric).replace('PHASE-', '%s-' % phase))
        dw['properties']['metrics'] = pm

        title = dw['properties']['title']
        if title.startswith('SQS: PHASE-'):
            title = title.replace('SQS: PHASE-', 'SQS: %s-' % phase)
        dw['properties']['title'] = title

    cmd = ['cloudwatch', 'put-dashboard']
    cmd += ['--dashboard-name', dashboard_name]
    cmd += ['--dashboard-body', json.dumps(dashboard_body)]
    aws_cli.run(cmd)
# # start # ################################################################################ print_session('terminate old environment version') timestamp = int(time.time()) max_age_seconds = 60 * 60 * 24 * 3 ################################################################################ print_message('terminate old environment version (current timestamp: %d)' % timestamp) eb_application_name = env['elasticbeanstalk']['APPLICATION_NAME'] for vpc_env in env['vpc']: aws_cli = AWSCli(vpc_env['AWS_DEFAULT_REGION']) aws_default_region = vpc_env['AWS_DEFAULT_REGION'] cmd = ['elasticbeanstalk', 'describe-application-versions'] cmd += ['--application-name', eb_application_name] result = aws_cli.run(cmd) for r in result['ApplicationVersions']: if r['Status'] in ('PROCESSING', 'BUILDING'): continue print('') print('ApplicationName:', r['ApplicationName']) print('VersionLabel:', r['VersionLabel']) print('Status:', r['Status']) print('')
def run_create_lambda_sns(name, settings):
    """Package and deploy an SNS-triggered lambda function.

    Builds a zip from the template's lambda folder (installing requirements
    and rendering settings_local.py when present), then either updates the
    existing function's code/tags or creates it and subscribes it to every
    topic in settings['SNS_TOPICS_NAMES'] (each entry is '<region>/<topic>').

    Raises Exception when any configured SNS topic does not exist.
    """
    aws_cli = AWSCli()
    description = settings['DESCRIPTION']
    function_name = settings['NAME']
    phase = env['common']['PHASE']
    template_name = env['template']['NAME']
    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    # capture the git revision of both the johanna repo (cwd) and the template
    # repo, for tagging the deployed function
    git_rev = ['git', 'rev-parse', 'HEAD']
    git_hash_johanna = subprocess.Popen(git_rev, stdout=subprocess.PIPE).communicate()[0]
    git_hash_template = subprocess.Popen(git_rev, stdout=subprocess.PIPE,
                                         cwd=template_path).communicate()[0]

    ################################################################################

    # resolve every configured topic up-front so a missing topic fails fast
    topic_arn_list = list()
    for sns_topic_name in settings['SNS_TOPICS_NAMES']:
        print_message('check topic exists: %s' % sns_topic_name)
        region, topic_name = sns_topic_name.split('/')
        topic_arn = AWSCli(region).get_topic_arn(topic_name)
        if not topic_arn:
            print('sns topic: "%s" is not exists in %s' % (settings['SNS_TOPIC_NAME'], region))
            raise Exception()
        topic_arn_list.append(topic_arn)

    ################################################################################

    print_session('packaging lambda: %s' % function_name)

    print_message('cleanup generated files')
    # remove everything not tracked by git so the zip contains a clean build
    subprocess.Popen(['git', 'clean', '-d', '-f', '-x'], cwd=deploy_folder).communicate()

    requirements_path = '%s/requirements.txt' % deploy_folder
    if os.path.exists(requirements_path):
        print_message('install dependencies')
        cmd = ['pip3', 'install', '-r', requirements_path, '-t', deploy_folder]
        subprocess.Popen(cmd).communicate()

    settings_path = '%s/settings_local_sample.py' % deploy_folder
    if os.path.exists(settings_path):
        print_message('create environment values')
        # render settings_local.py from the sample, substituting PHASE plus
        # every key in the function's settings block
        lines = read_file(settings_path)
        option_list = list()
        option_list.append(['PHASE', phase])
        for key in settings:
            value = settings[key]
            option_list.append([key, value])
        for oo in option_list:
            lines = re_sub_lines(lines, '^(%s) .*' % oo[0], '\\1 = \'%s\'' % oo[1])
        write_file('%s/settings_local.py' % deploy_folder, lines)

    print_message('zip files')
    cmd = ['zip', '-r', 'deploy.zip', '.']
    subprocess.Popen(cmd, cwd=deploy_folder).communicate()

    print_message('create lambda function')
    role_arn = aws_cli.get_role_arn('aws-lambda-default-role')

    tags = list()
    # noinspection PyUnresolvedReferences
    tags.append('git_hash_johanna=%s' % git_hash_johanna.decode('utf-8').strip())
    # noinspection PyUnresolvedReferences
    tags.append('git_hash_%s=%s' % (template_name, git_hash_template.decode('utf-8').strip()))

    ################################################################################

    print_message('check previous version')

    need_update = False
    cmd = ['lambda', 'list-functions']
    result = aws_cli.run(cmd)
    for ff in result['Functions']:
        if function_name == ff['FunctionName']:
            need_update = True
            break

    ################################################################################

    if need_update:
        print_session('update lambda: %s' % function_name)

        cmd = ['lambda', 'update-function-code',
               '--function-name', function_name,
               '--zip-file', 'fileb://deploy.zip']
        result = aws_cli.run(cmd, cwd=deploy_folder)
        function_arn = result['FunctionArn']

        print_message('update lambda tags')

        cmd = ['lambda', 'tag-resource',
               '--resource', function_arn,
               '--tags', ','.join(tags)]
        aws_cli.run(cmd, cwd=deploy_folder)
        return

    ################################################################################

    print_session('create lambda: %s' % function_name)

    cmd = ['lambda', 'create-function',
           '--function-name', function_name,
           '--description', description,
           '--zip-file', 'fileb://deploy.zip',
           '--role', role_arn,
           '--handler', 'lambda.handler',
           '--runtime', 'python3.6',
           '--tags', ','.join(tags),
           '--timeout', '120']
    result = aws_cli.run(cmd, cwd=deploy_folder)
    function_arn = result['FunctionArn']

    # subscribe the new function to each topic and allow SNS to invoke it
    for topic_arn in topic_arn_list:
        print_message('create subscription')

        topic_region = topic_arn.split(':')[3]
        cmd = ['sns', 'subscribe',
               '--topic-arn', topic_arn,
               '--protocol', 'lambda',
               '--notification-endpoint', function_arn]
        AWSCli(topic_region).run(cmd)

        print_message('Add permission to lambda')

        statement_id = '%s_%s_Permission' % (function_name, topic_region)
        cmd = ['lambda', 'add-permission',
               '--function-name', function_name,
               '--statement-id', statement_id,
               '--action', 'lambda:InvokeFunction',
               '--principal', 'sns.amazonaws.com',
               '--source-arn', topic_arn]
        aws_cli.run(cmd)

    print_message('update tag with subscription info')

    cmd = ['lambda', 'tag-resource',
           '--resource', function_arn,
           '--tags', ','.join(tags)]
    aws_cli.run(cmd, cwd=deploy_folder)
#!/usr/bin/env python3 from run_common import AWSCli from run_common import print_message from run_common import print_session if __name__ == "__main__": from run_common import parse_args parse_args() aws_cli = AWSCli() ################################################################################ # # start # ################################################################################ print_session('terminate sqs') ################################################################################ print_message('load queue lists') cmd = ['sqs', 'list-queues'] command_result = aws_cli.run(cmd) if 'QueueUrls' in command_result: sqs = command_result['QueueUrls'] print_message('delete queues') for sqs_env in sqs: cmd = ['sqs', 'delete-queue']
def terminate_iam():
    ################################################################################
    #
    # IAM
    #
    ################################################################################
    """Best-effort teardown of the two Elastic Beanstalk IAM roles.

    Detaches/deletes managed and inline policies, removes the EC2 role from
    its instance profile, then deletes the roles and the profile. Every call
    uses ignore_error=True so a partially-removed setup does not abort the run.
    """
    print_session('terminate iam')

    aws_cli = AWSCli()

    service_role = 'aws-elasticbeanstalk-service-role'
    ec2_role = 'aws-elasticbeanstalk-ec2-role'

    ################################################################################
    print_message('terminate iam: %s' % service_role)

    # Managed policies must be detached before the role can be deleted.
    for policy_arn in (
            'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService',
            'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth'):
        detach = ['iam', 'detach-role-policy']
        detach += ['--role-name', service_role]
        detach += ['--policy-arn', policy_arn]
        aws_cli.run(detach, ignore_error=True)

    aws_cli.run(['iam', 'delete-role', '--role-name', service_role],
                ignore_error=True)

    ################################################################################
    print_message('terminate iam: %s' % ec2_role)

    # The EC2 role also carries an inline policy; delete it first.
    delete_inline = ['iam', 'delete-role-policy']
    delete_inline += ['--role-name', ec2_role]
    delete_inline += ['--policy-name', 'aws-elasticbeanstalk-ec2-policy']
    aws_cli.run(delete_inline, ignore_error=True)

    for policy_arn in (
            'arn:aws:iam::aws:policy/AWSElasticBeanstalkWorkerTier',
            'arn:aws:iam::aws:policy/AWSElasticBeanstalkMulticontainerDocker',
            'arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier'):
        detach = ['iam', 'detach-role-policy']
        detach += ['--role-name', ec2_role]
        detach += ['--policy-arn', policy_arn]
        aws_cli.run(detach, ignore_error=True)

    # The role must leave its instance profile before either can be deleted.
    remove = ['iam', 'remove-role-from-instance-profile']
    remove += ['--instance-profile-name', ec2_role]
    remove += ['--role-name', ec2_role]
    aws_cli.run(remove, ignore_error=True)

    aws_cli.run(['iam', 'delete-role', '--role-name', ec2_role],
                ignore_error=True)

    aws_cli.run(['iam', 'delete-instance-profile',
                 '--instance-profile-name', ec2_role], ignore_error=True)
def main(settings):
    """Tear down the project's networking stack (VPCs, subnets, gateways,
    security groups, route tables) and the Elastic Beanstalk application.

    Waits for RDS/ElastiCache/EB resources to finish terminating first, then
    deletes network resources in dependency order. Almost every call uses
    ignore_error=True: the teardown is best-effort and must survive resources
    that are already gone.
    """
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])
    rds_subnet_name = env['rds']['DB_SUBNET_NAME']
    service_name = env['common'].get('SERVICE_NAME', '')
    name_prefix = '%s_' % service_name if service_name else ''

    ################################################################################
    print_message('wait terminate rds')

    aws_cli.wait_terminate_rds()

    ################################################################################
    print_message('wait terminate elasticache')

    aws_cli.wait_terminate_elasticache()

    ################################################################################
    print_message('wait terminate eb')

    aws_cli.wait_terminate_eb()

    ################################################################################
    print_message('get vpc id')

    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

    ################################################################################
    print_message('delete network interface')

    # NOTE(review): if describe fails, ignore_error returns a non-dict result
    # and the subscription below would raise — TODO confirm AWSCli.run's
    # error-path return value.
    cmd = ['ec2', 'describe-network-interfaces']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['NetworkInterfaces']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        network_interface_id = r['NetworkInterfaceId']
        # An attached interface must be detached before it can be deleted.
        if 'Attachment' in r:
            attachment_id = r['Attachment']['AttachmentId']
            cmd = ['ec2', 'detach-network-interface']
            cmd += ['--attachment-id', attachment_id]
            aws_cli.run(cmd, ignore_error=True)
        cmd = ['ec2', 'delete-network-interface']
        cmd += ['--network-interface-id', network_interface_id]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete vpc peering connection')

    # Only the RDS(requester) -> EB(accepter) peering created by this project
    # is removed; other peerings are left alone.
    cmd = ['ec2', 'describe-vpc-peering-connections']
    result = aws_cli.run(cmd, ignore_error=True)
    for vpc_peer in result['VpcPeeringConnections']:
        if vpc_peer['RequesterVpcInfo']['VpcId'] == rds_vpc_id and vpc_peer['AccepterVpcInfo']['VpcId'] == eb_vpc_id:
            peering_connection_id = vpc_peer['VpcPeeringConnectionId']
            print('delete vpc peering connnection (id: %s)' % peering_connection_id)
            cmd = ['ec2', 'delete-vpc-peering-connection']
            cmd += ['--vpc-peering-connection-id', peering_connection_id]
            aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('revoke security group ingress')

    # The eb_private/eb_public groups reference each other, which would block
    # deletion; revoke the cross-references first.
    security_group_id_1 = None
    security_group_id_2 = None
    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['SecurityGroups']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        if r['GroupName'] == '%seb_private' % name_prefix:
            security_group_id_1 = r['GroupId']
        if r['GroupName'] == '%seb_public' % name_prefix:
            security_group_id_2 = r['GroupId']

    if security_group_id_1 and security_group_id_2:
        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', security_group_id_1]
        cmd += ['--protocol', 'all']
        cmd += ['--source-group', security_group_id_2]
        aws_cli.run(cmd, ignore_error=True)

        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', security_group_id_2]
        cmd += ['--protocol', 'all']
        cmd += ['--source-group', security_group_id_1]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete security group')

    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['SecurityGroups']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        # The VPC's 'default' group cannot be deleted; skip it.
        if r['GroupName'] == 'default':
            continue
        print('delete security group (id: %s)' % r['GroupId'])
        cmd = ['ec2', 'delete-security-group']
        cmd += ['--group-id', r['GroupId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete route')

    # Remove only the default (0.0.0.0/0) routes that this project added.
    # NOTE(review): routes without a 'DestinationCidrBlock' key (e.g. IPv6)
    # would raise KeyError here — TODO confirm against real responses.
    cmd = ['ec2', 'describe-route-tables']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['RouteTables']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        for route in r['Routes']:
            if route['DestinationCidrBlock'] == '0.0.0.0/0':
                print('delete route (route table id: %s)' % r['RouteTableId'])
                cmd = ['ec2', 'delete-route']
                cmd += ['--route-table-id', r['RouteTableId']]
                cmd += ['--destination-cidr-block', '0.0.0.0/0']
                aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('disassociate route table')

    cmd = ['ec2', 'describe-route-tables']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['RouteTables']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        for association in r['Associations']:
            # The main association cannot be removed explicitly.
            if association['Main']:
                continue
            print('disassociate route table (route table id: %s, route table association id: %s)' % (
                r['RouteTableId'], association['RouteTableAssociationId']))
            cmd = ['ec2', 'disassociate-route-table']
            cmd += ['--association-id', association['RouteTableAssociationId']]
            aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete route table')

    # Only tables with no remaining associations are deleted (the main table
    # keeps its implicit association and is removed with the VPC).
    cmd = ['ec2', 'describe-route-tables']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['RouteTables']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        if len(r['Associations']) != 0:
            continue
        print('delete route table (route table id: %s)' % r['RouteTableId'])
        cmd = ['ec2', 'delete-route-table']
        cmd += ['--route-table-id', r['RouteTableId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete nat gateway')

    cmd = ['ec2', 'describe-nat-gateways']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['NatGateways']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        print('delete nat gateway (nat gateway id: %s)' % r['NatGatewayId'])
        cmd = ['ec2', 'delete-nat-gateway']
        cmd += ['--nat-gateway-id', r['NatGatewayId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('wait delete nat gateway')

    # NAT gateway deletion is asynchronous; its EIP stays associated until
    # the gateway is fully gone, so block here before releasing addresses.
    aws_cli.wait_delete_nat_gateway(eb_vpc_id=eb_vpc_id)

    ################################################################################
    print_message('release eip')

    cmd = ['ec2', 'describe-addresses']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['Addresses']:
        # Still-associated addresses are in use elsewhere; leave them.
        if 'AssociationId' in r:
            continue
        print('release address (address id: %s)' % r['AllocationId'])
        cmd = ['ec2', 'release-address']
        cmd += ['--allocation-id', r['AllocationId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    # ElastiCache subnet group (only if this deployment configured one).
    if env.get('elasticache'):
        elasticache_subnet_name = env['elasticache']['CACHE_SUBNET_NAME']
        print_message('delete cache subnet group')
        cmd = ['elasticache', 'delete-cache-subnet-group']
        cmd += ['--cache-subnet-group-name', elasticache_subnet_name]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete db subnet group')

    cmd = ['rds', 'delete-db-subnet-group']
    cmd += ['--db-subnet-group-name', rds_subnet_name]
    aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('detach internet gateway')

    # Only the gateway attached solely to the EB VPC is ours.
    cmd = ['ec2', 'describe-internet-gateways']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['InternetGateways']:
        if len(r['Attachments']) != 1:
            continue
        if r['Attachments'][0]['VpcId'] != eb_vpc_id:
            continue
        print('detach internet gateway (internet gateway id: %s)' % r['InternetGatewayId'])
        cmd = ['ec2', 'detach-internet-gateway']
        cmd += ['--internet-gateway-id', r['InternetGatewayId']]
        cmd += ['--vpc-id', r['Attachments'][0]['VpcId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete internet gateway')

    # Re-describe: only gateways with zero attachments (just detached above)
    # can be deleted.
    cmd = ['ec2', 'describe-internet-gateways']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['InternetGateways']:
        if len(r['Attachments']) != 0:
            continue
        print('delete internet gateway (internet gateway id: %s)' % r['InternetGatewayId'])
        cmd = ['ec2', 'delete-internet-gateway']
        cmd += ['--internet-gateway-id', r['InternetGatewayId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete subnet')

    cmd = ['ec2', 'describe-subnets']
    result = aws_cli.run(cmd, ignore_error=True)
    for r in result['Subnets']:
        if r['VpcId'] != rds_vpc_id and r['VpcId'] != eb_vpc_id:
            continue
        print('delete subnet (subnet id: %s)' % r['SubnetId'])
        cmd = ['ec2', 'delete-subnet']
        cmd += ['--subnet-id', r['SubnetId']]
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete vpc')

    # Retry until both VPCs are really gone: deletion can fail transiently
    # while dependent resources are still being cleaned up.
    while rds_vpc_id or eb_vpc_id:
        if rds_vpc_id:
            print('delete vpc (vpc id: %s)' % rds_vpc_id)
            cmd = ['ec2', 'delete-vpc']
            cmd += ['--vpc-id', rds_vpc_id]
            aws_cli.run(cmd, ignore_error=True)
        if eb_vpc_id:
            print('delete vpc (vpc id: %s)' % eb_vpc_id)
            cmd = ['ec2', 'delete-vpc']
            cmd += ['--vpc-id', eb_vpc_id]
            aws_cli.run(cmd, ignore_error=True)
        rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

    ################################################################################
    #
    # EB Application
    #
    ################################################################################
    print_session('terminate eb application')

    ################################################################################
    print_message('delete application')

    cmd = ['elasticbeanstalk', 'delete-application']
    cmd += ['--application-name', env['elasticbeanstalk']['APPLICATION_NAME']]
    aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('delete key pair')

    cmd = ['ec2', 'delete-key-pair']
    cmd += ['--key-name', env['common']['AWS_KEY_PAIR_NAME']]
    aws_cli.run(cmd, ignore_error=True)
def run_create_eb_cron_job(name, settings):
    """Build and deploy one Elastic Beanstalk cron-job environment.

    Clones the application repo, renders its settings/ebextensions templates,
    uploads a zip as a new application version, creates a timestamped EB
    environment, waits for it to become Green/Ready, and finally swaps CNAMEs
    with the previous environment if one existed.

    :param name: logical app name; also the folder under the template's
        elasticbeanstalk/ directory.
    :param settings: mapping of per-app configuration (region, CNAME, git
        URL, ASG sizes, optional PRIVATE_IP, ...).
    :raises Exception: on missing VPC, unknown subnet type, failed clone,
        a not-ready previous environment, or creation timeout (30 min).
    """
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])

    aws_asg_max_value = settings['AWS_ASG_MAX_VALUE']
    aws_asg_min_value = settings['AWS_ASG_MIN_VALUE']
    aws_default_region = settings['AWS_DEFAULT_REGION']
    aws_eb_notification_email = settings['AWS_EB_NOTIFICATION_EMAIL']
    cname = settings['CNAME']
    debug = env['common']['DEBUG']
    eb_application_name = env['elasticbeanstalk']['APPLICATION_NAME']
    git_url = settings['GIT_URL']
    key_pair_name = env['common']['AWS_KEY_PAIR_NAME']
    phase = env['common']['PHASE']
    subnet_type = settings['SUBNET_TYPE']
    template_name = env['template']['NAME']
    service_name = env['common'].get('SERVICE_NAME', '')
    name_prefix = '%s_' % service_name if service_name else ''

    # BUG FIX: settings is a mapping (it is indexed with settings[...] and
    # iterated with `for key in settings` below), so hasattr() could never
    # see the 'PRIVATE_IP' key and private_ip was always None, making the
    # attach-network-interface step dead code. Use key membership instead.
    if 'PRIVATE_IP' in settings:
        private_ip = settings['PRIVATE_IP']
    else:
        private_ip = None

    cidr_subnet = aws_cli.cidr_subnet

    # Timestamp makes zip name, version label and environment name unique
    # per deployment, enabling blue/green CNAME swapping at the end.
    str_timestamp = str(int(time.time()))

    zip_filename = '%s-%s.zip' % (name, str_timestamp)

    eb_environment_name = '%s-%s' % (name, str_timestamp)
    eb_environment_name_old = None

    template_path = 'template/%s' % template_name
    environment_path = '%s/elasticbeanstalk/%s' % (template_path, name)

    git_rev = ['git', 'rev-parse', 'HEAD']
    git_hash_johanna = subprocess.Popen(git_rev, stdout=subprocess.PIPE).communicate()[0]
    git_hash_template = subprocess.Popen(git_rev, stdout=subprocess.PIPE, cwd=template_path).communicate()[0]

    ################################################################################
    print_session('create %s' % name)

    ################################################################################
    print_message('get vpc id')

    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()

    if not eb_vpc_id:
        print('ERROR!!! No VPC found')
        raise Exception()

    ################################################################################
    print_message('get subnet id')

    # Pick the two subnets of the requested type by matching configured CIDRs.
    subnet_id_1 = None
    subnet_id_2 = None
    cmd = ['ec2', 'describe-subnets']
    result = aws_cli.run(cmd)
    for r in result['Subnets']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['public_1']:
                subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['public_2']:
                subnet_id_2 = r['SubnetId']
        elif 'private' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['private_1']:
                subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_2']:
                subnet_id_2 = r['SubnetId']
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('get security group id')

    security_group_id = None
    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd)
    for r in result['SecurityGroups']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['GroupName'] == '%seb_public' % name_prefix:
                security_group_id = r['GroupId']
                break
        elif 'private' == subnet_type:
            if r['GroupName'] == '%seb_private' % name_prefix:
                security_group_id = r['GroupId']
                break
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('configuration %s' % name)

    with open('%s/configuration/phase' % environment_path, 'w') as f:
        f.write(phase)
        # (redundant f.close() removed: the with-block closes the file)

    # Render the ebextensions config from its .sample by substituting the
    # placeholder tokens.
    lines = read_file('%s/.ebextensions/%s.config.sample' % (environment_path, name))
    lines = re_sub_lines(lines, 'AWS_ASG_MIN_VALUE', aws_asg_min_value)
    lines = re_sub_lines(lines, 'AWS_ASG_MAX_VALUE', aws_asg_max_value)
    lines = re_sub_lines(lines, 'AWS_EB_NOTIFICATION_EMAIL', aws_eb_notification_email)
    write_file('%s/.ebextensions/%s.config' % (environment_path, name), lines)

    ################################################################################
    print_message('git clone')

    subprocess.Popen(['rm', '-rf', './%s' % name], cwd=environment_path).communicate()

    # dv phase tracks the default branch; other phases use their own branch.
    if phase == 'dv':
        git_command = ['git', 'clone', '--depth=1', git_url]
    else:
        git_command = ['git', 'clone', '--depth=1', '-b', phase, git_url]
    subprocess.Popen(git_command, cwd=environment_path).communicate()
    if not os.path.exists('%s/%s' % (environment_path, name)):
        raise Exception()

    git_hash_app = subprocess.Popen(git_rev, stdout=subprocess.PIPE,
                                    cwd='%s/%s' % (environment_path, name)).communicate()[0]

    # Strip git metadata so it doesn't end up in the deployment zip.
    subprocess.Popen(['rm', '-rf', './%s/.git' % name], cwd=environment_path).communicate()
    subprocess.Popen(['rm', '-rf', './%s/.gitignore' % name], cwd=environment_path).communicate()

    ################################################################################
    # Render settings_local.py for each configured location: DEBUG plus every
    # key in settings (and PHASE) overrides matching lines of the sample.
    for ss in settings['SETTINGS_LOCAL_PATH']:
        lines = read_file('%s/%s/settings_local_sample.py' % (environment_path, ss))
        lines = re_sub_lines(lines, '^(DEBUG).*', '\\1 = %s' % debug)
        option_list = list()
        option_list.append(['PHASE', phase])
        for key in settings:
            value = settings[key]
            option_list.append([key, value])
        for oo in option_list:
            lines = re_sub_lines(lines, '^(%s) .*' % oo[0], '\\1 = \'%s\'' % oo[1])
        write_file('%s/%s/settings_local.py' % (environment_path, ss), lines)

    ################################################################################
    print_message('check previous version')

    # If an environment already owns the target CNAME, remember it for the
    # CNAME swap and deploy the new one under a timestamped CNAME.
    cmd = ['elasticbeanstalk', 'describe-environments']
    cmd += ['--application-name', eb_application_name]
    result = aws_cli.run(cmd)
    for r in result['Environments']:
        if 'CNAME' not in r:
            continue
        if r['CNAME'] == '%s.%s.elasticbeanstalk.com' % (cname, aws_default_region):
            if r['Status'] == 'Terminated':
                continue
            elif r['Status'] != 'Ready':
                print('previous version is not ready.')
                raise Exception()
            eb_environment_name_old = r['EnvironmentName']
            cname += '-%s' % str_timestamp
            break

    ################################################################################
    print_message('create storage location')

    cmd = ['elasticbeanstalk', 'create-storage-location']
    result = aws_cli.run(cmd)
    s3_bucket = result['S3Bucket']
    s3_zip_filename = '/'.join(['s3://' + s3_bucket, eb_application_name, zip_filename])

    ################################################################################
    print_message('create application version')

    cmd = ['zip', '-r', zip_filename, '.', '.ebextensions']
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                     cwd=environment_path).communicate()

    cmd = ['s3', 'cp', zip_filename, s3_zip_filename]
    aws_cli.run(cmd, cwd=environment_path)

    cmd = ['rm', '-rf', zip_filename]
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                     cwd=environment_path).communicate()

    cmd = ['elasticbeanstalk', 'create-application-version']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--source-bundle', 'S3Bucket="%s",S3Key="%s/%s"' % (s3_bucket, eb_application_name, zip_filename)]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=environment_path)

    ################################################################################
    print_message('create environment %s' % name)

    option_settings = list()

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'EC2KeyName'
    oo['Value'] = key_pair_name
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'InstanceType'
    oo['Value'] = 't2.micro'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'IamInstanceProfile'
    oo['Value'] = 'aws-elasticbeanstalk-ec2-role'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'SecurityGroups'
    oo['Value'] = security_group_id
    option_settings.append(oo)

    # Private environments get no public IPs and an internal load balancer.
    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'AssociatePublicIpAddress'
    oo['Value'] = 'true'
    if 'private' == subnet_type:
        oo['Value'] = 'false'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'ELBScheme'
    oo['Value'] = '...'
    if 'private' == subnet_type:
        oo['Value'] = 'internal'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'ELBSubnets'
    oo['Value'] = ','.join([subnet_id_1, subnet_id_2])
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'Subnets'
    oo['Value'] = ','.join([subnet_id_1, subnet_id_2])
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'VPCId'
    oo['Value'] = eb_vpc_id
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:environment'
    oo['OptionName'] = 'EnvironmentType'
    oo['Value'] = 'LoadBalanced'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:environment'
    oo['OptionName'] = 'ServiceRole'
    oo['Value'] = 'aws-elasticbeanstalk-service-role'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:healthreporting:system'
    oo['OptionName'] = 'SystemType'
    oo['Value'] = 'enhanced'
    option_settings.append(oo)

    # Enhanced-health ConfigDocument: publish a small set of instance metrics
    # to CloudWatch every 60 seconds.
    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:healthreporting:system'
    oo['OptionName'] = 'ConfigDocument'
    cw_instance = dict()
    cw_instance['RootFilesystemUtil'] = 60
    cw_instance['InstanceHealth'] = 60
    cw_instance['CPUIdle'] = 60
    cw = dict()
    cw['Instance'] = cw_instance
    cfg_doc = dict()
    cfg_doc['CloudWatchMetrics'] = cw
    cfg_doc['Version'] = 1
    oo['Value'] = json.dumps(cfg_doc)
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:cloudwatch:logs'
    oo['OptionName'] = 'StreamLogs'
    oo['Value'] = 'true'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:cloudwatch:logs'
    oo['OptionName'] = 'DeleteOnTerminate'
    oo['Value'] = 'true'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:cloudwatch:logs'
    oo['OptionName'] = 'RetentionInDays'
    oo['Value'] = '3'
    option_settings.append(oo)

    option_settings = json.dumps(option_settings)

    # Tag the environment with the git revisions of johanna, the template,
    # and the cloned application.
    tag0 = 'Key=git_hash_johanna,Value=%s' % git_hash_johanna.decode('utf-8').strip()
    tag1 = 'Key=git_hash_%s,Value=%s' % (template_name, git_hash_template.decode('utf-8').strip())
    tag2 = 'Key=git_hash_%s,Value=%s' % (name, git_hash_app.decode('utf-8').strip())

    cmd = ['elasticbeanstalk', 'create-environment']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--cname-prefix', cname]
    cmd += ['--environment-name', eb_environment_name]
    cmd += ['--option-settings', option_settings]
    cmd += ['--solution-stack-name', '64bit Amazon Linux 2018.03 v2.7.1 running Python 3.6']
    cmd += ['--tags', tag0, tag1, tag2]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=environment_path)

    # Poll until the new environment is healthy; give up after 30 minutes.
    elapsed_time = 0
    while True:
        cmd = ['elasticbeanstalk', 'describe-environments']
        cmd += ['--application-name', eb_application_name]
        cmd += ['--environment-name', eb_environment_name]
        result = aws_cli.run(cmd)

        ee = result['Environments'][0]
        print(json.dumps(ee, sort_keys=True, indent=4))
        if ee.get('Health', '') == 'Green' and ee.get('Status', '') == 'Ready':
            break

        print('creating... (elapsed time: \'%d\' seconds)' % elapsed_time)
        time.sleep(5)
        elapsed_time += 5

        if elapsed_time > 60 * 30:
            raise Exception()

    subprocess.Popen(['rm', '-rf', './%s' % name], cwd=environment_path).communicate()

    ################################################################################
    print_message('revoke security group ingress')

    # Close the world-open SSH rule EB adds to the environment's group.
    cmd = ['ec2', 'describe-security-groups']
    cmd += ['--filters', 'Name=tag-key,Values=Name,Name=tag-value,Values=%s' % eb_environment_name]
    result = aws_cli.run(cmd)

    for ss in result['SecurityGroups']:
        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', ss['GroupId']]
        cmd += ['--protocol', 'tcp']
        cmd += ['--port', '22']
        cmd += ['--cidr', '0.0.0.0/0']
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    if private_ip is not None:
        print_message('attach network interface')

        # Move the fixed-IP ENI to the new instance: keep detaching it from
        # its old owner until it is free, then attach as device index 1.
        elapsed_time = 0
        while True:
            cmd = ['ec2', 'describe-network-interfaces']
            cmd += ['--filters', 'Name=private-ip-address,Values=%s' % private_ip]
            result = aws_cli.run(cmd)
            network_interface_id = result['NetworkInterfaces'][0]['NetworkInterfaceId']

            if 'Attachment' not in result['NetworkInterfaces'][0]:
                cmd = ['ec2', 'describe-instances']
                cmd += ['--filters', 'Name=tag-key,Values=Name,Name=tag-value,Values=%s' % eb_environment_name]
                result = aws_cli.run(cmd)
                instance_id = result['Reservations'][0]['Instances'][0]['InstanceId']

                cmd = ['ec2', 'attach-network-interface']
                cmd += ['--network-interface-id', network_interface_id]
                cmd += ['--instance-id', instance_id]
                cmd += ['--device-index', '1']
                aws_cli.run(cmd)
                break

            attachment_id = result['NetworkInterfaces'][0]['Attachment']['AttachmentId']
            cmd = ['ec2', 'detach-network-interface']
            cmd += ['--attachment-id', attachment_id]
            aws_cli.run(cmd, ignore_error=True)

            print('detaching network interface... (elapsed time: \'%d\' seconds)' % elapsed_time)
            time.sleep(5)
            elapsed_time += 5

    ################################################################################
    print_message('swap CNAME if the previous version exists')

    if eb_environment_name_old:
        cmd = ['elasticbeanstalk', 'swap-environment-cnames']
        cmd += ['--source-environment-name', eb_environment_name_old]
        cmd += ['--destination-environment-name', eb_environment_name]
        aws_cli.run(cmd)
def run_create_cw_dashboard_elasticbeanstalk(name, settings):
    """Create or update a CloudWatch dashboard for all EB environments whose
    name starts with *name*.

    Loads a widget-template JSON from the project template directory, expands
    each widget's first metric row into one row per matching instance / ASG /
    ELB / environment, then pushes the result via 'cloudwatch put-dashboard'.
    """
    dashboard_region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(dashboard_region)

    print_message('get elasticbeanstalk environment info: %s' % name)

    cmd = ['elasticbeanstalk', 'describe-environments']
    cmd += ['--no-include-deleted']
    result = aws_cli.run(cmd)

    # All live environments belonging to this app (timestamp-suffixed names
    # share the same prefix).
    env_list = list()
    for ee in result['Environments']:
        ename = ee['EnvironmentName']
        if ename.startswith(name):
            env_list.append(ee)

    # Collect the concrete resources of each environment; every entry keeps
    # its EnvironmentName so metric templates can substitute both values.
    env_instances_list = list()
    env_asg_list = list()
    env_elb_list = list()
    for ee in env_list:
        cmd = ['elasticbeanstalk', 'describe-environment-resources']
        cmd += ['--environment-id', ee['EnvironmentId']]
        result = aws_cli.run(cmd)
        ee_res = result['EnvironmentResources']
        for instance in ee_res['Instances']:
            ii = dict()
            ii['Id'] = instance['Id']
            ii['EnvironmentName'] = ee_res['EnvironmentName']
            env_instances_list.append(ii)
        for asg in ee_res['AutoScalingGroups']:
            ii = dict()
            ii['Name'] = asg['Name']
            ii['EnvironmentName'] = ee_res['EnvironmentName']
            env_asg_list.append(ii)
        for elb in ee_res['LoadBalancers']:
            ii = dict()
            ii['Name'] = elb['Name']
            ii['EnvironmentName'] = ee_res['EnvironmentName']
            env_elb_list.append(ii)

    ################################################################################
    dashboard_name = '%s_%s' % (name, dashboard_region)
    print_message('create or update cloudwatch dashboard: %s' % dashboard_name)

    template_name = env['template']['NAME']
    filename_path = 'template/%s/cloudwatch/%s.json' % (template_name, dashboard_name)
    with open(filename_path, 'r') as ff:
        dashboard_body = json.load(ff)

    for dw in dashboard_body['widgets']:
        # Non-metric widgets (e.g. text) are left untouched.
        if not dw['properties'].get('metrics'):
            continue
        pm = dw['properties']['metrics']

        # Classify the widget by which dimension name appears in its first
        # metric row; default is per-environment metrics.
        dimension_type = 'env'
        for dimension in pm[0]:
            if dimension == 'InstanceId':
                dimension_type = 'instance'
            elif dimension == 'AutoScalingGroupName':
                dimension_type = 'asg'
            elif dimension == 'LoadBalancerName':
                dimension_type = 'elb'

        # Use the first row as a textual template: placeholder tokens are
        # replaced per resource, then parsed back to JSON.
        template = json.dumps(pm[0])
        new_metrics_list = list()
        if dimension_type == 'asg':
            for ii in env_asg_list:
                new_metric = template.replace('AUTO_SCALING_GROUP_NAME', ii['Name'])
                new_metric = new_metric.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])
                new_metric = json.loads(new_metric)
                new_metrics_list.append(new_metric)
        elif dimension_type == 'instance':
            for ii in env_instances_list:
                new_metric = template.replace('INSTANCE_ID', ii['Id'])
                new_metric = new_metric.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])
                new_metric = json.loads(new_metric)
                new_metrics_list.append(new_metric)
        elif dimension_type == 'elb':
            for ii in env_elb_list:
                new_metric = template.replace('LOAD_BALANCER_NAME', ii['Name'])
                new_metric = new_metric.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])
                new_metric = json.loads(new_metric)
                new_metrics_list.append(new_metric)
        else:
            for ii in env_list:
                new_metric = template.replace('ENVIRONMENT_NAME', ii['EnvironmentName'])
                new_metric = json.loads(new_metric)
                new_metrics_list.append(new_metric)
        dw['properties']['metrics'] = new_metrics_list

    dashboard_body = json.dumps(dashboard_body)
    cmd = ['cloudwatch', 'put-dashboard']
    cmd += ['--dashboard-name', dashboard_name]
    cmd += ['--dashboard-body', dashboard_body]
    aws_cli.run(cmd)
print('#' * 80)

if __name__ == "__main__":
    from run_common import parse_args

    args = parse_args(True)

    # Usage: script needs at least one command argument.
    if len(args) < 2:
        print_usage()
        sys.exit(0)

    command = args[1]

    # 'aws' passes the remaining arguments straight through to the AWS CLI
    # wrapper and pretty-prints a dict result; any other result is printed
    # as-is.
    if command == 'aws':
        aws_cli = AWSCli()
        result = aws_cli.run(args[2:], ignore_error=True)
        if type(result) == dict:
            print(json.dumps(result, sort_keys=True, indent=4))
        else:
            print(result)
        sys.exit(0)

    # Every other command takes no extra arguments.
    if len(args) != 2:
        print_usage()
        sys.exit(0)

    # The command must be one of the registered commands
    # (command_list is defined elsewhere in this file).
    if command not in command_list:
        print_usage()
        sys.exit(0)
def run_create_lambda_default(name, settings):
    """Package and deploy a Lambda function from the project template.

    Installs requirements into the deploy folder, renders settings_local.py,
    zips everything, then either updates the existing function's code and
    tags, or creates a new function with the default role/handler/runtime.

    :param name: folder name under the template's lambda/ directory.
    :param settings: mapping with at least DESCRIPTION and NAME; all keys are
        also substituted into settings_local.py.
    """
    aws_cli = AWSCli()

    description = settings['DESCRIPTION']
    function_name = settings['NAME']
    phase = env['common']['PHASE']
    template_name = env['template']['NAME']

    template_path = 'template/%s' % template_name
    deploy_folder = '%s/lambda/%s' % (template_path, name)

    # Record the revisions of the johanna repo and the template repo so they
    # can be attached to the function as tags.
    git_rev = ['git', 'rev-parse', 'HEAD']
    git_hash_johanna = subprocess.Popen(git_rev, stdout=subprocess.PIPE).communicate()[0]
    git_hash_template = subprocess.Popen(git_rev, stdout=subprocess.PIPE, cwd=template_path).communicate()[0]

    ################################################################################
    print_session('packaging lambda: %s' % function_name)

    print_message('cleanup generated files')

    # Remove everything not tracked by git (previous zips, vendored deps).
    subprocess.Popen(['git', 'clean', '-d', '-f', '-x'], cwd=deploy_folder).communicate()

    requirements_path = '%s/requirements.txt' % deploy_folder
    if os.path.exists(requirements_path):
        print_message('install dependencies')
        # Vendor dependencies into the deploy folder so they ship in the zip.
        cmd = ['pip3', 'install', '-r', requirements_path, '-t', deploy_folder]
        subprocess.Popen(cmd).communicate()

    settings_path = '%s/settings_local_sample.py' % deploy_folder
    if os.path.exists(settings_path):
        print_message('create environment values')
        # Rewrite the sample's assignments with PHASE plus every settings key.
        lines = read_file(settings_path)
        option_list = list()
        option_list.append(['PHASE', phase])
        for key in settings:
            value = settings[key]
            option_list.append([key, value])
        for oo in option_list:
            lines = re_sub_lines(lines, '^(%s) .*' % oo[0], '\\1 = \'%s\'' % oo[1])
        write_file('%s/settings_local.py' % deploy_folder, lines)

    print_message('zip files')

    cmd = ['zip', '-r', 'deploy.zip', '.']
    subprocess.Popen(cmd, cwd=deploy_folder).communicate()

    print_message('create lambda function')

    role_arn = aws_cli.get_role_arn('aws-lambda-default-role')

    tags = list()
    # noinspection PyUnresolvedReferences
    tags.append('git_hash_johanna=%s' % git_hash_johanna.decode('utf-8').strip())
    # noinspection PyUnresolvedReferences
    tags.append('git_hash_%s=%s' % (template_name, git_hash_template.decode('utf-8').strip()))

    ################################################################################
    print_message('check previous version')

    # NOTE(review): list-functions is paginated; a function beyond the first
    # page would be missed here — TODO confirm AWSCli handles pagination.
    need_update = False
    cmd = ['lambda', 'list-functions']
    result = aws_cli.run(cmd)
    for ff in result['Functions']:
        if function_name == ff['FunctionName']:
            need_update = True
            break

    ################################################################################
    if need_update:
        print_session('update lambda: %s' % function_name)

        cmd = ['lambda', 'update-function-code',
               '--function-name', function_name,
               '--zip-file', 'fileb://deploy.zip']
        result = aws_cli.run(cmd, cwd=deploy_folder)
        function_arn = result['FunctionArn']

        print_message('update lambda tags')

        cmd = ['lambda', 'tag-resource',
               '--resource', function_arn,
               '--tags', ','.join(tags)]
        aws_cli.run(cmd, cwd=deploy_folder)
        return

    ################################################################################
    print_session('create lambda: %s' % function_name)

    cmd = ['lambda', 'create-function',
           '--function-name', function_name,
           '--description', description,
           '--zip-file', 'fileb://deploy.zip',
           '--role', role_arn,
           '--handler', 'lambda.handler',
           '--runtime', 'python3.6',
           '--tags', ','.join(tags),
           '--timeout', '120']
    aws_cli.run(cmd, cwd=deploy_folder)
def main(settings):
    """Bootstrap the whole network stack for one region.

    Creates, in order: the Elastic Beanstalk application (and key pair), the
    RDS VPC (private subnets, subnet group, route table, security group), the
    EB VPC (private/public subnets, internet gateway, NAT gateway, route
    tables, security groups), an optional ElastiCache subnet group, a VPC
    peering connection with routes in both directions, and any fixed private
    network interfaces requested by EB environments.

    Exits the process early (sys.exit(0)) if either VPC already exists —
    the function is a one-shot bootstrap, not idempotent.

    :param settings: region config with 'AWS_AVAILABILITY_ZONE_1'/'_2' and
        'AWS_DEFAULT_REGION'.
    """
    aws_availability_zone_1 = settings['AWS_AVAILABILITY_ZONE_1']
    aws_availability_zone_2 = settings['AWS_AVAILABILITY_ZONE_2']
    aws_cli = AWSCli(settings['AWS_DEFAULT_REGION'])
    rds_engine = env['rds']['ENGINE']
    rds_subnet_name = env['rds']['DB_SUBNET_NAME']
    service_name = env['common'].get('SERVICE_NAME', '')
    # All resource names are prefixed '<service>_' when a service name is set.
    name_prefix = '%s_' % service_name if service_name else ''
    cidr_vpc = aws_cli.cidr_vpc
    cidr_subnet = aws_cli.cidr_subnet

    print_message('get vpc id')

    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()
    if rds_vpc_id or eb_vpc_id:
        # Either VPC already present: refuse to re-run the bootstrap.
        print_message('VPC already exists')
        print('RDS: %s \n' % rds_vpc_id)
        print('EB: %s \n' % eb_vpc_id)
        print_session('finish python code')
        sys.exit(0)

    ################################################################################
    #
    # EB Application
    #
    ################################################################################
    print_session('create eb application')

    ################################################################################
    print_message('import key pair')

    cmd = ['ec2', 'import-key-pair']
    cmd += ['--key-name', env['common']['AWS_KEY_PAIR_NAME']]
    cmd += ['--public-key-material', env['common']['AWS_KEY_PAIR_MATERIAL']]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create application')

    eb_service_role_arn = aws_cli.get_iam_role('aws-elasticbeanstalk-service-role')['Role']['Arn']

    # Lifecycle config: keep at most 100 application versions, delete the
    # source bundle from S3 when a version is removed.
    config_format = '%s=%s'
    eb_max_count_rule = list()
    eb_max_count_rule.append(config_format % ('DeleteSourceFromS3', 'true'))
    eb_max_count_rule.append(config_format % ('Enabled', 'true'))
    eb_max_count_rule.append(config_format % ('MaxCount', 100))

    cmd = ['elasticbeanstalk', 'create-application']
    cmd += ['--application-name', env['elasticbeanstalk']['APPLICATION_NAME']]
    cmd += ['--resource-lifecycle-config',
            'ServiceRole=%s,VersionLifecycleConfig={MaxCountRule={%s}}' % (
                eb_service_role_arn, ','.join(eb_max_count_rule))]
    aws_cli.run(cmd)

    ################################################################################
    #
    # RDS
    #
    ################################################################################
    print_session('rds')

    ################################################################################
    print_message('create vpc')

    cmd = ['ec2', 'create-vpc']
    cmd += ['--cidr-block', cidr_vpc['rds']]
    result = aws_cli.run(cmd)
    rds_vpc_id = result['Vpc']['VpcId']
    aws_cli.set_name_tag(rds_vpc_id, '%srds' % name_prefix)

    ################################################################################
    print_message('create subnet')

    # RDS gets two private subnets, one per AZ (required by the subnet group).
    rds_subnet_id = dict()

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', rds_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['rds']['private_1']]
    cmd += ['--availability-zone', aws_availability_zone_1]
    result = aws_cli.run(cmd)
    rds_subnet_id['private_1'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(rds_subnet_id['private_1'], '%srds_private_1' % name_prefix)

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', rds_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['rds']['private_2']]
    cmd += ['--availability-zone', aws_availability_zone_2]
    result = aws_cli.run(cmd)
    rds_subnet_id['private_2'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(rds_subnet_id['private_2'], '%srds_private_2' % name_prefix)

    ################################################################################
    print_message('create db subnet group')

    cmd = ['rds', 'create-db-subnet-group']
    cmd += ['--db-subnet-group-name', rds_subnet_name]
    cmd += ['--db-subnet-group-description', rds_subnet_name]
    cmd += ['--subnet-ids', rds_subnet_id['private_1'], rds_subnet_id['private_2']]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create ' + 'route table')  # [FYI] PyCharm inspects 'create route table' as SQL query.

    rds_route_table_id = dict()

    cmd = ['ec2', 'create-route-table']
    cmd += ['--vpc-id', rds_vpc_id]
    result = aws_cli.run(cmd)
    rds_route_table_id['private'] = result['RouteTable']['RouteTableId']
    aws_cli.set_name_tag(rds_route_table_id['private'], '%srds_private' % name_prefix)

    ################################################################################
    print_message('associate route table')

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', rds_subnet_id['private_1']]
    cmd += ['--route-table-id', rds_route_table_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', rds_subnet_id['private_2']]
    cmd += ['--route-table-id', rds_route_table_id['private']]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create security group')

    rds_security_group_id = dict()

    cmd = ['ec2', 'create-security-group']
    cmd += ['--group-name', '%srds' % name_prefix]
    cmd += ['--description', '%srds' % name_prefix]
    cmd += ['--vpc-id', rds_vpc_id]
    result = aws_cli.run(cmd)
    rds_security_group_id['private'] = result['GroupId']

    ################################################################################
    print_message('authorize security group ingress')

    # Allow everything within the group itself.
    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', rds_security_group_id['private']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', rds_security_group_id['private']]
    aws_cli.run(cmd)

    # Allow the DB port (engine-dependent) from the whole EB VPC CIDR.
    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', rds_security_group_id['private']]
    cmd += ['--protocol', 'tcp']
    if rds_engine == 'aurora-postgresql':
        cmd += ['--port', '5432']
    else:
        cmd += ['--port', '3306']
    cmd += ['--cidr', cidr_vpc['eb']]
    aws_cli.run(cmd)

    ################################################################################
    #
    # EB
    #
    ################################################################################
    print_session('eb')

    ################################################################################
    print_message('create vpc')

    cmd = ['ec2', 'create-vpc']
    cmd += ['--cidr-block', cidr_vpc['eb']]
    result = aws_cli.run(cmd)
    eb_vpc_id = result['Vpc']['VpcId']
    aws_cli.set_name_tag(eb_vpc_id, '%seb' % name_prefix)

    ################################################################################
    print_message('create subnet')

    # EB gets two private and two public subnets, spread across both AZs.
    eb_subnet_id = dict()

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', eb_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['eb']['private_1']]
    cmd += ['--availability-zone', aws_availability_zone_1]
    result = aws_cli.run(cmd)
    eb_subnet_id['private_1'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(eb_subnet_id['private_1'], '%seb_private_1' % name_prefix)

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', eb_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['eb']['private_2']]
    cmd += ['--availability-zone', aws_availability_zone_2]
    result = aws_cli.run(cmd)
    eb_subnet_id['private_2'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(eb_subnet_id['private_2'], '%seb_private_2' % name_prefix)

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', eb_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['eb']['public_1']]
    cmd += ['--availability-zone', aws_availability_zone_1]
    result = aws_cli.run(cmd)
    eb_subnet_id['public_1'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(eb_subnet_id['public_1'], '%seb_public_1' % name_prefix)

    cmd = ['ec2', 'create-subnet']
    cmd += ['--vpc-id', eb_vpc_id]
    cmd += ['--cidr-block', cidr_subnet['eb']['public_2']]
    cmd += ['--availability-zone', aws_availability_zone_2]
    result = aws_cli.run(cmd)
    eb_subnet_id['public_2'] = result['Subnet']['SubnetId']
    aws_cli.set_name_tag(eb_subnet_id['public_2'], '%seb_public_2' % name_prefix)

    ################################################################################
    print_message('create internet gateway')

    cmd = ['ec2', 'create-internet-gateway']
    result = aws_cli.run(cmd)
    internet_gateway_id = result['InternetGateway']['InternetGatewayId']
    aws_cli.set_name_tag(internet_gateway_id, '%seb' % name_prefix)

    ################################################################################
    print_message('attach internet gateway')

    cmd = ['ec2', 'attach-internet-gateway']
    cmd += ['--internet-gateway-id', internet_gateway_id]
    cmd += ['--vpc-id', eb_vpc_id]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create eip')
    # We use only one NAT gateway at subnet 'public_1'

    cmd = ['ec2', 'allocate-address']
    cmd += ['--domain', 'vpc']
    result = aws_cli.run(cmd)
    eb_eip_id = result['AllocationId']
    aws_cli.set_name_tag(eb_eip_id, '%snat' % name_prefix)

    ################################################################################
    print_message('create nat gateway')
    # We use only one NAT gateway at subnet 'public_1'

    cmd = ['ec2', 'create-nat-gateway']
    cmd += ['--subnet-id', eb_subnet_id['public_1']]
    cmd += ['--allocation-id', eb_eip_id]
    result = aws_cli.run(cmd)
    eb_nat_gateway_id = result['NatGateway']['NatGatewayId']
    aws_cli.set_name_tag(eb_nat_gateway_id, '%seb' % name_prefix)

    ################################################################################
    # The NAT gateway must be available before routes through it are created.
    print_message('wait create nat gateway')

    aws_cli.wait_create_nat_gateway(eb_vpc_id)

    ################################################################################
    print_message('create ' + 'route table')  # [FYI] PyCharm inspects 'create route table' as SQL query.

    eb_route_table_id = dict()

    cmd = ['ec2', 'create-route-table']
    cmd += ['--vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    eb_route_table_id['private'] = result['RouteTable']['RouteTableId']
    aws_cli.set_name_tag(eb_route_table_id['private'], '%seb_private' % name_prefix)

    cmd = ['ec2', 'create-route-table']
    cmd += ['--vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    eb_route_table_id['public'] = result['RouteTable']['RouteTableId']
    aws_cli.set_name_tag(eb_route_table_id['public'], '%seb_public' % name_prefix)

    ################################################################################
    print_message('associate route table')

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', eb_subnet_id['private_1']]
    cmd += ['--route-table-id', eb_route_table_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', eb_subnet_id['private_2']]
    cmd += ['--route-table-id', eb_route_table_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', eb_subnet_id['public_1']]
    cmd += ['--route-table-id', eb_route_table_id['public']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'associate-route-table']
    cmd += ['--subnet-id', eb_subnet_id['public_2']]
    cmd += ['--route-table-id', eb_route_table_id['public']]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create route')

    # Public subnets default-route to the internet gateway; private subnets
    # default-route to the NAT gateway.
    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['public']]
    cmd += ['--destination-cidr-block', '0.0.0.0/0']
    cmd += ['--gateway-id', internet_gateway_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['private']]
    cmd += ['--destination-cidr-block', '0.0.0.0/0']
    cmd += ['--nat-gateway-id', eb_nat_gateway_id]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create security group')

    eb_security_group_id = dict()

    cmd = ['ec2', 'create-security-group']
    cmd += ['--group-name', '%seb_private' % name_prefix]
    cmd += ['--description', '%seb_private' % name_prefix]
    cmd += ['--vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    eb_security_group_id['private'] = result['GroupId']

    cmd = ['ec2', 'create-security-group']
    cmd += ['--group-name', '%seb_public' % name_prefix]
    cmd += ['--description', '%seb_public' % name_prefix]
    cmd += ['--vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    eb_security_group_id['public'] = result['GroupId']

    ################################################################################
    print_message('authorize security group ingress')

    # All four private/public group pairings allow all traffic to each other.
    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['private']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', eb_security_group_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['private']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', eb_security_group_id['public']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['public']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', eb_security_group_id['private']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['public']]
    cmd += ['--protocol', 'all']
    cmd += ['--source-group', eb_security_group_id['public']]
    aws_cli.run(cmd)

    # SSH only from within the EB VPC; HTTP open to the world.
    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['public']]
    cmd += ['--protocol', 'tcp']
    cmd += ['--port', '22']
    cmd += ['--cidr', cidr_vpc['eb']]
    aws_cli.run(cmd)

    cmd = ['ec2', 'authorize-security-group-ingress']
    cmd += ['--group-id', eb_security_group_id['public']]
    cmd += ['--protocol', 'tcp']
    cmd += ['--port', '80']
    cmd += ['--cidr', '0.0.0.0/0']
    aws_cli.run(cmd)

    ################################################################################
    #
    # ElastiCache
    #
    ################################################################################
    print_session('elasticache')

    ################################################################################
    if env.get('elasticache'):
        elasticache_subnet_name = env['elasticache']['CACHE_SUBNET_NAME']
        print_message('create cache subnet group')
        # Cache lives in the EB VPC's private subnets (not a separate VPC).
        cmd = ['elasticache', 'create-cache-subnet-group']
        cmd += ['--cache-subnet-group-name', elasticache_subnet_name]
        cmd += ['--cache-subnet-group-description', elasticache_subnet_name]
        cmd += ['--subnet-ids', eb_subnet_id['private_1'], eb_subnet_id['private_2']]
        aws_cli.run(cmd)

    ################################################################################
    #
    # vpc peering connection
    #
    ################################################################################
    print_session('vpc peering connection')

    ################################################################################
    print_message('create vpc peering connection')

    cmd = ['ec2', 'create-vpc-peering-connection']
    cmd += ['--vpc-id', rds_vpc_id]
    cmd += ['--peer-vpc-id', eb_vpc_id]
    result = aws_cli.run(cmd)
    peering_connection_id = result['VpcPeeringConnection']['VpcPeeringConnectionId']
    aws_cli.set_name_tag(peering_connection_id, '%s' % service_name)

    # Both VPCs are in the same account, so we can accept immediately.
    cmd = ['ec2', 'accept-vpc-peering-connection']
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create route: rds -> eb')

    # RDS private route table reaches every EB subnet through the peering.
    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', rds_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['eb']['private_1']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', rds_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['eb']['private_2']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', rds_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['eb']['public_1']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', rds_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['eb']['public_2']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    ################################################################################
    print_message('create route: eb -> rds')

    # Both EB route tables reach the two RDS private subnets.
    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['rds']['private_1']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['private']]
    cmd += ['--destination-cidr-block', cidr_subnet['rds']['private_2']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['public']]
    cmd += ['--destination-cidr-block', cidr_subnet['rds']['private_1']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    cmd = ['ec2', 'create-route']
    cmd += ['--route-table-id', eb_route_table_id['public']]
    cmd += ['--destination-cidr-block', cidr_subnet['rds']['private_2']]
    cmd += ['--vpc-peering-connection-id', peering_connection_id]
    aws_cli.run(cmd)

    ################################################################################
    #
    # Network Interface
    #
    ################################################################################
    print_session('network interface')

    ################################################################################
    # Pre-create a fixed-IP ENI for each environment that requests one, so the
    # environment can keep a stable private address.
    environment_list = env['elasticbeanstalk']['ENVIRONMENTS']
    for environment in environment_list:
        cname = environment['CNAME']
        private_ip = environment.get('PRIVATE_IP')

        if cname and private_ip:
            print_message('create network interface for %s' % cname)

            cmd = ['ec2', 'create-network-interface']
            cmd += ['--subnet-id', eb_subnet_id['private_1']]
            cmd += ['--description', cname]
            cmd += ['--private-ip-address', private_ip]
            cmd += ['--groups', eb_security_group_id['private']]
            result = aws_cli.run(cmd)
            network_interface_id = result['NetworkInterface']['NetworkInterfaceId']
            # NOTE(review): the ENI gets the same '<prefix>nat' name tag as the
            # NAT EIP above — presumably intentional, but confirm it should not
            # be tagged with the environment's cname instead.
            aws_cli.set_name_tag(network_interface_id, '%snat' % name_prefix)
def run_create_cloudwatch_alarm_elasticbeanstalk(name, settings):
    """Create or update a CloudWatch alarm targeting EB environment resources.

    Discovers the environments belonging to *name* in the alarm's region,
    collects their instances / auto scaling groups / load balancers, builds
    the alarm dimensions according to settings['DIMENSIONS'], and issues a
    single put-metric-alarm (which creates or overwrites the alarm).

    :param name: environment base name; environments whose CNAME contains
        '<name>.' or '<name>2.' and whose name starts with *name* are matched.
    :param settings: alarm config (region, SNS topic, metric, thresholds, ...).
    :raises Exception: if the configured SNS topic does not exist in the region.
    """
    phase = env['common']['PHASE']
    alarm_region = settings['AWS_DEFAULT_REGION']
    aws_cli = AWSCli(alarm_region)

    print_message('get elasticbeanstalk environment info: %s' % name)

    cmd = ['elasticbeanstalk', 'describe-environments']
    cmd += ['--no-include-deleted']
    result = aws_cli.run(cmd)

    env_list = list()
    for ee in result['Environments']:
        cname = ee['CNAME']
        # Only environments in this region whose CNAME matches the blue/green
        # pair '<name>.' / '<name>2.' are candidates.
        if not cname.endswith('%s.elasticbeanstalk.com' % alarm_region):
            continue
        if '%s.' % name not in cname and '%s2.' % name not in cname:
            continue
        ename = ee['EnvironmentName']
        if ename.startswith(name):
            env_list.append(ee)

    # Collect the concrete resources backing each matched environment.
    env_instances_list = list()
    env_asg_list = list()
    env_elb_list = list()
    for ee in env_list:
        cmd = ['elasticbeanstalk', 'describe-environment-resources']
        cmd += ['--environment-id', ee['EnvironmentId']]
        result = aws_cli.run(cmd)
        ee_res = result['EnvironmentResources']
        for instance in ee_res['Instances']:
            ii = dict()
            ii['Id'] = instance['Id']
            ii['EnvironmentName'] = ee_res['EnvironmentName']
            env_instances_list.append(ii)
        for asg in ee_res['AutoScalingGroups']:
            env_asg_list.append(asg)
        for elb in ee_res['LoadBalancers']:
            env_elb_list.append(elb)

    ################################################################################
    alarm_name = '%s-%s_%s_%s' % (phase, name, alarm_region, settings['METRIC_NAME'])
    print_message('create or update cloudwatch alarm: %s' % alarm_name)

    topic_arn = aws_cli.get_topic_arn(settings['SNS_TOPIC_NAME'])
    if not topic_arn:
        print('sns topic: "%s" is not exists in %s' % (settings['SNS_TOPIC_NAME'], alarm_region))
        raise Exception()

    # Build the dimension list for the configured dimension type:
    #  - InstanceId: one dimension per instance
    #  - EnvironmentName: a single dimension (break after the first instance)
    #  - AutoScalingGroupName / LoadBalancerName: one per matching resource
    dimension_list = list()
    for ei in env_instances_list:
        if settings['DIMENSIONS'] == 'InstanceId':
            dimension = 'Name=InstanceId,Value=%s' % ei['Id']
            dimension_list.append(dimension)
        if settings['DIMENSIONS'] == 'EnvironmentName':
            dimension = 'Name=EnvironmentName,Value=%s' % ei['EnvironmentName']
            dimension_list.append(dimension)
            break
    for ei in env_asg_list:
        if settings['DIMENSIONS'] == 'AutoScalingGroupName':
            dimension = 'Name=AutoScalingGroupName,Value=%s' % ei['Name']
            dimension_list.append(dimension)
    for ei in env_elb_list:
        if settings['DIMENSIONS'] == 'LoadBalancerName':
            dimension = 'Name=LoadBalancerName,Value=%s' % ei['Name']
            dimension_list.append(dimension)

    cmd = ['cloudwatch', 'put-metric-alarm']
    cmd += ['--alarm-actions', topic_arn]
    cmd += ['--alarm-description', settings['DESCRIPTION']]
    cmd += ['--alarm-name', alarm_name]
    cmd += ['--comparison-operator', settings['COMPARISON_OPERATOR']]
    cmd += ['--datapoints-to-alarm', settings['DATAPOINTS_TO_ALARM']]
    cmd += ['--dimensions', ' '.join(dimension_list)]
    cmd += ['--evaluation-periods', settings['EVALUATION_PERIODS']]
    cmd += ['--metric-name', settings['METRIC_NAME']]
    cmd += ['--namespace', settings['NAMESPACE']]
    cmd += ['--period', settings['PERIOD']]
    cmd += ['--statistic', settings['STATISTIC']]
    cmd += ['--threshold', settings['THRESHOLD']]
    aws_cli.run(cmd)
import datetime import json import time from env import env from run_common import AWSCli from run_common import check_template_availability from run_common import print_message from run_common import print_session if __name__ == "__main__": from run_common import parse_args parse_args() aws_cli = AWSCli() def create_iam_for_rds(): sleep_required = False role_name = 'rds-monitoring-role' if not aws_cli.get_iam_role(role_name): print_message('create iam role') cc = ['iam', 'create-role'] cc += ['--role-name', role_name] cc += ['--assume-role-policy-document', 'file://aws_iam/rds-monitoring-role.json'] aws_cli.run(cc) cc = ['iam', 'attach-role-policy']
def run_create_eb_spring(name, settings):
    """Build and deploy a Spring (WAR/Tomcat) Elastic Beanstalk environment.

    Pipeline: clone the app repo, render its EB/logstash/properties config,
    build the WAR with Maven, upload it to the EB storage bucket, create a new
    timestamped environment, wait until it is Green/Ready, tighten its
    security groups, and finally swap CNAMEs with the previous environment if
    one existed (blue/green deploy).

    :param name: application/template name (also the template sub-folder).
    :param settings: per-service config (CNAME, GIT_URL, ASG sizes, subnet
        type, properties file, ...).
    :raises Exception: on missing VPC, unknown subnet type, missing template
        checkout, a not-Ready previous version, or a 30-minute launch timeout.
    """
    aws_cli = AWSCli()

    aws_asg_max_value = settings['AWS_ASG_MAX_VALUE']
    aws_asg_min_value = settings['AWS_ASG_MIN_VALUE']
    aws_default_region = env['aws']['AWS_DEFAULT_REGION']
    cname = settings['CNAME']
    db_conn_str_suffix = settings.get('DB_CONNECTION_STR_SUFFIX', '')
    eb_application_name = env['elasticbeanstalk']['APPLICATION_NAME']
    git_url = settings['GIT_URL']
    instance_type = settings.get('INSTANCE_TYPE', 't2.medium')
    key_pair_name = env['common']['AWS_KEY_PAIR_NAME']
    phase = env['common']['PHASE']
    service_name = env['common'].get('SERVICE_NAME', '')
    subnet_type = settings['SUBNET_TYPE']
    name_prefix = '%s_' % service_name if service_name else ''
    cidr_subnet = aws_cli.cidr_subnet

    # The timestamp makes environment / artifact names unique per deploy.
    str_timestamp = str(int(time.time()))
    war_filename = '%s-%s.war' % (name, str_timestamp)
    eb_environment_name = '%s-%s' % (name, str_timestamp)
    eb_environment_name_old = None

    template_folder = 'template/%s' % name
    target_folder = 'template/%s/target' % name
    ebextensions_folder = 'template/%s/_provisioning/.ebextensions' % name
    configuration_folder = 'template/%s/_provisioning/configuration' % name
    properties_file = 'template/%s/%s' % (name, settings['PROPERTIES_FILE'])

    git_rev = ['git', 'rev-parse', 'HEAD']
    git_hash_johanna = subprocess.Popen(git_rev, stdout=subprocess.PIPE).communicate()[0]

    ################################################################################
    print_session('create %s' % name)

    ################################################################################
    print_message('get vpc id')

    rds_vpc_id, eb_vpc_id = aws_cli.get_vpc_id()
    if not eb_vpc_id:
        print('ERROR!!! No VPC found')
        raise Exception()

    ################################################################################
    print_message('get subnet id')

    # Resolve subnet ids by CIDR. 'public': ELB in public subnets, EC2 in
    # private; 'private': both in the private subnets.
    elb_subnet_id_1 = None
    elb_subnet_id_2 = None
    ec2_subnet_id_1 = None
    ec2_subnet_id_2 = None
    cmd = ['ec2', 'describe-subnets']
    result = aws_cli.run(cmd)
    for r in result['Subnets']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['public_1']:
                elb_subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['public_2']:
                elb_subnet_id_2 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_1']:
                ec2_subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_2']:
                ec2_subnet_id_2 = r['SubnetId']
        elif 'private' == subnet_type:
            if r['CidrBlock'] == cidr_subnet['eb']['private_1']:
                elb_subnet_id_1 = ec2_subnet_id_1 = r['SubnetId']
            if r['CidrBlock'] == cidr_subnet['eb']['private_2']:
                elb_subnet_id_2 = ec2_subnet_id_2 = r['SubnetId']
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('get security group id')

    # NOTE(review): both branches select the '<prefix>eb_private' group — the
    # instances always run in private subnets, so this appears intentional.
    security_group_id = None
    cmd = ['ec2', 'describe-security-groups']
    result = aws_cli.run(cmd)
    for r in result['SecurityGroups']:
        if r['VpcId'] != eb_vpc_id:
            continue
        if 'public' == subnet_type:
            if r['GroupName'] == '%seb_private' % name_prefix:
                security_group_id = r['GroupId']
                break
        elif 'private' == subnet_type:
            if r['GroupName'] == '%seb_private' % name_prefix:
                security_group_id = r['GroupId']
                break
        else:
            print('ERROR!!! Unknown subnet type:', subnet_type)
            raise Exception()

    ################################################################################
    print_message('get database address')

    db_address = aws_cli.get_rds_address()

    ################################################################################
    print_message('get cache address')

    cache_address = aws_cli.get_elasticache_address()

    ################################################################################
    print_message('git clone')

    # Fresh shallow clone of the app repo under template/<name>; branch is
    # GIT_BRANCH_APP if configured, otherwise the phase name ('dv' uses the
    # repo's default branch).
    subprocess.Popen(['mkdir', '-p', 'template']).communicate()
    subprocess.Popen(['rm', '-rf', '%s/' % name], cwd='template').communicate()

    branch = aws_cli.env.get('GIT_BRANCH_APP', phase)
    git_command = ['git', 'clone', '--depth=1']
    if branch != 'dv':
        git_command += ['-b', branch]
    git_command += [git_url]
    subprocess.Popen(git_command, cwd='template').communicate()
    if not os.path.exists('%s' % template_folder):
        raise Exception()

    git_hash_app = subprocess.Popen(git_rev, stdout=subprocess.PIPE, cwd=template_folder).communicate()[0]

    # Strip git metadata so it doesn't end up in the application bundle.
    subprocess.Popen(['rm', '-rf', '.git'], cwd=template_folder).communicate()
    subprocess.Popen(['rm', '-rf', '.gitignore'], cwd=template_folder).communicate()

    ################################################################################
    print_message('configuration %s' % name)

    with open('%s/phase' % configuration_folder, 'w') as f:
        f.write(phase)
        f.close()  # redundant: the with-block already closes the file

    lines = read_file('%s/etc/logstash/conf.d/logstash_sample.conf' % configuration_folder)
    write_file('%s/etc/logstash/conf.d/logstash.conf' % configuration_folder, lines)

    # Render the .ebextensions config with the ASG min/max values.
    lines = read_file('%s/%s.config.sample' % (ebextensions_folder, name))
    lines = re_sub_lines(lines, 'AWS_ASG_MAX_VALUE', aws_asg_max_value)
    lines = re_sub_lines(lines, 'AWS_ASG_MIN_VALUE', aws_asg_min_value)
    write_file('%s/%s.config' % (ebextensions_folder, name), lines)

    # Render the Spring properties file from its '-sample' sibling: every
    # 'key=...' line is rewritten for the JDBC/Redis endpoints plus all
    # settings entries.
    sample_file = properties_file.replace('.properties', '-sample.properties')
    lines = read_file(sample_file)
    option_list = list()
    option_list.append(['jdbc.url', 'jdbc:mysql://%s%s' % (db_address, db_conn_str_suffix)])
    option_list.append(['jdbc.username', env['rds']['USER_NAME']])
    option_list.append(['jdbc.password', env['rds']['USER_PASSWORD']])
    option_list.append(['redis.host', cache_address])
    for key in settings:
        value = settings[key]
        option_list.append([key, value])
    for oo in option_list:
        lines = re_sub_lines(lines, '^(%s)=.*' % oo[0], '\\1=%s' % oo[1])
    write_file(properties_file, lines)

    ################################################################################
    print_message('check previous version')

    # If a live (non-Terminated) environment already owns the CNAME, remember
    # it for the final swap and deploy under a timestamped CNAME instead.
    cmd = ['elasticbeanstalk', 'describe-environments']
    cmd += ['--application-name', eb_application_name]
    result = aws_cli.run(cmd)
    for r in result['Environments']:
        if 'CNAME' not in r:
            continue
        if r['CNAME'] == '%s.%s.elasticbeanstalk.com' % (cname, aws_default_region):
            if r['Status'] == 'Terminated':
                continue
            elif r['Status'] != 'Ready':
                print('previous version is not ready.')
                raise Exception()
            eb_environment_name_old = r['EnvironmentName']
            cname += '-%s' % str_timestamp
            break

    ################################################################################
    print_message('build artifact')

    # Non-'dv' phases run the extra 'exec:exec' goal before packaging.
    build_command = ['mvn']
    if phase != 'dv':
        build_command += ['exec:exec']
    build_command += ['package']
    print_message('build %s: %s' % (name, ' '.join(build_command)))
    subprocess.Popen(build_command, cwd=template_folder).communicate()

    ################################################################################
    print_message('create storage location')

    cmd = ['elasticbeanstalk', 'create-storage-location']
    result = aws_cli.run(cmd)
    s3_bucket = result['S3Bucket']
    s3_war_filename = '/'.join(['s3://' + s3_bucket, eb_application_name, war_filename])

    ################################################################################
    print_message('create application version')

    # Rename the Maven output (ROOT.war) to the timestamped name and upload.
    cmd = ['mv', 'ROOT.war', war_filename]
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=target_folder).communicate()

    cmd = ['s3', 'cp', war_filename, s3_war_filename]
    aws_cli.run(cmd, cwd=target_folder)

    cmd = ['rm', '-rf', war_filename]
    subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=target_folder).communicate()

    cmd = ['elasticbeanstalk', 'create-application-version']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--source-bundle', 'S3Bucket="%s",S3Key="%s/%s"' % (s3_bucket, eb_application_name, war_filename)]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=template_folder)

    ################################################################################
    print_message('create environment %s' % name)

    option_settings = list()

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'EC2KeyName'
    oo['Value'] = key_pair_name
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'InstanceType'
    oo['Value'] = instance_type
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'IamInstanceProfile'
    oo['Value'] = 'aws-elasticbeanstalk-ec2-role'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:autoscaling:launchconfiguration'
    oo['OptionName'] = 'SecurityGroups'
    oo['Value'] = security_group_id
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'AssociatePublicIpAddress'
    oo['Value'] = 'false'
    option_settings.append(oo)

    # Internal load balancer for private-only services.
    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'ELBScheme'
    oo['Value'] = 'public'
    if 'private' == subnet_type:
        oo['Value'] = 'internal'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'ELBSubnets'
    oo['Value'] = ','.join([elb_subnet_id_1, elb_subnet_id_2])
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'Subnets'
    oo['Value'] = ','.join([ec2_subnet_id_1, ec2_subnet_id_2])
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:ec2:vpc'
    oo['OptionName'] = 'VPCId'
    oo['Value'] = eb_vpc_id
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:environment'
    oo['OptionName'] = 'EnvironmentType'
    oo['Value'] = 'LoadBalanced'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:environment'
    oo['OptionName'] = 'ServiceRole'
    oo['Value'] = 'aws-elasticbeanstalk-service-role'
    option_settings.append(oo)

    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:healthreporting:system'
    oo['OptionName'] = 'SystemType'
    oo['Value'] = 'enhanced'
    option_settings.append(oo)

    # Enhanced-health ConfigDocument: publish the listed request/instance
    # metrics to CloudWatch at 60-second granularity.
    oo = dict()
    oo['Namespace'] = 'aws:elasticbeanstalk:healthreporting:system'
    oo['OptionName'] = 'ConfigDocument'
    cw_env = dict()
    cw_env['ApplicationRequestsTotal'] = 60
    cw_env['ApplicationRequests2xx'] = 60
    cw_env['ApplicationRequests3xx'] = 60
    cw_env['ApplicationRequests4xx'] = 60
    cw_env['ApplicationRequests5xx'] = 60
    cw_instance = dict()
    cw_instance['RootFilesystemUtil'] = 60
    cw_instance['InstanceHealth'] = 60
    cw_instance['CPUIdle'] = 60
    cw = dict()
    cw['Environment'] = cw_env
    cw['Instance'] = cw_instance
    cfg_doc = dict()
    cfg_doc['CloudWatchMetrics'] = cw
    cfg_doc['Version'] = 1
    oo['Value'] = json.dumps(cfg_doc)
    option_settings.append(oo)

    option_settings = json.dumps(option_settings)

    tag0 = 'Key=git_hash_johanna,Value=%s' % git_hash_johanna.decode('utf-8').strip()
    tag2 = 'Key=git_hash_%s,Value=%s' % (name, git_hash_app.decode('utf-8').strip())

    cmd = ['elasticbeanstalk', 'create-environment']
    cmd += ['--application-name', eb_application_name]
    cmd += ['--cname-prefix', cname]
    cmd += ['--environment-name', eb_environment_name]
    cmd += ['--option-settings', option_settings]
    cmd += ['--solution-stack-name', '64bit Amazon Linux 2018.03 v3.0.1 running Tomcat 8.5 Java 8']
    cmd += ['--tags', tag0, tag2]
    cmd += ['--version-label', eb_environment_name]
    aws_cli.run(cmd, cwd=template_folder)

    # Poll every 5s until the environment is Green/Ready; abort after 30 min.
    elapsed_time = 0
    while True:
        cmd = ['elasticbeanstalk', 'describe-environments']
        cmd += ['--application-name', eb_application_name]
        cmd += ['--environment-name', eb_environment_name]
        result = aws_cli.run(cmd)

        ee = result['Environments'][0]
        print(json.dumps(ee, sort_keys=True, indent=4))
        if ee.get('Health', '') == 'Green' and ee.get('Status', '') == 'Ready':
            break

        print('creating... (elapsed time: \'%d\' seconds)' % elapsed_time)
        time.sleep(5)
        elapsed_time += 5

        if elapsed_time > 60 * 30:
            raise Exception()

    subprocess.Popen(['rm', '-rf', '%s/' % name], cwd='template').communicate()

    ################################################################################
    print_message('revoke security group ingress')

    # EB opens SSH (22) to the world on the environment's generated SGs;
    # close it. Failures are ignored (the rule may not exist).
    cmd = ['ec2', 'describe-security-groups']
    cmd += ['--filters', 'Name=tag-key,Values=Name,Name=tag-value,Values=%s' % eb_environment_name]
    result = aws_cli.run(cmd)

    for ss in result['SecurityGroups']:
        cmd = ['ec2', 'revoke-security-group-ingress']
        cmd += ['--group-id', ss['GroupId']]
        cmd += ['--protocol', 'tcp']
        cmd += ['--port', '22']
        cmd += ['--cidr', '0.0.0.0/0']
        aws_cli.run(cmd, ignore_error=True)

    ################################################################################
    print_message('swap CNAME if the previous version exists')

    if eb_environment_name_old:
        cmd = ['elasticbeanstalk', 'swap-environment-cnames']
        cmd += ['--source-environment-name', eb_environment_name_old]
        cmd += ['--destination-environment-name', eb_environment_name]
        aws_cli.run(cmd)
def create_iam():
    """Create the IAM entities Elastic Beanstalk environments rely on.

    Creates two roles (idempotency note: the underlying ``iam create-*``
    calls fail if the entities already exist; presumably ``AWSCli.run``
    surfaces that error — TODO confirm behavior on re-run):

    * ``aws-elasticbeanstalk-ec2-role`` — instance profile + role, with the
      three AWS-managed Beanstalk tier policies attached and one inline
      policy (``aws-elasticbeanstalk-ec2-policy``).
    * ``aws-elasticbeanstalk-service-role`` — role with the enhanced-health
      and service managed policies attached.

    Assume-role and inline policy documents are read from the local
    ``aws_iam/`` directory (relative to the current working directory).
    """
    ################################################################################
    #
    # IAM
    #
    ################################################################################
    print_session('create iam')

    aws_cli = AWSCli()

    ################################################################################
    print_message('create iam: aws-elasticbeanstalk-ec2-role')

    cmd = ['iam', 'create-instance-profile']
    cmd += ['--instance-profile-name', 'aws-elasticbeanstalk-ec2-role']
    aws_cli.run(cmd)

    cmd = ['iam', 'create-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--assume-role-policy-document', 'file://aws_iam/aws-elasticbeanstalk-ec2-role.json']
    aws_cli.run(cmd)

    cmd = ['iam', 'add-role-to-instance-profile']
    cmd += ['--instance-profile-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    aws_cli.run(cmd)

    # Attach the three managed tier policies in a single data-driven loop
    # (previously three identical copy-pasted command blocks).
    ec2_managed_policy_arns = (
        'arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier',
        'arn:aws:iam::aws:policy/AWSElasticBeanstalkMulticontainerDocker',
        'arn:aws:iam::aws:policy/AWSElasticBeanstalkWorkerTier',
    )
    for policy_arn in ec2_managed_policy_arns:
        cmd = ['iam', 'attach-role-policy']
        cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
        cmd += ['--policy-arn', policy_arn]
        aws_cli.run(cmd)

    # Inline policy supplementing the managed ones above.
    cmd = ['iam', 'put-role-policy']
    cmd += ['--role-name', 'aws-elasticbeanstalk-ec2-role']
    cmd += ['--policy-name', 'aws-elasticbeanstalk-ec2-policy']
    cmd += ['--policy-document', 'file://aws_iam/aws-elasticbeanstalk-ec2-policy.json']
    aws_cli.run(cmd)

    ################################################################################
    print_message('create iam: aws-elasticbeanstalk-service-role')

    cmd = ['iam', 'create-role']
    cmd += ['--role-name', 'aws-elasticbeanstalk-service-role']
    cmd += ['--assume-role-policy-document', 'file://aws_iam/aws-elasticbeanstalk-service-role.json']
    aws_cli.run(cmd)

    service_managed_policy_arns = (
        'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth',
        'arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService',
    )
    for policy_arn in service_managed_policy_arns:
        cmd = ['iam', 'attach-role-policy']
        cmd += ['--role-name', 'aws-elasticbeanstalk-service-role']
        cmd += ['--policy-arn', policy_arn]
        aws_cli.run(cmd)
#!/usr/bin/env python3 import datetime import os.path import subprocess import sys from env import env from run_common import AWSCli from run_common import check_template_availability from run_common import print_message from run_common import print_session aws_cli = AWSCli() print_session('alter database') check_template_availability() engine = env['rds']['ENGINE'] if engine not in ('mysql', 'aurora'): print('not supported:', engine) raise Exception() print_message('get database address') if env['common']['PHASE'] != 'dv': db_host = aws_cli.get_rds_address() else: while True: answer = input('Do you use a database of Vagrant VM? (yes/no): ') if answer.lower() == 'no':