forked from stelligent/nando_automation_demo
-
Notifications
You must be signed in to change notification settings - Fork 0
/
go.py
executable file
·726 lines (654 loc) · 27.8 KB
/
go.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
#!/usr/bin/env python
import argparse
import hashlib
import json
import os
import re
import socket
import sys
import time
import zipfile
from datetime import datetime
from pprint import pprint
from time import sleep
from boto.cloudformation import connect_to_region as cfn_connect
from boto.codedeploy import connect_to_region as codedeploy_connect
from boto.ec2 import connect_to_region as ec2_connect
from boto.exception import BotoServerError, EC2ResponseError, S3ResponseError
from boto.iam import connect_to_region as iam_connect
from boto.s3 import connect_to_region as s3_connect
from boto.s3.key import Key as S3Key
# OPTIONAL: Provide prebaked AMIs with python2.7, jenkins, and puppet.
# See configSets "cfg-packages" and "cfg-sys-commands" for reference.
CUSTOM_AMI_MAP = {
    'us-east-1': 'ami-f798779c',
    'us-west-2': 'ami-bf9ea08f'
}
# Per-stack metadata: stack-name prefix, CloudFormation template path, and a
# short type tag used for display and dispatch.  'main' also carries the EC2
# key-pair prefix; 'rds' carries the database-name prefix.
STACK_DATA = {
    'main': {'key_prefix': 'stelligent-demo',
             'prefix': 'nando-demo',
             'template': 'cloudformation-py.json',
             'type': 'MAIN'},
    's3': {'prefix': 'stelligent-demo-s3',
           'template': 'cloudformation/cloudformation.s3.json',
           'type': 'S3'},
    'vpc': {'prefix': 'stelligent-demo-vpc',
            'template': 'cloudformation/cloudformation.vpc.json',
            'type': 'VPC'},
    'sg': {'prefix': 'stelligent-demo-sg',
           'template': 'cloudformation/cloudformation.sg.json',
           'type': 'SG'},
    'rds': {'db_prefix': 'stelligent',
            'prefix': 'stelligent-demo-rds',
            'template': 'cloudformation/cloudformation.rds.json',
            'type': 'RDS'},
    'eb': {'prefix': 'stelligent-demo-eb',
           'template': 'cloudformation/cloudformation.eb.json',
           'type': 'EB'},
    'ecs': {'prefix': 'stelligent-demo-ecs',
            'template': 'cloudformation/cloudformation.ecs.json',
            'type': 'ECS'}
}
DEFAULT_REGION = 'us-east-1'
# Route53 hosted zone the demo registers DNS names under.
ROUTE53_DOMAIN = 'elasticoperations.com'
MAIN_S3_BUCKET = 'nando-automation-demo'  # Permanent S3 Bucket
MAIN_S3_BUCKET_REGION = 'us-east-1'
# Zip of the docker/ build-context files, rebuilt by prepare_docker_zip().
DOCKER_ZIPFILE = 'nando-demo.zip'
DOCKER_FILES = ['Dockerfile', 'application.py', 'requirements.txt']
# Bootstrap/support files pushed to the permanent bucket on every build.
FILES_TO_S3 = ['jenkins/seed.xml.erb',
               'puppet/installJenkins.pp',
               'puppet/installJenkinsJob.pp',
               'puppet/installJenkinsPlugins.pp',
               'puppet/installJenkinsUsers.pp',
               'puppet/installJenkinsSecurity.pp',
               DOCKER_ZIPFILE]
# TCP ports opened to the user-supplied source IPs (strings: they are
# injected verbatim into the CloudFormation template).
INGRESS_PORTS = ['22', '2222', '8080']
ALLOWED_ACTIONS = ["build", "destroy", "info", "test"]
# FIXME: These are hard-coded elsewhere, make dynamic everywhere.
CODEDEPLOY_APP_NAME = 'nando-demo'
CODEDEPLOY_GROUP_NAME = 'nando-demo'
# Note: IAM_ROLE_NAME and IAM_POLICY_NAME will have the region and hash
# appended as 'NandoDemoRole-us-east-1' to allow multi-region support
IAM_ROLE_NAME = 'NandoDemoCodeDeployRole'
IAM_ROLE_DOC = 'codedeploy/NandoDemoCodeDeployRole.json'
IAM_POLICY_NAME = 'NandoDemoCodeDeployPolicy'
IAM_POLICY_DOC = 'codedeploy/NandoDemoCodeDeployPolicy.json'
# Resource Logical IDs
JENKINS_INSTANCE = "NandoDemoJenkins"
WEB_ASG_NAME = 'NandoDemoWebASG'
DEMO_RDS = 'NandoDemoMysql'
DEMO_ELB = 'NandoDemoELB'
DEMO_ECS = 'StelligentDemoECS'
DEMO_S3_BUCKET = 'NandoDemoBucket'  # Ephemeral Bucket
DEMO_DOCKER_ENV = 'NandoDemoDockerEnvironment'
def ip_address_type(location):
    """argparse type-checker: return *location* unchanged if it parses as an
    IPv4 address, otherwise raise ArgumentTypeError.

    FIX: the original bare ``except:`` swallowed every exception (including
    KeyboardInterrupt/SystemExit); only the socket parsing failure should be
    translated.  Note: inet_aton also accepts short forms such as '127.1'.
    """
    try:
        socket.inet_aton(location)
    except socket.error:
        raise argparse.ArgumentTypeError("%s is not a valid IP Address" %
                                         location)
    return location
def list_and_get_stacks(cfn_connection, allow_all=False):
    """Interactively select demo stack(s) from CloudFormation.

    Lists every stack whose name matches a known STACK_DATA prefix and
    prompts on stdin.  Returns a list of [stack, type-tag] pairs.  With
    ``allow_all`` the user may also answer 'nc' (everything except
    VPC/SG/RDS) or 'all'.  Exits the process on 'q' or when no stacks match.
    """
    stack_list = []
    all_stacks = cfn_connection.describe_stacks()
    # Tag each matching stack with its STACK_DATA type.  NOTE: the loop
    # variable 'type' shadows the builtin (kept as-is for byte-compat).
    for type in STACK_DATA:
        match_stacks = [[stack, STACK_DATA[type]['type']] for
                        stack in all_stacks if
                        re.match('%s-(\d+)' % STACK_DATA[type]['prefix'],
                                 stack.stack_name)]
        stack_list = stack_list + match_stacks
    if stack_list:
        response = 0
        custom_range = range(1, len(stack_list)+1)
        # Re-prompt until a valid 1-based index (or nc/all/q) is entered;
        # non-numeric junk simply redisplays the menu.
        while response not in custom_range:
            for index, stack in enumerate(stack_list):
                print "%s) %s (%s) - %s" % (index + 1, stack[0].stack_name,
                                            stack[1], stack[0].stack_status)
            if allow_all:
                print "nc) Non-Core: Select all but VPC/SG/RDS."
                print "all) Select all."
            print "q) Quit."
            response = raw_input("Which stack? ")
            if response in ['q', 'quit', 'exit']:
                sys.exit(0)
            if response == 'nc' and allow_all:
                return [stack for stack in stack_list if stack[1] not in [
                    'VPC', 'SG', 'RDS']]
            if response == 'all' and allow_all:
                return stack_list
            try:
                response = int(response)
            except ValueError:
                pass
        return [stack_list[response - 1]]
    else:
        print "No stacks found. Exiting."
        sys.exit(0)
def prepare_docker_zip():
sys.stdout.write("Repacking %s..." % DOCKER_ZIPFILE)
sys.stdout.flush()
try:
os.remove(DOCKER_ZIPFILE)
except OSError:
pass
with zipfile.ZipFile(DOCKER_ZIPFILE, mode='w') as zf:
os.chdir('docker')
for f in DOCKER_FILES:
zf.write(f)
os.chdir('..')
print "Done!"
def copy_files_to_s3(s3_connection, bucket):
prepare_docker_zip()
sys.stdout.write("Sending files to S3...")
sys.stdout.flush()
s3_bucket = s3_connection.get_bucket(bucket)
s3_key = S3Key(s3_bucket)
for f in FILES_TO_S3:
s3_key.key = os.path.basename(f)
with open(f) as f:
s3_key.set_contents_from_file(f)
print "Done!"
def create_and_upload_index_to_s3(s3, outputs=None):
    """Render the stack outputs into index.html and publish it publicly.

    The target bucket name is extracted from the StelligentDemoBucketURL
    output; the page is uploaded with a text/html content type and a
    public-read ACL.

    FIX: the default for *outputs* was a shared mutable ``dict()`` -- and
    the wrong type, since the argument is consumed as a list of output
    objects.  Use None and normalize to an empty list.
    """
    outputs = outputs if outputs is not None else []
    output_key = "StelligentDemoBucketURL"
    bucket_url = ([output.value for output in outputs
                   if output.key == output_key])[0]
    # Strip the website-endpoint wrapper to recover the bare bucket name.
    bucket_name = re.sub(r'http://(.*).s3-website.*', r'\1', bucket_url)
    contents = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>Demo Index File in S3 Bucket</title>
</head>
<body>
<h1>Stelligent Demo Stack</h1>
<pre>"""
    for output in outputs:
        contents += "%40s : %s\n" % (output.key, output.value)
    s3_bucket = s3.get_bucket(bucket_name)
    s3_key = S3Key(s3_bucket)
    s3_key.key = "index.html"
    s3_key.set_metadata('Content-Type', 'text/html')
    s3_key.set_contents_from_string(contents)
    s3_key.set_acl('public-read')
def delete_stack_name_from_s3(s3_connection, bucket, target):
s3_bucket = s3_connection.get_bucket(bucket)
s3_key = target
s3_bucket.delete_key(s3_key)
print "Deleted stack name from s3 %s." % target
def inject_custom_ami(resource, data, parameters, ec2_connection, region):
    """Swap in the prebaked AMI for *resource* when one exists in *region*.

    Returns the (possibly modified) template ``data`` and ``parameters``.
    Falls back to the template's default AMI -- returning both untouched --
    when no AMI is mapped for the region or the mapped id no longer exists.
    On success it also appends a "<resource>ConfigSet" = "quick" parameter
    (presumably selecting a lighter bootstrap configSet -- see template).
    """
    try:
        ami = CUSTOM_AMI_MAP[region]
        # Validates the AMI id; raises EC2ResponseError if it is stale.
        ec2_connection.get_image(ami)
    except KeyError:
        print "No Custom AMI defined for %s. See CUSTOM_AMI_MAP." % region
        print "Using default AMI for %s." % resource
        return data, parameters
    except EC2ResponseError:
        print "AMI %s does not exist in %s. See CUSTOM_AMI_MAP." % (ami,
                                                                    region)
        print "Using default AMI for %s." % resource
        return data, parameters
    data['Resources'][resource]['Properties']['ImageId'] = ami
    resource_config_set = "%sConfigSet" % resource
    parameters.append((resource_config_set, "quick"))
    print "Using image %s for %s." % (ami, resource)
    return data, parameters
def add_cidr_subnet(locations):
    """Suffix each IP with a CIDR mask: /0 for the 0.0.0.0 wildcard,
    /32 (single host) for everything else."""
    return ['%s/0' % loc if loc == '0.0.0.0' else '%s/32' % loc
            for loc in locations]
def inject_locations(locations, data):
sys.stdout.write("Setting security source(s) to %s..." % locations)
sys.stdout.flush()
for location in locations:
for port in INGRESS_PORTS:
item = {'IpProtocol': 'tcp',
'FromPort': port,
'ToPort': port,
'CidrIp': '%s' % location}
data['Resources']['StelligentDemoPublicLockedSecurityGroup']['Properties']['SecurityGroupIngress'].append(item)
print "Done!"
return data
def get_instagram_keys_from_env():
try:
insta_id = os.environ['INSTAGRAM_CLIENT_ID']
insta_secret = os.environ['INSTAGRAM_CLIENT_SECRET']
except KeyError:
print "Please set both 'INSTAGRAM_CLIENT_ID' and " \
"'INSTAGRAM_CLIENT_SECRET' in your environment."
sys.exit(1)
else:
return insta_id, insta_secret
def create_cfn_stack(cfn_connection, stack_name, data, build_params=None,
                     capabilities=None, disable_rollback='true'):
    """Create a CloudFormation stack from the template dict *data*.

    :param build_params: list of (key, value) parameter tuples (default []).
    :param capabilities: defaults to ['CAPABILITY_IAM'] -- required because
        the templates create IAM resources.
    :param disable_rollback: kept as the string 'true' for byte-compat with
        existing callers; boto treats any truthy value the same way.

    FIX: ``capabilities=['CAPABILITY_IAM']`` was a mutable default argument
    shared across calls; use None and substitute inside the body.
    """
    build_params = build_params or list()
    capabilities = capabilities or ['CAPABILITY_IAM']
    cfn_connection.create_stack(
        stack_name,
        template_body=json.dumps(data, indent=2),
        parameters=build_params,
        capabilities=capabilities,
        disable_rollback=disable_rollback
    )
def get_stack_outputs(cfn_connection, stack_name):
    """Poll *stack_name* until its outputs list is populated, then return it.

    Sleeps two seconds between polls; outputs only appear once the stack
    reaches a completed state.
    """
    while True:
        described = cfn_connection.describe_stacks(stack_name)[0]
        if described.outputs:
            return described.outputs
        time.sleep(2)
def get_or_create_stack(cfn_connection, all_stacks, stack_data, timestamp,
                        build_params=None, check_outputs=None, create=False,
                        wait=True, locations=None):
    """Reuse a matching CREATE_COMPLETE stack, or create a new one.

    Returns (stack_name, outputs, created).  ``outputs`` is None when the
    stack was created with ``wait=False``; ``created`` tells the caller
    whether dependent stacks must also be (re)created.  With ``create=True``
    reuse is skipped entirely.  ``check_outputs`` restricts reuse to stacks
    whose parameters contain all of the given output values (i.e. stacks
    built on top of the same upstream stack).  ``locations`` is only used
    for the S3 stack, whitelisting source IPs in the bucket policy.
    """
    stack = None
    created = False
    if create:
        stacks = None
    else:
        # Candidate stacks: right name prefix AND fully created.
        stacks = [stack_match for stack_match in all_stacks if
                  re.match('%s-(\d+)' % stack_data['prefix'],
                           stack_match.stack_name) and
                  stack_match.stack_status == 'CREATE_COMPLETE']
    if stacks:
        if check_outputs:
            for check_stack in stacks:
                subset = [x.value for x in check_outputs]
                fullset = [x.value for x in check_stack.parameters]
                if set(subset).issubset(fullset):
                    stack = check_stack
                    break
        else:
            # Default to first, complete stack
            stack = stacks[0]
    if stack:
        print "Using %s %s..." % (stack_data['type'], stack.stack_name)
        return stack.stack_name, stack.outputs, created
    else:
        created = True
        stack_name = '%s-%s' % (stack_data['prefix'], timestamp)
        with open(stack_data['template']) as data_file:
            data = json.load(data_file)
        if stack_data['type'] == 'S3':
            # Whitelist the caller's source IPs in the bucket policy.
            for location in locations:
                data['Resources']['StelligentDemoBucketPolicy']['Properties']['PolicyDocument']['Statement'][0]['Condition']['IpAddress']['aws:SourceIp'].append(location)
        print "Creating %s stack %s..." % (stack_data['type'],
                                           stack_name)
        create_cfn_stack(cfn_connection, stack_name, data,
                         build_params=build_params)
        if wait:
            # Block until CREATE_COMPLETE, then collect the outputs.
            get_resource_id(cfn_connection, stack_name)
            outputs = get_stack_outputs(cfn_connection, stack_name)
        else:
            outputs = None
        return stack_name, outputs, created
def create_ec2_key_pair(ec2_connection, key_pair_name):
    """Create an EC2 key pair, save <name>.pem beside the script, and
    return the private-key material."""
    sys.stdout.write("Creating EC2 Key Pair %s..." % key_pair_name)
    sys.stdout.flush()
    kp = ec2_connection.create_key_pair(key_pair_name)
    print "Done!"
    sys.stdout.write("Creating private key %s.pem locally..." % key_pair_name)
    sys.stdout.flush()
    # Writes ./<key_pair_name>.pem
    kp.save('.')
    print "Done!"
    return kp.material
def delete_ec2_key_pair(ec2_connection, key_pair_name):
sys.stdout.write("Deleting EC2 Key Pair %s..." % key_pair_name)
sys.stdout.flush()
ec2_connection.delete_key_pair(key_pair_name)
print "Done!"
key_file = '%s.pem' % key_pair_name
if os.path.isfile(key_file):
os.remove(key_file)
print "Deleted Private Key %s." % key_file
def create_iam_role(iam_connection, role_name, role_doc):
    """Create an IAM role from the JSON trust document at *role_doc* and
    return the new role's ARN."""
    sys.stdout.write("Creating IAM Role %s..." % role_name)
    sys.stdout.flush()
    with open(role_doc) as doc:
        result = iam_connection.create_role(
            role_name, assume_role_policy_document=doc.read())
    print "Done!"
    # boto 2.x returns a nested dict response; dig out the ARN.
    return result['create_role_response']['create_role_result']['role']['arn']
def delete_iam_role(iam_connection, role_name):
    """Best-effort delete of an IAM role; API errors are swallowed so
    teardown can continue."""
    sys.stdout.write("Deleting IAM Role %s..." % role_name)
    sys.stdout.flush()
    try:
        iam_connection.delete_role(role_name)
    except BotoServerError:
        # Role already gone (or still has attachments) -- keep going.
        pass
    print "Done!"
def put_iam_role_policy(iam_connection, role_name, policy_name,
                        policy_doc):
    """Attach the inline policy JSON at *policy_doc* to *role_name* under
    the name *policy_name*."""
    sys.stdout.write("Adding policy %s..." % policy_name)
    sys.stdout.flush()
    with open(policy_doc) as doc:
        iam_connection.put_role_policy(role_name, policy_name, doc.read())
    print "Done!"
def delete_iam_policy(iam_connection, role_name, policy_name):
    """Best-effort delete of an inline role policy; API errors are
    swallowed so teardown can continue."""
    sys.stdout.write("Deleting policy %s..." % policy_name)
    sys.stdout.flush()
    try:
        iam_connection.delete_role_policy(role_name, policy_name)
    except BotoServerError:
        # Policy already gone -- keep going.
        pass
    print "Done!"
def create_codedeploy_application(codedeploy_connection, app_name):
    """Register *app_name* as a CodeDeploy application."""
    sys.stdout.write("Creating CodeDeploy Application %s..." % app_name)
    sys.stdout.flush()
    codedeploy_connection.create_application(app_name)
    print "Done!"
def delete_codedeploy_application(codedeploy_connection, app_name):
    """Delete the CodeDeploy application *app_name*."""
    sys.stdout.write("Deleting CodeDeploy Application %s..." % app_name)
    sys.stdout.flush()
    codedeploy_connection.delete_application(app_name)
    print "Done!"
def create_codedeploy_deployment_group(codedeploy_connection, app_name,
group_name, asg_id, service_role):
sys.stdout.write("Creating CodeDeploy Deployment Group %s..." % group_name)
sys.stdout.flush()
codedeploy_connection.create_deployment_group(
app_name,
group_name,
auto_scaling_groups=[asg_id],
service_role_arn=service_role
)
print "Done!"
pass
def delete_codedeploy_deployment_group(codedeploy_connection, app_name,
                                       group_name):
    """Delete the CodeDeploy deployment group *group_name* of *app_name*."""
    sys.stdout.write("Deleting CodeDeploy Deployment Group %s..." % group_name)
    sys.stdout.flush()
    codedeploy_connection.delete_deployment_group(app_name, group_name)
    print "Done!"
def empty_related_buckets(s3_connection, bucket_name):
    """Delete every object in *bucket_name* so CloudFormation can remove it.

    No-op for the permanent bucket and for buckets that no longer exist or
    are inaccessible.
    """
    # Safeguard. Do not delete items from main bucket.
    if bucket_name == MAIN_S3_BUCKET:
        return
    try:
        bucket = s3_connection.get_bucket(bucket_name)
        keys = bucket.get_all_keys()
        if keys:
            print "Deleting the following files from %s:" % bucket_name
            print keys
            bucket.delete_keys(keys)
    except S3ResponseError:
        # Bucket missing or forbidden -- nothing to empty.
        pass
def get_resource_id(cfn_connection, stack_name, resource_name=None):
    """Block until a stack (or one resource within it) is CREATE_COMPLETE.

    Returns the resource's PhysicalResourceId, or the stack id when no
    *resource_name* is given.  Prints an animated "Waiting for ..." line
    while polling (~4s per cycle) and exits the process if the status ever
    ends in FAILED.
    """
    # Initial Check
    if resource_name:
        resource_label = resource_name
    else:
        resource_label = stack_name
    try:
        # FIXME: Must be a better way...
        if resource_name:
            resource = cfn_connection.describe_stack_resource(stack_name,
                                                              resource_name)
            info = resource['DescribeStackResourceResponse']['DescribeStackResourceResult']['StackResourceDetail']
            status = info['ResourceStatus']
            resource_id = info['PhysicalResourceId']
        else:
            status = cfn_connection.describe_stacks(stack_name)[0].stack_status
            resource_id = cfn_connection.describe_stacks(
                stack_name)[0].stack_id
    except BotoServerError:
        # Stack/resource not visible yet; fall through to the polling loop.
        status = "NOT STARTED"
    while status != "CREATE_COMPLETE":
        sys.stdout.write("\rWaiting for %s " % resource_label)
        sys.stdout.flush()
        sleep(1)
        sys.stdout.write("\rWaiting for %s. " % resource_label)
        sys.stdout.flush()
        sleep(1)
        sys.stdout.write("\rWaiting for %s.. " % resource_label)
        sys.stdout.flush()
        sleep(1)
        sys.stdout.write("\rWaiting for %s... " % resource_label)
        sys.stdout.flush()
        try:
            # FIXME: Must be a better way...
            if resource_name:
                resource = cfn_connection.describe_stack_resource(
                    stack_name, resource_name)
                info = resource['DescribeStackResourceResponse']['DescribeStackResourceResult']['StackResourceDetail']
                status = info['ResourceStatus']
                resource_id = info['PhysicalResourceId']
            else:
                # NOTE(review): this branch never refreshes resource_id; if
                # the initial describe raised BotoServerError, resource_id
                # may be unbound when the loop exits -- confirm/fix upstream.
                status = cfn_connection.describe_stacks(
                    stack_name)[0].stack_status
        except BotoServerError:
            status = "NOT STARTED"
        if status.endswith('FAILED'):
            sys.stdout.write("\n")
            print "Stack Failed. Exiting..."
            sys.exit(1)
    if status.endswith('COMPLETE'):
        sys.stdout.write("\rWaiting for %s...Done!" % resource_label)
        sys.stdout.flush()
        sys.stdout.write("\n")
    return resource_id
def set_stack_name_in_s3(s3_connection, stack_name, dest_name, bucket):
    """Store *stack_name* as the body of object *dest_name* in *bucket*."""
    marker = S3Key(s3_connection.get_bucket(bucket))
    marker.key = dest_name
    marker.set_contents_from_string(stack_name)
def outputs_to_parameters(outputs, params=None):
    """Convert stack outputs into (key, value) parameter tuples.

    When a non-empty *params* list is supplied the tuples are appended to
    it and the same list is returned; otherwise a fresh list is built.
    """
    params = params or list()
    params.extend((output.key, output.value) for output in outputs)
    return params
def build(connections, region, locations, hash_id, full):
    """Build the complete demo environment in *region*.

    Launches the EB, S3, VPC, SG, ECS, RDS and main CloudFormation stacks
    (reusing existing CREATE_COMPLETE VPC/SG/RDS stacks unless *full* is
    set), creates the EC2 key pair, IAM role/policy and CodeDeploy
    app/group, uploads support files and an index page to S3, and finally
    prints the combined, sorted stack outputs.  *locations* are bare IPs;
    they get CIDR suffixes here before being injected into templates.
    """
    instagram_id, instagram_secret = get_instagram_keys_from_env()
    timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
    locations = add_cidr_subnet(locations)
    all_stacks = connections['cfn'].describe_stacks()
    # Setup EC2 Key Pair
    key_pair_name = "%s-%s" % (STACK_DATA['main']['key_prefix'], timestamp)
    private_key = create_ec2_key_pair(connections['ec2'], key_pair_name)
    # Launch ElasticBeanstalk Stack, don't wait
    eb_params = list()
    eb_params.append(("HashID", hash_id))
    eb_params.append(("DemoRegion", region))
    eb_params.append(("StelligentDemoZoneName", ROUTE53_DOMAIN))
    eb_params.append(("KeyName", key_pair_name))
    eb_stack, eb_outputs, eb_created = get_or_create_stack(
        connections['cfn'], all_stacks, STACK_DATA['eb'], timestamp,
        build_params=eb_params, create=True, wait=False
    )
    # Launch S3 Stack, don't wait
    s3_params = list()
    s3_params.append(("DemoRegion", region))
    s3_params.append(("StelligentDemoZoneName", ROUTE53_DOMAIN))
    s3_stack, s3_outputs, s3_created = get_or_create_stack(
        connections['cfn'], all_stacks, STACK_DATA['s3'], timestamp,
        build_params=s3_params, create=True, wait=False, locations=locations
    )
    # Cascading Outputs/Parameters
    # Get or create VPC
    vpc_stack, vpc_outputs, vpc_created = get_or_create_stack(
        connections['cfn'], all_stacks, STACK_DATA['vpc'], timestamp,
        create=full
    )
    # Get or create SG (must belong to the VPC chosen above)
    sg_params = outputs_to_parameters(vpc_outputs)
    sg_stack, sg_outputs, sg_created = get_or_create_stack(
        connections['cfn'], all_stacks, STACK_DATA['sg'], timestamp,
        build_params=sg_params, check_outputs=vpc_outputs, create=vpc_created
    )
    # Launch ECS Stack, don't wait
    ecs_params = outputs_to_parameters(sg_outputs)
    ecs_params.append(("KeyName", key_pair_name))
    ecs_params.append(('StelligentDemoECSClusterName', DEMO_ECS))
    ecs_stack, ecs_outputs, ecs_created = get_or_create_stack(
        connections['cfn'], all_stacks, STACK_DATA['ecs'], timestamp,
        build_params=ecs_params, check_outputs=sg_outputs, create=True,
        wait=False
    )
    # Get or create RDS
    rds_params = outputs_to_parameters(sg_outputs)
    rds_stack, rds_outputs, rds_created = get_or_create_stack(
        connections['cfn'], all_stacks, STACK_DATA['rds'], timestamp,
        build_params=rds_params, check_outputs=sg_outputs, create=sg_created
    )
    # Wait for S3
    get_resource_id(connections['cfn'], s3_stack)
    s3_outputs = get_stack_outputs(connections['cfn'], s3_stack)
    # Setup Main Stack
    stack_name = "%s-%s" % (STACK_DATA['main']['prefix'], timestamp)
    build_params = outputs_to_parameters(s3_outputs)
    build_params += outputs_to_parameters(rds_outputs)
    build_params.append(("PrimaryPermanentS3Bucket", MAIN_S3_BUCKET))
    # Setup Instagram Access
    build_params.append(("InstagramId", instagram_id))
    build_params.append(("InstagramSecret", instagram_secret))
    build_params.append(("NandoDemoName", stack_name))
    build_params.append(("DemoRegion", region))
    build_params.append(("StelligentDemoZoneName", ROUTE53_DOMAIN))
    build_params.append(("HashID", hash_id))
    build_params.append(("KeyName", key_pair_name))
    build_params.append(("PrivateKey", private_key))
    copy_files_to_s3(connections['main_s3'], MAIN_S3_BUCKET)
    with open(STACK_DATA['main']['template']) as data_file:
        data = json.load(data_file)
    # Inject locations
    data = inject_locations(locations, data)
    # Inject Custom AMI
    data, build_params = inject_custom_ami(
        JENKINS_INSTANCE, data, build_params, connections['ec2'], region)
    # Setup IAM Roles/Policies (region+hash suffix for multi-region support)
    IRN = "-".join((IAM_ROLE_NAME, region, hash_id))
    IPN = "-".join((IAM_POLICY_NAME, region, hash_id))
    role_arn = create_iam_role(connections['iam'], IRN, IAM_ROLE_DOC)
    put_iam_role_policy(connections['iam'], IRN, IPN, IAM_POLICY_DOC)
    # Add Extra Information to Stack
    CAN = "-".join((CODEDEPLOY_APP_NAME, region, hash_id))
    CGN = "-".join((CODEDEPLOY_GROUP_NAME, region, hash_id))
    build_params.append(("CodeDeployAppName", CAN))
    build_params.append(("CodeDeployDeploymentGroup", CGN))
    # Inject Database name
    db_name = "%s%s" % (STACK_DATA['rds']['db_prefix'], timestamp)
    build_params.append(("StelligentDemoDBName", db_name))
    # Create Stack
    sys.stdout.write("Launching CloudFormation Stack in %s..." % region)
    sys.stdout.flush()
    create_cfn_stack(connections['cfn'], stack_name, data, build_params)
    # Upload stack name to S3
    dest_name = "cloudformation.stack.name-%s-%s" % (region, hash_id)
    set_stack_name_in_s3(connections['main_s3'], stack_name,
                         dest_name, MAIN_S3_BUCKET)
    print "Done!"
    # Give Feedback whilst we wait...
    get_resource_id(connections['cfn'], stack_name, DEMO_ELB)
    asg_id = get_resource_id(connections['cfn'], stack_name, WEB_ASG_NAME)
    # Setup CodeDeploy (needs the ASG's physical id, hence the wait above)
    create_codedeploy_application(connections['codedeploy'],
                                  CAN)
    create_codedeploy_deployment_group(connections['codedeploy'],
                                       CAN, CGN, asg_id, role_arn)
    get_resource_id(connections['cfn'], stack_name, JENKINS_INSTANCE)
    # Wait for Elastic Beanstalk
    get_resource_id(connections['cfn'], eb_stack)
    print "Gathering Stack Outputs...almost there!"
    main_outputs = get_stack_outputs(connections['cfn'], stack_name)
    eb_outputs = get_stack_outputs(connections['cfn'], eb_stack)
    ecs_outputs = get_stack_outputs(connections['cfn'], ecs_stack)
    outputs = main_outputs + eb_outputs + ecs_outputs
    outputs = sorted(outputs, key=lambda k: k.key)
    # Upload index.html to transient demo bucket
    create_and_upload_index_to_s3(connections['s3'], outputs)
    print "Outputs:"
    for output in outputs:
        print '%s = %s' % (output.key, output.value)
def destroy(connections, region):
    """Tear down user-selected demo stack(s) plus their side resources.

    For S3 stacks the ephemeral bucket is emptied first (CloudFormation
    cannot delete non-empty buckets).  For the MAIN stack the CodeDeploy
    app/group, IAM role/policy, EC2 key pair, and the stack-name marker in
    the permanent bucket are removed before the stack delete is issued.
    """
    stacks = list_and_get_stacks(connections['cfn'], allow_all=True)
    for stack in stacks:
        stack, stack_type = stack
        if stack.stack_status == "DELETE_IN_PROGRESS":
            print "Stack %s deletion already in progress." % stack.stack_name
            continue
        if stack_type == 'S3':
            outputs = {x.key: x.value for x in stack.outputs}
            try:
                s3_bucket = outputs['StelligentDemoBucket']
                empty_related_buckets(connections['s3'], s3_bucket)
            except KeyError:
                # Output missing (partially-created stack) -- skip emptying.
                pass
        elif stack_type == 'MAIN':
            parameters = {x.key: x.value for x in stack.parameters}
            hash_id = parameters['HashID']
            # Destroy CodeDeploy
            delete_codedeploy_deployment_group(
                connections['codedeploy'],
                parameters['CodeDeployAppName'],
                parameters['CodeDeployDeploymentGroup'])
            delete_codedeploy_application(connections['codedeploy'],
                                          parameters['CodeDeployAppName'])
            # Destroy IAM Roles/Policies
            IRN = "-".join((IAM_ROLE_NAME, region, hash_id))
            IPN = "-".join((IAM_POLICY_NAME, region, hash_id))
            delete_iam_policy(connections['iam'], IRN, IPN)
            delete_iam_role(connections['iam'], IRN)
            # Destroy EC2 Key Pair
            delete_ec2_key_pair(connections['ec2'], parameters['KeyName'])
            # Remove the stackname from S3
            dest_name = "cloudformation.stack.name-%s-%s" % (region, hash_id)
            delete_stack_name_from_s3(connections['main_s3'], MAIN_S3_BUCKET,
                                      dest_name)
        # Destroy Stack
        sys.stdout.write("Deleting the CloudFormation Stack %s..." %
                         stack.stack_name)
        print "Deleting!"
        connections['cfn'].delete_stack(stack.stack_name)
def info(connections):
    """Interactively pick one stack and pretty-print its parameters and
    outputs."""
    selected, _ = list_and_get_stacks(connections['cfn'])[0]
    pprint(selected.parameters, indent=2)
    pprint(selected.outputs, indent=2)
def main():
    """Parse CLI arguments, open per-region AWS connections, and dispatch
    to build/destroy/info/test."""
    # Short random hash so multiple deployments can coexist per region.
    new_hash = hashlib.md5(str(time.time())).hexdigest()[:8]
    parser = argparse.ArgumentParser()
    parser.add_argument("action", choices=ALLOWED_ACTIONS, action="store",
                        help="Action to take against the stack(s)")
    parser.add_argument("-l", "--location", nargs='*', action="store",
                        dest="locations", help="""If building, provide the
                        IP Address(es) from which ssh is allowed.\n
                        Example: './go.py build -l xx.xx.xx.xx yy.yy.yy.yy""",
                        type=ip_address_type, default=["0.0.0.0"])
    parser.add_argument('--region', action="store", dest="region",
                        default=DEFAULT_REGION)
    parser.add_argument('--hash', action="store", dest="hash_id",
                        help="""Define the hash to use for multiple
                        deployments. If left blank, the hash will be
                        generated.""", default=new_hash)
    parser.add_argument('--full', action='store_true',
                        help="Always build all components. (VPC, RDS, etc.)")
    args = parser.parse_args()
    full = args.full
    connections = dict()
    connections['cfn'] = cfn_connect(args.region)
    # 'info' only needs CloudFormation; skip the remaining connections.
    if args.action == "info":
        info(connections)
        sys.exit(0)
    connections['codedeploy'] = codedeploy_connect(args.region)
    connections['ec2'] = ec2_connect(args.region)
    connections['iam'] = iam_connect(args.region)
    # The permanent bucket lives in a fixed region, independent of --region.
    connections['main_s3'] = s3_connect(MAIN_S3_BUCKET_REGION)
    connections['s3'] = s3_connect(args.region)
    if args.action == "test":
        # Test pieces here
        sys.exit(0)
    if args.action == "build":
        if not args.locations:
            print "Please provide at least one IP Address."
            parser.print_help()
            sys.exit(1)
        build(connections, args.region, args.locations, args.hash_id, full)
    elif args.action == "destroy":
        destroy(connections, args.region)
# Script entry point.
if __name__ == '__main__':
    main()