def do_work(self, step, function):
    """Run *function* against every local lambda in parallel.

    Args:
        step: human-readable name of the operation, used in messages.
        function: callable handed to self.safe_fail for each lambda.

    Returns:
        A list of dicts with 'name', 'msg', 'Success' and 'return_val'
        keys, one per lambda. Exits the process with status 1 (after
        printing the first failure's output) if any lambda failed.
    """
    lambdas = self.list_local_lambdas()
    with Pool(3) as p:
        args = zip(repeat(function), lambdas)
        print('About to %s lambdas' % step)
        results = p.starmap(self.safe_fail, args)
    results = [{
        'name': name,
        'msg': result['msg'],
        'Success': result['Success'],
        # .get() defaults to None, replacing the explicit membership test
        'return_val': result.get('return_val')
    } for (name, result) in zip(lambdas, results)]
    failed = [result for result in results if not result['Success']]
    if failed:  # truthiness instead of len(...) > 0
        print('\n' + '#' * 20 + '\n')
        print(failed[0]['msg'])
        print('\n' + '#' * 20 + '\n')
        print(error('Unable to %s the following lambdas:' % step))
        print(' ' + '\n '.join([x['name'] for x in failed]))
        print('Output for %s of lambda %s shown above' %
              (step, failed[0]['name']))
        # pp.pprint(results)
        exit(1)
    else:
        print(good('Successfully completed %s for all lambdas' % step))
    return (results)
def main(argv):
    """Entry point: validate the skip-flag combination, then deploy.

    Args:
        argv: command-line argument list, parsed by arguments().
    """
    v = arguments(argv)
    # Later pipeline stages consume earlier stages' artifacts, so skipping
    # a later stage while still running an earlier one would produce
    # artifacts that are never used. Reject those combinations up front.
    if v.skip_zip_upload and not v.skip_build:
        print(
            error(
                'Error: You\'ve asked me to skip the upload, but you still want me to rebuild the virtual environments?'
            ))
        print(error('The new virtual environments would not be uploaded'))
        print(error('If skipping upload, you must also skip build'))
        exit(1)
    elif v.skip_zip_upload and not v.skip_zip:
        print(
            error(
                'Error: You\'ve asked me to skip the upload, but you still want me to zip the local lambda files?'
            ))
        print(error('The new zips would not be uploaded'))
        # BUG FIX: this branch is about zipping, not building — the old
        # message ('...must also skip build') was copy-pasted from above.
        print(error('If skipping upload, you must also skip zipping'))
        exit(1)
    elif v.skip_zip and not v.skip_build:
        print(
            error(
                'Error: You\'ve asked me to skip the zipping of lambdas, but you still want me rebuild the virtual environment?'
            ))
        print(
            error(
                'The zip will still contain the previous virtual environment'))
        print(error('If skipping zipping, you must also skip build'))
        exit(1)
    print(emph('deploying %s' % v.stage_name))
    root_dir = os.path.dirname(os.path.realpath(__file__))
    lambda_dir = os.path.join(root_dir, 'data', 'lambda')
    lib_dir = os.path.join(root_dir, 'data', 'lib')
    cloudformation_dir = os.path.join(root_dir, 'data', 'cloudformation')
    data_dir = os.path.join(root_dir, 'data')
    project_name = 'site-views'
    code_bucket = '%s-code' % project_name
    region = 'ap-southeast-2'
    prj = Project(project_name, region, lambda_dir, lib_dir,
                  cloudformation_dir, data_dir, code_bucket, v.stage_name)
    prj.the_lot(v.skip_zip, v.skip_build, v.skip_zip_upload,
                v.skip_lambda_test)
    print(good('Done'))
def latest_version(self, lambda_name): key = self.s3_key(lambda_name) # s3 = boto3.resource('s3') # object_summary = s3.ObjectSummary(self.code_bucket,key) # version = object_summary.Version() client = boto3.client('s3') print('Getting version info for zip %s from S3' % lambda_name) response = client.list_object_versions( Bucket=self.code_bucket, Prefix=key, # KeyMarker=key, # won't work if we just list versions for one key MaxKeys=1 # deleted keys show up for some reason ) if 'Versions' not in response: pp.pprint(response) versions = response['Versions'] i = 0 while (response['IsTruncated']): # pp.pprint(response) i = i + 1 if (i % 5) == 0: sys.stdout.write('.') # print('.') but without a newline sys.stdout.flush() # print('Getting next page of version info for zip %s from S3' % lambda_name) assert ('NextVersionIdMarker' in response) response = client.list_object_versions( Bucket=self.code_bucket, KeyMarker=response['NextKeyMarker'], MaxKeys=1, # deleted keys show up for some reason VersionIdMarker=response['NextVersionIdMarker'], Prefix=key) versions.extend(response['Versions']) print('Got all versions, now finding the most recent for %s' % key) version_ids = [ v['VersionId'] for v in versions if (v['IsLatest'] and (v['Key'] == key)) ] if len(version_ids) == 0: print( error( 'Couldn\'t find zip %s in S3 bucket %s. Try again without -u' % (key, self.code_bucket))) exit(1) elif len(version_ids) != 1: print('Versions:') pp.pprint(versions) print('Key: %s' % key) print('version_ids:') pp.pprint(version_ids) assert (len(version_ids) == 1) version = version_ids[0] print('Latest version of zip for %s is %s' % (lambda_name, version)) assert (version != None) return (version)
def deploy(self, lambda_versions):
    """Create or update the CloudFormation stack for this project/stage.

    Args:
        lambda_versions: list of dicts, each with 'name' and 'S3Version'
            keys identifying the uploaded zip version for one lambda.

    Exits the process on a missing template, bad parameters, or a failed
    stack creation. Updates go through a change set so a no-op update is
    detected and treated as success.
    """
    assert (not any([x['S3Version'] is None for x in lambda_versions]))
    self.lambda_versions = lambda_versions
    client = boto3.client('cloudformation')
    stack_name_short = 'stack'
    fname = os.path.join('data', 'cloudformation',
                         '%s.yaml' % stack_name_short)
    if not os.path.isfile(fname):
        print(error('Error: I expect a yaml template at %s' % fname))
        # BUG FIX: previously fell through and crashed later on open()
        exit(1)
    self.stack_name = self.project_name + '-' + self.stage
    print('About to deploy file %s as stack %s' % (fname, self.stack_name))
    print('Checking template file %s' % fname)
    with open(fname, "rb") as f:
        response = client.validate_template(
            TemplateBody=f.read().decode("utf-8"))
    print(good('Template %s is valid' % fname))
    params = [{
        'ParameterKey': 'prjName',
        'ParameterValue': self.project_name
    }, {
        'ParameterKey': 'codebucket',
        'ParameterValue': self.code_bucket
    }, {
        'ParameterKey': 'stage',
        'ParameterValue': self.stage
    }]
    # versions of S3 zips
    version_params = [{
        'ParameterKey': '%sS3Version' % v['name'],
        'ParameterValue': v['S3Version']
    } for v in self.lambda_versions]
    assert (not any([x['ParameterValue'] is None for x in version_params]))
    params.extend(version_params)
    if any([x['ParameterValue'] is None for x in params]):
        print(error('Error: some parameters are None'))
        pp.pprint([
            x['ParameterKey'] for x in params if x['ParameterValue'] is None
        ])
        exit(1)
    if self.stack_exists(self.stack_name):
        # update stack
        print('Stack %s already exists. Updating it' % self.stack_name)
        change_set_name = 'update-%d' % int(time.time())
        with open(fname, "rb") as f:
            print('Creating change set for %s' % self.stack_name)
            response = client.create_change_set(
                StackName=self.stack_name,
                TemplateBody=f.read().decode("utf-8"),
                Parameters=params,
                Capabilities=['CAPABILITY_NAMED_IAM'],
                ChangeSetName=change_set_name,
                Description='Update of %s' % fname,
                ChangeSetType='UPDATE')
        try:
            waiter = client.get_waiter('change_set_create_complete')
            waiter.wait(StackName=self.stack_name,
                        ChangeSetName=change_set_name,
                        WaiterConfig={
                            'Delay': 10,
                            'MaxAttempts': 100
                        })
        except Exception as e:
            response = client.describe_change_set(
                ChangeSetName=change_set_name, StackName=self.stack_name)
            pp.pprint(response)
            # An "empty" change set fails with this specific reason; treat
            # it as "nothing to do" rather than an error. .get() guards
            # against a missing StatusReason field.
            expected = 'The submitted information didn\'t contain changes.'
            if (response['Status'] == 'FAILED') and (
                    expected in response.get('StatusReason', '')):
                print('No changes to make to stack %s' % self.stack_name)
                response = client.delete_change_set(
                    ChangeSetName=change_set_name,
                    StackName=self.stack_name)
                return ()
            pp.pprint(e)
            # BUG FIX: was err(...), which is not defined anywhere in this
            # file; error() is the helper used by every other block.
            print(
                error('Could not create change set %s for stack %s' %
                      (change_set_name, self.stack_name)))
            raise (e)
        print(
            good('change set created sucessfully for %s' % self.stack_name))
        print('Applying change set to stack %s' % self.stack_name)
        response = client.execute_change_set(
            ChangeSetName=change_set_name,
            StackName=self.stack_name,
        )
        waiter = client.get_waiter('stack_update_complete')
        waiter.wait(StackName=self.stack_name,
                    WaiterConfig={
                        'Delay': 10,
                        'MaxAttempts': 100
                    })
        print(good('Stack %s updated sucessfully' % self.stack_name))
    else:
        print('Stack %s does not exist. Creating it' % self.stack_name)
        # create stack
        with open(fname, "rb") as f:
            response = client.create_stack(
                StackName=self.stack_name,
                TemplateBody=f.read().decode("utf-8"),
                Parameters=params,
                Capabilities=['CAPABILITY_NAMED_IAM'])
        waiter = client.get_waiter('stack_create_complete')
        waiter.wait(StackName=self.stack_name,
                    WaiterConfig={
                        'Delay': 10,
                        'MaxAttempts': 20
                    })
        response = client.describe_stacks(StackName=self.stack_name)
        assert (len(response['Stacks']) == 1)
        assert (response['Stacks'][0]['StackName'] == self.stack_name)
        status = response['Stacks'][0]['StackStatus']
        if status in ['CREATE_COMPLETE']:
            print(good('Stack %s sucessfully created' % self.stack_name))
        else:
            # BUG FIX: the %s placeholder was never filled in.
            print(error('Failed to create stack %s' % self.stack_name))
            print(warn('status %s' % status))
            print(
                'Check cloudformation in the browser to see what went wrong'
            )
            # TODO: add option to delete stack
            exit(1)