def _internal_deploy(self, dry_run):
    temp_dir = tempfile.mkdtemp()
    if not self.artifact_id.endswith(".zip"):
        self.artifact_id = "{}.zip".format(self.artifact_id)
    artifact_download = "s3://{location}/{artifact_id}".format(
        location=self.location, artifact_id=self.artifact_id)
    destination_bucket = self.cloud_formation_buddy.get_export_value(
        param="WWW-Files")
    s3util.download_zip_from_s3_url(artifact_download, destination=temp_dir)
    to_upload = self.get_filepaths(temp_dir)
    if dry_run:
        print_utility.banner_warn(
            "Dry Run: Uploading files to - {}".format(destination_bucket),
            str(to_upload))
    else:
        split = destination_bucket.split("/")
        if len(split) > 1:
            path = "/".join(split[1:])
        else:
            path = ''
        s3 = S3Buddy(self.deploy_ctx, path, split[0])
        print_utility.progress("S3 Deploy: Uploading files to - {}".format(destination_bucket))
        for s3_key, file_path in to_upload.items():
            print_utility.info("{} - {}".format(destination_bucket, s3_key))
            s3.upload(key_name=s3_key, file=file_path)

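# A minimal sketch of what get_filepaths presumably returns for the upload loop
# above (hypothetical body; only its {s3_key: local_path} contract is visible
# from the caller): walk the extracted directory and key each file by its path
# relative to the extraction root.
import os

def _get_filepaths(root_dir):
    filepaths = {}
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            # key by the path relative to the root, e.g. "css/site.css"
            filepaths[os.path.relpath(full_path, root_dir)] = full_path
    return filepaths
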
def create_change_set(self, template_file_url, parameter_file):
    resp = self.client.create_change_set(
        StackName=self.stack_name,
        TemplateURL=template_file_url,
        Parameters=_load_file_to_json(parameter_file),
        Capabilities=[
            'CAPABILITY_IAM',
            'CAPABILITY_NAMED_IAM'
        ],
        ChangeSetName=self.deploy_ctx.change_set_name
    )
    self.existing_change_set_id = resp['Id']
    self.stack_id = resp['StackId']
    print_utility.info("Created ChangeSet:\nChangeSetID: {}\nStackID: {}\n{}".format(
        resp['Id'], resp['StackId'], pformat(resp, indent=1)))
    waiter = self.client.get_waiter('change_set_create_complete')
    try:
        waiter.wait(ChangeSetName=self.deploy_ctx.change_set_name,
                    StackName=self.stack_id)
    except WaiterError as we:
        self.change_set_description = we.last_response
        noop = self._is_noop_changeset()
        print_utility.info("ChangeSet Failed to Create - {}".format(
            self.change_set_description['StatusReason']))
        if not noop:
            self.log_changeset_status()
        self._clean_change_set_and_exit()

def log_changeset_status(self, warn=True):
    if warn:
        print_utility.banner_warn("ChangeSet Details: {}".format(self.existing_change_set_id),
                                  pformat(self.change_set_description))
    else:
        print_utility.info("ChangeSet Details: {}".format(self.existing_change_set_id))
        print_utility.info_banner(pformat(self.change_set_description))

def __init__(self, artifact_directory, environment):
    super(ServiceDefinition, self).__init__()
    self.artifact_directory = artifact_directory
    service_definition_path = os.path.join(artifact_directory, _SERVICE_DEFINITION_FILE)
    if not os.path.exists(service_definition_path):
        err_msg = "Service definition ({}) does not exist in artifact directory - {}".format(
            _SERVICE_DEFINITION_FILE, artifact_directory)
        print_utility.error(err_msg)
        raise Exception(err_msg)
    with open(service_definition_path, 'r') as fp:
        service_definition = json.load(fp)
        validate(service_definition, self.schema)
        self.application = service_definition[_APPLICATION]
        self.role = service_definition[_ROLE]
        self.service_type = service_definition[_SERVICE_TYPE]
        self.docker_registry = service_definition.get(_DOCKER_REGISTRY, "")
        self.service_template_definition_locations = service_definition.get(
            _SERVICE_TEMPLATE_DEFINITION_LOCATIONS, [])
        if _DEPLOYMENT_PARAMETERS in service_definition:
            self.deployment_parameters = service_definition[_DEPLOYMENT_PARAMETERS]
            env_deployment_parameters = '{environment}-deployment-parameters'.format(
                environment=environment)
            if env_deployment_parameters in service_definition:
                print_utility.info(
                    "Updating deployment params with environment"
                    " specific settings - {}".format(env_deployment_parameters))
                self.deployment_parameters.update(
                    service_definition[env_deployment_parameters])
            print_utility.info("Loaded deployment parameters: " +
                               pformat(self.deployment_parameters, indent=4))
        self.service_modifications = service_definition.get(_MODIFICATIONS, [])

def cli(ctx, artifact_directory, application, role, environment,
        configuration_defaults, verbose):
    # type: (object, str, str, str, str, str, bool) -> None
    """CLI for managing the infrastructure for deploying micro-services in AWS."""
    print_utility.configure(verbose)
    loaded_defaults = None
    # if a defaults.json exists in the working directory and it is not
    # overridden with an explicit parameter - use it!
    if not configuration_defaults and os.path.exists('defaults.json'):
        configuration_defaults = 'defaults.json'
    if configuration_defaults:
        print_utility.info("Loading default settings from path: {}".format(configuration_defaults))
        with open(configuration_defaults, 'r') as fp:
            loaded_defaults = json.load(fp)
    if artifact_directory:
        if application or role:
            raise click.UsageError("When specifying --artifact-directory do not provide --application or --role")
        ctx.obj = DeployContext.create_deploy_context_artifact(artifact_directory=artifact_directory,
                                                               environment=environment,
                                                               defaults=loaded_defaults)
    else:
        ctx.obj = DeployContext.create_deploy_context(application=application,
                                                      role=role,
                                                      environment=environment,
                                                      defaults=loaded_defaults)

def _initialize_environment_variables(self):
    application = self['APPLICATION']
    # VPCAPP is the portion of the application name before the first '-'
    self['VPCAPP'] = application if not application or '-' not in application \
        else application[:application.find('-')]
    # allow for partial stack names for validation and introspection use cases
    stack_template = "${ENVIRONMENT}"
    if application:
        stack_template += "-${APPLICATION}"
        if self['ROLE']:
            stack_template += "-${ROLE}"
    env_variables[STACK_NAME] = stack_template
    self['DEPLOY_DATE'] = datetime.datetime.now().strftime("%b_%d_%Y_Time_%H_%M")
    for property_name in built_in:
        self.__dict__[property_name.lower()] = self.get(property_name, None)
    for variable, template in env_variables.items():
        evaluated_template = self.expandvars(template)
        self[variable] = evaluated_template
        self.__dict__[variable.lower()] = evaluated_template
    # S3 has non-standardized behavior in us-east-1: you can not use the region in the URL
    if self['REGION'] == 'us-east-1':
        self['CONFIG_TEMPLATES_URL'] = self['CONFIG_TEMPLATES_EAST_URL']
        self.__dict__['config_templates_url'] = self['CONFIG_TEMPLATES_EAST_URL']
    print_utility.info("deploy_ctx = {}".format(repr(self.__dict__)))

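# A minimal sketch of the assumed expandvars semantics used above (the real
# method is not shown in this section): substitute ${NAME} references in a
# template string against the context's own key/value pairs.
from string import Template

def expandvars(self, template_string):
    # safe_substitute leaves unknown ${NAME} references intact rather than raising
    return Template(template_string).safe_substitute(self)
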
def save_to_file(self, destination_dir=None):
    if destination_dir:
        path = os.path.join(destination_dir, _ARTIFACT_FILE)
    else:
        path = _ARTIFACT_FILE
    print_utility.info("Persisting monitor definition - {}".format(path))
    with open(path, 'w') as file:
        json.dump(self.monitors, file)
    return path

def delete_change_set(self):
    self._validate_changeset_operation_ready('delete_change_set')
    if self.get_change_set_execution_status(refresh=True) == 'EXECUTE_FAILED':
        print_utility.info(
            "Skipping Delete ChangeSet - ChangeSetID: {} Execution Status Failed".format(
                self.existing_change_set_id))
        return
    response = self.client.delete_change_set(ChangeSetName=self.existing_change_set_id)
    print_utility.info(
        "Deleted ChangeSet - ChangeSetID: {} Response: {}".format(
            self.existing_change_set_id, response))

def do_command(artifact_type, artifact_location, artifact_identifier, destination=None):
    # type: (str, str, str, str) -> str
    ad = ArtifactDefinition.create(artifact_type, artifact_location, artifact_identifier)
    print_utility.info("Generated artifact manifest - {}".format(ad.__class__.__name__))
    return ad.save_to_file(destination_dir=destination)

def _describe_stack(self):
    try:
        stacks = self.client.describe_stacks(StackName=self.stack_name)['Stacks']
        if len(stacks) >= 1:
            print_utility.info("Stack Description - {}".format(pformat(stacks)))
            self.stack_description = stacks[0]
            self.stack_id = self.stack_description['StackId']
            return True
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError):
        # a missing stack surfaces as a ClientError; treat it as "does not exist"
        pass
    return False

def upload(self, file, key_name=None):
    key_name = self._get_upload_bucket_key_name(file, key_name)
    with open(file, 'rb') as fp:
        args = {"Key": key_name, "Body": fp}
        content_type = self._guess_content_type(file)
        if content_type:
            args['ContentType'] = content_type
        self.bucket.put_object(**args)
    print_utility.info(
        f"Uploaded file to S3 - Bucket: {self.bucket_name} Key: {key_name} Content-Type: {content_type}")
    return f"{self.url_base}/{key_name}"

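# A minimal sketch of what _guess_content_type presumably does (hypothetical;
# the real helper is a method and is not shown here). It leans on the stdlib
# mimetypes module and returns None for unknown extensions, which lets upload()
# omit ContentType entirely in that case.
import mimetypes

def _guess_content_type(file):
    content_type, _encoding = mimetypes.guess_type(file)
    return content_type
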
def download_zip_from_s3_url(s3_url, destination):
    # type: (str, str) -> None
    parsed = urlparse(s3_url)
    bucket = parsed.hostname
    key = parsed.path[1:]  # strip leading /
    s3 = boto3.resource('s3')
    with tempfile.NamedTemporaryFile() as temporary_file:
        temp_file_path = temporary_file.name
        print_utility.info("Downloading zip from s3: {} - {}:{}".format(s3_url, key, temp_file_path))
        s3.Bucket(bucket).download_file(key, temp_file_path)
        with ZipFile(temp_file_path) as zf:
            zf.extractall(destination)

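# Quick illustration of how urlparse splits an s3:// URL for the function above
# (values invented for the example): the bucket arrives as the hostname and the
# object key as the path, minus its leading slash.
from urllib.parse import urlparse

parsed = urlparse("s3://my-bucket/releases/app-1.2.zip")
assert parsed.hostname == "my-bucket"
assert parsed.path[1:] == "releases/app-1.2.zip"
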
def requires_update(self):
    if not self.new_image:
        print_utility.warn("Checking for ECS update without registering new image")
        return False
    if not self.ecs_task_family:
        print_utility.warn("No ECS Task family found - assuming first deploy of stack and skipping ECS update")
        return False
    self._describe_task_definition()
    existing = pydash.get(self.task_definition_description, "containerDefinitions[0].image")
    print_utility.info(f"ECS task existing image - {existing}")
    print_utility.info(f"ECS task desired image - {self.new_image}")
    return self.run_task or existing != self.new_image

def perform_update(self):
    self._describe_task_definition(refresh=True)
    new_task_def = {
        'family': self.task_definition_description['family'],
        'containerDefinitions': self.task_definition_description['containerDefinitions'],
        'volumes': self.task_definition_description['volumes']
    }
    if 'networkMode' in self.task_definition_description:
        new_task_def['networkMode'] = self.task_definition_description['networkMode']
    new_task_def['containerDefinitions'][0]['image'] = self.new_image
    ctx_memory = self.deploy_ctx.get('TASK_MEMORY')
    if ctx_memory:
        new_task_def['containerDefinitions'][0]['memory'] = ctx_memory
    if 'TASK_SOFT_MEMORY' in self.deploy_ctx and self.deploy_ctx['TASK_SOFT_MEMORY']:
        new_task_def['containerDefinitions'][0]['memoryReservation'] = self.deploy_ctx['TASK_SOFT_MEMORY']
    ctx_cpu = self.deploy_ctx.get('TASK_CPU')
    if ctx_cpu:
        new_task_def['containerDefinitions'][0]['cpu'] = ctx_cpu
    if self.using_fargate:
        # Fargate requires cpu/memory at the task level, and the AWS API expects strings there
        first_container = new_task_def['containerDefinitions'][0]
        new_task_def['requiresCompatibilities'] = ['FARGATE']
        new_cpu = ctx_cpu or first_container.get('cpu')
        if new_cpu:
            new_task_def['cpu'] = str(new_cpu)
        new_memory = ctx_memory or first_container.get('memoryReservation')
        if new_memory:
            new_task_def['memory'] = str(new_memory)
    if self.ecs_task_execution_role:
        new_task_def['executionRoleArn'] = self.ecs_task_execution_role
    if self.ecs_task_role:
        new_task_def['taskRoleArn'] = self.ecs_task_role
    for k, v in self.deploy_ctx.items():
        print_utility.info(f'[deploy_ctx] {k} = {repr(v)}')
    for k, v in new_task_def.items():
        print_utility.info(f'[new_task_def] {k} = {repr(v)}')
    updated_task_definition = self.client.register_task_definition(**new_task_def)['taskDefinition']
    new_task_def_arn = updated_task_definition['taskDefinitionArn']
    if self.run_task:
        self.exec_run_task(new_task_def_arn)
    else:
        self.update_service(new_task_def_arn)

def clean(self, cloudformation):
    if cloudformation.stack_id is not None or cloudformation.does_stack_exist():
        print_utility.info("Starting stack cleanup - {}".format(cloudformation.stack_id))
        cloudformation.client.delete_stack(StackName=cloudformation.stack_name)
        waiter = cloudformation.client.get_waiter('stack_delete_complete')
        waiter.wait(StackName=cloudformation.stack_id)
        print_utility.info("Finishing stack cleanup - {}".format(cloudformation.stack_id))
    elif cloudformation.existing_change_set_id is not None:
        cloudformation.delete_change_set()

def _load_monitor_definition(artifact_directory):
    artifact_def_path = os.path.join(artifact_directory, _ARTIFACT_FILE)
    if os.path.exists(artifact_def_path):
        print_utility.info(
            "Defining artifact definition with monitor.json - {}".format(artifact_def_path))
        with open(artifact_def_path, 'r') as art_def:
            return json.load(art_def)
    else:
        print_utility.warn(
            "Monitor definition (monitor.json) did not exist in artifact directory."
            " Continuing infrastructure update without monitor deploy.")
        return None

def render_template(self, file, destination):
    with open(file, 'r') as source:
        dest_path = os.path.join(destination, os.path.basename(file).replace('.tmpl', ''))
        with open(dest_path, 'w+') as dest_file:
            temp_file_path = os.path.abspath(dest_file.name)
            print_utility.info("Rendering template to path: {}".format(temp_file_path))
            self.temp_files.append(temp_file_path)
            for line in source:
                dest_file.write(self.expandvars(line))
    return temp_file_path

def save_to_file(self, destination_dir=None):
    if destination_dir:
        path = os.path.join(destination_dir, _ARTIFACT_FILE)
    else:
        path = _ARTIFACT_FILE
    print_utility.info("Persisting artifact manifest - {}".format(path))
    with open(path, 'w') as file:
        json.dump(
            {
                _ARTIFACT_TYPE: self.artifact_type,
                _ARTIFACT_LOCATION: self.artifact_location,
                _ARTIFACT_IDENTIFIER: self.artifact_id
            }, file)
    return path

def should_execute_change_set(self):
    self._validate_changeset_operation_ready('should_execute_change_set')
    self.describe_change_set()
    if self._is_noop_changeset():
        return False
    changes_ = self.change_set_description['Changes']
    if len(changes_) == 2:
        if pydash.get(changes_[0], 'ResourceChange.ResourceType') == "AWS::ECS::Service" and \
                pydash.get(changes_[1], 'ResourceChange.ResourceType') == "AWS::ECS::TaskDefinition":
            if self.deploy_ctx.should_skip_ecs_trivial_update():
                print_utility.info(
                    "WARN: Skipping changeset update - no computed changes except to the "
                    "service & task definition. Rerun with SKIP_ECS=False to force the update.")
                return False
    return True

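# Illustrative shape of the "trivial" ECS changeset the guard above detects, as
# returned by describe_change_set (values invented for the example):
trivial_changes = [
    {'ResourceChange': {'ResourceType': 'AWS::ECS::Service', 'Action': 'Modify'}},
    {'ResourceChange': {'ResourceType': 'AWS::ECS::TaskDefinition', 'Action': 'Modify'}},
]
assert pydash.get(trivial_changes[0], 'ResourceChange.ResourceType') == 'AWS::ECS::Service'
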
def wait_for_export(self, fully_qualified_param_name):
    # we are seeing an issue where, immediately after stack create, the export
    # values are not immediately available
    value = waitfor(
        function_pointer=self.get_export_value,
        expected_result=None,
        interval_seconds=2,
        max_attempts=MAX_ATTEMPTS,
        negate=True,
        args={"fully_qualified_param_name": fully_qualified_param_name},
        exception=False
    )
    print_utility.info(f"[wait_for_export] {fully_qualified_param_name}={value}")
    return value

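# A minimal sketch of the assumed waitfor helper (not shown in this section):
# it polls function_pointer until the result matches expected_result - or, with
# negate=True, until it stops matching - giving up after max_attempts.
import time

def waitfor(function_pointer, expected_result, interval_seconds, max_attempts,
            negate=False, args=None, exception=True):
    result = None
    for _ in range(max_attempts):
        result = function_pointer(**(args or {}))
        matched = (result == expected_result)
        if matched != negate:  # negate flips the success condition
            return result
        time.sleep(interval_seconds)
    if exception:
        raise Exception("waitfor exceeded max_attempts")
    return result
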
def __init__(self, deploy_ctx, root_path, bucket_name):
    super(S3Buddy, self).__init__()
    self.deploy_ctx = deploy_ctx
    self.s3 = boto3.resource('s3', region_name=self.deploy_ctx.region)
    self.bucket = self.s3.Bucket(bucket_name)
    try:
        print_utility.info("S3Buddy using bucket_name={}, root_path={}".format(bucket_name, root_path))
        configuration = self._get_bucket_configuration()
        if configuration:
            self.bucket.create(CreateBucketConfiguration=configuration)
        else:
            self.bucket.create()
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        if 'BucketAlreadyOwnedByYou' not in str(err):
            print_utility.info("Error during bucket create - {}".format(str(err)))
    self.bucket_name = bucket_name
    self.key_root_path = root_path
    self.url_base = self._get_url_base()

def _load_templates(self, templates, service_modification=False):
    # type: (dict, bool) -> None
    validate(templates, self.schema)
    alias_templates = []
    for name, values in templates.items():
        type_ = values['type']
        if type_ == "github":
            template = GitHubTemplate(service_type=name, values=values)
        elif type_ == "s3":
            template = S3Template(service_type=name, values=values)
        elif type_ == "url":
            template = URLTemplate(service_type=name, values=values)
        elif type_ == "alias":
            template = AliasTemplate(service_type=name, values=values)
            alias_templates.append(template)
        else:
            print_utility.error(
                "Can not locate resource. Requested unknown template type - {}".format(type_),
                raise_exception=True)
            raise Exception("")  # unreachable; keeps control flow explicit
        if service_modification:
            compatibility = values.get('compatible', [])
            for service in compatibility:
                if service == "*":
                    self.default_service_modification_templates[name] = template
                else:
                    self.service_modification_templates[service][name] = template
            self.all_service_mods[name] = template
        else:
            if name in self.deploy_templates:
                print_utility.info(
                    f"Overwriting existing template for service {name}: {self.deploy_templates[name]}")
            self.deploy_templates[name] = template
    for alias in alias_templates:
        alias.resolve(self.all_service_mods if service_modification else self.deploy_templates)

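# Illustrative input for _load_templates (all field names other than "type" are
# invented for the example; the real schema is enforced by validate() above).
# An "alias" entry is resolved against the other templates after loading.
sample_templates = {
    "web-ui": {"type": "github", "owner": "example-org", "repo": "web-ui-template"},
    "worker": {"type": "s3", "location": "example-bucket/templates/worker.zip"},
    "api": {"type": "url", "url": "https://example.com/api-template.zip"},
    "frontend": {"type": "alias", "alias": "web-ui"},
}
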
def create_stack(self, template_file_url, parameter_file):
    action = 'create-stack'
    self._start_update_event(action)
    print_utility.info("Template URL: " + template_file_url)
    resp = self.client.create_stack(
        StackName=self.stack_name,
        TemplateURL=template_file_url,
        Parameters=_load_file_to_json(parameter_file),
        Capabilities=[
            'CAPABILITY_IAM',
            'CAPABILITY_NAMED_IAM'
        ],
        Tags=[
            {'Key': 'Environment', 'Value': self.deploy_ctx.environment},
            {'Key': 'Application', 'Value': self.deploy_ctx.application},
            {'Key': 'Role', 'Value': self.deploy_ctx.role}
        ]
    )
    self.stack_id = resp['StackId']
    waiter = self.client.get_waiter('stack_create_complete')
    try:
        waiter.wait(StackName=self.stack_id)
        success = True
    except WaiterError as we:
        self.stack_description = we.last_response
        success = False
    self._finish_update_event(action, success)
    print_utility.info("Created Stack - StackID: {}".format(resp['StackId']))
    if not success:
        raise Exception("CloudFormation stack failed to create")

def _validate_fargate_resource_allocation(cpu, memory, deploy_ctx):
    if cpu is None:
        discovered_cpu = deploy_ctx.get('TASK_CPU', None)
        if discovered_cpu not in _valid_fargate_resources:
            print_utility.info(
                'Skipping fargate resource validation - CPU not transformed - {}'.format(discovered_cpu))
            return
        cpu = discovered_cpu
    if memory is None:
        discovered_memory = deploy_ctx.get('TASK_SOFT_MEMORY', None)
        if discovered_memory not in _valid_fargate_memories:
            print_utility.info(
                'Skipping fargate resource validation - Memory not transformed - {}'.format(discovered_memory))
            return
        memory = discovered_memory
    memory_possibilities = _valid_fargate_resources[cpu]
    if memory not in memory_possibilities:
        print_utility.error(
            'Attempting to use fargate with invalid configuration. {} CPU {} Memory'.format(cpu, memory),
            raise_exception=True)

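# Assumed shape of the _valid_fargate_resources lookup used above (hypothetical
# literal, mirroring AWS's published Fargate CPU/memory pairings): each CPU size
# maps to the set of memory values (MiB) Fargate accepts with it.
_valid_fargate_resources_example = {
    256: {512, 1024, 2048},
    512: {1024, 2048, 3072, 4096},
    1024: {2048, 3072, 4096, 5120, 6144, 7168, 8192},
}
assert 2048 in _valid_fargate_resources_example[256]
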
def generate_execution_plan(self, template_manager, deploy_ctx):
    # type: (TemplateManager, DeployContext) -> list
    ret = []
    template = template_manager.get_known_service(self.service_type)
    ret.append(CloudFormationDeploy(stack_name=deploy_ctx.stack_name,
                                    template=template,
                                    deploy_ctx=deploy_ctx))
    if template.has_monitor_definition():
        ret.extend(template.get_monitor_artifact().generate_execution_plan(deploy_ctx))
    resource_deploy = template_manager.get_resource_service(self.artifact_directory)
    if resource_deploy:
        ret.append(CloudFormationDeploy(stack_name=deploy_ctx.resource_stack_name,
                                        template=resource_deploy,
                                        deploy_ctx=deploy_ctx))
    else:
        print_utility.info("Additional resource template not located (aws-resources.template).")
    for mod in self.service_modifications:
        template = template_manager.get_known_service_modification(self.service_type, mod)
        ret.append(CloudFormationDeploy(stack_name=deploy_ctx.generate_modification_stack_name(mod),
                                        template=template,
                                        deploy_ctx=deploy_ctx))
        if template.has_monitor_definition():
            ret.extend(template.get_monitor_artifact().generate_execution_plan(deploy_ctx))
    return ret

def do_command(deploy_ctx, service_template_directory=None, service_type=None):
    # type: (DeployContext, str, str) -> None
    if service_template_directory is None:
        print_utility.info(
            "Service template directory was not provided. "
            "Assuming service-type '{}' is built-in.".format(service_type))
        template = deploy_ctx.template_manager.get_known_template(template_name=service_type)
        deploy = CloudFormationDeploy(stack_name=deploy_ctx.stack_name,
                                      template=template,
                                      deploy_ctx=deploy_ctx)
    else:
        deploy = CloudFormationDeploy(stack_name=deploy_ctx.stack_name,
                                      template=NamedLocalTemplate(service_template_directory),
                                      deploy_ctx=deploy_ctx)
    errs = deploy.analyze()
    if errs > 0:
        print_utility.error("Template raised {} errors".format(errs), raise_exception=True)
    else:
        print_utility.banner_warn("Service Template Validation - {}".format(service_type),
                                  "SUCCESS - No errors")

def _get_valid_fargate_memory(_value):
    # normalize the requested memory (MiB) to a valid Fargate size; returns an
    # int consistently (the original mixed str and int returns)
    if _value <= 512:
        print_utility.info("Transforming memory value of {} to 512 (0.5GB) - min value".format(_value))
        return 512
    elif _value < 1024:
        print_utility.info("Transforming memory value of {} to 1024 (1GB) - next valid value".format(_value))
        return 1024
    else:
        # normalize to the closest interval of 1GB
        memory = int(math.ceil(_value / 1024.0)) * 1024
        if memory > 30720:
            print_utility.info("Transforming memory value of {} to 30720 (30GB) - max value".format(_value))
            memory = 30720
        else:
            print_utility.info("Transforming memory value of {} to {}".format(_value, memory))
        return memory

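# Behavior check for the memory transform above (sample inputs invented):
assert _get_valid_fargate_memory(200) == 512      # clamped to the minimum
assert _get_valid_fargate_memory(900) == 1024     # rounded up to the next 1GB step
assert _get_valid_fargate_memory(1500) == 2048    # ceil(1500/1024) * 1024
assert _get_valid_fargate_memory(99999) == 30720  # clamped to the maximum
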
def _internal_deploy(self, dry_run):
    to_deploy = self.expand_monitors()
    for monitor in to_deploy:
        print_utility.info("Deploying datadog monitor: {}".format(monitor['name']))
        if not dry_run:
            self.init_dd()
            existing_id = self.find_monitor_if_exists(monitor['name'])
            if not existing_id:
                response = dd.api.Monitor.create(**monitor)
                created_name = response.get('name', None)
                if created_name:
                    print_utility.info("Created monitor - {}".format(created_name))
                else:
                    print_utility.error("Error creating monitor - {}".format(response),
                                        raise_exception=True)
            else:
                response = dd.api.Monitor.update(id=existing_id, **monitor)
                print_utility.info("Updated monitor - {}".format(response['name']))

def _get_valid_fargate_cpu(_value):
    # round the requested CPU up to the next valid Fargate size (256-4096 CPU units)
    if _value <= 256:
        print_utility.info("Transforming cpu value of {} to '256' - min value".format(_value))
        return 256
    elif _value <= 512:
        print_utility.info("Transforming cpu value of {} to '512' - next valid value".format(_value))
        return 512
    elif _value <= 1024:
        print_utility.info("Transforming cpu value of {} to '1024' - next valid value".format(_value))
        return 1024
    elif _value <= 2048:
        print_utility.info("Transforming cpu value of {} to '2048' - next valid value".format(_value))
        return 2048
    elif _value <= 4096:
        print_utility.info("Transforming cpu value of {} to '4096' - next valid value".format(_value))
        return 4096
    else:
        print_utility.info("Transforming cpu value of {} to '4096' - max valid value".format(_value))
        return 4096

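# An equivalent table-driven sketch (an alternative, not the shipped code):
# walking the sorted valid sizes avoids the repeated elif branches and keeps
# the clamping behavior identical.
_FARGATE_CPU_SIZES = (256, 512, 1024, 2048, 4096)

def _get_valid_fargate_cpu_alt(value):
    for size in _FARGATE_CPU_SIZES:
        if value <= size:
            return size
    return _FARGATE_CPU_SIZES[-1]  # clamp to the max valid value

# sample inputs invented for the check:
assert _get_valid_fargate_cpu_alt(100) == 256
assert _get_valid_fargate_cpu_alt(513) == 1024
assert _get_valid_fargate_cpu_alt(9000) == 4096
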
def deploy_cloudformation(artifact_type, artifact_location, artifact_identifier):
    # type: (str, str, str) -> None
    path = do_command(artifact_type, artifact_location, artifact_identifier)
    print_utility.info("Artifact Manifest saved to - {}".format(path))