def _fetch_current_desired_count(self):
    """Populate ``self.desired_counts`` with the current ECS desired count
    of every service exported by this application's CloudFormation stack.

    Best-effort: on any failure the partial/empty mapping is kept and a
    message is logged instead of raising.
    """
    stack_name = get_service_stack_name(self.env, self.application_name)
    self.desired_counts = {}
    try:
        cfn_client = region_service.get_client_for('cloudformation', self.env)
        stack = cfn_client.describe_stacks(StackName=stack_name)['Stacks'][0]
        # Stack outputs ending in "EcsServiceName" identify the ECS services.
        service_outputs = [
            output for output in stack['Outputs']
            if output['OutputKey'].endswith('EcsServiceName')
        ]
        ecs_client = EcsClient(None, None, self.region)
        for output in service_outputs:
            deployment = DeployAction(
                ecs_client, self.cluster_name, output['OutputValue']
            )
            # Strip the suffix to recover the logical service name.
            logical_name = output['OutputKey'].replace('EcsServiceName', '')
            self.desired_counts[logical_name] = \
                deployment.service.desired_count
        log("Existing service counts: " + str(self.desired_counts))
    except Exception:
        log_bold("Could not find existing services.")
def run_update(self, update_ecs_agents):
    """Update the environment's CloudFormation stack via a changeset.

    :param update_ecs_agents: when truthy, first roll the ECS container
        agent on every instance in the cluster.

    Fix: the original ``except ClientError`` branch reported "No updates
    are to be performed" for *every* ClientError, masking real failures
    (access denied, validation errors, throttling). Only the benign
    "nothing changed" error is now swallowed; everything else propagates.
    """
    if update_ecs_agents:
        # Name (including the historical "udpate" typo) matches the
        # existing private method.
        self.__run_ecs_container_agent_udpate()
    try:
        log("Initiating environment stack update.")
        environment_stack_template_body = ClusterTemplateGenerator(
            self.environment,
            self.configuration,
            self.__get_desired_count()
        ).generate_cluster()
        log("Template generation complete.")
        change_set = create_change_set(
            self.client,
            environment_stack_template_body,
            self.cluster_name,
            self.__get_parameter_values(),
            self.environment
        )
        # Snapshot current events so progress printing shows only new ones.
        self.existing_events = get_stack_events(
            self.client,
            self.cluster_name
        )
        if change_set is None:
            return
        log_bold("Executing changeset. Checking progress...")
        self.client.execute_change_set(
            ChangeSetName=change_set['ChangeSetId']
        )
        self.__print_progress()
    except ClientError as e:
        if "No updates are to be performed" in str(e):
            log_err("No updates are to be performed")
        else:
            raise
def build_secrets_for_all_namespaces(env_name, service_name, ecs_service_name,
                                     sample_env_folder_path, secrets_name):
    """Merge secrets from every namespace found under
    ``sample_env_folder_path`` into one automated secret and return a dict
    mapping CLOUDLIFT_INJECTED_SECRETS to that secret's versioned ARN.

    Raises UnrecoverableException when the env sample files contain
    duplicate keys.
    """
    namespaces = get_namespaces_from_directory(sample_env_folder_path)
    duplicates = find_duplicate_keys(sample_env_folder_path, namespaces)
    if duplicates:
        raise UnrecoverableException(
            'duplicate keys found in env sample files {} '.format(duplicates))
    secrets_across_namespaces = {}
    for namespace in namespaces:
        secrets_across_namespaces.update(
            _get_secrets_for_namespace(
                env_name, namespace, sample_env_folder_path, secrets_name))
    automated_secret_name = get_automated_injected_secret_name(
        env_name, service_name, ecs_service_name)
    try:
        existing_secrets = secrets_manager.get_config(
            automated_secret_name, env_name)['secrets']
    except Exception as err:
        # Secret not present yet (or unreadable); treat as empty so the
        # comparison below forces creation.
        existing_secrets = {}
        log_warning(
            f'secret {automated_secret_name} does not exist. It will be created: {err}'
        )
    if existing_secrets != secrets_across_namespaces:
        log(f"Updating {automated_secret_name}")
        secrets_manager.set_secrets_manager_config(
            env_name, automated_secret_name, secrets_across_namespaces)
    arn = secrets_manager.get_config(automated_secret_name, env_name)['ARN']
    return dict(CLOUDLIFT_INJECTED_SECRETS=arn)
def _table_status(self):
    """Block until the DynamoDB table reports ACTIVE status.

    Fix: the original loop condition was ``while status == "ACTIVE"`` with
    ``status`` initialised to ``""``, so the body never executed and the
    method returned immediately without ever polling the table. The
    condition must be ``!=`` so we poll until the table becomes ACTIVE.
    """
    status = ""
    while status != "ACTIVE":
        log("Checking {} table status...".format(self.table_name))
        sleep(1)
        status = self.dynamodb_client.describe_table(
            TableName=self.table_name)["Table"]["TableStatus"]
        # Back off between polls to avoid hammering the DynamoDB API.
        sleep(10)
    log("{} table status is ACTIVE".format(self.table_name))
def _get_target_instance(self):
    """Return one EC2 instance id hosting this service, to start a session on.

    Raises UnrecoverableException when no instances can be found.
    """
    fetcher = ServiceInformationFetcher(self.name, self.environment)
    service_instance_ids = fetcher.get_instance_ids()
    if not service_instance_ids:
        raise UnrecoverableException("Couldn't find instances. Exiting.")
    # Flatten the per-service id lists and de-duplicate.
    unique_ids = set()
    for id_list in service_instance_ids.values():
        unique_ids.update(id_list)
    instance_ids = list(unique_ids)
    log("Found " + str(len(instance_ids)) + " instances to start session")
    return instance_ids[0]
def get_config(secret_name, env):
    """Return ``{'secrets': ..., 'ARN': ...}`` for *secret_name*, using a
    process-wide cache so each secret is fetched at most once."""
    if secret_name not in _secret_manager_cache:
        log(f"Fetching config from AWS secrets manager for secret {secret_name}")
        client = get_client_for('secretsmanager', env)
        response = client.get_secret_value(SecretId=secret_name)
        log(f"Fetched secret {secret_name}. Version: {response['VersionId']}")
        _secret_manager_cache[secret_name] = {
            'secrets': json.loads(response['SecretString']),
            # Version id is appended so the "ARN" changes with the value.
            'ARN': f"{response['ARN']}:::{response['VersionId']}",
        }
    return _secret_manager_cache[secret_name]
def _get_environment_stack(self):
    """Return the CloudFormation stack of this environment's cluster.

    Exits the process with status 1 (after logging an error) when the
    stack cannot be described (ClientError from CloudFormation).
    """
    try:
        log("Looking for " + self.environment + " cluster.")
        environment_stack = self.client.describe_stacks(
            StackName=get_cluster_name(self.environment))['Stacks'][0]
        log_bold(self.environment + " stack found. Using stack with ID: " +
                 environment_stack['StackId'])
    except ClientError:
        # describe_stacks raises ClientError for a non-existent stack.
        log_err(self.environment + " cluster not found. Create the environment \
cluster using `create_environment` command.")
        exit(1)
    return environment_stack
def fetch_current_desired_count(self):
    """Return ``{logical_service_name: desired_count}`` for this service's
    ECS services. Best-effort: returns whatever was collected on failure.

    Fixes: (1) guard on ``deployment.service`` — matching the sibling
    implementation — so a service missing from the cluster no longer
    raises AttributeError; (2) the bare ``except Exception: pass`` now
    logs the cause instead of hiding it, while still not raising.
    """
    desired_counts = {}
    try:
        deployment_ecs_client = EcsClient(
            None, None, get_region_for_environment(self.environment))
        for logical_service_name, service_config in self.service_info.items():
            deployment = DeployAction(deployment_ecs_client,
                                      self.cluster_name,
                                      service_config["ecs_service_name"])
            # Skip services that could not be resolved in the cluster.
            if deployment.service:
                desired_counts[logical_service_name] = \
                    deployment.service.desired_count
        log("Existing service counts: " + str(desired_counts))
    except Exception as e:
        # Deliberately best-effort, but surface the reason in the logs.
        log("Could not find existing services. {}".format(e))
    return desired_counts
def create_change_set(client, service_template_body, stack_name, key_name,
                      environment):
    """Create a CloudFormation changeset for *stack_name* and interactively
    confirm its execution.

    :param client: boto3 CloudFormation client.
    :param service_template_body: template body for the update.
    :param stack_name: stack to update.
    :param key_name: EC2 key-pair name; only passed as a parameter if truthy.
    :param environment: value for the ``Environment`` stack parameter.
    :returns: the changeset description dict, if the user confirms
        execution. In every other path (creation FAILED, or the user
        declines) the changeset is deleted and the process exits.
    """
    change_set_parameters = [{
        'ParameterKey': 'Environment',
        'ParameterValue': environment
    }]
    if key_name:
        change_set_parameters.append({
            'ParameterKey': 'KeyPair',
            'ParameterValue': key_name
        })
    create_change_set_res = client.create_change_set(
        StackName=stack_name,
        ChangeSetName="cg" + uuid.uuid4().hex,  # unique, CFN-legal name
        TemplateBody=service_template_body,
        Parameters=change_set_parameters,
        Capabilities=['CAPABILITY_NAMED_IAM'],
        ChangeSetType='UPDATE')
    log("Changeset creation initiated. Checking the progress...")
    change_set = client.describe_change_set(
        ChangeSetName=create_change_set_res['Id'])
    # Poll (1s interval) until the changeset leaves the CREATE_* states.
    while change_set['Status'] in ['CREATE_PENDING', 'CREATE_IN_PROGRESS']:
        sleep(1)
        # "\x1b[2K\r" clears the terminal line so status updates in place.
        status_string = '\x1b[2K\rChecking changeset status. Status: ' + \
            change_set['Status']
        sys.stdout.write(status_string)
        sys.stdout.flush()
        change_set = client.describe_change_set(
            ChangeSetName=create_change_set_res['Id'])
    status_string = '\x1b[2K\rChecking changeset status.. Status: ' + \
        change_set['Status']+'\n'
    sys.stdout.write(status_string)
    if change_set['Status'] == 'FAILED':
        log_err("Changeset creation failed!")
        log_bold(
            change_set.get('StatusReason', "Check AWS console for reason."))
        client.delete_change_set(ChangeSetName=create_change_set_res['Id'])
        exit(0)
    else:
        log_bold("Changeset created.. Following are the changes")
        _print_changes(change_set)
        if click.confirm('Do you want to execute the changeset?'):
            return change_set
        # User declined: clean up the pending changeset before exiting.
        log_bold("Deleting changeset...")
        client.delete_change_set(ChangeSetName=create_change_set_res['Id'])
        log_bold("Done. Bye!")
        exit(0)
def fetch_current_desired_count(self):
    """Return a mapping of logical service name -> current ECS desired count.

    Raises UnrecoverableException when the existing services cannot be
    looked up.
    """
    desired_counts = {}
    try:
        ecs_client = EcsClient(
            None, None, get_region_for_environment(self.environment))
        for name, config in self.service_info.items():
            action = DeployAction(
                ecs_client, self.cluster_name, config["ecs_service_name"])
            # Only record counts for services that actually resolve.
            if action.service:
                desired_counts[name] = action.service.desired_count
        log("Existing service counts: " + str(desired_counts))
    except Exception as e:
        raise UnrecoverableException(
            "Could not find existing services. {}".format(e))
    return desired_counts
def run(self):
    """Create the environment's CloudFormation stack, refusing to create a
    duplicate when a stack with the same name already exists."""
    try:
        log("Check if stack already exists for " + self.cluster_name)
        environment_stack = self.client.describe_stacks(
            StackName=self.cluster_name)['Stacks'][0]
        log(self.cluster_name + " stack exists. ID: " +
            environment_stack['StackId'])
        log_err("Cannot create environment with duplicate name: " +
                self.cluster_name)
    except Exception:
        log(self.cluster_name + " stack does not exist. Creating new stack.")
        # When creating a cluster, desired_instance count is same
        # as min_instance count
        template_body = ClusterTemplateGenerator(
            self.environment, self.configuration).generate_cluster()
        self.existing_events = get_stack_events(self.client,
                                                self.cluster_name)
        stack_parameters = [
            {
                'ParameterKey': 'KeyPair',
                'ParameterValue': self.key_name,
            },
            {
                'ParameterKey': 'Environment',
                'ParameterValue': self.environment,
            },
        ]
        environment_stack = self.client.create_stack(
            StackName=self.cluster_name,
            TemplateBody=template_body,
            Parameters=stack_parameters,
            OnFailure='DO_NOTHING',
            Capabilities=['CAPABILITY_NAMED_IAM'],
        )
        log_bold("Submitted to cloudformation. Checking progress...")
        self.__print_progress()
        log_bold(self.cluster_name + " stack created. ID: " +
                 environment_stack['StackId'])
def build_secrets_for_all_namespaces(env_name, service_name, ecs_service_name,
                                     sample_env_folder_path, secrets_name):
    """Build/refresh the automated injected secret from all namespaces and
    return ``{'CLOUDLIFT_INJECTED_SECRETS': <versioned ARN>}``."""
    secrets_across_namespaces = verify_and_get_secrets_for_all_namespaces(
        env_name, sample_env_folder_path, secrets_name)
    automated_secret_name = get_automated_injected_secret_name(
        env_name, service_name, ecs_service_name)
    try:
        existing_secrets = secrets_manager.get_config(
            automated_secret_name, env_name)['secrets']
    except Exception as err:
        # First deployment for this service: secret not created yet.
        existing_secrets = {}
        log_warning(
            f'secret {automated_secret_name} does not exist. It will be created: {err}'
        )
    # Only write when the content actually changed, to avoid new versions.
    if existing_secrets != secrets_across_namespaces:
        log(f"Updating {automated_secret_name}")
        secrets_manager.set_secrets_manager_config(
            env_name, automated_secret_name, secrets_across_namespaces)
    arn = secrets_manager.get_config(automated_secret_name, env_name)['ARN']
    return dict(CLOUDLIFT_INJECTED_SECRETS=arn)
def log_ips(self):
    """Log the private IP addresses of the EC2 hosts backing each ECS
    service in ``self.ecs_service_names``."""
    for service in self.ecs_service_names:
        task_arns = self.ecs_client.list_tasks(
            cluster=self.cluster_name, serviceName=service)['taskArns']
        tasks = self.ecs_client.describe_tasks(
            cluster=self.cluster_name, tasks=task_arns)['tasks']
        # Tasks -> container instances -> EC2 instance ids.
        instance_arns = [task['containerInstanceArn'] for task in tasks]
        container_instances = self.ecs_client.describe_container_instances(
            cluster=self.cluster_name,
            containerInstances=instance_arns)['containerInstances']
        ec2_instance_ids = [
            instance['ec2InstanceId'] for instance in container_instances
        ]
        reservations = self.ec2_client.describe_instances(
            InstanceIds=ec2_instance_ids)['Reservations']
        log_bold(service)
        for reservation in reservations:
            for ec2_instance in reservation['Instances']:
                log_intent(ec2_instance['PrivateIpAddress'])
        log("")
def set_secrets_manager_config(env, secret_name, config):
    """Write *config* (JSON-serialised) to Secrets Manager, creating the
    secret if it does not exist, then wait until reads reflect the written
    value. Raises UnrecoverableException if consistency is never observed
    within the timeout; clears the local secrets cache on success."""
    client = get_client_for('secretsmanager', env)
    secret_string = json.dumps(config)
    try:
        client.put_secret_value(SecretId=secret_name,
                                SecretString=secret_string)
    except client.exceptions.ResourceNotFoundException:
        # Secret missing entirely: create it instead of versioning it.
        client.create_secret(Name=secret_name, SecretString=secret_string)

    def _matches_written_value():
        # Read back and compare; any read failure counts as inconsistent.
        try:
            raw = client.get_secret_value(SecretId=secret_name)
            return json.loads(raw['SecretString']) == config
        except Exception as e:
            log_warning("secrets_manager consistency failure: {}".format(e))
            return False

    consistent = wait_until(
        _matches_written_value,
        timeout=SECRETS_MANAGER_CONSISTENCY_CHECK_TIMEOUT_SECONDS,
        period=1)
    if not consistent:
        raise UnrecoverableException("Created secrets are not consistent")
    log(f'{secret_name} successfully updated')
    clear_cache()
def get_version(self, short):
    """Log the commit SHA currently deployed, read from the task
    definition's tag.

    :param short: when falsy (and a real SHA is deployed), additionally
        run ``git fetch --all`` and print commit and branch details for
        the deployed SHA via subprocess calls.
    """
    commit_sha = self._fetch_current_task_definition_tag()
    if commit_sha is None:
        log_err("Current task definition tag could not be found. \
Is it deployed?")
    elif commit_sha == "dirty":
        # "dirty" tag marks a deploy whose commit info was unavailable.
        log("Dirty version is deployed. Commit information could not be \
fetched.")
    else:
        log("Currently deployed version: " + commit_sha)
        if not short:
            log("Running `git fetch --all`")
            call(["git", "fetch", "--all"])
            log_bold("Commit Info:")
            call([
                "git", "--no-pager", "show", "-s", "--format=medium",
                commit_sha
            ])
            log_bold("Branch Info:")
            call(["git", "branch", "-r", "--contains", commit_sha])
            log("")
def __run_ecs_container_agent_udpate(self):
    """Trigger an ECS container-agent update on every container instance
    in the cluster, then poll until all instances report UPDATED.

    NOTE(review): the "udpate" typo in the name is preserved because the
    caller (run_update) uses this exact name.
    """
    log("Initiating agent update")
    ecs_client = get_client_for('ecs', self.environment)
    response = ecs_client.list_container_instances(
        cluster=self.cluster_name
    )
    container_instance_arns = response['containerInstanceArns']
    for container_instance_arn in response['containerInstanceArns']:
        try:
            response = ecs_client.update_container_agent(
                cluster=self.cluster_name,
                containerInstance=container_instance_arn
            )
        except ClientError as exception:
            # Both messages mean the instance needs no action right now;
            # anything else is a genuine failure and is re-raised.
            if "There is no update available for your container agent." in str(exception):
                log("There is no update available for your container agent " +
                    container_instance_arn)
            elif "Agent update is already in progress." in str(exception):
                log("Agent update is already in progress " +
                    container_instance_arn)
            else:
                raise exception
    # Poll once per second until every instance reports UPDATED.
    while True:
        sleep(1)
        response = ecs_client.describe_container_instances(
            cluster=self.cluster_name,
            containerInstances=container_instance_arns
        )
        # Instances with no agentUpdateStatus field are treated as UPDATED.
        update_statuses = map(
            lambda x: {
                "arn": x['containerInstanceArn'],
                "status": x.get('agentUpdateStatus', 'UPDATED')
            },
            response['containerInstances']
        )
        finished = True
        status_string = '\r'
        for status in update_statuses:
            status_string += status['arn'] + ":\033[92m" + \
                status['status'] + " \033[0m"
            if status['status'] != 'UPDATED':
                finished = False
        sys.stdout.write(status_string)
        sys.stdout.flush()
        if finished:
            print("")
            break