def test_raises_exception_if_path_and_handler_configured(self):
    """A Stack may not be given both template_path and template_handler_config."""
    with pytest.raises(InvalidConfigFileError):
        Stack(
            name="stack_name",
            project_code="project_code",
            template_path="template_path",
            template_handler_config={"type": "file"},
            region="region",
        )
def _construct_stack(self, rel_path, stack_group_config=None):
    """
    Constructs an individual Stack object from a config path and a base
    config.

    :param rel_path: A relative config file path.
    :type rel_path: str
    :param stack_group_config: The Stack group config to use as defaults.
    :type stack_group_config: dict
    :returns: Stack object
    :rtype: sceptre.stack.Stack
    :raises InvalidConfigFileError: If a mandatory attribute is missing
        from the stack's configuration.
    """
    # Expose the group config to the templating engine while this stack's
    # config is read. The try/finally guarantees the templating var is
    # removed again even when reading/validation raises, so state never
    # leaks into the next stack's construction.
    self.templating_vars["stack_group_config"] = stack_group_config
    try:
        parsed_stack_group_config = self._parsed_stack_group_config(
            stack_group_config)
        config = self.read(rel_path, stack_group_config)
        stack_name = path.splitext(rel_path)[0]

        # Check for missing mandatory attributes.
        for required_key in REQUIRED_KEYS:
            if required_key not in config:
                raise InvalidConfigFileError(
                    "Required attribute '{0}' not found in configuration of '{1}'."
                    .format(required_key, stack_name))

        abs_template_path = path.join(
            self.context.project_path, self.context.templates_path,
            sceptreise_path(config["template_path"]))

        s3_details = self._collect_s3_details(stack_name, config)
        stack = Stack(
            name=stack_name,
            project_code=config["project_code"],
            template_path=abs_template_path,
            region=config["region"],
            template_bucket_name=config.get("template_bucket_name"),
            template_key_prefix=config.get("template_key_prefix"),
            required_version=config.get("required_version"),
            iam_role=config.get("iam_role"),
            profile=config.get("profile"),
            parameters=config.get("parameters", {}),
            sceptre_user_data=config.get("sceptre_user_data", {}),
            hooks=config.get("hooks", {}),
            s3_details=s3_details,
            dependencies=config.get("dependencies", []),
            role_arn=config.get("role_arn"),
            protected=config.get("protect", False),
            tags=config.get("stack_tags", {}),
            external_name=config.get("stack_name"),
            notifications=config.get("notifications"),
            on_failure=config.get("on_failure"),
            stack_timeout=config.get("stack_timeout", 0),
            stack_group_config=parsed_stack_group_config)
    finally:
        del self.templating_vars["stack_group_config"]
    return stack
def setup_method(self, test_method):
    """Build the Stack fixture and a mocked SceptreContext/ConfigReader pair."""
    # NOTE(review): the SceptrePlan patcher is created but not start()ed
    # here — confirm it is started elsewhere or intentionally unused.
    self.patcher_SceptrePlan = patch("sceptre.plan.plan.SceptrePlan")
    stack_kwargs = dict(
        name='dev/app/stack',
        project_code=sentinel.project_code,
        template_path=sentinel.template_path,
        region=sentinel.region,
        profile=sentinel.profile,
        parameters={"key1": "val1"},
        sceptre_user_data=sentinel.sceptre_user_data,
        hooks={},
        s3_details=None,
        dependencies=sentinel.dependencies,
        role_arn=sentinel.role_arn,
        protected=False,
        tags={"tag1": "val1"},
        external_name=sentinel.external_name,
        notifications=[sentinel.notification],
        on_failure=sentinel.on_failure,
        stack_timeout=sentinel.stack_timeout,
    )
    self.stack = Stack(**stack_kwargs)

    # Mocked context wired into a mocked ConfigReader.
    self.mock_context = MagicMock(spec=SceptreContext)
    self.mock_config_reader = MagicMock(spec=ConfigReader)
    self.mock_context.project_path = sentinel.project_path
    self.mock_context.command_path = sentinel.command_path
    self.mock_context.config_file = sentinel.config_file
    self.mock_context.full_config_path.return_value = sentinel.full_config_path
    self.mock_context.user_variables = {}
    self.mock_context.options = {}
    self.mock_context.no_colour = True
    self.mock_config_reader.context = self.mock_context
def test_initiate_stack_with_template_path(self):
    """A Stack built with template_path keeps supplied values and gets defaults."""
    stack = Stack(
        name='dev/stack/app',
        project_code=sentinel.project_code,
        template_path=sentinel.template_path,
        template_bucket_name=sentinel.template_bucket_name,
        template_key_prefix=sentinel.template_key_prefix,
        required_version=sentinel.required_version,
        region=sentinel.region,
        external_name=sentinel.external_name,
    )

    # Explicitly supplied values are stored verbatim.
    assert stack.name == 'dev/stack/app'
    assert stack.project_code == sentinel.project_code
    assert stack.template_bucket_name == sentinel.template_bucket_name
    assert stack.template_key_prefix == sentinel.template_key_prefix
    assert stack.required_version == sentinel.required_version
    assert stack.external_name == sentinel.external_name
    assert stack.template_path == sentinel.template_path

    # Everything else falls back to its default.
    assert stack.hooks == {}
    assert stack.parameters == {}
    assert stack.sceptre_user_data == {}
    assert stack.template_handler_config is None
    assert stack.s3_details is None
    assert stack._template is None
    assert stack.protected is False
    assert stack.iam_role is None
    assert stack.role_arn is None
    assert stack.dependencies == []
    assert stack.tags == {}
    assert stack.notifications == []
    assert stack.on_failure is None
    assert stack.stack_group_config == {}
def run(self):
    """
    Empty the environment's artifacts S3 bucket by deleting all object
    versions, so the bucket itself can be deleted during teardown.

    self.argument is available from the base class and contains the
    argument defined in the sceptre config file.

    The following attributes may be available from the base class:
    self.stack_config (A dict of data from <stack_name>.yaml)
    self.environment_config (A dict of data from config.yaml)
    self.connection_manager (A connection_manager)
    """
    environment = self.environment_config.environment_path + "/" + self.stack_config.name
    stack = Stack(name=environment,
                  environment_config=self.environment_config,
                  connection_manager=self.connection_manager)
    try:
        outputs = stack.describe_outputs()
    except Exception:
        # Best-effort: if the stack cannot be described (e.g. it no longer
        # exists) there is nothing to empty. Narrowed from a bare `except:`
        # so SystemExit/KeyboardInterrupt are no longer swallowed.
        return
    if not outputs:
        return
    bucket_names = [output['OutputValue']
                    for output in outputs
                    if output['OutputKey'] == 'EnvironmentArtifactsS3Bucket']
    if not bucket_names:
        # Guard: the expected output key may be absent from the stack.
        return
    print(bucket_names[0])
    s3 = boto3.resource('s3')
    for bucket in s3.buckets.all():
        print(bucket.name)
        if bucket.name == bucket_names[0]:
            # Removes every object version, emptying a versioned bucket.
            bucket.object_versions.delete()
def setup_method(self, test_method):
    """Create a StackActions fixture backed by a patched ConnectionManager."""
    self.patcher_connection_manager = patch(
        "sceptre.plan.actions.ConnectionManager")
    self.mock_ConnectionManager = self.patcher_connection_manager.start()
    stack_kwargs = dict(
        name='prod/app/stack',
        project_code=sentinel.project_code,
        template_path=sentinel.template_path,
        region=sentinel.region,
        profile=sentinel.profile,
        parameters={"key1": "val1"},
        sceptre_user_data=sentinel.sceptre_user_data,
        hooks={},
        s3_details=None,
        dependencies=sentinel.dependencies,
        role_arn=sentinel.role_arn,
        protected=False,
        tags={"tag1": "val1"},
        external_name=sentinel.external_name,
        notifications=[sentinel.notification],
        on_failure=sentinel.on_failure,
        stack_timeout=sentinel.stack_timeout,
    )
    self.stack = Stack(**stack_kwargs)
    self.actions = StackActions(self.stack)
    self.template = Template(
        "fixtures/templates",
        self.stack.sceptre_user_data,
        self.actions.connection_manager,
        self.stack.s3_details,
    )
    self.stack._template = self.template
def stack_factory(**kwargs):
    """Return a Stack built from standard test defaults, overridden by kwargs."""
    defaults = {
        'name': 'dev/app/stack',
        'project_code': sentinel.project_code,
        'template_bucket_name': sentinel.template_bucket_name,
        'template_key_prefix': sentinel.template_key_prefix,
        'required_version': sentinel.required_version,
        'template_path': sentinel.template_path,
        'region': sentinel.region,
        'profile': sentinel.profile,
        'parameters': {"key1": "val1"},
        'sceptre_user_data': sentinel.sceptre_user_data,
        'hooks': {},
        's3_details': None,
        'dependencies': sentinel.dependencies,
        'role_arn': sentinel.role_arn,
        'protected': False,
        'tags': {"tag1": "val1"},
        'external_name': sentinel.external_name,
        'notifications': [sentinel.notification],
        'on_failure': sentinel.on_failure,
        'stack_timeout': sentinel.stack_timeout,
        'stack_group_config': {},
    }
    # Caller-supplied kwargs take precedence over the defaults.
    return Stack(**{**defaults, **kwargs})
def setup_method(self, test_method):
    """Create the fully-populated Stack fixture with a mocked Template."""
    stack_kwargs = dict(
        name='dev/app/stack',
        project_code=sentinel.project_code,
        template_bucket_name=sentinel.template_bucket_name,
        template_key_prefix=sentinel.template_key_prefix,
        required_version=sentinel.required_version,
        template_path=sentinel.template_path,
        region=sentinel.region,
        profile=sentinel.profile,
        parameters={"key1": "val1"},
        sceptre_user_data=sentinel.sceptre_user_data,
        hooks={},
        s3_details=None,
        dependencies=sentinel.dependencies,
        role_arn=sentinel.role_arn,
        protected=False,
        tags={"tag1": "val1"},
        external_name=sentinel.external_name,
        notifications=[sentinel.notification],
        on_failure=sentinel.on_failure,
        iam_role=sentinel.iam_role,
        iam_role_session_duration=sentinel.iam_role_session_duration,
        stack_timeout=sentinel.stack_timeout,
        stack_group_config={},
    )
    self.stack = Stack(**stack_kwargs)
    self.stack._template = MagicMock(spec=Template)
def test_external_name_with_custom_stack_name(self):
    """An explicitly supplied external_name is stored verbatim."""
    stack = Stack(
        name="stack_name",
        project_code="project_code",
        template_path="template_path",
        region="region",
        external_name="external_name",
    )
    assert stack.external_name == "external_name"
def _construct_stack(self, rel_path, stack_group_config=None):
    """
    Constructs an individual Stack object from a config path and a base
    config.

    :param rel_path: A relative config file path.
    :type rel_path: str
    :param stack_group_config: The Stack group config to use as defaults.
    :type stack_group_config: dict
    :returns: Stack object
    :rtype: sceptre.stack.Stack
    """
    # Expose the group config to the templating engine while this stack's
    # config is read. The try/finally guarantees the templating var is
    # removed again even when reading raises, so state never leaks into
    # the next stack's construction.
    self.templating_vars["stack_group_config"] = stack_group_config
    try:
        config = self.read(rel_path, stack_group_config)
        stack_name = path.splitext(rel_path)[0]
        abs_template_path = path.join(
            self.context.project_path, self.context.templates_path,
            config["template_path"])

        s3_details = self._collect_s3_details(stack_name, config)
        stack = Stack(
            name=stack_name,
            project_code=config["project_code"],
            template_path=abs_template_path,
            region=config["region"],
            template_bucket_name=config.get("template_bucket_name"),
            template_key_prefix=config.get("template_key_prefix"),
            required_version=config.get("required_version"),
            profile=config.get("profile"),
            parameters=config.get("parameters", {}),
            sceptre_user_data=config.get("sceptre_user_data", {}),
            hooks=config.get("hooks", {}),
            s3_details=s3_details,
            dependencies=config.get("dependencies", []),
            role_arn=config.get("role_arn"),
            protected=config.get("protect", False),
            tags=config.get("stack_tags", {}),
            external_name=config.get("stack_name"),
            notifications=config.get("notifications"),
            on_failure=config.get("on_failure"),
            stack_timeout=config.get("stack_timeout", 0))
    finally:
        del self.templating_vars["stack_group_config"]
    return stack
def setup_method(self, test_method, mock_config):
    """Build a Stack fixture from a mocked environment config and connection."""
    env_config = MagicMock(spec=Config)
    env_config.environment_path = sentinel.path
    # The environment config inherits from dict, so its values are reached
    # both by attribute and by square-bracket access. To mimic the bracket
    # notation, a side effect supplies the values returned by successive
    # __getitem__ calls.
    env_config.__getitem__.side_effect = [
        sentinel.project_code,
        sentinel.region,
    ]
    self.mock_environment_config = env_config
    self.mock_connection_manager = Mock()
    self.stack = Stack(name="stack_name",
                       environment_config=env_config,
                       connection_manager=self.mock_connection_manager)
    # Set default value for stack properties.
    self.stack._external_name = sentinel.external_name
def run(self):
    """
    Copy bootstrap artifacts from the core repository bucket into the
    environment's artifacts bucket.

    self.argument is available from the base class and contains the
    argument defined in the sceptre config file.

    The following attributes may be available from the base class:
    self.stack_config (A dict of data from <stack_name>.yaml)
    self.environment_config (A dict of data from config.yaml)
    self.connection_manager (A connection_manager)
    """
    environment = self.environment_config.environment_path + "/" + self.stack_config.name
    stack = Stack(name=environment,
                  environment_config=self.environment_config,
                  connection_manager=self.connection_manager)
    outputs = stack.describe_outputs()
    if not outputs:
        return

    # Source bucket comes from the stack parameters; destination bucket is
    # looked up from the stack's outputs.
    src_bucket_name = self.stack_config['parameters']['CoreBootStrapRepositoryS3BucketName']
    print(src_bucket_name)
    dest_bucket_names = [output['OutputValue']
                         for output in outputs
                         if output['OutputKey'] == 'EnvironmentArtifactsS3Bucket']
    print(dest_bucket_names[0])

    prefix = "bootstrap/"
    s3 = boto3.resource('s3')
    source_bucket = s3.Bucket(src_bucket_name)
    destination_bucket = s3.Bucket(dest_bucket_names[0])
    print(source_bucket)
    print(destination_bucket)
    # Server-side copy of every object under the bootstrap prefix.
    for s3_object in source_bucket.objects.filter(Prefix=prefix):
        destination_key = s3_object.key
        print(destination_key)
        s3.Object(destination_bucket.name, destination_key).copy_from(
            CopySource={'Bucket': s3_object.bucket_name,
                        'Key': s3_object.key})
def setup_method(self, test_method):
    """Patch sceptre.stack.ConnectionManager and build the Stack fixture."""
    self.patcher_connection_manager = patch(
        "sceptre.stack.ConnectionManager")
    self.mock_ConnectionManager = self.patcher_connection_manager.start()
    stack_kwargs = dict(
        name=sentinel.stack_name,
        project_code=sentinel.project_code,
        template_path=sentinel.template_path,
        region=sentinel.region,
        iam_role=sentinel.iam_role,
        parameters={"key1": "val1"},
        sceptre_user_data=sentinel.sceptre_user_data,
        hooks={},
        s3_details=None,
        dependencies=sentinel.dependencies,
        role_arn=sentinel.role_arn,
        protected=False,
        tags={"tag1": "val1"},
        external_name=sentinel.external_name,
        notifications=[sentinel.notification],
        on_failure=sentinel.on_failure,
    )
    self.stack = Stack(**stack_kwargs)
    self.stack._template = MagicMock(spec=Template)
def test_initialiser_calls_correct_methods(self, mock_config):
    """Constructing a Stack reads config values in the expected order."""
    mock_config.get.return_value = sentinel.hooks
    self.stack._config = {
        "parameters": sentinel.parameters,
        "hooks": sentinel.hooks,
    }
    env_config = MagicMock(spec=Config)
    env_config.environment_path = sentinel.path
    # The environment config inherits from dict, so its values are reached
    # both by attribute and by square-bracket access. To mimic the bracket
    # notation, a side effect supplies the values returned by successive
    # __getitem__ calls.
    env_config.__getitem__.side_effect = [
        sentinel.project_code,
        sentinel.template_bucket_name,
        sentinel.region,
    ]
    self.mock_environment_config = env_config
    Stack(name=sentinel.name,
          environment_config=env_config,
          connection_manager=sentinel.connection_manager)
def setup_method(self, test_method):
    """Build the Stack fixture and a StackGroup for each test."""
    # NOTE(review): the SceptrePlanner patcher is created but not start()ed
    # here — confirm it is started elsewhere or intentionally unused.
    self.patcher_SceptrePlanner = patch("sceptre.plan.plan.SceptrePlanner")
    stack_kwargs = dict(
        name=sentinel.stack_name,
        project_code=sentinel.project_code,
        template_path=sentinel.template_path,
        region=sentinel.region,
        profile=sentinel.profile,
        parameters={"key1": "val1"},
        sceptre_user_data=sentinel.sceptre_user_data,
        hooks={},
        s3_details=None,
        dependencies=sentinel.dependencies,
        role_arn=sentinel.role_arn,
        protected=False,
        tags={"tag1": "val1"},
        external_name=sentinel.external_name,
        notifications=[sentinel.notification],
        on_failure=sentinel.on_failure,
        stack_timeout=sentinel.stack_timeout,
    )
    self.stack = Stack(**stack_kwargs)
    self.stack_group = StackGroup(path="path", options=sentinel.options)
def test_initiate_stack(self):
    """A minimally-configured Stack creates a ConnectionManager and defaults."""
    stack = Stack(
        name=sentinel.stack_name,
        project_code=sentinel.project_code,
        template_path=sentinel.template_path,
        region=sentinel.region,
        external_name=sentinel.external_name,
    )
    self.mock_ConnectionManager.assert_called_with(sentinel.region, None)

    # Supplied values are stored verbatim.
    assert stack.name == sentinel.stack_name
    assert stack.project_code == sentinel.project_code
    assert stack.external_name == sentinel.external_name
    assert stack.template_path == sentinel.template_path

    # Everything else falls back to its default.
    assert stack.hooks == {}
    assert stack.parameters == {}
    assert stack.sceptre_user_data == {}
    assert stack.s3_details is None
    assert stack._template is None
    assert stack.protected is False
    assert stack.role_arn is None
    assert stack.dependencies == []
    assert stack.tags == {}
    assert stack.notifications == []
    assert stack.on_failure is None
def run(self):
    """
    Connect newly created EKS worker nodes to the cluster and wait until
    they are Ready.

    Renders the aws-auth config map from a Jinja2 template using the
    stack's outputs, applies it with kubectl, then polls `kubectl get
    nodes` until the number of Ready nodes matches the worker ASG's
    desired capacity.

    self.argument is available from the base class and contains the
    argument defined in the sceptre config file.

    The following attributes may be available from the base class:
    self.stack_config (A dict of data from <stack_name>.yaml)
    self.environment_config (A dict of data from config.yaml)
    self.connection_manager (A connection_manager)
    """
    environment = self.environment_config.environment_path + "/" + self.stack_config.name
    stack = Stack(name=environment,
                  environment_config=self.environment_config,
                  connection_manager=self.connection_manager)
    outputs = stack.describe_outputs()
    print(outputs)
    if outputs:
        eks_cluster_name = [output['OutputValue'] for output in outputs
                            if output['OutputKey'] == 'EKSClusterName']
        print(eks_cluster_name[0])
        # Point the local kubeconfig at the new cluster.
        connect_to_cluster_cmd = "aws eks update-kubeconfig --name {}".format(eks_cluster_name[0])
        os.system(connect_to_cluster_cmd)
        worker_node_instance_role_arn = [output['OutputValue'] for output in outputs
                                         if output['OutputKey'] == 'WorkerNodeInstanceRoleArn']
        print(worker_node_instance_role_arn[0])
        cluster_admin_role_arn = [output['OutputValue'] for output in outputs
                                  if output['OutputKey'] == 'EKSClusterRoleArn']
        print(cluster_admin_role_arn[0])
        cluster_admin_role = [output['OutputValue'] for output in outputs
                              if output['OutputKey'] == 'EKSClusterRole']
        print(cluster_admin_role[0])
        basepath = path.dirname(__file__)
        print(basepath)
        config_map_yaml_path = path.abspath(
            path.join(basepath, "ss_eks_config_map.yaml"))
        print(config_map_yaml_path)
        # (Removed a redundant recomputation of basepath here.)
        config_map_template_path = path.abspath(
            path.join(basepath, "ss_eks_config_map.j2.template"))
        print(config_map_template_path)
        j2_env = Environment(loader=FileSystemLoader(basepath),
                             trim_blocks=True)
        template = j2_env.get_template("ss_eks_config_map.j2.template")
        # EC2PrivateDNSName is re-emitted as a literal placeholder so EKS
        # itself can substitute it later.
        render_values = {"worker_role_arn": worker_node_instance_role_arn[0],
                         "EC2PrivateDNSName": "{{EC2PrivateDNSName}}",
                         "cluster_admin_role_arn": cluster_admin_role_arn[0],
                         "cluster_admin_role": cluster_admin_role[0]}
        rendered = template.render(render_values)
        # NOTE(review): written to a cwd-relative path but applied from the
        # module-relative config_map_yaml_path — confirm these coincide at
        # runtime (i.e. the hook runs from the project root).
        with open('hooks/ss_eks_config_map.yaml', 'w') as f:
            f.write(rendered)
        connect_worker_nodes_to_cluster_cmd = "kubectl apply -f {}".format(config_map_yaml_path)
        os.system(connect_worker_nodes_to_cluster_cmd)
        get_aws_auth_configmap_cmd = "kubectl get configmaps aws-auth -o yaml --namespace='kube-system'"
        os.system(get_aws_auth_configmap_cmd)
        worker_node_autoscaling_group_name = [output['OutputValue'] for output in outputs
                                              if output['OutputKey'] == 'WorkerNodeAutoScalingGroupName']
        print(worker_node_autoscaling_group_name[0])
        autoscaling = boto3.client('autoscaling')
        autoscaling_group_response = autoscaling.describe_auto_scaling_groups(
            AutoScalingGroupNames=[worker_node_autoscaling_group_name[0]])
        asg_desired_capacity = autoscaling_group_response['AutoScalingGroups'][0]['DesiredCapacity']
        ready_nodes = 0
        ready_nodes_current_loop = 0
        print(asg_desired_capacity)
        print("Begin Polling for new Ready Worker Nodes...")
        # Poll until the Ready node count matches the ASG desired capacity.
        while ready_nodes != asg_desired_capacity:
            # Fixed: message previously claimed 30 seconds while sleeping 5.
            print("Pause for 5 seconds between polling events...")
            sleep(5)
            print("Desired Capacity is: " + str(asg_desired_capacity))
            print("Ready Worker Node Count is: " + str(ready_nodes))
            get_node_status_cmd = "kubectl get nodes -o json"
            current_nodes_status = os.popen(get_node_status_cmd).read()
            current_nodes_status_json = json.loads(current_nodes_status)
            nodes = current_nodes_status_json['items']
            for node in nodes:
                node_conditions = node['status']['conditions']
                print(node_conditions)
                for condition in node_conditions:
                    if condition['reason'] == 'KubeletReady':
                        if condition['type'] == 'Ready' and condition['status'] == 'True':
                            ready_nodes_current_loop += 1
            # Recount from scratch on every polling pass.
            ready_nodes = ready_nodes_current_loop
            ready_nodes_current_loop = 0
            print("Desired Capacity is: " + str(asg_desired_capacity))
            print("Ready Worker Node Count is: " + str(ready_nodes))
        print("All Worker Nodes are Ready!")
def run(self):
    """
    Configure the Kong API gateway for the environment's REST API.

    Reads outputs from the kong, restapi and vpc stacks, optionally opens
    temporary security-group access for the caller's public IP, downloads
    Postman collection/environment files from S3, rewrites the environment
    file with the discovered endpoints, runs the collection via Newman,
    and stores the resulting Kong consumer credentials back into
    self.stack_config['parameters'].

    self.argument is available from the base class and contains the
    argument defined in the sceptre config file.

    The following attributes may be available from the base class:
    self.stack_config (A dict of data from <stack_name>.yaml)
    self.environment_config (A dict of data from config.yaml)
    self.connection_manager (A connection_manager)
    """
    # The hook argument selects the action to perform (e.g. "configure").
    action = self.argument
    print(action)
    print(self.stack_config)
    environment = self.environment_config.environment_path + "/" + self.stack_config.name
    # Look up the sibling stacks whose outputs drive the configuration.
    kong_stack = Stack(name=self.environment_config.environment_path + "/kong",
                       environment_config=self.environment_config,
                       connection_manager=self.connection_manager)
    kong_outputs = kong_stack.describe_outputs()
    print(kong_outputs)
    restapi_stack = Stack(name=self.environment_config.environment_path + "/restapi",
                          environment_config=self.environment_config,
                          connection_manager=self.connection_manager)
    restapi_outputs = restapi_stack.describe_outputs()
    print(restapi_outputs)
    vpc_stack = Stack(name=self.environment_config.environment_path + "/vpc",
                      environment_config=self.environment_config,
                      connection_manager=self.connection_manager)
    vpc_outputs = vpc_stack.describe_outputs()
    print(vpc_outputs)
    # NOTE(review): bare `exit` is a no-op expression (it does not call
    # exit()) — looks like leftover debug code; confirm and remove.
    exit
    if restapi_outputs:
        admin_cidr_block = [
            output['OutputValue'] for output in vpc_outputs
            if output['OutputKey'] == 'AdminCidrBlock'
        ]
        print(admin_cidr_block[0])
        # NOTE(review): plain-HTTP lookup of the caller's public IP from a
        # third-party service; presumably used to decide whether temporary
        # security-group access must be opened — verify the service is
        # still available and consider HTTPS.
        local_public_ip = requests.get('http://ip.42.pl/raw').text
        local_public_ip = local_public_ip + "/32"
        print(local_public_ip)
        print(admin_cidr_block[0])
        # Temporary access is needed only when the caller is outside the
        # admin CIDR block.
        temporary_access = local_public_ip != admin_cidr_block[0]
        ec2 = boto3.client('ec2')
        # Call kong API requests
        if action == "configure":
            kong_public_load_balancer_security_group_id = [
                output['OutputValue'] for output in kong_outputs
                if output['OutputKey'] == 'KongPublicLoadBalancerSecurityGroup'
            ]
            print(kong_public_load_balancer_security_group_id[0])
            if temporary_access:
                # Open the Kong load balancer to the caller's IP for the
                # duration of the configuration run.
                self.handle_temporary_access(
                    ec2, "authorize",
                    kong_public_load_balancer_security_group_id[0],
                    local_public_ip)
            kong_public_load_balancer_dns = [
                output['OutputValue'] for output in kong_outputs
                if output['OutputKey'] == 'KongPublicLoadBalancerDNS'
            ]
            print(kong_public_load_balancer_dns[0])
            restapi_private_load_balancer_dns = [
                output['OutputValue'] for output in restapi_outputs
                if output['OutputKey'] == 'RestApiPrivateLoadBalancerDNS'
            ]
            print(restapi_private_load_balancer_dns[0])
            env_artifacts_s3_bucket = [
                output['OutputValue'] for output in vpc_outputs
                if output['OutputKey'] == 'EnvironmentArtifactsS3Bucket'
            ]
            print(env_artifacts_s3_bucket[0])
            restapi_prefix = self.stack_config['parameters'][
                'RestApiPrefix']
            print(restapi_prefix)
            oauth_admin_port = self.stack_config['parameters'][
                'OAuthAdminPort']
            print(oauth_admin_port)
            postman_files_s3_key = self.stack_config['parameters'][
                'OAuthConfigurationFilesLocation']
            print(postman_files_s3_key)
            # Download Kong Postman Files
            s3 = boto3.resource('s3')
            download_bucket = s3.Bucket(env_artifacts_s3_bucket[0])
            for s3_object in download_bucket.objects.filter(
                    Prefix=postman_files_s3_key):
                # Skip "directory" placeholder keys.
                if s3_object.key[-1] == "/":
                    continue
                download_key = s3_object.key
                local_postman_path = download_key.replace(
                    postman_files_s3_key, 'hooks/')
                print(download_key)
                print(local_postman_path)
                s3.Bucket(env_artifacts_s3_bucket[0]).download_file(
                    download_key, local_postman_path)
            # Update Kong Postman Environment File
            with open('hooks/kong.postman_environment.json', 'r') as f:
                json_data = json.load(f)
            # Patch the downloaded Postman environment with the endpoints
            # and ports discovered above.
            for value in json_data['values']:
                if value['key'] == "konghost":
                    value['value'] = kong_public_load_balancer_dns[0]
                if value['key'] == 'upstreamhost':
                    value['value'] = restapi_private_load_balancer_dns[
                        0]
                if value['key'] == 'adminport':
                    value['value'] = oauth_admin_port
                if value['key'] == 'apiprefix':
                    value['value'] = restapi_prefix
            with open('hooks/kong.postman_environment.json', 'w') as f:
                f.write(json.dumps(json_data))
            # Execute Postman via Newman
            basepath = path.dirname(__file__)
            print(basepath)
            postman_collection_path = path.abspath(
                path.join(basepath, "kong.postman_collection.json"))
            print(postman_collection_path)
            postman_environment_path = path.abspath(
                path.join(basepath, "kong.postman_environment.json"))
            print(postman_environment_path)
            postman_response_json_path = path.abspath(
                path.join(basepath, "postman_response.json"))
            print(postman_response_json_path)
            cmd = "newman run {0} -e {1} --insecure -r cli,json --reporter-json-export {2}".format(
                postman_collection_path, postman_environment_path,
                postman_response_json_path)
            print(os.system(cmd))
            # Parse Postman Response
            with open(postman_response_json_path) as f:
                postman_response_json_data = json.load(f)
            postman_executions = postman_response_json_data['run'][
                'executions']
            # Pull the Kong consumer credentials out of the recorded
            # assertion payloads and persist them into the stack config.
            postman_get_consumer_response = [
                item['assertions'][0]['assertion']
                for item in postman_executions
                if item['item']['name'] == 'Get Kong Consumer Info'
            ]
            postman_get_consumer_response_json = json.loads(
                postman_get_consumer_response[0])
            self.stack_config['parameters']['OAuthRestApiKongConsumerClientId'] = \
                postman_get_consumer_response_json['data'][0]['client_id']
            print(self.stack_config['parameters']
                  ['OAuthRestApiKongConsumerClientId'])
            self.stack_config['parameters']['OAuthRestApiKongConsumerClientSecret'] = \
                postman_get_consumer_response_json['data'][0]['client_secret']
            print(self.stack_config['parameters']
                  ['OAuthRestApiKongConsumerClientSecret'])
            postman_get_oauth_info_response = [
                item['assertions'][0]['assertion']
                for item in postman_executions
                if item['item']['name'] == 'Get OAuth2 Info'
            ]
            postman_get_oauth_info_response_json = json.loads(
                postman_get_oauth_info_response[0])
            self.stack_config['parameters']['OAuthRestApiProvisionKey'] = \
                postman_get_oauth_info_response_json['data'][0]['config']['provision_key']
            print(self.stack_config['parameters']
                  ['OAuthRestApiProvisionKey'])
            if temporary_access:
                # Close the temporary security-group access opened above.
                self.handle_temporary_access(
                    ec2, "revoke",
                    kong_public_load_balancer_security_group_id[0],
                    local_public_ip)
def run(self):
    """
    Refresh the REST API auto-scaling group by terminating each of its
    instances, then poll until the ASG reports its desired capacity of
    InService replacements.

    self.argument is available from the base class and contains the
    argument defined in the sceptre config file.

    The following attributes may be available from the base class:
    self.stack_config (A dict of data from <stack_name>.yaml)
    self.environment_config (A dict of data from config.yaml)
    self.connection_manager (A connection_manager)
    """
    environment = self.environment_config.environment_path + "/" + self.stack_config.name
    stack = Stack(name=environment,
                  environment_config=self.environment_config,
                  connection_manager=self.connection_manager)
    description = stack.describe()
    if description:
        # The ASG name is read from the stack's own parameters.
        rest_api_autoscaling_group_name = [parameter['ParameterValue']
                                           for parameter in description['Stacks'][0]['Parameters']
                                           if parameter['ParameterKey'] == 'RestApiAutoScalingGroupName']
        print("AutoScaling-Group to be Refreshed: " + rest_api_autoscaling_group_name[0])
        autoscaling = boto3.client('autoscaling')
        print("Begin refreshing API server instances...")
        sleep(3)
        autoscaling_group_response = autoscaling.describe_auto_scaling_groups(
            AutoScalingGroupNames=[rest_api_autoscaling_group_name[0]])
        print("--------Begin AutoScaling-Group Describe Resources Response (Pre-Termination)----")
        print(autoscaling_group_response)
        print("--------End AutoScaling-Group Describe Resources Response (Pre-Termination)--------")
        current_instances = autoscaling_group_response['AutoScalingGroups'][0]['Instances']
        # Terminate each current instance in turn and wait for it to die;
        # the ASG is expected to launch replacements.
        for instance in current_instances:
            instance_id = instance['InstanceId']
            print(instance_id)
            # NOTE(review): a fresh EC2 client is created on every loop
            # iteration — could be hoisted above the loop.
            ec2 = boto3.client('ec2')
            waiter = ec2.get_waiter('instance_terminated')
            ec2.terminate_instances(InstanceIds=[instance_id])
            print("Instance terminating...")
            waiter.wait(InstanceIds=[instance_id])
            print("Instance successfully terminated!")
        # NOTE(review): the message says 3 minutes but this sleeps 160s
        # (~2m40s) — confirm which value is intended.
        print("All Instances terminated, wait 3 Minutes for AutoScaling-Group Details to refresh...")
        sleep(160)
        autoscaling_group_response = autoscaling.describe_auto_scaling_groups(
            AutoScalingGroupNames=[rest_api_autoscaling_group_name[0]])
        print("--------Begin AutoScaling-Group Describe Resources Response (Post-Termination)----")
        print(autoscaling_group_response)
        print("--------End AutoScaling-Group Describe Resources Response (Post-Termination)--------")
        asg_desired_capacity = autoscaling_group_response['AutoScalingGroups'][0]['DesiredCapacity']
        in_service_instances = 0
        in_service_current_loop = 0
        print("Begin Polling for new InService Instances...")
        # Poll until the InService count matches the ASG desired capacity.
        while in_service_instances != asg_desired_capacity:
            print("Pause for 30 seconds between polling events...")
            sleep(30)
            print("Desired Capacity is: " + str(asg_desired_capacity))
            print("InService Instance Count is: " + str(in_service_instances))
            autoscaling_group_response = autoscaling.describe_auto_scaling_groups(
                AutoScalingGroupNames=[rest_api_autoscaling_group_name[0]])
            current_instances = autoscaling_group_response['AutoScalingGroups'][0]['Instances']
            for instance in current_instances:
                instance_id = instance['InstanceId']
                print(instance_id)
                print("Instance Lifecycle State: " + instance['LifecycleState'])
                if instance['LifecycleState'] == "InService":
                    in_service_current_loop += 1
            # Recount from scratch on every polling pass.
            in_service_instances = in_service_current_loop
            in_service_current_loop = 0
            print("Desired Capacity is: " + str(asg_desired_capacity))
            print("InService Instance Count is: " + str(in_service_instances))
        print("All Instances are InService!")