def _delete_service(self, blocking):
    """ Delete the service and its related LB and target group. """
    elb_client = aws_client_elb.ElbClient(self.config)
    elb_client.delete_lb_and_friends()

    service_name = self.config.get_ecs_service_name()
    cluster_name = self.config.get_ecs_cluster_name()
    self.log.info('Deleting service %s in cluster %s', service_name,
                  cluster_name)
    as_client = aws_client_auto_scaling.AutoScalingClient(self.config)
    try:
        # Scale everything down to zero before deleting the service.
        as_client.update_capacity_to(
            0,   # min
            0,   # max
            0,   # desired
            "",  # termination_policy
        )
        self.client.delete_service(
            cluster=cluster_name,
            service=service_name,
        )
        if blocking:
            while not self.is_service_inactive():
                self.log.info('Waiting for service %s to become INACTIVE',
                              service_name)
                time.sleep(10)
    except Exception as ex:
        self.log.error('Error deleting service %s. Error: %s', service_name,
                       ex)

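# is_service_inactive() is polled above but defined elsewhere; a minimal
# sketch of one plausible implementation (hypothetical name, not the
# original code), using ECS describe_services:
def _is_service_inactive_sketch(self):
    response = self.client.describe_services(
        cluster=self.config.get_ecs_cluster_name(),
        services=[self.config.get_ecs_service_name()],
    )
    services = response.get('services', [])
    # A deleted service is eventually reported with status INACTIVE, or
    # drops out of the response entirely.
    return not services or services[0]['status'] == 'INACTIVE'
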
def put_elb_unhealthy_alarm(self):
    """
    Put the UnHealthyHostCount alarm on the LB. The alarm action
    (config.get_alarm_action()) is expected to already exist, e.g.
    created via the EC2 console.
    """
    alarm_name = self.config.get_alarm_name()

    # Find the LB and target group the alarm is dimensioned on.
    elb_client = aws_client_elb.ElbClient(self.config)
    grape_lb_arn, grape_tg_arn = elb_client.get_lb_and_tg()
    response = self.client.put_metric_alarm(
        AlarmName=alarm_name,
        AlarmDescription=self.alarm['description'],
        ActionsEnabled=self.alarm['enabled'],
        AlarmActions=[
            self.config.get_alarm_action(),
        ],
        MetricName='UnHealthyHostCount',
        Namespace='AWS/ApplicationELB',
        Statistic='Average',
        Dimensions=[{
            'Name': 'LoadBalancer',
            'Value': grape_lb_arn,
        }, {
            'Name': 'TargetGroup',
            'Value': grape_tg_arn,
        }],
        Period=60,
        Unit='Count',
        ComparisonOperator='GreaterThanThreshold',
        Threshold=0,
        EvaluationPeriods=1,
    )
    return response

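# Note: CloudWatch's AWS/ApplicationELB dimensions expect the resource
# portion of the ARN (e.g. 'app/<name>/<id>' and
# 'targetgroup/<name>/<id>'), not the full ARN. If get_lb_and_tg()
# returns full ARNs, a hypothetical helper like this could derive the
# dimension values (a sketch, not part of the original code):
def _alarm_dimension_values_sketch(self, lb_arn, tg_arn):
    # ALB ARN: arn:aws:elasticloadbalancing:<region>:<acct>:loadbalancer/app/<name>/<id>
    lb_dim = lb_arn.split(':loadbalancer/')[-1]  # -> 'app/<name>/<id>'
    # TG ARN: arn:aws:elasticloadbalancing:<region>:<acct>:targetgroup/<name>/<id>
    tg_dim = tg_arn.split(':')[-1]  # -> 'targetgroup/<name>/<id>'
    return lb_dim, tg_dim
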
def get_or_create_auto_scaling_group(self):
    elb_client = aws_client_elb.ElbClient(self.config)
    tg = elb_client.create_or_update_target_group()

    # Check for an existing auto scaling group.
    response = self.client.describe_auto_scaling_groups(
        AutoScalingGroupNames=[
            self.as_group_name,
        ],
    )
    if len(response['AutoScalingGroups']) > 0:
        # Update the existing auto scaling group's configuration.
        response = self.update_capacity()
        self.update_tag()
        return response

    # Create a new auto scaling group.
    (min_value, max_value, desired, availability_zones, vpc_zone_identifier,
     default_cooldown) = self.config.get_auto_scale_params()
    response = self.client.create_auto_scaling_group(
        AutoScalingGroupName=self.as_group_name,
        LaunchConfigurationName=self.launch_configuration_name,
        MinSize=min_value,
        MaxSize=max_value,
        DesiredCapacity=desired,
        VPCZoneIdentifier=vpc_zone_identifier,
        AvailabilityZones=availability_zones,
        DefaultCooldown=default_cooldown,
        TargetGroupARNs=[tg['TargetGroupArn']])
    self.update_tag()
    return response

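# update_tag() is called above but defined elsewhere; a minimal sketch of
# one plausible implementation (hypothetical name and tag value; the real
# method may differ), using boto3's create_or_update_tags on the group:
def _update_tag_sketch(self):
    self.client.create_or_update_tags(Tags=[{
        'ResourceId': self.as_group_name,
        'ResourceType': 'auto-scaling-group',
        'Key': 'Name',
        'Value': self.as_group_name,  # assumed tag value
        'PropagateAtLaunch': True,  # tag instances launched by the group
    }])
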
def get_or_create_lb(self):
    lb_name = self.config.get_lb_name()
    self.log.info('Load balancer name is: %s', lb_name)
    elb_client = aws_client_elb.ElbClient(self.config)
    elb_client.create_lb_and_friends()
    grape_lb_arn, _dns_name = elb_client.get_lb_details()
    grape_tg = elb_client.get_first_matching_target_group(grape_lb_arn)
    container_definition = self.task_def['containerDefinitions'][0]

    # Build the load balancer info to be passed to create_service.
    lb_info = {
        'targetGroupArn': grape_tg['TargetGroupArn'],
        # 'loadBalancerName': 'grapes-upload',
        'containerName': container_definition['name'],
        'containerPort':
            container_definition['portMappings'][0]['containerPort'],
    }
    return lb_info

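# A sketch of how the returned lb_info is typically consumed (assumed
# call site; parameter values here are illustrative): ECS create_service
# accepts it directly in the loadBalancers list.
def _create_service_sketch(self):
    lb_info = self.get_or_create_lb()
    return self.client.create_service(
        cluster=self.config.get_ecs_cluster_name(),
        serviceName=self.config.get_ecs_service_name(),
        taskDefinition=self.task_def['family'],  # assumed key
        desiredCount=1,  # illustrative
        loadBalancers=[lb_info],
    )
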
def rolling_upgrade_service(self):
    # Get the instances attached to this target group; they will be
    # terminated once the rolling upgrade is done.
    elb = aws_client_elb.ElbClient(self.config)
    _, tg_arn = elb.get_lb_and_tg()
    instance_ids, _ = self._get_running_targets(elb, tg_arn)
    self.log.info('Found %d instances in target group to be upgraded: %s',
                  len(instance_ids), instance_ids)

    # Use the auto scaling group to increase the number of instances.
    # New instances will be launched with the updated task definition.
    as_client = aws_client_auto_scaling.AutoScalingClient(self.config)
    app_as_client = aws_client_app_auto_scaling.AppAutoScalingClient(
        self.config)
    original_min, original_max, original_desired, _, _, _ = \
        self.config.get_auto_scale_params()

    # Step 1. Delete all but the latest task definition.
    self.delete_all_but_latest_taskd()

    # Step 2 (optional). Ensure the number of tasks is set to the original
    # capacity before starting the rolling upgrade. The count can be off if
    # a previous rolling upgrade was aborted midway and min/max/desired were
    # never restored to their original values.
    if options.get_options().normalize_tasks():
        self.log.info(
            'Ensuring # of tasks is set to the original desired capacity')
        as_client.update_capacity_to(
            original_min, original_max, original_desired, "OldestInstance")
        # Wait for the running task count to catch up to the desired count.
        running, desired = self._get_task_count()
        while running != desired:
            self.log.info(
                'Waiting for runningCount:%d to catch up to desiredCount:%d. '
                'TargetGroup:%s',
                running, desired, self.config.get_tg_name())
            time.sleep(30)
            running, desired = self._get_task_count()
        self.log.info('Running count:%d reached desired count:%d',
                      running, desired)
    else:
        self.log.info('Not normalizing task count before doing rolling '
                      'upgrade')

    # Step 3. Increase the capacity to 2x the original with the new task
    # definition.
    new_desired = max(original_desired * 2, 3)
    new_min = new_desired
    new_max = new_desired
    self.log.info(
        'Starting rolling upgrade with newmin=%d newmax=%d newdesired=%d',
        new_min, new_max, new_desired)
    app_as_client.update_ecs_autoscaling_parameters(new_min, new_max)
    as_client.update_capacity_and_task_definition(
        new_min, new_max, new_desired, "")
    # Wait for the running task count to catch up to the desired count.
    running, desired = self._get_task_count()
    while running < desired:
        self.log.info(
            'Waiting for runningCount:%d to catch up to desiredCount:%d. '
            'TargetGroup:%s',
            running, desired, self.config.get_tg_name())
        time.sleep(30)
        running, desired = self._get_task_count()
    self.log.info('Running count:%d reached desired count:%d',
                  running, desired)

    # Step 4. Wait for the targets to be marked healthy by the LB before
    # decreasing the number of tasks, then remove/terminate any old
    # instances.
    if not options.get_options().wait_for_healthy_targets():
        self.log.info(
            'Not waiting for targets to become healthy before scaling down')
    else:
        while True:
            healthy_targets = 0
            _, instance_id_map = self._get_running_targets(elb, tg_arn)
            for target in instance_id_map.values():
                if target['TargetHealth']['State'] == 'healthy':
                    healthy_targets += 1
            if healthy_targets >= new_desired:
                self.log.info(
                    'healthyTargetCount:%d has reached desiredCount:%d.',
                    healthy_targets, new_desired)
                break
            self.log.info(
                'Waiting for healthyTargetCount:%d to catch up to upscaled '
                'desiredCount:%d.', healthy_targets, new_desired)
            time.sleep(30)

    inactive_tasks = self._get_inactive_running_tasks()
    self.log.info('Found %d tasks listed as inactive and running',
                  len(inactive_tasks))
    for task in inactive_tasks:
        self.log.info("Task '%s' is listed as inactive", task)

    as_client.update_capacity_to(
        original_min, original_max, original_desired, "OldestInstance")

    # Step 5. Wait for the targets to disappear from the LB before
    # decreasing the number of tasks.
    while True:
        _, instance_id_map = self._get_running_targets(elb, tg_arn)
        num_targets = len(instance_id_map)
        if num_targets == original_desired:
            self.log.info('targetCount:%d has reached desiredCount:%d.',
                          num_targets, original_desired)
            break
        if num_targets < original_desired:
            self.log.warning('targetCount:%d IS BELOW desiredCount:%d.',
                             num_targets, original_desired)
            break
        self.log.info(
            'Waiting for targetCount:%d to catch up to original '
            'desiredCount:%d.', num_targets, original_desired)
        time.sleep(30)

    # Step 6. Drop the number of tasks and restore the termination policy
    # to the default.
    app_as_client.update_ecs_autoscaling_parameters(
        original_min, original_max)
    as_client.update_capacity_to(
        original_min, original_max, original_desired, "")

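# _get_running_targets() is used throughout the upgrade but defined
# elsewhere; a minimal sketch of a plausible implementation (assumes
# ElbClient exposes its boto3 elbv2 client as .client), built on
# describe_target_health:
def _get_running_targets_sketch(self, elb, tg_arn):
    response = elb.client.describe_target_health(TargetGroupArn=tg_arn)
    instance_id_map = {
        desc['Target']['Id']: desc
        for desc in response['TargetHealthDescriptions']
    }
    # Returns (instance ids, id -> health description map), matching how
    # the callers above unpack the result.
    return list(instance_id_map.keys()), instance_id_map
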