def wait_for_new_instances_ready(self):
    """
    Wait for the instances launched by the autoscale group to be up,
    running and reported healthy by every load balancer of the group.
    """
    # Poll once per second until the running-instance count reaches the
    # group's desired capacity.
    with balloon_timer(
            "Waiting for new instances until they're up and running"
    ) as balloon:
        ticks = 0
        while True:
            running = self.get_instances_with_status('running')
            if len(running) == self.group.desired_capacity:
                break
            balloon.update(ticks)
            ticks += 1
            time.sleep(1)
    # TODO: Query AWS for instances
    running_now = self.get_instances_with_status('running')
    # Whatever runs now but was not running before the scale-up is new.
    new_instances = set(running_now) - set(self.old_instances)
    # Ask every balancer to report the new instances as healthy.
    for balancer in self.load_balancers():
        balancer.wait_for_instances_with_health(new_instances)
    # We do it twice, because sometimes the balancer health check is a bit
    # tricky: sit out one full health-check interval, then re-check health.
    with balloon_timer(
            "Waiting for another balancer health check pass") as balloon:
        interval = self.load_balancers()[0].get_health_check_interval()
        for tick in range(1, interval):
            balloon.update(tick)
            time.sleep(1)
    for balancer in self.load_balancers():
        balancer.wait_for_instances_with_health(new_instances)
def wait_for_new_instances_ready(self):
    """
    Wait for instances launched by autoscale group to be up, running and in
    the balancer
    """
    # Poll once per second until as many instances are running as the
    # group's desired capacity demands.
    with balloon_timer("Waiting for new instances until they're up and running") as balloon:
        i = 0
        while len(self.get_instances_with_status('running')) != self.group.desired_capacity:
            balloon.update(i)
            i += 1
            time.sleep(1)
    # TODO: Query AWS for instances
    instances = self.get_instances_with_status('running')
    # Anything running now that was not running before the scale-up is new.
    new_instances = set(instances) - set(self.old_instances)
    # Ask the balancer to wait
    for balancer in self.load_balancers():
        balancer.wait_for_instances_with_health(new_instances)
    # We do it twice, because sometimes the balancer health check is a bit tricky.
    # Sleep through one health-check interval of the first balancer, then
    # re-run the health wait against every balancer.
    with balloon_timer("Waiting for another balancer health check pass") as balloon:
        for i in range(1, self.load_balancers()[0].get_health_check_interval()):
            balloon.update(i)
            time.sleep(1)
    for balancer in self.load_balancers():
        balancer.wait_for_instances_with_health(new_instances)
def register_instance_in_load_balancers(self, instances, wait=True):
    """
    Register instances in the ELB of the autoscale group
    """
    classic_balancers = self.load_balancers()
    if classic_balancers:
        ids = [instance.instance_id for instance in instances]
        for balancer in classic_balancers:
            balancer.register_instances(ids)
            if wait:
                balancer.wait_for_instances_with_health(ids, health='InService')
    target_groups = self.load_target_groups()
    if not target_groups:
        return
    # Target groups (ALB/NLB) are handled through the boto3 elbv2 API.
    elbv2 = boto3.client('elbv2')
    targets = [{'Id': instance.instance_id} for instance in instances]
    for group_arn in target_groups:
        elbv2.register_targets(TargetGroupArn=group_arn, Targets=targets)
        if not wait:
            continue
        with balloon_timer("Waiting for instance to be healthy in target_group") as balloon:
            ticks = 0
            # `is False` on purpose: keep looping only on an explicit False.
            while self.check_target_group_instance_state(group_arn, targets, 'healthy') is False:
                balloon.update(ticks)
                ticks += 1
                time.sleep(1)
def deploy_instances_in_group(self, group):
    """
    Deploy code into the instances of the autoscale group.

    This is done by executing `command` from `deploy` configuration in
    `working_directory`.

    :param group: autoscale group whose running instances receive the deploy
    :returns: the list of instances the command was run against
    :raises ForsetiException: if the group has no running instances
    :raises ForsetiDeployException: if the deploy command exits non-zero
    """
    instances = self._get_instances(group)
    if not instances:
        raise ForsetiException(
            'This deployer needs to have some instances running in the group'
        )
    with balloon_timer("Deploying new code on instances") as balloon:
        deploy_configuration = self.configuration.get_application_configuration(
            self.application)['deploy']
        command = deploy_configuration['command'].format(dns_name=','.join(
            [instance.instance.public_dns_name for instance in instances]))
        if self.command_args:
            command = '%s %s' % (command, self.command_args)
        former_directory = os.getcwd()
        os.chdir(deploy_configuration['working_directory'])
        try:
            retvalue = os.system(command)
            if retvalue != 0:
                raise ForsetiDeployException(
                    'Deployment command did not return 0 as expected, returned: %s' % retvalue)
        finally:
            # BUGFIX: restore the caller's working directory even when the
            # deploy command fails; previously a failure raised before the
            # chdir back, leaving the process in `working_directory`.
            os.chdir(former_directory)
    return instances
def deploy(self, ami_id=None): """ Do the code deployment by pushing the code in all instances and create an AMI from an. """ self.send_sns_message("Starting deployment of %s" % self.application) with balloon_timer("") as balloon: group = self._get_group() if not ami_id: # We must suspend autoscaling processes to avoid adding instances with # outdated code group.suspend_processes() try: self.deploy_instances_in_group(group) except ForsetiException: group.resume_processes() raise ami_id = self.generate_ami() try: self.setup_autoscale(ami_id) finally: group.resume_processes() minutes, seconds = divmod(int(balloon.seconds_elapsed), 60) print "Total deployment time: %02d:%02d" % (minutes, seconds) self.send_sns_message( "Finished deployment of %s in %02d:%02d" % \ (self.application, minutes, seconds) )
def deploy(self, ami_id=None): """ Do the code deployment by pushing the code in all instances and create an AMI from an. """ self.send_sns_message( "Starting deployment of %s" % self.application ) with balloon_timer("") as balloon: group = self._get_group() if not ami_id: # We must suspend autoscaling processes to avoid adding instances with # outdated code group.suspend_processes() try: self.deploy_instances_in_group(group) except ForsetiException as exception: group.resume_processes() raise exception ami_id = self.generate_ami() try: self.setup_autoscale(ami_id) finally: group.resume_processes() minutes, seconds = divmod(int(balloon.seconds_elapsed), 60) print "Total deployment time: %02d:%02d" % (minutes, seconds) self.send_sns_message( "Finished deployment of %s in %02d:%02d" % \ (self.application, minutes, seconds) )
def deploy_instances_in_group(self, group):
    """
    Deploy code into the instances of the autoscale group.

    This is done by executing `command` from `deploy` configuration in
    `working_directory`.

    :raises ForsetiException: when the group has no running instances
    :raises ForsetiDeployException: when the deploy command exits non-zero
    """
    instances = self._get_instances(group)
    if not instances:
        raise ForsetiException(
            'This deployer needs to have some instances running in the group'
        )
    with balloon_timer("Deploying new code on instances") as balloon:
        deploy_configuration = self.configuration.get_application_configuration(self.application)['deploy']
        command = deploy_configuration['command'].format(
            dns_name=','.join([instance.instance.public_dns_name for instance in instances])
        )
        if self.command_args:
            command = '%s %s' % (command, self.command_args)
        former_directory = os.getcwd()
        os.chdir(deploy_configuration['working_directory'])
        try:
            retvalue = os.system(command)
            if retvalue != 0:
                raise ForsetiDeployException(
                    'Deployment command did not return 0 as expected, returned: %s' % retvalue
                )
        finally:
            # BUGFIX: always chdir back, even when the command fails;
            # previously the raise skipped the restore and left the process
            # inside `working_directory`.
            os.chdir(former_directory)
    return instances
def deploy(self, ami_id): """ Do the deployment of an AMI. """ with balloon_timer("") as balloon: self.setup_autoscale(ami_id) minutes, seconds = divmod(int(balloon.seconds_elapsed), 60) print "Total deployment time: %02d:%02d" % (minutes, seconds)
def wait_for_ssh(self):
    """
    Block until the golden instance accepts SSH connections, polling once
    per second.
    """
    message = "Golden instance %s provisioned. Waiting until SSH is up" % self.instance.id
    with balloon_timer(message) as balloon:
        seconds_waited = 0
        while not self.is_ssh_running():
            balloon.update(seconds_waited)
            seconds_waited += 1
            time.sleep(1)
def wait_for_instances_with_health(self, instances_ids, health='InService'):
    """
    Poll the balancer once per second until every instance in
    `instances_ids` reports the given health state.
    """
    message = "Waiting for %d instances until they're in the balancer %s with status %s" % (
        len(instances_ids), self.balancer.name, health
    )
    with balloon_timer(message) as balloon:
        expected = len(instances_ids)
        ticks = 0
        while True:
            healthy = self.filter_instances_with_health(instances_ids, health=health)
            if len(healthy) == expected:
                break
            balloon.update(ticks)
            ticks += 1
            time.sleep(1)
def create_image(self, no_reboot=False):
    """
    Create an AMI from a running instance

    :param no_reboot: when True, create the image without rebooting the
        instance
    :returns: the id of the newly created AMI
    :raises EC2InstanceException: when the AMI never reaches "available"
    """
    with balloon_timer("Instance %s creating image" % self.instance.id) as balloon:
        i = 0
        # FIXME: Sometimes, if the ami was created by forseti but couldn't
        # tag it, we may have an error. It would be better to look for amis
        # with the same name
        amis = self.ec2.get_all_images(
            owners=['self'],
            filters={
                'tag:forseti:application': self.application,
                'tag:forseti:date': self.today,
            }
        )
        # Start numbering after the images that already exist for today so
        # the generated name is unique.
        success = False
        counter = len(amis)
        while not success:
            counter = counter + 1
            ami_name = "%s-ami-%s-%s" % (self.application, self.today, counter)
            try:
                ami_id = self.instance.create_image(
                    ami_name,
                    description=ami_name,
                    no_reboot=no_reboot
                )
                success = True
            except EC2ResponseError as exception:
                # Name collision: bump the counter and retry; any other EC2
                # error is re-raised.
                # NOTE(review): `exception.message` is a deprecated
                # boto/py2 attribute — confirm it is still populated.
                if 'is already in use by AMI' not in exception.message:
                    raise
            balloon.update(i)
            i += 1
            time.sleep(1)
        ami = self.ec2.get_all_images(image_ids=(ami_id,))[0]
        # Poll until the image leaves the "pending" state.
        while ami.update() == "pending":
            balloon.update(i)
            i += 1
            time.sleep(1)
        if ami.update() == "available":
            ami.add_tag("Name", ami_name)
            ami.add_tag('forseti:application', self.application)
            ami.add_tag('forseti:date', self.today)
        else:
            # NOTE(review): boto Image objects may not expose `.message`
            # — verify before relying on this error text.
            raise EC2InstanceException(
                "Image %s could not be created. Reason: %s" % (ami.id, ami.message)
            )
    return ami_id
def wait_for_ssh(self):
    """
    Wait until SSH is running
    """
    # Poll `is_ssh_running` once per second, ticking the progress balloon
    # each time, until the instance accepts SSH connections.
    with balloon_timer(
            "Golden instance %s provisioned. Waiting until SSH is up"
            % self.instance.id) as balloon:
        i = 0
        while not self.is_ssh_running():
            balloon.update(i)
            i += 1
            time.sleep(1)
def create_image(self, no_reboot=False):
    """
    Create an AMI from a running instance

    :param no_reboot: when True, create the image without rebooting the
        instance
    :returns: the id of the newly created AMI
    :raises EC2InstanceException: when the AMI never reaches "available"
    """
    with balloon_timer("Instance %s creating image" % self.instance.id) as balloon:
        i = 0
        # FIXME: Sometimes, if the ami was created by forseti but couldn't
        # tag it, we may have an error. It would be better to look for amis
        # with the same name
        amis = self.ec2.get_all_images(owners=['self'], filters={
            'tag:forseti:application': self.application,
            'tag:forseti:date': self.today,
        })
        success = False
        # Start numbering after the images that already exist for today so
        # the generated name is unique.
        counter = len(amis)
        while not success:
            counter = counter + 1
            ami_name = "%s-ami-%s-%s" % (self.application, self.today, counter)
            try:
                ami_id = self.instance.create_image(ami_name,
                                                    description=ami_name,
                                                    no_reboot=no_reboot)
                success = True
            except EC2ResponseError as exception:
                # Name collision: bump the counter and retry; anything else
                # is a real error and is re-raised.
                # NOTE(review): `exception.message` is a deprecated
                # boto/py2 attribute — confirm it is still populated.
                if 'is already in use by AMI' not in exception.message:
                    raise
            balloon.update(i)
            i += 1
            time.sleep(1)
        ami = self.ec2.get_all_images(image_ids=(ami_id, ))[0]
        # Poll until the image leaves the "pending" state.
        while ami.update() == "pending":
            balloon.update(i)
            i += 1
            time.sleep(1)
        if ami.update() == "available":
            ami.add_tag("Name", ami_name)
            ami.add_tag('forseti:application', self.application)
            ami.add_tag('forseti:date', self.today)
        else:
            # NOTE(review): boto Image objects may not expose `.message`
            # — verify before relying on this error text.
            raise EC2InstanceException(
                "Image %s could not be created. Reason: %s" % (ami.id, ami.message))
    return ami_id
def wait_for_instances_with_health(self, instances_ids, health='InService'):
    """
    Block until all `instances_ids` report the given `health` state in this
    balancer, polling once per second.
    """
    with balloon_timer(
            "Waiting for %d instances until they're in the balancer %s with status %s"
            % (len(instances_ids), self.balancer.name, health)) as balloon:
        i = 0
        # Loop until the number of instances matching `health` equals the
        # number of ids we are waiting for.
        while len(
                self.filter_instances_with_health(
                    instances_ids, health=health)) != len(instances_ids):
            balloon.update(i)
            i += 1
            time.sleep(1)
def deploy(self, ami_id=None): """ Do the code deployment in a golden instance and setup an autoscale group with an AMI created from it. :param ami_id: AMI id to be deployed. If it's `None`, a new one will be created """ with balloon_timer("") as balloon: if not ami_id: ami_id = self.create_ami_from_golden_instance() print "New AMI %s from golden instance" % ami_id self.setup_autoscale(ami_id) minutes, seconds = divmod(int(balloon.seconds_elapsed), 60) print "Total deployment time: %02d:%02d" % (minutes, seconds)
def terminate_instances(self, instances_ids):
    """
    Terminate instances that we no longer want in the autoscale group, the
    old ones

    :param instances_ids: iterable of EC2 instance ids to terminate
    """
    # `as balloon` removed: the timer handle was bound but never used.
    with balloon_timer("Terminating old instances"):
        for instance_id in instances_ids:
            try:
                self.autoscale.terminate_instance(instance_id, decrement_capacity=True)
            except BotoServerError:
                # Best effort: the instance may already be gone (e.g.
                # scaled in by AWS); skip it and keep terminating the rest.
                pass
        # Force an updated group instance to be sure the update is done correctly
        self.group = self._get_autoscaling_group()
        # Restore max_size to the configured value (presumably after
        # increase_desired_capacity doubled it — confirm with callers).
        self.group.max_size = self.configuration['max_size']
        self.group.update()
def terminate_instances(self, instances_ids):
    """
    Terminate instances that we no longer want in the autoscale group, the
    old ones

    :param instances_ids: iterable of EC2 instance ids to terminate
    """
    # NOTE(review): `balloon` is bound but never ticked; the timer is used
    # only for its start/stop messages.
    with balloon_timer("Terminating old instances") as balloon:
        for instance_id in instances_ids:
            try:
                self.autoscale.terminate_instance(
                    instance_id,
                    decrement_capacity=True
                )
            except BotoServerError:
                # Best effort: the instance may already be gone; swallow
                # the error and keep terminating the rest.
                pass
        # Force an updated group instance to be sure the update is done correctly
        self.group = self._get_autoscaling_group()
        # Restore max_size to the configured value (presumably after
        # increase_desired_capacity doubled it — confirm with callers).
        self.group.max_size = self.configuration['max_size']
        self.group.update()
def increase_desired_capacity(self):
    """
    Increases the autoscale group desired capacity and max_size, this
    implies launching new EC2 instances
    """
    with balloon_timer("Increasing desired capacity to provision new machines") as balloon:
        # Remember the pre-scale instances so the new ones can be told
        # apart later (e.g. by wait_for_new_instances_ready).
        current_instances = self.get_instances_with_status('running')
        self.old_instances = current_instances
        desired = len(current_instances) * 2
        i = 0
        while self.group.desired_capacity != desired:
            self.group.desired_capacity = desired
            # NOTE(review): max_size doubles on every pass of this loop.
            # The assignment above makes the condition false, so the body
            # runs at most once today — but if that ever changed, max_size
            # would keep doubling. Confirm this is intended.
            self.group.max_size = self.group.max_size * 2
            self.group.update()
            balloon.update(i)
            i += 1
            time.sleep(1)
        # Re-fetch the group so we hold AWS's view of the updated values.
        self.group = self._get_autoscaling_group()
def cleanup_autoscale_configurations(self, desired_configurations=4): """ Clean up all launch configurations of the autoscaling group belonging to the application but leaving `desired_configurations` left. When a launch configuration is deleted, the AMI and snapshot will be deleted too. """ with balloon_timer(""): self.autoscale_group_name = self.application_configuration['autoscale_group'] group = self._get_autoscaling_group() configurations = group.get_all_launch_configurations() # Get the first configurations minus the `desired_configurations` configurations_to_be_deleted = configurations[:-desired_configurations] for configuration in configurations_to_be_deleted: self.send_sns_message( "Deleting launch configuration %s" % configuration.name ) print "Deleting launch configuration %s" % configuration.name configuration.delete()
def provision(self, deployer_args=None):
    """
    Provisions machine using `command` specified in configuration file,
    `command` is executed locally within `working_directory` specified path.

    Some extra arguments can be passed to the command by using
    `deployer_args`
    """
    self.wait_for_ssh()
    with balloon_timer("Deployed new code on golden instance %s" % self.instance.id) as balloon:
        # Substitute the instance's public DNS name into the configured
        # provision command.
        command = self.provision_configuration['command'].format(
            dns_name=self.instance.public_dns_name
        )
        if deployer_args:
            # `deployer_args` is supposed to be a string
            command = '%s %s' % (command, deployer_args)
        former_directory = os.getcwd()
        os.chdir(self.provision_configuration['working_directory'])
        # NOTE(review): the exit status of os.system is ignored here,
        # unlike deploy_instances_in_group which raises on non-zero —
        # confirm a failed provision should really go unnoticed.
        os.system(command)
        os.chdir(former_directory)
def provision(self, deployer_args=None):
    """
    Run the configured provision `command` locally, from inside the
    configured `working_directory`, against this golden instance.

    :param deployer_args: optional extra arguments (a string) appended to
        the command
    """
    self.wait_for_ssh()
    with balloon_timer("Deployed new code on golden instance %s" % self.instance.id) as balloon:
        provision_command = self.provision_configuration['command'].format(
            dns_name=self.instance.public_dns_name)
        if deployer_args:
            # `deployer_args` is supposed to be a string
            provision_command = '%s %s' % (provision_command, deployer_args)
        previous_cwd = os.getcwd()
        os.chdir(self.provision_configuration['working_directory'])
        # NOTE(review): the command's exit status is ignored — confirm a
        # failed provision should really go unnoticed.
        os.system(provision_command)
        os.chdir(previous_cwd)
def launch_and_wait(self):
    """
    Launch a golden instance and wait until it's running.
    """
    self.launch()
    message = "Golden instance %s launched. Waiting until it's running" % self.instance.id
    with balloon_timer(message) as balloon:
        ticks = 0
        while self.instance.update() == "pending":
            balloon.update(ticks)
            ticks += 1
            time.sleep(1)
        if self.instance.update() != "running":
            raise EC2InstanceException(
                "Golden instance %s could not be launched" % self.instance.id
            )
        # Tag the freshly running instance so later runs can identify it.
        tags = (
            ('Name', "golden-%s-instance-%s" % (self.application, self.today)),
            ('forseti:golden-instance', True),
            ('forseti:application', self.application),
            ('forseti:date', self.today),
        )
        for key, value in tags:
            self.instance.add_tag(key, value)
def increase_desired_capacity(self):
    """
    Increases the autoscale group desired capacity and max_size, this
    implies launching new EC2 instances
    """
    with balloon_timer(
            "Increasing desired capacity to provision new machines"
    ) as balloon:
        # Remember the pre-scale instances so the new ones can be told
        # apart later.
        current_instances = self.get_instances_with_status('running')
        self.old_instances = current_instances
        desired = len(current_instances) * 2
        i = 0
        while self.group.desired_capacity != desired:
            self.group.desired_capacity = desired
            # NOTE(review): max_size doubles on every pass of this loop.
            # The assignment above makes the condition false, so the body
            # runs at most once today — confirm that is intended.
            self.group.max_size = self.group.max_size * 2
            self.group.update()
            balloon.update(i)
            i += 1
            time.sleep(1)
        # Re-fetch the group so we hold AWS's view of the updated values.
        self.group = self._get_autoscaling_group()
def launch_and_wait(self):
    """
    Launch a golden instance and wait until it's running.

    :raises EC2InstanceException: when the instance ends up in any state
        other than "running"
    """
    self.launch()
    with balloon_timer(
            "Golden instance %s launched. Waiting until it's running" %
            self.instance.id) as balloon:
        i = 0
        # Poll once per second while EC2 still reports the instance as
        # "pending".
        while self.instance.update() == "pending":
            balloon.update(i)
            i += 1
            time.sleep(1)
        if self.instance.update() == "running":
            # Tag the instance so forseti can identify it later.
            tag_name = "golden-%s-instance-%s" % (self.application, self.today)
            self.instance.add_tag('Name', tag_name)
            self.instance.add_tag('forseti:golden-instance', True)
            self.instance.add_tag('forseti:application', self.application)
            self.instance.add_tag('forseti:date', self.today)
        else:
            raise EC2InstanceException(
                "Golden instance %s could not be launched" % self.instance.id)
def cleanup_autoscale_configurations(self, desired_configurations=4): """ Clean up all launch configurations of the autoscaling group belonging to the application but leaving `desired_configurations` left. When a launch configuration is deleted, the AMI and snapshot will be deleted too. """ with balloon_timer(""): self.autoscale_group_name = self.application_configuration[ 'autoscale_group'] group = self._get_autoscaling_group() configurations = group.get_all_launch_configurations() # Get the first configurations minus the `desired_configurations` configurations_to_be_deleted = configurations[: -desired_configurations] configurations_to_be_deleted = natsort.natsorted( configurations_to_be_deleted) for configuration in configurations_to_be_deleted: self.send_sns_message("Deleting launch configuration %s" % configuration.name) print "Deleting launch configuration %s" % configuration.name configuration.delete()