Example #1
File: cluster.py Project: Bauerpauer/udo
    def activate(self):
        conn = vpc_conn()

        cfg = _cfg.get('clusters', self.name)
        if not cfg:
            print "No configuration found for {}".format(self.name)
            return False

        vpc = get_vpc_by_name(self.name)
        if vpc:
            print "Cluster {} already exists".format(self.name)
            return

        # create VPC
        subnet_cidr = cfg.get('subnet_cidr')
        if not subnet_cidr:
            print "No subnet definition found for {}".format(self.name)
            return False
        vpc = conn.create_vpc(subnet_cidr)
        vpc.add_tag('Name', value=self.name)
        # for now assume that our subnet is the same CIDR as the VPC
        # this is simpler but less fancy
        subnet = conn.create_subnet(vpc.id, subnet_cidr)
        # all done
        util.message_integrations("Created VPC {}".format(self.name))

        # mark that this cluster is being managed by udo
        vpc.add_tag('udo', value=True)

        return True
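
Every example on this page ends by calling util.message_integrations() to announce what happened. The helper itself lives in udo's util.py and is not reproduced in this listing, so the block below is only a minimal sketch of what such a function might look like, assuming a Slack-style incoming webhook read from an environment variable; the real implementation may differ.

# Hypothetical sketch, not the actual udo util.py: post a status message to a
# Slack-style incoming webhook if one is configured, otherwise just print it.
# The UDO_SLACK_WEBHOOK variable and the icon default are assumptions.
import json
import os
import urllib2  # Python 2, matching the print-statement style of these examples

def message_integrations(msg, icon=':robot_face:'):
    # Always echo locally, then notify the webhook if one is configured.
    print msg
    webhook_url = os.environ.get('UDO_SLACK_WEBHOOK')  # assumed configuration source
    if not webhook_url:
        return
    payload = {'text': msg, 'icon_emoji': icon}
    req = urllib2.Request(webhook_url, json.dumps(payload),
                          {'Content-Type': 'application/json'})
    urllib2.urlopen(req)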
Example #2
    def scale(self, desired):
        debug("In asgroup.y scale")
        asgroup = self.get_asgroup()
        client = util.as_conn()
        asg_name = self.name()

        if desired < asgroup['MinSize']:
            print "Cannot scale: {} is lower than MinSize ({})".format(desired, asgroup['MinSize'])
            return
        if desired > asgroup['MaxSize']:
            print "Cannot scale: {} is greater than MaxSize ({})".format(desired, asgroup['MaxSize'])
            if not util.confirm("Increase MaxSize to {}?".format(desired)):
                return
            asgroup['MaxSize'] = desired
            client.update_auto_scaling_group( AutoScalingGroupName = asg_name, MaxSize = desired )
        current = asgroup['DesiredCapacity']

        # Set DesiredCapacity
        response = client.set_desired_capacity( AutoScalingGroupName = asg_name, DesiredCapacity = desired )

        # Check if DesiredCapacity was changed
        debug("in asgroup.py scale: running 'asgroup = self.get_asgroup()'")
        asgroup = self.get_asgroup()
        new = asgroup['DesiredCapacity']
        if (new != current):
            msg = "Changed ASgroup {} desired_capacity from {} to {}".format(asg_name, current, new)
            util.message_integrations(msg)
Example #3
File: asgroup.py Project: abunuwas/udo
 def suspend(self):
     debug("In asgroup.py suspend")
     name = self.name()
     asg_policies = self.conn.describe_policies(
         AutoScalingGroupName=name)['ScalingPolicies']
     if not asg_policies:  # if we can't find the status of any ScalingPolicies, there are no policies
         print("ASG %s has no autoscaling processes to suspend" % name)
         return
     if self.suspend_status():
         print("ASG %s is already suspended" % name)
         return
     else:
         group = []
         group.append(
             name
         )  # I add an ASG name to a list because suspend_processes expects a list
         self.conn.suspend_processes(AutoScalingGroupName=name)
         if self.suspend_status():
             util.message_integrations(
                 "Suspended all autoscaling processes for {}".format(name))
             return
         else:
             util.message_integrations(
                 "Failed to suspend autoscaling processes for {}".format(
                     name))
             return
Example #4
    def activate(self):
        debug("in launchconfig.py activate")
        conn = util.as_conn()
        name = self.name()

        if self.exists():
            pprint("in launchconfig.py self.exists()")
            # NOTE: I don't think program logic ever gets here
            if not util.confirm("LaunchConfig {} already exists, overwrite?".format(name)):
                pprint("in launchconfig.py activate: Confirmed overwriting LaunchConfig")
                return True
            # delete existing
            pprint("in launchconfig.py activate: deleting LaunchConfig")
            conn.delete_launch_configuration(LaunchConfigurationName=name)

        # get configuration for this LC
        cfg = self.role_config

        # NOTE: wrap the following in a try block to catch errors
        lc = conn.create_launch_configuration(
            AssociatePublicIpAddress = True, # this is required to make your stuff actually work
            LaunchConfigurationName = name,
            IamInstanceProfile = cfg.get('iam_profile'),
            ImageId = cfg.get('ami'),
            InstanceType = cfg.get('instance_type'),
            KeyName = cfg.get('keypair_name'),
            UserData = self.cloud_init_script(),
            SecurityGroups = cfg.get('security_groups')
        )
        #if not conn.create_launch_configuration(lc):
        #    print "Error creating LaunchConfig {}".format(name)
        #    return False
        util.message_integrations("Activated LaunchConfig {}".format(name))
        return lc
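
Many of these examples gate a destructive or irreversible step behind util.confirm() before touching AWS or messaging the integrations. That helper is not shown on this page either; a minimal sketch, assuming a plain interactive yes/no prompt, could be:

# Hypothetical sketch of util.confirm, not the actual udo implementation:
# ask the operator on stdin and return True only for an explicit yes.
def confirm(prompt):
    answer = raw_input("{} [y/N] ".format(prompt))  # raw_input is Python 2
    return answer.strip().lower() in ('y', 'yes')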
Example #5
    def activate(self):
        conn = vpc_conn()

        cfg = _cfg.get('clusters', self.name)
        if not cfg:
            print "No configuration found for {}".format(self.name)
            return False

        vpc = get_vpc_by_name(self.name)
        if vpc:
            print "Cluster {} already exists".format(self.name)
            return

        # create VPC
        subnet_cidr = cfg.get('subnet_cidr')
        if not subnet_cidr:
            print "No subnet definition found for {}".format(self.name)
            return False
        vpc = conn.create_vpc(subnet_cidr)
        vpc.add_tag('Name', value=self.name)
        # for now assume that our subnet is the same CIDR as the VPC
        # this is simpler but less fancy
        subnet = conn.create_subnet(vpc.id, subnet_cidr)
        # all done
        util.message_integrations("Created VPC {}".format(self.name))

        # mark that this cluster is being managed by udo
        vpc.add_tag('udo', value=True)

        return True
Example #6
File: launchconfig.py Project: anbet/udo
    def activate(self):
        conn = util.as_conn()
        conn = boto.ec2.autoscale.connect_to_region('us-west-2')

        name = self.name()

        # check if this LC already exists
        if self.exists():
            if not util.confirm("LaunchConfig {} already exists, overwrite?".format(name)):
                return True
            # delete existing
            conn.delete_launch_configuration(name)

        # get configuration for this LC
        cfg = self.role_config
        lc = LaunchConfiguration(
            name = name,
            image_id = cfg.get('ami'),
            instance_profile_name = cfg.get('iam_profile'),
            instance_type = cfg.get('instance_type'),
            security_groups = cfg.get('security_groups'),
            key_name = cfg.get('keypair_name'),
            user_data = self.cloud_init_script(),
            associate_public_ip_address = True,  # this is required for your shit to actually work
        )
        if not conn.create_launch_configuration(lc):
            print "Error creating LaunchConfig {}".format(name)
            return False

        util.message_integrations("Activated LaunchConfig {}".format(name))

        return lc
Example #7
File: asgroup.py Project: abunuwas/udo
    def scale(self, desired):
        debug("In asgroup.y scale")
        asgroup = self.get_asgroup()
        client = util.as_conn()
        asg_name = self.name()

        if desired < asgroup['MinSize']:
            print "Cannot scale: {} is lower than MinSize ({})".format(
                desired, asgroup['MinSize'])
            return
        if desired > asgroup['MaxSize']:
            print "Cannot scale: {} is greater than MaxSize ({})".format(
                desired, asgroup['MaxSize'])
            if not util.confirm("Increase MaxSize to {}?".format(desired)):
                return
            asgroup['MaxSize'] = desired
            client.update_auto_scaling_group(AutoScalingGroupName=asg_name,
                                             MaxSize=desired)
        current = asgroup['DesiredCapacity']

        # Set DesiredCapacity
        response = client.set_desired_capacity(AutoScalingGroupName=asg_name,
                                               DesiredCapacity=desired)

        # Check if DesiredCapacity was changed
        debug("in asgroup.py scale: running 'asgroup = self.get_asgroup()'")
        asgroup = self.get_asgroup()
        new = asgroup['DesiredCapacity']
        if (new != current):
            msg = "Changed ASgroup {} desired_capacity from {} to {}".format(
                asg_name, current, new)
            util.message_integrations(msg)
Example #8
    def activate(self):
        conn = util.as_conn()
        conn = boto.ec2.autoscale.connect_to_region('us-west-2')

        name = self.name()

        # check if this LC already exists
        if self.exists():
            if not util.confirm("LaunchConfig {} already exists, overwrite?".format(name)):
                return True
            # delete existing
            conn.delete_launch_configuration(name)

        # get configuration for this LC
        cfg = self.role_config
        lc = LaunchConfiguration(
            name = name,
            image_id = cfg.get('ami'),
            instance_profile_name = cfg.get('iam_profile'),
            instance_type = cfg.get('instance_type'),
            security_groups = cfg.get('security_groups'),
            key_name = cfg.get('keypair_name'),
            user_data = self.cloud_init_script(),
            associate_public_ip_address = True,  # this is required for your shit to actually work
        )
        if not conn.create_launch_configuration(lc):
            print "Error creating LaunchConfig {}".format(name)
            return False

        util.message_integrations("Activated LaunchConfig {}".format(name))

        return lc
Example #9
File: asgroup.py Project: nivertech/udo
 def reload(self):
     if not util.confirm(
             "Are you sure you want to tear down the {} ASgroup and recreate it?"
             .format(self.name())):
         return
     util.message_integrations("Reloading ASgroup {}".format(self.name()))
     self.deactivate()
     util.retry(lambda: self.activate(), 60)
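
The reload() above leans on util.retry() to keep re-running activate() until AWS has finished tearing the old group down. That helper is also outside this listing; a plausible sketch, assuming the second argument is a total timeout in seconds, would be:

# Hypothetical sketch of util.retry, assuming the second argument is a total
# timeout in seconds: call fn() repeatedly until it returns something truthy
# or the deadline passes, reporting intermediate errors along the way.
import time

def retry(fn, timeout, interval=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            result = fn()
            if result:
                return result
        except Exception as e:
            print "retrying after error: {}".format(e)
        time.sleep(interval)
    return False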
Example #10
File: launchconfig.py Project: anbet/udo
 def deactivate(self):
     if not self.exists():
         return
     print "Deleting launchconfig..."
     lc = self.get_lc()
     if util.retry(lambda: lc.delete(), 500):
         util.message_integrations("Deleted LaunchConfig {}".format(self.name()))
     else:
         util.message_integrations("Failed to delete LaunchConfig {}".format(self.name()))
Example #11
 def deactivate(self):
     if not self.exists():
         return
     print "Deleting launchconfig..."
     lc = self.get_lc()
     if util.retry(lambda: lc.delete(), 500):
         util.message_integrations("Deleted LaunchConfig {}".format(self.name()))
     else:
         util.message_integrations("Failed to delete LaunchConfig {}".format(self.name()))
Example #12
File: main.py Project: anbet/udo
    def test(self, *args):
        args = list(args)
        if not len(args) or not args[0]:
            print "test command requires an action. Valid actions are: "
            print " integrations"
            return
        action = args.pop(0)

        if action == 'integrations':
            util.message_integrations("Testing Udo integrations")
        else:
            print "Unknown test command: {}".format(action)
Example #13
File: main.py Project: joegross/udo
    def test(self, *args):
        args = list(args)
        if not len(args) or not args[0]:
            print "test command requires an action. Valid actions are: "
            print " integrations"
            return
        action = args.pop(0)

        if action == 'integrations':
            util.message_integrations("Testing Udo integrations")
        else:
            print "Unknown test command: {}".format(action)
Example #14
    def create(self, commit_id):
        cfg = self.get_deploy_config()
        if not cfg:
            print "deploy configuration not found"
            return
        if not 'group' in cfg:
            print "deployment group not specified in deployment configuration"
            print "deployment configuration:"
            print cfg
            return
        if not 'application' in cfg:
            print "deployment application not specified in deployment configuration"
            self.list_applications()
            return

        # get source info
        # assume github for now
        repo_name = None
        source = None
        rev_type = 'github'
        if not 'github' in cfg:
            print "github info not specified in deployment configuration"
            return
        source = cfg['github']
        if not 'repo' in source:
            print "deployment github repository not specified in deployment configuration"
            return
        repo_name = source['repo']

        group_name = cfg['group']
        application_name = cfg['application']
        deploy_rev = {
            'revisionType': 'GitHub',
            'gitHubLocation': {
                'repository': repo_name,
                'commitId': commit_id,
            }
        }
        msg = "Deploying commit {} to deployment group: {}".format(
            self.commit_id_display(commit_id), group_name)
        deployment = self.conn.create_deployment(
            application_name,
            deployment_group_name=group_name,
            revision=deploy_rev,
            ignore_application_stop_failures=False,
        )
        if not deployment:
            # prob won't reach here, will throw error instead
            print "Deployment failed"
            return
        deployment_id = deployment['deploymentId']
        util.message_integrations(msg)
        self.list_deployments(deployment_id)
Example #15
File: asgroup.py Project: nivertech/udo
    def activate(self):
        conn = util.as_conn()
        cfg = self.role_config
        name = self.name()

        # ensure this LC already exists
        if not self.activate_lc():
            return False

        # look up subnet id
        subnets = [
            self.find_vpc_subnet_by_cidr(cidr)
            for cidr in cfg.get('subnets_cidr')
        ]
        if not subnets or not len(subnets) or None in subnets:
            print "Valid subnets_cidr are required for {}/{}".format(
                self.cluster_name, self.role_name)
            return False
        print "Using subnets {}".format(", ".join([s.id for s in subnets]))
        print "AZs: {}".format(cfg.get('availability_zones'))

        # does the ASgroup already exist?
        ag = AutoScalingGroup(
            group_name=self.name(),
            load_balancers=cfg.get('elbs'),
            availability_zones=cfg.get('availability_zones'),
            vpc_zone_identifier=[s.id for s in subnets],
            launch_config=self.lc().name(),
            desired_capacity=cfg.get('scale_policy', 'desired'),
            min_size=cfg.get('scale_policy', 'min_size'),
            max_size=cfg.get('scale_policy', 'max_size'),
        )

        if not conn.create_auto_scaling_group(ag):
            print "Failed to create autoscale group"
            return False

        # prepare instance tags
        tags = cfg.get('tags')
        if not tags:
            tags = {}
        tags['cluster'] = self.cluster_name
        tags['role'] = self.role_name
        tags['Name'] = self.name()

        # apply tags
        tag_set = [self.ag_tag(ag, k, v) for (k, v) in tags.iteritems()]
        conn.create_or_update_tags(tag_set)

        util.message_integrations("Activated ASgroup {}".format(name))

        return ag
Example #16
    def reload(self):
        # skip deactivation if it doesn't exist
        asgroup = self.get_asgroup()
        if not asgroup or not self.exists():
            self.activate()
            return

        debug("In asgroup.py reload")
        if not util.confirm("Are you sure you want to tear down the {} ASgroup and recreate it?".format(self.name())):
            return
        util.message_integrations("Reloading ASgroup {}".format(self.name()))
        self.deactivate()
        util.retry(lambda: self.activate(), 60)
Example #17
 def deactivate(self):
     debug("in launchconfig.py deactivate")
     if not self.exists():
         return
     print "Deleting launchconfig..."
     client = util.as_conn()
     response = client.delete_launch_configuration( LaunchConfigurationName = self.name() )
     sleep(5) # give aws a chance to delete the launchconfig
     try:
         response = client.describe_launch_configurations( LaunchConfigurationName = self.name() )
         util.message_integrations("Failed to delete LaunchConfig {}".format(self.name()))
     except:
         util.message_integrations("Deleted LaunchConfig {}".format(self.name()))
Example #18
File: asgroup.py Project: abunuwas/udo
    def reload(self):
        # skip deactivation if it doesn't exist
        asgroup = self.get_asgroup()
        if not asgroup or not self.exists():
            self.activate()
            return

        debug("In asgroup.py reload")
        if not util.confirm(
                "Are you sure you want to tear down the {} ASgroup and recreate it?"
                .format(self.name())):
            return
        util.message_integrations("Reloading ASgroup {}".format(self.name()))
        self.deactivate()
        util.retry(lambda: self.activate(), 60)
Example #19
    def deactivate(self): # a.k.a asg destroy
        # NOTE
        # deleting asg logic should be in its own function

        # * delete ASG by reducing capacities of asg to 0
        # * delete launchconfig
        #
        # reducing ASG capacities to 0 triggers eventual instance
        # termination
        debug("In asgroup.py deactivate")        

        asg_name = self.name()
        ag = util.as_conn()
        ec2 = util.ec2_conn()
    
        asg_info = ag.describe_auto_scaling_groups( AutoScalingGroupNames = [ asg_name ] )

        if not asg_info['AutoScalingGroups']:
            print("ASG does not exist.  Maybe it was already deleted?")
        else:
            # delete the ASG
            num_instances = len(asg_info['AutoScalingGroups'][0]['Instances'])
            if self.get_num_instances() == 0:
                pprint("There are no instances in asg: " + asg_name)
                print("Deleting asg: " + asg_name)
                response = ag.delete_auto_scaling_group( AutoScalingGroupName=asg_name )
                util.message_integrations("Deleted ASgroup {}".format(asg_name))
            else:
                debug("There are " + str(num_instances) + " instances that need to be removed from asg: " + asg_name)
                debug("terminating instances in asg: " + asg_name)
                debug("by setting to 0 MinSize, MaxSize, DesiredCapacity")
                response = ag.update_auto_scaling_group(AutoScalingGroupName = asg_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
                debug("Waiting 30 seconds to give AWS time to terminate the instances")

                if self.get_num_instances() != 0:
                    util.retry(lambda: ag.delete_auto_scaling_group(AutoScalingGroupName=asg_name), 300)
                if self.get_num_instances() != 0 or self.get_num_instances():
                    print("unable to delete instances in asg.")
                    return False
                util.message_integrations("Deleted ASgroup {}".format(asg_name))

        # if launch config exists, delete it 
        lc = self.lc()
        if not lc.exists():
            print("launchconfig does not exist.  Maybe you deleted it already?")
        else:
            lc.deactivate()
        return True
Example #20
File: asgroup.py Project: Bauerpauer/udo
    def activate(self):
        conn = util.as_conn()
        cfg = self.role_config
        name = self.name()

        # ensure this LC already exists
        if not self.activate_lc():
            return False

        # look up subnet id
        subnets = [self.find_vpc_subnet_by_cidr(cidr) for cidr in cfg.get('subnets_cidr')]
        if not subnets or not len(subnets) or None in subnets:
            print "Valid subnets_cidr are required for {}/{}".format(self.cluster_name, self.role_name)
            return False
        print "Using subnets {}".format(", ".join([s.id for s in subnets]))
        print "AZs: {}".format(cfg.get('availability_zones'))

        # does the ASgroup already exist?
        ag = AutoScalingGroup(
            group_name=self.name(),
            load_balancers=cfg.get('elbs'),
            availability_zones=cfg.get('availability_zones'),
            vpc_zone_identifier=[s.id for s in subnets],
            launch_config=self.lc().name(),
            desired_capacity=cfg.get('scale_policy', 'desired'),
            min_size=cfg.get('scale_policy', 'min_size'),
            max_size=cfg.get('scale_policy', 'max_size'),
        )

        if not conn.create_auto_scaling_group(ag):
            print "Failed to create autoscale group"
            return False

        # prepare instance tags
        tags = cfg.get('tags')
        if not tags:
            tags = {}
        tags['cluster'] = self.cluster_name
        tags['role'] = self.role_name
        tags['Name'] = self.name()

        # apply tags        
        tag_set = [self.ag_tag(ag, k,v) for (k,v) in tags.iteritems()]
        conn.create_or_update_tags(tag_set)

        util.message_integrations("Activated ASgroup {}".format(name))

        return ag
Example #21
File: asgroup.py Project: Bauerpauer/udo
 def deactivate(self):
     if not self.exists():
         return
     ag = self.get_asgroup()
     ag.min_size = 0
     ag.max_size = 0
     ag.desired_capacity = 0
     ag.update()
     ag.shutdown_instances()
     print "Deleting... this may take a few minutes..."
     if util.retry(lambda: ag.delete(), 500):
         util.message_integrations("Deleted ASgroup {}".format(self.name()))
         # delete launchconfig too
         lc = self.lc()
         lc.deactivate()
     else:
         util.message_integrations("Failed to delete ASgroup {}".format(self.name()))
Example #22
File: asgroup.py Project: nivertech/udo
 def deactivate(self):
     if not self.exists():
         return
     ag = self.get_asgroup()
     ag.min_size = 0
     ag.max_size = 0
     ag.desired_capacity = 0
     ag.update()
     ag.shutdown_instances()
     print "Deleting... this may take a few minutes..."
     if util.retry(lambda: ag.delete(), 500):
         util.message_integrations("Deleted ASgroup {}".format(self.name()))
         # delete launchconfig too
         lc = self.lc()
         lc.deactivate()
     else:
         util.message_integrations("Failed to delete ASgroup {}".format(
             self.name()))
Example #23
 def resume(self):
     debug("In asgroup.py resume")
     name = self.name()
     asg_policies = self.conn.describe_policies( AutoScalingGroupName = name )['ScalingPolicies']
     if not asg_policies:
         print("ASG %s has no autoscaling processes to resume" % name)
         return
     if self.suspend_status():
         self.conn.resume_processes( AutoScalingGroupName = name)
     else:
         print("ASG %s has no suspended processes to resume" % name)
         return
     if not self.suspend_status():
         util.message_integrations("Resumed autoscaling processes for {}".format(name))
         return
     else:
         util.message_integrations("Failed to resume autoscaling processes for {}".format(name))
         return
Example #24
File: deploy.py Project: Bauerpauer/udo
    def create(self, group_name, commit_id):
        cfg = self.config()

        if not 'application' in cfg:
            print "deployment application not specified in deployment configuration"
            self.list_applications()
            return

        # get source info
        # assume github for now
        repo_name = None
        source = None
        rev_type = 'github'
        if not 'github' in cfg:
            print "github info not specified in deployment configuration"
            return
        source = cfg['github']
        if not 'repo' in source:
            print "deployment github repository not specified in deployment configuration"
            return
        repo_name = source['repo']

        application_name = cfg['application']
        deploy_rev = {
            'revisionType': 'GitHub',
            'gitHubLocation': {
                'repository': repo_name,
                'commitId': commit_id,
            }
        }
        msg = "Deploying commit {} to deployment group: {}".format(self.commit_id_display(commit_id), group_name)
        deployment = self.conn.create_deployment(application_name,
            deployment_group_name=group_name,
            revision=deploy_rev,
            ignore_application_stop_failures=False,
        )
        if not deployment:
            # prob won't reach here, will throw error instead
            print "Deployment failed"
            return
        deployment_id = deployment['deploymentId']
        util.message_integrations(msg)
        self.list_deployments(deployment_id)
Example #25
 def suspend(self):
     debug("In asgroup.py suspend")
     name = self.name()
     asg_policies = self.conn.describe_policies( AutoScalingGroupName = name )['ScalingPolicies']
     if not asg_policies: # if we can't find the status of any ScalingPolicies, there are no policies
         print("ASG %s has no autoscaling processes to suspend" % name)
         return
     if self.suspend_status():
         print("ASG %s is already suspended" % name)
         return
     else:
         group = []
         group.append(name) # I add an ASG name to a list because suspend_processes expects a list
         self.conn.suspend_processes( AutoScalingGroupName = name)
         if self.suspend_status():
             util.message_integrations("Suspended all autoscaling processes for {}".format(name))
             return
         else:
             util.message_integrations("Failed to suspend autoscaling processes for {}".format(name))
             return
Example #26
File: asgroup.py Project: Bauerpauer/udo
    def scale(self, desired):
        asgroup = self.get_asgroup()

        if desired < asgroup.min_size:
            print "Cannot scale: {} is lower than min_size ({})".format(desired, asgroup.min_size)
            return
        if desired > asgroup.max_size:
            print "Cannot scale: {} is greater than max_size ({})".format(desired, asgroup.max_size)
            if not util.confirm("Increase max_size to {}?".format(desired)):
                return
            asgroup.max_size = desired

        current = asgroup.desired_capacity
        asgroup.desired_capacity = desired
        asgroup.update()
        asgroup = self.get_asgroup()
        new = asgroup.desired_capacity
        if (new != current):
            msg = "Changed ASgroup {} desired_capacity from {} to {}".format(self.name(), current, new)
            util.message_integrations(msg)
Example #27
File: asgroup.py Project: abunuwas/udo
 def resume(self):
     debug("In asgroup.py resume")
     name = self.name()
     asg_policies = self.conn.describe_policies(
         AutoScalingGroupName=name)['ScalingPolicies']
     if not asg_policies:
         print("ASG %s has no autoscaling processes to resume" % name)
         return
     if self.suspend_status():
         self.conn.resume_processes(AutoScalingGroupName=name)
     else:
         print("ASG %s has no suspended processes to resume" % name)
         return
     if not self.suspend_status():
         util.message_integrations(
             "Resumed autoscaling processes for {}".format(name))
         return
     else:
         util.message_integrations(
             "Failed to resume autoscaling processes for {}".format(name))
         return
Example #28
File: cluster.py Project: joegross/udo
    def activate(self):
        debug("In cluster.py activate")
        conn = vpc_conn()

        cfg = _cfg.get('clusters', self.name)
        if not cfg:
            print "No configuration found for {}".format(self.name)
            return False

        vpc = get_vpc_by_name(self.name)
        if vpc:
            print "Cluster {} already exists".format(self.name)
            return

        # create VPC
        #
        # boto3 docs example for creating a vpc:
        # ec2 = boto3.resource('ec2')
        # vpc = ec2.create_vpc(CidrBlock='10.0.0.0/24')
        # subnet = vpc.create_subnet(CidrBlock='10.0.0.0/25')
        # gateway = ec2.create_internet_gateway()
 
        subnet_cidr = cfg.get('subnet_cidr')
        if not subnet_cidr:
            print "No subnet definition found for {}".format(self.name)
            return False
        vpc = conn.create_vpc(subnet_cidr)
        vpc.add_tag('Name', value=self.name)
        # for now assume that our subnet is the same CIDR as the VPC
        # this is simpler but less fancy
        subnet = conn.create_subnet(vpc.id, subnet_cidr)
        # all done
        util.message_integrations("Created VPC {}".format(self.name))

        # mark that this cluster is being managed by udo
        vpc.add_tag('udo', value=True)

        return True
Example #29
File: asgroup.py Project: nivertech/udo
    def scale(self, desired):
        asgroup = self.get_asgroup()

        if desired < asgroup.min_size:
            print "Cannot scale: {} is lower than min_size ({})".format(
                desired, asgroup.min_size)
            return
        if desired > asgroup.max_size:
            print "Cannot scale: {} is greater than max_size ({})".format(
                desired, asgroup.max_size)
            if not util.confirm("Increase max_size to {}?".format(desired)):
                return
            asgroup.max_size = desired

        current = asgroup.desired_capacity
        asgroup.desired_capacity = desired
        asgroup.update()
        asgroup = self.get_asgroup()
        new = asgroup.desired_capacity
        if (new != current):
            msg = "Changed ASgroup {} desired_capacity from {} to {}".format(
                self.name(), current, new)
            util.message_integrations(msg)
Example #30
    def create(self, group_name, commit_id):
        debug("in deploy.py create")
        cfg = self.config()

        if not 'application' in cfg:
            print "deployment application not specified in deployment configuration"
            self.list_applications()
            return

        # get source info
        # assume github for now
        repo_name = None
        source = None
        rev_type = 'github'
        if not 'github' in cfg:
            print "github info not specified in deployment configuration"
            return
        source = cfg['github']
        if not 'repo' in source:
            print "deployment github repository not specified in deployment configuration"
            return
        repo_name = source['repo']

        application_name = cfg['application']

        msg = "Deploying commit {} to deployment group: {}".format(self.commit_id_display(commit_id), group_name)

        deployment_asg_info = self.conn.get_deployment_group(applicationName=application_name,
                    deploymentGroupName=group_name)['deploymentGroupInfo']['autoScalingGroups']

        # NOTE: There is probably a better way of getting role_name and cluster_name
        # This depends on role_name and cluster_name being separated with a "-"
        # This works but I don't like it
        def asg_autoscaling_control(action):
            # This will suspend or resume the autoscaling processes in every ASG that is
            # part of the CodeDeploy group
            for asg_info in deployment_asg_info:
                _asg = (asg_info['name'])
                cluster_name = re.search(r'^(.*?)-', _asg).group(1)
                role_name = _asg.split(cluster_name + '-')[1]
                asg = asgroup.AutoscaleGroup(cluster_name, role_name)
                if action == 'suspend':
                    asg.suspend()
                elif action == 'resume':
                    asg.resume()

        if _suspend_on_deploy:
            asg_autoscaling_control('suspend')

        deployment = self.conn.create_deployment(applicationName=application_name,
            deploymentGroupName=group_name,
            revision={ 'revisionType': 'GitHub',
                       'gitHubLocation': {
                           'repository': repo_name,
                           'commitId' : commit_id
                           }
                   },
            # deploymentConfigName = string,
            ignoreApplicationStopFailures = False,
            )
        if not deployment:
            # prob won't reach here, will throw error instead
            print "Deployment failed"
            return
        deployment_id = deployment['deploymentId']
        debug("in deploy.py create: deploymentId: " + deployment_id)
        
        util.message_integrations(':ship:' + msg)

        debug("in deploy.py create: deploymentId: " + deployment_id)

        pprint("Waiting for deployment...")
        try:
            sleep(5)
        except KeyboardInterrupt:
            return

        interval = 5
        tries = 60
        # deployment_status[status] will be:
        # 'Created'|'Queued'|'InProgress'|'Succeeded'|'Failed'|'Stopped',
        #
        # what you see in AWS dashboard is usually what the current state actually is.
        # info you grab from the API appears to be slightly delayed
        #
        for x in range(0, tries):
            try:
            # should get status here, then check for it, instead of in the loop
                status = self.deployment_status(deployment_id)['status']
                if status == 'Succeeded':
                    _msg = 'Deployment of commit ' + commit_id + ' to deployment group: ' + group_name + ' successful.'
                    util.message_integrations(_msg)
                    if _suspend_on_deploy:
                        asg_autoscaling_control('resume')
                    # define actions in post_deploy_hooks in udo.yml
                    post_deploy_hooks = self.get_post_deploy_hooks(application_name, group_name)
                    if post_deploy_hooks:
                        for post_deploy_hook in post_deploy_hooks:
                            print("running: " + post_deploy_hook)
                            try:
                                command = subprocess.Popen(post_deploy_hook.split())
                            except OSError as e:
                                print e
                                pass
                            except ValueError as e:
                                print e    
                                pass
                            except:
                                pass
                    break
                elif status == 'Failed':
                    if _suspend_on_deploy:
                        asg_autoscaling_control('resume')
                    _msg = "FAILURE to deploy commit ' + commid_id + ' to deployment group: ' + group_name"
                    break
                elif status == 'Created':
                    raise ValueError("deployment has been created... nothing has happened yet")
                elif status == 'Queued':
                    raise ValueError("deployment is Queued")
                elif status == 'InProgress':
                    print("."),
                elif status == 'Stopped':
                    _msg = 'deployment to deployment group ' + group_name + ' is stopped'
                    util.message_integrations(_msg)
                else:
                    pprint("An unknown condition has occurred")
                    pprint("status: " + str(status))
                    sys.exit(1)
            except KeyboardInterrupt:
                break
            except ValueError as e:
                pprint(e)
                pass

            try:
                sleep(interval)
            except KeyboardInterrupt:
                break
Example #31
File: asgroup.py Project: Bauerpauer/udo
 def reload(self):
     if not util.confirm("Are you sure you want to tear down the {} ASgroup and recreate it?".format(self.name())):
         return
     util.message_integrations("Reloading ASgroup {}".format(self.name()))
     self.deactivate()
     util.retry(lambda: self.activate(), 60)
Example #32
File: asgroup.py Project: abunuwas/udo
    def activate(self):
        debug("In asgroup.py activate")
        conn = util.as_conn()
        cfg = self.role_config
        name = self.name()
        if not self.activate_lc():  # ensure this LaunchConfig already exists
            return False

        subnet_cidrs = cfg.get('subnets_cidr')
        if not subnet_cidrs or not len(subnet_cidrs) or None in subnet_cidrs:
            print "Valid subnets_cidr are required for {}/{}".format(
                self.cluster_name, self.role_name)
            return False
        print("Using subnets " + str(subnet_cidrs))

        subnet_ids = self.get_subnet_ids_by_cidrs(subnet_cidrs)
        azs = cfg.get('availability_zones')
        cfg_args = {}

        # If AvailabilityZones is defined, add it to the args we will pass to conn.create_auto_scaling_group()
        if azs:
            cfg_args['AvailabilityZones'] = azs
            print "AZs: {}".format(azs)
        else:
            pprint("No availability_zones set")

        # VPCZoneIdentifier ( which can be plural ) takes a string
        subnet_ids_string = ''
        _length = len(subnet_ids)
        for subnet_id in subnet_ids:
            subnet_ids_string = subnet_ids_string + subnet_id
            if _length > 1:
                subnet_ids_string = subnet_ids_string + ', '
            _length = _length - 1
        pprint("Using subnet ids: " + str(subnet_ids_string))

        cfg_args['AutoScalingGroupName'] = self.name()
        cfg_args['DesiredCapacity'] = cfg.get('scale_policy')['desired']
        cfg_args['LoadBalancerNames'] = cfg.get('elbs')
        cfg_args['LaunchConfigurationName'] = self.lc().get_lc_server_name()
        cfg_args['MaxSize'] = cfg.get('scale_policy', 'max_size')
        cfg_args['MinSize'] = cfg.get('scale_policy', 'min_size')
        cfg_args['VPCZoneIdentifier'] = subnet_ids_string

        if not cfg_args['LoadBalancerNames']:
            cfg_args['LoadBalancerNames'] = []

        response = conn.create_auto_scaling_group(**cfg_args)
        # NOTE: should check if asg was created

        debug('Preparing tags that will be applied to the asg')
        tags = cfg.get('tags')
        if not tags:
            tags = {}
        tags['cluster'] = self.cluster_name
        tags['role'] = self.role_name
        tags['Name'] = self.name()

        # apply tags
        tag_set = [self.ag_tag(name, k, v) for (k, v) in tags.iteritems()]
        debug("Applying tags to asg")
        conn.create_or_update_tags(Tags=tag_set)

        util.message_integrations("Activated ASgroup {}".format(name))
        # NOTE: what should we be returning here?  Not sure.
        #return ag
        return name
Example #33
File: asgroup.py Project: abunuwas/udo
    def deactivate(self):  # a.k.a asg destroy
        # NOTE
        # deleting asg logic should be in its own function

        # * delete ASG by reducing capacities of asg to 0
        # * delete launchconfig
        #
        # reducing ASG capacities to 0 triggers eventual instance
        # termination
        debug("In asgroup.py deactivate")

        asg_name = self.name()
        ag = util.as_conn()
        ec2 = util.ec2_conn()

        asg_info = ag.describe_auto_scaling_groups(
            AutoScalingGroupNames=[asg_name])

        if not asg_info['AutoScalingGroups']:
            print("ASG does not exist.  Maybe it was already deleted?")
        else:
            # delete the ASG
            num_instances = len(asg_info['AutoScalingGroups'][0]['Instances'])
            if self.get_num_instances() == 0:
                pprint("There are no instances in asg: " + asg_name)
                print("Deleting asg: " + asg_name)
                response = ag.delete_auto_scaling_group(
                    AutoScalingGroupName=asg_name)
                util.message_integrations(
                    "Deleted ASgroup {}".format(asg_name))
            else:
                debug("There are " + str(num_instances) +
                      " instances that need to be removed from asg: " +
                      asg_name)
                debug("terminating instances in asg: " + asg_name)
                debug("by setting to 0 MinSize, MaxSize, DesiredCapacity")
                response = ag.update_auto_scaling_group(
                    AutoScalingGroupName=asg_name,
                    MinSize=0,
                    MaxSize=0,
                    DesiredCapacity=0)
                debug(
                    "Waiting 30 seconds to give AWS time to terminate the instances"
                )

                if self.get_num_instances() != 0:
                    util.retry(
                        lambda: ag.delete_auto_scaling_group(
                            AutoScalingGroupName=asg_name), 300)
                if self.get_num_instances() != 0 or self.get_num_instances():
                    print("unable to delete instances in asg.")
                    return False
                util.message_integrations(
                    "Deleted ASgroup {}".format(asg_name))

        # if launch config exists, delete it
        lc = self.lc()
        if not lc.exists():
            print(
                "launchconfig does not exist.  Maybe you deleted it already?")
        else:
            lc.deactivate()
        return True
Example #34
    def create(self, group_name, commit_id):
        debug("in deploy.py create")
        cfg = self.config()

        if not 'application' in cfg:
            print "deployment application not specified in deployment configuration"
            self.list_applications()
            return

        # get source info
        # assume github for now
        repo_name = None
        source = None
        rev_type = 'github'
        if not 'github' in cfg:
            print "github info not specified in deployment configuration"
            return
        source = cfg['github']
        if not 'repo' in source:
            print "deployment github repository not specified in deployment configuration"
            return
        repo_name = source['repo']
        application_name = cfg['application']
        deployment_asg_info = self.conn.get_deployment_group(applicationName=application_name,
                    deploymentGroupName=group_name)['deploymentGroupInfo']['autoScalingGroups']

        # NOTE: There is probably a better way of getting role_name and cluster_name
        # This depends on role_name and cluster_name being separated with a "-"
        # This works but I don't like it
        def asg_autoscaling_control(action):
            # This will suspend or resume the autoscaling processes in every ASG that is
            # part of the CodeDeploy group
            for asg_info in deployment_asg_info:
                _asg = (asg_info['name'])
                cluster_name = re.search(r'^(.*?)-', _asg).group(1)
                role_name = _asg.split(cluster_name + '-')[1]
                asg = asgroup.AutoscaleGroup(cluster_name, role_name)
                if action == 'suspend':
                    asg.suspend()
                elif action == 'resume':
                    asg.resume()

        if _suspend_on_deploy:
            asg_autoscaling_control('suspend')

        deployment = self.conn.create_deployment(applicationName=application_name,
            deploymentGroupName=group_name,
            revision={ 'revisionType': 'GitHub',
                       'gitHubLocation': {
                           'repository': repo_name,
                           'commitId' : commit_id
                           }
                   },
            # deploymentConfigName = string,
            ignoreApplicationStopFailures = False,
            )
        if not deployment:
            # prob won't reach here, will throw error instead
            print "Deployment failed"
            return

        msg = "Deploying commit {} to deployment group: {}".format(self.commit_id_display(commit_id), group_name)
        util.message_integrations(msg, icon=':ship:')

        # now we wait...
        # CodeDeploy waiters courtesy of https://github.com/boto/boto3/issues/708
        waiter = self.conn.get_waiter('deployment_successful')
        deployment_id = deployment['deploymentId']
        print("Waiting for deployment completion...")
        try:
            deploy_err = waiter.wait(deploymentId=deployment_id)
            if deploy_err:
                print("Deploy failed:", deploy_err)
                return
        except botocore.exceptions.WaiterError as we:
            print("Failure:", we)
            return
        status = self.deployment_status(deployment_id)['status']
        print("Deploy status: {}".format(status))
        if status == 'Succeeded':
            _msg = 'Deployment of commit ' + commit_id + ' to deployment group: ' + group_name + ' successful.'
            util.message_integrations(_msg, icon=':ship:')
            if _suspend_on_deploy:
                asg_autoscaling_control('resume')
            # define actions in post_deploy_hooks in udo.yml
            post_deploy_hooks = self.get_post_deploy_hooks(application_name, group_name)
            if post_deploy_hooks:
                for post_deploy_hook in post_deploy_hooks:
                    print("Running post_deploy_hook: " + post_deploy_hook)
                    try:
                        command = subprocess.Popen(post_deploy_hook.split())
                    except Exception as e:
                        print e
                        pass
        elif status == 'Failed':
            if _suspend_on_deploy:
                asg_autoscaling_control('resume')
            _msg = "FAILURE to deploy commit ' + commid_id + ' to deployment group: ' + group_name"
        elif status == 'Created':
            raise ValueError("deployment has been created... nothing has happened yet")
        elif status == 'Queued':
            raise ValueError("deployment is Queued")
        elif status == 'InProgress':
            print("."),
            sys.stdout.flush()
        elif status == 'Stopped':
            _msg = 'deployment to deployment group ' + group_name + ' is stopped'
            util.message_integrations(_msg, icon=':ship:')
        else:
            pprint("An unknown condition has occurred")
            pprint("status: " + str(status))
            sys.exit(1)
Example #35
File: deploy.py Project: joegross/udo
    def create(self, group_name, commit_id):
    # NOTE:
    #
    # we could put something at end that alerts if deployment was successful or not
        debug("in deploy.py create")
        cfg = self.config()

        if not 'application' in cfg:
            print "deployment application not specified in deployment configuration"
            self.list_applications()
            return

        # get source info
        # assume github for now
        repo_name = None
        source = None
        rev_type = 'github'
        if not 'github' in cfg:
            print "github info not specified in deployment configuration"
            return
        source = cfg['github']
        if not 'repo' in source:
            print "deployment github repository not specified in deployment configuration"
            return
        repo_name = source['repo']

        application_name = cfg['application']
        #deploy_rev = {
        #    'revisionType': 'GitHub',
        #    'gitHubLocation': {
        #        'repository': repo_name,
        #        'commitId': commit_id,
        #    }
        #}
        msg = "Deploying commit {} to deployment group: {}".format(self.commit_id_display(commit_id), group_name)


        deployment = self.conn.create_deployment(applicationName=application_name,
            deploymentGroupName=group_name,
            revision={ 'revisionType': 'GitHub',
                       'gitHubLocation': {
                           'repository': repo_name,
                           'commitId' : commit_id
                           }
                   },
            # deploymentConfigName = string,
            ignoreApplicationStopFailures = False,
            )
        if not deployment:
            # prob won't reach here, will throw error instead
            print "Deployment failed"
            return
        deployment_id = deployment['deploymentId']
        debug("in deploy.py create: deploymentId: " + deployment_id)
        
        util.message_integrations(':ship:' + msg)

        debug("in deploy.py create: deploymentId: " + deployment_id)

        pprint("Waiting 20 seconds to give AWS a chance to complete deployment")
        try:
            sleep(20)
        except KeyboardInterrupt:
            pprint("Got impatient")

        interval = 20
        tries = 20
        # deployment_status[status] will be:
        # 'Created'|'Queued'|'InProgress'|'Succeeded'|'Failed'|'Stopped',
        #
        # what you see in AWS dashboard appears to be current
        # info you grab from the API is slightly delayed
        #
        for x in range(0, tries):
            try:
            # should get status here, then check for it, instead of in the loop
                status = self.deployment_status(deployment_id)['status']
                if status == 'Succeeded':
                    _msg = 'Deployment of commit ' + commit_id + ' to deployment group: ' + group_name + ' successful.'
                    util.message_integrations(_msg)
                    # NOTE: this is where we would run a jenkins batch job
                    post_deploy_hooks = self.get_post_deploy_hooks(application_name, group_name)
                    if not post_deploy_hooks:
                        pprint("No post deploy hooks defined")
                    else:
                        for post_deploy_hook in post_deploy_hooks:
                            print("running: " + post_deploy_hook)
                            try:
                                command = subprocess.Popen(post_deploy_hook.split())
                            except OSError as e:
                                print e
                                pass
                            except ValueError as e:
                                print e    
                                pass
                            except:
                                pass
                    break
                elif status == 'Failed':
                    _msg = "FAILURE to deploy commit ' + commid_id + ' to deployment group: ' + group_name"
                    break
                elif status == 'Created':
                    raise ValueError("deployment has been created... nothing has happened yet")
                elif status == 'Queued':
                    raise ValueError("deployment is Queued")
                elif status == 'InProgress':
                    raise ValueError("deployment is InProgress")
                elif status == 'Stopped':
                    _msg = 'deployment to deployment group ' + group_name + ' is stopped'
                    util.message_integrations(_msg)
                else:
                    pprint("An unknown condition has occurred")
                    pprint("status: " + str(status))
                    sys.exit(1)
            except KeyboardInterrupt:
                pprint("Got impatient")
            except ValueError as e:
                pprint(e)
                pass

            try:
                if e:
                    pprint("pausing " + str(interval) + " seconds")
                    sleep(interval)
                else:
                    break
            except KeyboardInterrupt:
                pprint("Got impatient")
Example #36
    def create(self, group_name, commit_id):
        debug("in deploy.py create")
        cfg = self.config()

        if not 'application' in cfg:
            print "deployment application not specified in deployment configuration"
            self.list_applications()
            return

        # get source info
        # assume github for now
        repo_name = None
        source = None
        rev_type = 'github'
        if not 'github' in cfg:
            print "github info not specified in deployment configuration"
            return
        source = cfg['github']
        if not 'repo' in source:
            print "deployment github repository not specified in deployment configuration"
            return
        repo_name = source['repo']

        application_name = cfg['application']

        msg = "Deploying commit {} to deployment group: {}".format(self.commit_id_display(commit_id), group_name)

        deployment_asg_info = self.conn.get_deployment_group(applicationName=application_name,
                    deploymentGroupName=group_name)['deploymentGroupInfo']['autoScalingGroups']

        # NOTE: There is probably a better way of getting role_name and cluster_name
        # This depends on role_name and cluster_name being separated with a "-"
        # This works but I don't like it
        def asg_autoscaling_control(action):
            # This will suspend or resume the autoscaling processes in every ASG that is
            # part of the CodeDeploy group
            for asg_info in deployment_asg_info:
                _asg = (asg_info['name'])
                cluster_name = re.search(r'^(.*?)-', _asg).group(1)
                role_name = _asg.split(cluster_name + '-')[1]
                asg = asgroup.AutoscaleGroup(cluster_name, role_name)
                if action == 'suspend':
                    asg.suspend()
                elif action == 'resume':
                    asg.resume()

        if _suspend_on_deploy:
            asg_autoscaling_control('suspend')

        deployment = self.conn.create_deployment(applicationName=application_name,
            deploymentGroupName=group_name,
            revision={ 'revisionType': 'GitHub',
                       'gitHubLocation': {
                           'repository': repo_name,
                           'commitId' : commit_id
                           }
                   },
            # deploymentConfigName = string,
            ignoreApplicationStopFailures = False,
            )
        if not deployment:
            # prob won't reach here, will throw error instead
            print "Deployment failed"
            return
        deployment_id = deployment['deploymentId']
        debug("in deploy.py create: deploymentId: " + deployment_id)
        
        util.message_integrations(msg, icon=':ship:')

        debug("in deploy.py create: deploymentId: " + deployment_id)

        pprint("Waiting for deployment...")
        try:
            sleep(5)
        except KeyboardInterrupt:
            return

        interval = 5
        tries = 60
        # deployment_status[status] will be:
        # 'Created'|'Queued'|'InProgress'|'Succeeded'|'Failed'|'Stopped',
        #
        # what you see in AWS dashboard is usually what the current state actually is.
        # info you grab from the API appears to be slightly delayed
        #
        for x in range(0, tries):
            try:
            # should get status here, then check for it, instead of in the loop
                status = self.deployment_status(deployment_id)['status']
                if status == 'Succeeded':
                    _msg = 'Deployment of commit ' + commit_id + ' to deployment group: ' + group_name + ' successful.'
                    util.message_integrations(_msg, icon=':ship:')
                    if _suspend_on_deploy:
                        asg_autoscaling_control('resume')
                    # define actions in post_deploy_hooks in udo.yml
                    post_deploy_hooks = self.get_post_deploy_hooks(application_name, group_name)
                    if post_deploy_hooks:
                        for post_deploy_hook in post_deploy_hooks:
                            print("running: " + post_deploy_hook)
                            try:
                                command = subprocess.Popen(post_deploy_hook.split())
                            except OSError as e:
                                print e
                                pass
                            except ValueError as e:
                                print e    
                                pass
                            except:
                                pass
                    break
                elif status == 'Failed':
                    if _suspend_on_deploy:
                        asg_autoscaling_control('resume')
                    _msg = "FAILURE to deploy commit ' + commid_id + ' to deployment group: ' + group_name"
                    break
                elif status == 'Created':
                    raise ValueError("deployment has been created... nothing has happened yet")
                elif status == 'Queued':
                    raise ValueError("deployment is Queued")
                elif status == 'InProgress':
                    print("."),
                    sys.stdout.flush()
                elif status == 'Stopped':
                    _msg = 'deployment to deployment group ' + group_name + ' is stopped'
                    util.message_integrations(_msg, icon=':ship:')
                    break
                else:
                    pprint("An unknown condition has occured")
                    pprint("status: " + str(status))
                    sys.exit(1)
            except KeyboardInterrupt:
                break
            except ValueError as e:
                pprint(e)
                pass

            try:
                sleep(interval)
            except KeyboardInterrupt:
                break
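
The hand-rolled status polling above could also be expressed with boto3's built-in CodeDeploy waiter. A minimal sketch, assuming a plain boto3 client rather than the self.conn wrapper this project uses:

import boto3
from botocore.exceptions import WaiterError

def wait_for_deployment(deployment_id, region='us-east-1'):
    # polls GetDeployment until the deployment reaches a terminal state
    client = boto3.client('codedeploy', region_name=region)
    waiter = client.get_waiter('deployment_successful')
    try:
        waiter.wait(deploymentId=deployment_id,
                    WaiterConfig={'Delay': 15, 'MaxAttempts': 120})
        return True
    except WaiterError as e:
        # raised when the deployment ends in Failed/Stopped or the waiter times out
        print("deployment did not succeed: %s" % e)
        return False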
Example #37
0
File: asgroup.py Project: joegross/udo
    def deactivate(self): # a.k.a asg destroy
        # NOTE
        # deleting asg logic should be in its own function

        # * delete ASG by reducing capacities of asg to 0
        # * delete launchconfig
        #
        # reducing ASG capacities to 0 triggers eventual instance
        # termination
        debug("In asgroup.py deactivate")        

        asg_name = self.name()
        ag = util.as_conn()
        ec2 = util.ec2_conn()
    
        asg_info = ag.describe_auto_scaling_groups( AutoScalingGroupNames = [ asg_name ] )

        if not asg_info['AutoScalingGroups']:
            print("ASG does not exist.  Maybe it was already deleted? ")
        else:
            # delete the ASG
            num_instances = len(asg_info['AutoScalingGroups'][0]['Instances'])
            if self.get_num_instances() == 0:
                pprint("There are no instances in asg: " + asg_name)
                pprint("Deleting asg: " + asg_name)
                response = ag.delete_auto_scaling_group( AutoScalingGroupName=asg_name )
                util.message_integrations("Deleted ASgroup {}".format(asg_name))
            else:
                debug("There are " + str(num_instances) + " instances that need to be removed from asg: " + asg_name)
                debug("terminating instances in asg: " + asg_name)
                debug("by setting to 0 MinSize, MaxSize, DesiredCapacity")
                response = ag.update_auto_scaling_group(AutoScalingGroupName = asg_name, MinSize=0, MaxSize=0, DesiredCapacity=0)
                debug("Waiting 30 seconds to give AWS time to terminate the instances")
                try:
                    sleep(30)
                except KeyboardInterrupt:
                    pprint("Got impatient.")
                    sys.exit(1)
                interval = 10
                tries = 20
                for x in range(0,tries):
                    try:
                        if self.get_num_instances() != 0:
                            raise ValueError("there are still instances in the ASG")
                            break
                        else:
                            # if num instances in asg is 0,
                            # we are clear to delete
                            break
                    except KeyboardInterrupt:
                        pprint("Got impatient")
                        sys.exit(1)
                    except ValueError as e:
                        pprint(e)
                        pass

                    try:
                        pprint("pausing " + str(interval) + " seconds")
                        sleep(interval)
                    except KeyboardInterrupt:
                        pprint("Got impatient")
                        sys.exit(1)

                if not self.get_num_instances():
                    pprint("instances in asg deleted.")
                    response = ag.delete_auto_scaling_group( AutoScalingGroupName=asg_name )
                    util.message_integrations("Deleted ASgroup {}".format(asg_name))
                else:
                    pprint("unable to delete instances in asg.")

        # if launch config exists, delete it 
        lc = self.lc()
        if not lc.exists():
            print("launchconfig does not exist.  Maybe you deleted it already?")
        else:
            lc.deactivate()
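
The drain-then-delete sequence above can also be factored into a small standalone helper. A rough sketch, assuming a plain boto3 autoscaling client rather than util.as_conn():

import time
import boto3

def drain_and_delete_asg(asg_name, interval=10, tries=20, region='us-east-1'):
    client = boto3.client('autoscaling', region_name=region)
    # scaling everything to zero triggers eventual instance termination
    client.update_auto_scaling_group(AutoScalingGroupName=asg_name,
                                     MinSize=0, MaxSize=0, DesiredCapacity=0)
    for _ in range(tries):
        groups = client.describe_auto_scaling_groups(
            AutoScalingGroupNames=[asg_name])['AutoScalingGroups']
        if not groups or not groups[0]['Instances']:
            break
        time.sleep(interval)
    else:
        raise RuntimeError("instances still present in ASG %s" % asg_name)
    client.delete_auto_scaling_group(AutoScalingGroupName=asg_name)

Note that delete_auto_scaling_group also accepts ForceDelete=True, which deletes the group and terminates its instances in one call, at the cost of skipping the graceful drain shown here.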
Example #38
0
File: asgroup.py Project: joegross/udo
    def activate(self):
        debug("In asgroup.py activate")
        conn = util.as_conn()
        cfg = self.role_config
        name = self.name()
        if not self.activate_lc(): # make sure the LaunchConfig exists first
            return False

        subnet_cidrs = cfg.get('subnets_cidr')
        if not subnet_cidrs or None in subnet_cidrs:
            print "A valid subnets_cidr list is required for {}/{}".format(self.cluster_name, self.role_name)
            return False
        print("Using subnets " + str(subnet_cidrs))

        subnet_ids = self.get_subnet_ids_by_cidrs(subnet_cidrs)
        azs = cfg.get('availability_zones')
        cfg_args = {}

        # If AvailabilityZones is defined, add it to the args we will pass to conn.create_auto_scaling_group()
        if azs:
            cfg_args['AvailabilityZones'] = azs
            print "AZs: {}".format(azs)
        else:
            pprint("No availability_zones set")

        # VPCZoneIdentifier takes a single comma-separated string of subnet ids
        subnet_ids_string = ','.join(subnet_ids)
        pprint("Using subnet ids: " + subnet_ids_string)

        cfg_args['AutoScalingGroupName'] = self.name()
        cfg_args['DesiredCapacity'] = cfg.get('scale_policy')['desired']
        cfg_args['LoadBalancerNames'] = cfg.get('elbs')
        cfg_args['LaunchConfigurationName'] = self.lc().name()
        cfg_args['MaxSize'] = cfg.get('scale_policy', 'max_size')
        cfg_args['MinSize'] = cfg.get('scale_policy', 'min_size')
        cfg_args['VPCZoneIdentifier'] = subnet_ids_string

        if not cfg_args['LoadBalancerNames']:
            cfg_args['LoadBalancerNames'] = []

        response = conn.create_auto_scaling_group(**cfg_args)
        # NOTE: should check if asg was created

        debug('Preparing tags that will be applied to the asg')
        tags = cfg.get('tags')
        if not tags:
            tags = {}
        tags['cluster'] = self.cluster_name
        tags['role'] = self.role_name
        tags['Name'] = self.name()

        # apply tags        
        tag_set = [self.ag_tag(name, k, v) for (k, v) in tags.iteritems()]
        debug("Applying tags to asg")
        conn.create_or_update_tags(Tags=tag_set)

        util.message_integrations("Activated ASgroup {}".format(name))
        # NOTE: what should we be returning here?  Not sure.
        #return ag
        return name
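
The ag_tag helper is not shown in this example. boto3's create_or_update_tags expects a list of dicts with a fixed shape, so a plausible sketch of such a helper (hypothetical, not necessarily the project's actual implementation) looks like:

def ag_tag(asg_name, key, value):
    # shape required by the autoscaling create_or_update_tags call
    return {
        'ResourceId': asg_name,
        'ResourceType': 'auto-scaling-group',
        'Key': key,
        'Value': str(value),
        'PropagateAtLaunch': True,  # copy the tag onto instances the ASG launches
    }

# usage (hypothetical names):
# tag_set = [ag_tag('mycluster-webapp', k, v)
#            for k, v in {'cluster': 'mycluster', 'role': 'webapp'}.items()]
# conn.create_or_update_tags(Tags=tag_set)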