Example #1
 def read_rds_sg_config(self):
     ''' Reads RDS SG authorizations from ini files. '''
     for rule in self.ini.items('rds_securitygroup'):
         if re.match('.*rule', rule[0]):
             (rtype, rvalue) = rule[1].split(':')
             if rtype == 'Net':
                 cidr = c3.utils.naming.get_cidr(rvalue)
                 if cidr:
                     logging.debug('Appending RDS CIDR rule %s' % cidr,
                                 self.verbose)
                     self.rds_sg.add_cidr(cidr)
             elif rtype == 'CIDR':
                 logging.debug('Appending RDS CIDR rule %s' % rvalue,
                             self.verbose)
                 self.rds_sg.add_cidr(rvalue)
             elif rtype == 'SG':
                 (oid, sid) = rvalue.split('/')
                 if oid != 'self':
                     acctid = c3.utils.accounts.get_account_id(oid)
                 else:
                     acctid = c3.utils.accounts.get_account_id(
                         self.get_aws_account())
                 if acctid:
                     logging.debug(
                         'Appending RDS SG rule %s:%s' % (acctid, sid),
                         self.verbose)
                     self.rds_sg.add_sg(acctid, sid)
                 else:
                     logging.warn("Can't find account for %s" % oid)
Example #2
File: c3ec2.py Project: CityGrid/c3
 def tag_by_instance(self, servers):
     """ Tag resources tied to an instnace ID. """
     tagger = cluster_tagger(self.conn, verbose=self.opts.verbose)
     for host in self.hostnames:
         try:
             instance_id = servers[host].get_id()
             if not tagger.add_tags([instance_id], self.cconfig.get_tagset()):
                 logging.error("Problem adding %s to %s" % (self.cconfig.get_tagset(), instance_id))
         except AttributeError:
             instance_id = None
             logging.warn("Failed to set cost tags on failed " "instance %s" % host)
Example #3
 def destroy(self):
     """ Destroys a Security Group """
     stime = 10
     timeout = 120
     while timeout > 0:
         try:
             self.sgrp.delete()
             return True
         except EC2ResponseError:
             logging.warn("SG %s could not be removed, sleeping %ds" % (self.name, stime))
             sleep(stime)
             timeout -= stime
     logging.error("SG %s could not be deleted")
Example #4
 def hibernate(self):
     ''' Hibernate instances in cluster. '''
     count = 0
     for iid in self.c3instances:
         if iid.hibernate():
             logging.info('Waiting for %s to stop' % iid.name)
             if wait_for_instance(iid, desired_state='down',
                                  verbose=self.verbose):
                 count += 1
     if count != len(self.c3instances):
         logging.warn(
             'Asked for %d but only %d stopped' %
             (len(self.c3instances), count))
     return count
Example #5
 def wake(self):
     ''' Wake instances in cluster. '''
     count = 0
     for iid in self.c3instances:
         if iid.wake():
             logging.info('Waiting for %s to start' % iid.name)
             if wait_for_instance(iid, verbose=self.verbose):
                 logging.debug('Wait for %s successful' %
                               iid.name, self.verbose)
                 count += 1
     if count != len(self.c3instances):
         logging.warn(
             'Asked for %d but only %d started' %
             (len(self.c3instances), count))
     return count
Example #6
 def destroy(self):
     ''' Terminates instances in cluster. '''
     count = 0
     for iid in self.c3instances:
         if iid.get_state() not in ['terminated']:
             if iid.destroy():
                 logging.info(
                     'Waiting for %s (%s) to terminate' %
                     (iid.name, iid.inst_id))
                 if wait_for_instance(iid, desired_state='down',
                                      verbose=self.verbose):
                     count += 1
         else:
             logging.warn('%s already terminated' % iid.name)
             count += 1
     if count != len(self.c3instances):
         logging.warn(
             'Asked for %d but only %d terminated' %
             (len(self.c3instances), count))
     return count
Example #7
def gen_s3_entry(ini, user, user_account):
    ''' Generate entries from config '''
    entry = list()
    pattern = re.compile("path")
    if ini.sections():
        for action in ini.sections():
            try:
                effect = ini.get(action, 'effect')
            except ConfigParser.NoOptionError as msg:
                logging.warn(msg)
                continue
            if ini.has_option(action, 'condition'):
                condition = ini.get(action, 'condition')
            else:
                condition = 'empty'
            for item in ini.items(action):
                if pattern.match(item[0]):
                    path = item[1]
                    entry.append('%s|%s|%s|%s|%s|%s' % (effect, action, user,
                                                        user_account, path,
                                                        condition))
    return entry
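Each generated entry is a pipe-delimited string in the order effect|action|user|user_account|path|condition. A hypothetical value (all fields invented for illustration) would look like:

# Illustrative only; the field order matches the format string in gen_s3_entry().
sample = '%s|%s|%s|%s|%s|%s' % ('Allow', 's3_read', 'deploy-user',
                                '123456789012', 'example-bucket/logs/*', 'empty')
print(sample)
# Allow|s3_read|deploy-user|123456789012|example-bucket/logs/*|empty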
Example #8
def do_condition(condition):
    ''' Generate the condition block in the json
    condition is a three item string like:
    ConditionName,ConditionProperty,ConditionValue '''
    item = dict()
    if condition and len(condition.split(',')) == 3:
        (name, prop, value) = condition.split(',')
        item[name] = {}
        if is_ipnetwork(value):
            item[name][prop] = value
        elif c3.utils.naming.get_cidr(value):
            value = c3.utils.naming.get_cidr(value)
            item[name][prop] = value
        else:
            item[name][prop] = value
        return item
    elif condition == 'empty':
        pass
    else:
        logging.warn('Not enough values given to assign '
                     'condition in %s' % condition)
        return False
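When given a full three-part string, do_condition() builds a nested dict keyed by condition name and property. A hedged example, assuming is_ipnetwork() accepts the CIDR as-is (the condition name and property below are illustrative, not taken from a real policy):

# Hypothetical call; assumes do_condition() and its helpers are importable.
block = do_condition('IpAddress,aws:SourceIp,10.0.0.0/8')
print(block)
# Expected shape: {'IpAddress': {'aws:SourceIp': '10.0.0.0/8'}}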
Example #9
File: c3ec2.py Project: CityGrid/c3
 def cluster_create(self):
     """ Provisions a new cluster based on a config. """
     self.conn = self.aws_conn("ec2")
     node_db = nv_connect(self.opts.nv_ini)
     success = 0
     failed = 0
     self.check_config_types()
     logging.info("Applying SG Rules to %s" % self.cconfig.get_primary_sg())
     self.sg_rules()
     if self.cconfig.get_count():
         servers = dict()
         logging.debug(
             "Creating %d %s in %s using %s."
             % (self.cconfig.get_count(), self.cconfig.get_size(), self.cconfig.get_azs(), self.cconfig.get_ami()),
             self.opts.verbose,
         )
         self.hostnames = c3.utils.naming.find_available_hostnames(
             self.cconfig.get_primary_sg(),
             self.cconfig.get_count(),
             self.cconfig.get_aws_account(),
             self.cconfig.get_aws_region(),
             "ctgrd.com",
             node_db,
         )
         start_time = time.time()
         logging.debug("Creating new servers: %s" % self.hostnames, self.opts.verbose)
         for host in self.hostnames:
             servers[host] = C3Instance(conn=self.conn, node_db=node_db, verbose=self.opts.verbose)
             userdata = self.cconfig.get_user_data(self.userdata_replacements(host))
             tries = 1
             if self.opts.substitute_zones:
                 tries = len(self.cconfig.get_azs())
             while tries > 0:
                 tries -= 1
                 used_az = self.cconfig.get_next_az()
                 logging.info("Starting %s in %s" % (host, used_az))
                 instance = servers[host].start(
                     self.cconfig.get_ami(),
                     self.cconfig.get_ssh_key(),
                     self.cconfig.get_sgs(),
                     userdata,
                     host,
                     self.cconfig.get_size(),
                     used_az,
                     self.cconfig.get_node_groups(),
                     self.cconfig.get_allocate_eips(),
                     self.cconfig.get_use_ebs_optimized(),
                     self.cconfig.get_placement_group(),
                 )
                 if instance:
                     success += 1
                     break
                 else:
                     if tries:
                         logging.warn("Failed to create %s in %s, retrying" % (host, used_az))
             else:
                 logging.error("Failed to create %s in all AZs, trying next instance" % host)
                 failed += 1
             if len(self.cconfig.get_ebs_config()) > 0:
                 self.create_ebs(used_az, host, servers[host].get_id())
         if failed == self.cconfig.get_count():
             logging.error("%d of %d failed to create, dying" % (failed, self.cconfig.get_count()))
             sys.exit(1)
         logging.info("%d of %d server(s) created" % (success, self.cconfig.get_count()))
         self.wait_for_servers(servers, start_time, success)
         if self.volume_instances:
             self.attach_ebs()
         self.tag_by_instance(servers)
         if self.cconfig.get_server_env() == "prd":
             self.puppet_whitelist()
     logging.info("Cluster config complete")