Example #1
def configure(name, rec):
    """Write <rec['keys']> to my authorized_keys file."""
    logger.verbose('create_lxc: configuring %s'%name)
    #new_keys = rec['keys']
    
    # get the unix account info
    gid = grp.getgrnam("slices")[2]
    pw_info = pwd.getpwnam(name)
    uid = pw_info[2]
    pw_dir = pw_info[5]

    # write out authorized_keys file and conditionally create
    # the .ssh subdir if need be.
    dot_ssh = os.path.join(pw_dir,'.ssh')
    if not os.path.isdir(dot_ssh):
        if not os.path.isdir(pw_dir):
            logger.verbose('create_lxc: WARNING: homedir %s does not exist for %s!'%(pw_dir,name))
            os.mkdir(pw_dir)
            os.chown(pw_dir, uid, gid)
        os.mkdir(dot_ssh)

    auth_keys = os.path.join(dot_ssh,'authorized_keys')

    for new_keys in rec['keys']:
        tools.write_file(auth_keys, lambda f: f.write(new_keys['key']))

    # set access permissions and ownership properly
    os.chmod(dot_ssh, 0700)
    os.chown(dot_ssh, uid, gid)
    os.chmod(auth_keys, 0600)
    os.chown(auth_keys, uid, gid)

    logger.log('create_lxc: %s: installed ssh keys' % name)
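For orientation, a minimal sketch of how this function might be called; the record shape is inferred from the loop above, and the slice name and key material are placeholders:

# illustrative call -- slice name and key material are made up
rec = {'keys': [{'key': 'ssh-rsa AAAAB3Nza... user@host'}]}
configure('example_slice', rec)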
def retrieve(url, cacert=None, postdata=None, timeout=90):
    #    command = ('/usr/bin/curl', '--fail', '--silent')
    command = (
        '/usr/bin/curl',
        '--fail',
    )
    if cacert: command += ('--cacert', cacert)
    if postdata: command += ('--data', '@-')
    if timeout:
        command += ('--max-time', str(timeout))
        command += ('--connect-timeout', str(timeout))
    command += (url, )
    if verbose:
        print 'Invoking ', command
        if postdata: print 'with postdata=', postdata
    p = Sopen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
    if postdata: p.stdin.write(postdata)
    p.stdin.close()
    sout, sin, serr = select([p.stdout, p.stderr], [], [], timeout)
    if len(sout) == 0 and len(sin) == 0 and len(serr) == 0:
        logger.verbose("curlwrapper: timed out after %s" % timeout)
        p.kill(signal.SIGKILL)
    data = p.stdout.read()
    err = p.stderr.read()
    rc = p.wait()
    if rc != 0:
        # when this triggers, the error sometimes doesn't get printed
        logger.log("curlwrapper: retrieve, got stderr <%s>" % err)
        raise xmlrpclib.ProtocolError(url, rc, err, postdata)
    else:
        return data
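A minimal usage sketch for the wrapper above, assuming an illustrative URL and certificate path:

# hypothetical call -- host and cacert path are placeholders
data = retrieve('https://boot.example.org/PlanetLabConf/node.txt',
                cacert='/etc/planetlab/cacert.pem', timeout=30)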
Example #3
def retrieve(url, cacert=None, postdata=None, timeout=90):
#    command = ('/usr/bin/curl', '--fail', '--silent')
    command = ('/usr/bin/curl', '--fail', )
    if cacert: command += ('--cacert', cacert)
    if postdata: command += ('--data', '@-')
    if timeout: 
        command += ('--max-time', str(timeout))
        command += ('--connect-timeout', str(timeout))
    command += (url,)
    if verbose:
        print 'Invoking ',command
        if postdata: print 'with postdata=',postdata
    p = Sopen(command , stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
    if postdata: p.stdin.write(postdata)
    p.stdin.close()
    sout, sin, serr = select([p.stdout,p.stderr],[],[], timeout)
    if len(sout) == 0 and len(sin) == 0 and len(serr) == 0: 
        logger.verbose("curlwrapper: timed out after %s" % timeout)
        p.kill(signal.SIGKILL) 
    data = p.stdout.read()
    err = p.stderr.read()
    rc = p.wait()
    if rc != 0: 
        # when this triggers, the error sometimes doesn't get printed
        logger.log ("curlwrapper: retrieve, got stderr <%s>"%err)
        raise xmlrpclib.ProtocolError(url, rc, err, postdata)
    else: 
        return data
Example #4
    def run(self):
        try:
            if self.options.daemon: tools.daemon()

            # set log level
            if (self.options.verbose):
                logger.set_level(logger.LOG_VERBOSE)

            # Load /etc/planetlab/plc_config
            config = Config(self.options.config)

            try:
                other_pid = tools.pid_file()
                if other_pid != None:
                    print """There might be another instance of the node manager running as pid %d.
If this is not the case, please remove the pid file %s. -- exiting""" % (other_pid, tools.PID_FILE)
                    return
            except OSError, err:
                print "Warning while writing PID file:", err

            # load modules
            self.loaded_modules = []
            for module in self.modules:
                try:
                    m = __import__(module)
                    logger.verbose("nodemanager: triggering %s.start"%m.__name__)
                    m.start()
                    self.loaded_modules.append(m)
                except ImportError, err:
                    print "Warning while loading module %s:" % module, err
 def is_running(self):
     ''' Return True if the domain is running '''
     (state, _) = self.dom.state()
     result = (state == libvirt.VIR_DOMAIN_RUNNING)
     logger.verbose('sliver_libvirt.is_running: {} => {}'
                    .format(self, result))
     return result
Example #6
 def is_running(self):
     if (self._acct != None) and self._acct.is_running():
         status = True
     else:
         status = False
         logger.verbose("account: Worker(%s): is not running" % self.name)
     return status
def configure(name, rec):
    """Write <rec['keys']> to my authorized_keys file."""
    logger.verbose('create_lxc: configuring %s' % name)
    #new_keys = rec['keys']

    # get the unix account info
    gid = grp.getgrnam("slices")[2]
    pw_info = pwd.getpwnam(name)
    uid = pw_info[2]
    pw_dir = pw_info[5]

    # write out authorized_keys file and conditionally create
    # the .ssh subdir if need be.
    dot_ssh = os.path.join(pw_dir, '.ssh')
    if not os.path.isdir(dot_ssh):
        if not os.path.isdir(pw_dir):
            logger.verbose(
                'create_lxc: WARNING: homedir %s does not exist for %s!' %
                (pw_dir, name))
            os.mkdir(pw_dir)
            os.chown(pw_dir, uid, gid)
        os.mkdir(dot_ssh)

    auth_keys = os.path.join(dot_ssh, 'authorized_keys')

    for new_keys in rec['keys']:
        tools.write_file(auth_keys, lambda f: f.write(new_keys['key']))

    # set access permissions and ownership properly
    os.chmod(dot_ssh, 0700)
    os.chown(dot_ssh, uid, gid)
    os.chmod(auth_keys, 0600)
    os.chown(auth_keys, uid, gid)

    logger.log('create_lxc: %s: installed ssh keys' % name)
Example #8
 def GetSlivers(self, config, plc):
     """Retrieves GetSlivers at PLC and triggers callbacks defined in modules/plugins"""
     try:
         logger.log("nodemanager: Syncing w/ PLC")
         # retrieve GetSlivers from PLC
         data = plc.GetSlivers()
         # use the magic 'default' slice to retrieve system-wide defaults
         self.getPLCDefaults(data, config)
         # tweak the 'vref' attribute from GetSliceFamily
         self.setSliversVref (data)
         # dump it too, so it can be retrieved later in case of comm. failure
         self.dumpSlivers(data)
         # log it for debug purposes, no matter what verbose is
         logger.log_slivers(data)
         logger.verbose("nodemanager: Sync w/ PLC done")
         last_data=data
     except:
         logger.log_exc("nodemanager: failed in GetSlivers")
         #  XXX So some modules can at least bootstrap.
         logger.log("nodemanager:  Can't contact PLC to GetSlivers().  Continuing.")
         data = {}
         # for modules that request it though the 'persistent_data' property
         last_data=self.loadSlivers()
     #  Invoke GetSlivers() functions from the callback modules
     for module in self.loaded_modules:
         logger.verbose('nodemanager: triggering %s.GetSlivers'%module.__name__)
         try:
             callback = getattr(module, 'GetSlivers')
             module_data=data
             if getattr(module,'persistent_data',False):
                 module_data=last_data
             callback(module_data, config, plc)
         except:
             logger.log_exc("nodemanager: GetSlivers failed to run callback for module %r"%module)
Example #9
 def update_conf_file(self, cf_rec):
     if not cf_rec['enabled']: return
     dest = cf_rec['dest']
     err_cmd = cf_rec['error_cmd']
     mode = string.atoi(cf_rec['file_permissions'], base=8)
     try:
         uid = pwd.getpwnam(cf_rec['file_owner'])[2]
     except:
         logger.log('conf_files: cannot find user %s -- %s not updated'%(cf_rec['file_owner'],dest))
         return
     try:
         gid = grp.getgrnam(cf_rec['file_group'])[2]
     except:
         logger.log('conf_files: cannot find group %s -- %s not updated'%(cf_rec['file_group'],dest))
         return
     url = 'https://%s/%s' % (self.config.PLC_BOOT_HOST, cf_rec['source'])
     # set node_id at the end of the request - hacky
     if tools.node_id():
         if url.find('?') >0: url += '&'
         else:                url += '?'
         url += "node_id=%d"%tools.node_id()
     else:
         logger.log('conf_files: %s -- WARNING, cannot add node_id to request'%dest)
     try:
         logger.verbose("conf_files: retrieving URL=%s"%url)
         contents = curlwrapper.retrieve(url, self.config.cacert)
     except xmlrpclib.ProtocolError,e:
         logger.log('conf_files: failed to retrieve %s from %s, skipping' % (dest, url))
         return
def replace_file_with_string(target,
                             new_contents,
                             chmod=None,
                             remove_if_empty=False):
    try:
        current = file(target).read()
    except:
        current = ""
    if current == new_contents:
        # if turns out to be an empty string, and remove_if_empty is set,
        # then make sure to trash the file if it exists
        if remove_if_empty and not new_contents and os.path.isfile(target):
            logger.verbose("tools.replace_file_with_string: removing file %s" %
                           target)
            try:
                os.unlink(target)
            finally:
                return True
        return False
    # overwrite target file: create a temp in the same directory
    path = os.path.dirname(target) or '.'
    fd, name = tempfile.mkstemp('', 'repl', path)
    os.write(fd, new_contents)
    os.close(fd)
    if os.path.exists(target):
        os.unlink(target)
    shutil.move(name, target)
    if chmod: os.chmod(target, chmod)
    return True
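A short usage sketch, with a placeholder path and contents:

# hypothetical call -- the target path and contents are placeholders
changed = replace_file_with_string('/etc/planetlab/extra.conf',
                                   'option value\n', chmod=0644)
if changed:
    logger.log('tools: updated /etc/planetlab/extra.conf')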
Example #11
def GetSlivers(data, config, plc):
    # added by caglar
    # band-aid for short period as old API returns networks instead of interfaces
    global KEY_NAME
    KEY_NAME = "interfaces"
    #################

    logger.verbose("net: GetSlivers called.")
    if not 'interfaces' in data:
        # added by caglar
        # band-aid for short period as old API returns networks instead of interfaces
        # logger.log_missing_data('net.GetSlivers','interfaces')
        # return
        if not 'networks' in data:
            logger.log_missing_data('net.GetSlivers','interfaces')
            return
        else:
            KEY_NAME = "networks"
        ##################

    plnet.InitInterfaces(logger, plc, data)
    if 'OVERRIDES' in dir(config):
        if config.OVERRIDES.get('net_max_rate') == '-1':
            logger.log("net: Slice and node BW Limits disabled.")
            if len(bwlimit.tc("class show dev %s" % dev_default)):
                logger.verbose("net: *** DISABLING NODE BW LIMITS ***")
                bwlimit.stop()
        else:
            InitNodeLimit(data)
            InitI2(plc, data)
    else:
        InitNodeLimit(data)
        InitI2(plc, data)
    InitNAT(plc, data)
Example #12
def GetSlivers(data, config, plc):
    # added by caglar
    # band-aid for short period as old API returns networks instead of interfaces
    global KEY_NAME
    KEY_NAME = "interfaces"
    #################

    logger.verbose("net: GetSlivers called.")
    if not 'interfaces' in data:
        # added by caglar
        # band-aid for short period as old API returns networks instead of interfaces
        # logger.log_missing_data('net.GetSlivers','interfaces')
        # return
        if not 'networks' in data:
            logger.log_missing_data('net.GetSlivers','interfaces')
            return
        else:
            KEY_NAME = "networks"
        ##################

    plnet.InitInterfaces(logger, plc, data)
    if 'OVERRIDES' in dir(config):
        if config.OVERRIDES.get('net_max_rate') == '-1':
            logger.log("net: Slice and node BW Limits disabled.")
            if len(bwlimit.tc("class show dev %s" % dev_default)):
                logger.verbose("net: *** DISABLING NODE BW LIMITS ***")
                bwlimit.stop()
        else:
            InitNodeLimit(data)
            InitI2(plc, data)
    else:
        InitNodeLimit(data)
        InitI2(plc, data)
    InitNAT(plc, data)
Example #13
    def process_IN_DELETE(self, event):
        try:
            del CGROUPS[event.name]
        except:
            logger.verbose("Cgroup Notify: Cgroup %s does not exist, continuing..."%event.name)
	logger.verbose("Cgroup Notify: Deleted cgroup %s on %s" % \
			(event.name, event.path))
Example #14
    def configure(self, rec):
        """Write <rec['keys']> to my authorized_keys file."""
        logger.verbose('account: configuring %s'%self.name)
        new_keys = rec['keys']
        if new_keys != self.keys:
            # get the unix account info
            gid = grp.getgrnam("slices")[2]
            pw_info = pwd.getpwnam(self.name)
            uid = pw_info[2]
            pw_dir = pw_info[5]

            # write out authorized_keys file and conditionally create
            # the .ssh subdir if need be.
            dot_ssh = os.path.join(pw_dir,'.ssh')
            if not os.path.isdir(dot_ssh):
                if not os.path.isdir(pw_dir):
                    logger.verbose('account: WARNING: homedir %s does not exist for %s!'%(pw_dir,self.name))
                    os.mkdir(pw_dir)
                    os.chown(pw_dir, uid, gid)
                os.mkdir(dot_ssh)

            auth_keys = os.path.join(dot_ssh,'authorized_keys')
            tools.write_file(auth_keys, lambda f: f.write(new_keys))

            # set access permissions and ownership properly
            os.chmod(dot_ssh, 0700)
            os.chown(dot_ssh, uid, gid)
            os.chmod(auth_keys, 0600)
            os.chown(auth_keys, uid, gid)

            # set self.keys to new_keys only when all of the above ops succeed
            self.keys = new_keys

            logger.log('account: %s: installed ssh keys' % self.name)
Example #15
 def is_running(self):
     if (self._acct != None) and self._acct.is_running():
         status = True
     else:
         status = False
         logger.verbose("account: Worker(%s): is not running" % self.name)
     return status
Example #16
    def ensure_created(self, rec):
        """Check account type is still valid.  If not, recreate sliver.
If still valid, check if running and configure/start if not."""
        logger.log_data_in_file(rec,"/var/lib/nodemanager/%s.rec.txt"%rec['name'],
                                'raw rec captured in ensure_created',logger.LOG_VERBOSE)
        curr_class = self._get_class()
        next_class = type_acct_class[rec['type']]
        if next_class != curr_class:
            self._destroy(curr_class)
            create_sem.acquire()
            try: next_class.create(self.name, rec)
            finally: create_sem.release()
        if not isinstance(self._acct, next_class): self._acct = next_class(rec)
        logger.verbose("account.Worker.ensure_created: %s, running=%r"%(self.name,self.is_running()))

        # reservation_alive is set on reservable nodes, and its value is a boolean
        if 'reservation_alive' in rec:
            # reservable nodes
            if rec['reservation_alive']:
                # this sliver has the lease, it is safe to start it
                if not self.is_running(): self.start(rec)
                else: self.configure(rec)
            else:
                # not having the lease, do not start it
                self.configure(rec)
        # usual nodes - preserve old code
        # xxx it's not clear what to do when a sliver changes type/class
        # in a reservable node
        else:
            if not self.is_running() or next_class != curr_class:
                self.start(rec)
            else: self.configure(rec)
    def write(perfDataList, timestamp, whitelistDict=None):
        '''
        main formatter function: returns a string of formatted entries
        '''
        unixTime = str(time.mktime(timestamp)).replace(".", "") + "00"
        l.debug("influxFormatter.write: timestamp:'%s' , unixTime: '%s'",
                str(timestamp), unixTime)

        entries = []
        for perfEntry in perfDataList:
            if (whitelistedJ2eeType(perfEntry, whitelistDict) == True):
                l.debug("Writing output record as j2eeType is whitelisted")
                formattedEntry = "{},{} {} {}".format(
                    getMeasurement(perfEntry), formatTags(perfEntry),
                    formatFields(perfEntry, whitelistDict), unixTime)
                entries.append(formattedEntry)
                l.debug("Influx formattedEntry: '%s" % (formattedEntry))
            else:
                l.debug(
                    "Output record not written as j2eeType: '%s' is not whitelisted"
                    % (perfEntry["tags"]))

        l.verbose("Number of rows returned: %d" % (len(entries)))
        returnedObj = "\n".join(entries)
        return returnedObj
 def check_if_test_exists(test_file_path: str):
     logger.verbose(
         f"{CombinationValidator.__name__}: Checking the existence of test: '"
         f"{CombinationValidator.UNIT_TEST_NAME}'.")
     return CombinationValidator.trigger_test_output_test(
         test_file_path, check_for_existence=True) not in [
             ExitCode.NO_TESTS_COLLECTED, ExitCode.USAGE_ERROR
         ]
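An illustrative call, with a placeholder test file path:

# hypothetical call -- the test file path is a placeholder
if CombinationValidator.check_if_test_exists('tests/test_output.py'):
    logger.verbose('CombinationValidator: unit test found, proceeding.')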
Example #19
 def process_IN_DELETE(self, event):
     try:
         del CGROUPS[event.name]
     except:
         logger.verbose(
             "Cgroup Notify: Cgroup %s does not exist, continuing..." %
             event.name)
     logger.verbose("Cgroup Notify: Deleted cgroup %s on %s" % \
       (event.name, event.path))
    def __init__ (self):

        parser = ArgumentParser()
        parser.add_argument(
            '-d', '--daemon', action='store_true', dest='daemon',
            default=False,
            help='run daemonized')
        parser.add_argument(
            '-f', '--config', action='store', dest='config',
            default='/etc/planetlab/plc_config',
            help='PLC configuration file')
        parser.add_argument(
            '-k', '--session', action='store', dest='session',
            default='/etc/planetlab/session',
            help='API session key (or file)')
        parser.add_argument(
            '-p', '--period', action='store', dest='period',
            default=NodeManager.default_period,
            help='Polling interval (sec) - default {}'
                 .format(NodeManager.default_period))
        parser.add_argument(
            '-r', '--random', action='store', dest='random',
            default=NodeManager.default_random,
            help='Range for additional random polling interval (sec) -- default {}'
                 .format(NodeManager.default_random))
        parser.add_argument(
            '-v', '--verbose', action='store_true', dest='verbose',
            default=False,
            help='more verbose log')
        parser.add_argument(
            '-P', '--path', action='store', dest='path',
            default=NodeManager.PLUGIN_PATH,
            help='Path to plugins directory')

        parser.add_argument(
            '-m', '--module', action='store', dest='user_module',
            default='',
            help='run a single module')
        self.options = parser.parse_args()

        # determine the modules to be run
        self.modules = NodeManager.core_modules
        # Deal with plugins directory
        if os.path.exists(self.options.path):
            sys.path.append(self.options.path)
            plugins = [
                os.path.split(os.path.splitext(x)[0])[1]
                for x in glob.glob( os.path.join(self.options.path, '*.py') )
                if not x.endswith("/__init__.py")
                ]
            self.modules += plugins
        if self.options.user_module:
            assert self.options.user_module in self.modules
            self.modules = [self.options.user_module]
            logger.verbose('nodemanager: Running single module {}'.format(self.options.user_module))
    def stop(self):
        logger.verbose('sliver_libvirt: {} stop'.format(self.name))

        # Remove the ebtables rule before stopping 
        bwlimit.ebtables("-D INPUT -i veth{} -j mark --set-mark {}"
                         .format(self.xid, self.xid))

        try:
            self.dom.destroy()
        except:
            logger.log_exc("in sliver_libvirt.stop", name=self.name)
    def start(self, delay=0):
        '''Just start the sliver'''
        logger.verbose('sliver_libvirt: {} start'.format(self.name))

        # TD: Added OpenFlow rules to avoid OpenVSwitch-based slivers'
        # auto-configuration issues when IPv6 auto-config and/or DHCP are
        # available in the node's network. Sliver configuration is static.
        if os.path.exists('/usr/bin/ovs-ofctl'):
            logger.log('Adding OpenFlow rules to prevent IPv6 auto-config and DHCP in OpenVSwitch slivers')
            # IPv6 ICMP Router Solicitation and Advertisement
            logger.log_call([ '/usr/bin/ovs-ofctl', 'add-flow', 'public0', 'priority=100,icmp6,icmp_type=133,idle_timeout=0,hard_timeout=0,actions=drop' ])
            logger.log_call([ '/usr/bin/ovs-ofctl', 'add-flow', 'public0', 'priority=100,icmp6,icmp_type=134,idle_timeout=0,hard_timeout=0,actions=drop' ])
            # IPv4 DHCP
            logger.log_call([ '/usr/bin/ovs-ofctl', 'add-flow', 'public0', 'priority=101,udp,nw_src=0.0.0.0,nw_dst=255.255.255.255,tp_src=68,tp_dst=67,idle_timeout=0,hard_timeout=0,actions=drop' ])
            logger.log_call([ '/usr/bin/ovs-ofctl', 'add-flow', 'public0', 'priority=101,udp,tp_src=67,tp_dst=68,idle_timeout=0,hard_timeout=0,actions=drop' ])
        else:
            logger.log('NOTE: /usr/bin/ovs-ofctl not found!')

        # Check if it's running to avoid throwing an exception if the
        # domain was already running
        if not self.is_running():
            try:
                # create actually means start
                self.dom.create()
            except Exception as e:
                # XXX smbaker: attempt to resolve slivers that are stuck in
                #   "failed to allocate free veth".
                if "ailed to allocate free veth" in str(e):
                     logger.log("failed to allocate free veth on {}".format(self.name))
                     self.repair_veth()
                     logger.log("trying dom.create again")
                     self.dom.create()
                else:
                    raise
        else:
            logger.verbose('sliver_libvirt: sliver {} already started'.format(self.name))

        # After the VM is started... we can play with the virtual interface
        # Create the ebtables rule to mark the packets going out from the virtual
        # interface to the actual device so the filter can match against the mark
        bwlimit.ebtables("-A INPUT -i veth{} -j mark --set-mark {}"
                         .format(self.xid, self.xid))

        # TD: Turn off SCTP checksum offloading. It is currently not working. FIXME: check for a kernel fix!
        result = logger.log_call(['/usr/sbin/lxcsu', '-r', self.name, '--', '/usr/sbin/ethtool', '-K', 'eth0', 'tx-checksum-sctp', 'off'])
        if not result:
            logger.log('unable to apply SCTP checksum bug work-around for %s' % self.name)

        # TD: Work-around for missing interface configuration: ensure that networking service is running.
        result = logger.log_call(['/usr/sbin/lxcsu', '-r', self.name, '/sbin/service', 'network', 'restart'])
        if not result:
            logger.log('unable to restart networking service for %s' % self.name)
Example #23
    def create(name, rec = None):
        logger.verbose('sliver_vs: %s: create'%name)
        vref = rec['vref']
        if vref is None:
            logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
            # added by caglar
            # band-aid for short period as old API doesn't have GetSliceFamily function
            #return
            vref = "planetlab-f8-i386"

        # used to look in /etc/planetlab/family,
        # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
        # which for legacy is still exposed here as the 'vref' key

        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%vref):
            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
            return

        # guess arch
        try:
            (x,y,arch)=vref.split('-')
        # mh, this of course applies when 'vref' is e.g. 'netflow'
        # and that's not quite right
        except:
            arch='i386'

        def personality (arch):
            personality="linux32"
            if arch.find("64")>=0:
                personality="linux64"
            return personality

        command=[]
        # be verbose
        command += ['/bin/bash','-x',]
        command += ['/usr/sbin/vuseradd', ]
        if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
            command += [ "-i",]
        # the vsliver image to use
        command += [ '-t', vref, ]
        # slice name
        command += [ name, ]            
#        logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
        logger.log_call(command, timeout=15*60)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
        # set personality: only if needed (if arch's differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
            logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
def run_subprocess(command: list or str, cwd: str = os.curdir):
    if isinstance(command, list):
        command = " ".join(command)
    logger.verbose(f'Running {command} command')
    pipes = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, shell=True,
                             env=os.environ, universal_newlines=True)
    std_out, std_err = pipes.communicate()
    return_code = pipes.returncode
    pipes.kill()
    del pipes
    if return_code != 0:
        raise subprocess.CalledProcessError(cmd=command, output=std_out, stderr=std_err, returncode=return_code)
    return std_out, std_err, return_code
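A brief usage sketch; the command and working directory are placeholders:

# hypothetical call -- command and cwd are placeholders
try:
    out, err, rc = run_subprocess(['git', 'status'], cwd='/tmp')
except subprocess.CalledProcessError as e:
    logger.verbose(f'command failed with code {e.returncode}: {e.stderr}')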
    def destroy(name):
        # umount .ssh directory - only if mounted
        Account.umount_ssh_dir(name)
        logger.verbose('sliver_lxc: %s destroy' % (name))
        conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)

        containerDir = Sliver_LXC.CON_BASE_DIR + '/%s' % (name)

        try:
            # Destroy libvirt domain
            dom = conn.lookupByName(name)
        except:
            logger.verbose('sliver_lxc: Domain %s does not exist!' % name)

        try:
            dom.destroy()
        except:
            logger.verbose('sliver_lxc: Domain %s not running... continuing.' %
                           name)

        try:
            dom.undefine()
        except:
            logger.verbose(
                'sliver_lxc: Domain %s is not defined... continuing.' % name)

        # Remove user after destroy domain to force logout
        command = ['/usr/sbin/userdel', '-f', '-r', name]
        logger.log_call(command, timeout=15 * 60)

        if os.path.exists(os.path.join(containerDir, "vsys")):
            # Slivers with vsys running will fail the subvolume delete.
            # A more permanent solution may be to ensure that the vsys module
            # is called before the sliver is destroyed.
            logger.log("destroying vsys directory and restarting vsys")
            logger.log_call(["rm", "-fR", os.path.join(containerDir, "vsys")])
            logger.log_call([
                "/etc/init.d/vsys",
                "restart",
            ])

        # Remove rootfs of destroyed domain
        command = ['btrfs', 'subvolume', 'delete', containerDir]
        logger.log_call(command, timeout=60)

        if os.path.exists(containerDir):
            # oh no, it's still here...
            logger.log("WARNING: failed to destroy container %s" %
                       containerDir)

        logger.verbose('sliver_libvirt: %s destroyed.' % name)
    def stop(self):
        logger.verbose('sliver_libvirt: %s stop'%(self.name))

        # Remove the ebtables rule before stopping 
        bwlimit.ebtables("-D INPUT -i veth%d -j mark --set-mark %d" % \
            (self.xid, self.xid))

        try:
            self.dom.destroy()
        except:
            logger.verbose('sliver_libvirt: Domain %s not running ' \
                           'UNEXPECTED: %s'%(self.name, sys.exc_info()[1]))
            print 'sliver_libvirt: Domain %s not running ' \
                  'UNEXPECTED: %s'%(self.name, sys.exc_info()[1])
 def update_conf_file(self, cf_rec):
     if not cf_rec['enabled']:
         return
     dest = cf_rec['dest']
     err_cmd = cf_rec['error_cmd']
     mode = int(cf_rec['file_permissions'], base=8)
     try:
         uid = pwd.getpwnam(cf_rec['file_owner'])[2]
     except:
         logger.log('conf_files: cannot find user %s -- %s not updated'
                    %(cf_rec['file_owner'], dest))
         return
     try:
         gid = grp.getgrnam(cf_rec['file_group'])[2]
     except:
         logger.log('conf_files: cannot find group %s -- %s not updated'
                    %(cf_rec['file_group'], dest))
         return
     url = 'https://%s/%s' % (self.config.PLC_BOOT_HOST, cf_rec['source'])
     # set node_id at the end of the request - hacky
     if tools.node_id():
         if url.find('?') > 0:
             url += '&'
         else:
             url += '?'
         url += "node_id=%d"%tools.node_id()
     else:
         logger.log('conf_files: %s -- WARNING, cannot add node_id to request'
                    % dest)
     try:
         logger.verbose("conf_files: retrieving URL=%s"%url)
         contents = curlwrapper.retrieve(url, self.config.cacert)
     except xmlrpc.client.ProtocolError as e:
         logger.log('conf_files: failed to retrieve %s from %s, skipping' % (dest, url))
         return
     if not cf_rec['always_update'] and sha(contents).digest() == self.checksum(dest):
         return
     if self.system(cf_rec['preinstall_cmd']):
         self.system(err_cmd)
         if not cf_rec['ignore_cmd_errors']:
             return
     logger.log('conf_files: installing file %s from %s' % (dest, url))
     try:
         os.makedirs(os.path.dirname(dest))
     except OSError:
         pass
     tools.write_file(dest, lambda f: f.write(contents.decode()),
                      mode=mode, uidgid=(uid, gid))
     if self.system(cf_rec['postinstall_cmd']):
         self.system(err_cmd)
Example #28
 def __init__(self, rec):
     name=rec['name']
     logger.verbose ('sliver_vs: %s init'%name)
     try:
         logger.log("sliver_vs: %s: first chance..."%name)
         vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
     except Exception, err:
         if not isinstance(err, vserver.NoSuchVServer):
             # Probably a bad vserver or vserver configuration file
             logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
             logger.log('sliver_vs: %s: recreating bad vserver' % name)
             self.destroy(name)
         self.create(name, rec)
         logger.log("sliver_vs: %s: second chance..."%name)
         vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
Example #29
    def run(self):
        # make sure to create /etc/planetlab/virt so others can read that
        # used e.g. in vsys-scripts's sliceip
        tools.get_node_virt()
        try:
            if self.options.daemon: tools.daemon()

            # set log level
            if (self.options.verbose):
                logger.set_level(logger.LOG_VERBOSE)

            # Load /etc/planetlab/plc_config
            config = Config(self.options.config)

            try:
                other_pid = tools.pid_file()
                if other_pid != None:
                    print """There might be another instance of the node manager running as pid %d.
If this is not the case, please remove the pid file %s. -- exiting""" % (
                        other_pid, tools.PID_FILE)
                    return
            except OSError, err:
                print "Warning while writing PID file:", err

            # load modules
            self.loaded_modules = []
            for module in self.modules:
                try:
                    m = __import__(module)
                    logger.verbose("nodemanager: triggering %s.start" %
                                   m.__name__)
                    m.start()
                    self.loaded_modules.append(m)
                except ImportError, err:
                    logger.log_exc(
                        "ERROR while loading module %s - skipping:" % module)
                    # if we fail to load any of these, it's really no need to go on any further
                    if module in NodeManager.core_modules:
                        logger.log("FATAL : failed to load core module %s" %
                                   module)
                except AttributeError, err:
                    # triggered when module doesn't have a 'start' method
                    logger.log_exc(
                        "ERROR while starting module %s - skipping:" % module)
                    # if we fail to load any of these, it's really no need to go on any further
                    if module in NodeManager.core_modules:
                        logger.log("FATAL : failed to start core module %s" %
                                   module)
Example #30
 def getPLCDefaults(self, data, config):
     """
     Get PLC wide defaults from _default system slice.  Adds them to config class.
     """
     for slice in data.get('slivers'):
         if slice['name'] == config.PLC_SLICE_PREFIX+"_default":
             attr_dict = {}
             for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value']
             if len(attr_dict):
                 logger.verbose("nodemanager: Found default slice overrides.\n %s" % attr_dict)
                 config.OVERRIDES = attr_dict
                 return
     # NOTE: if an _default slice existed, it would have been found above and
     #           the routine would return.  Thus, if we've gotten here, then no default
     #           slice is bound to this node.
     if 'OVERRIDES' in dir(config): del config.OVERRIDES
    def start(self, delay=0):
        ''' Just start the sliver '''
        logger.verbose('sliver_libvirt: %s start'%(self.name))

        # Check if it's running to avoid throwing an exception if the
        # domain was already running, create actually means start
        if not self.is_running():
            self.dom.create()
        else:
            logger.verbose('sliver_libvirt: sliver %s already started'%(self.name))

        # After the VM is started... we can play with the virtual interface
        # Create the ebtables rule to mark the packets going out from the virtual
        # interface to the actual device so the filter can match against the mark
        bwlimit.ebtables("-A INPUT -i veth%d -j mark --set-mark %d" % \
            (self.xid, self.xid))
def log_call_read(command, timeout=logger.default_timeout_minutes*60, poll=1):
    message = " ".join(command)
    logger.log("log_call: running command %s" % message)
    logger.verbose("log_call: timeout=%r s" % timeout)
    logger.verbose("log_call: poll=%r s" % poll)
    trigger = time.time()+timeout
    try:
        child = subprocess.Popen(
            command, bufsize=1,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            close_fds=True,
            universal_newlines=True)

        stdout = ""
        while True:
            # see if anything can be read within the poll interval
            (r, w, x) = select.select([child.stdout], [], [], poll)
            if r:
                stdout = stdout + child.stdout.read(1)
            # is process over ?
            returncode=child.poll()
            # yes
            if returncode != None:
                stdout = stdout + child.stdout.read()
                # child is done and return 0
                if returncode == 0:
                    logger.log("log_call:end command (%s) completed" % message)
                    if stdout != "":
                        logger.log("log_call:stdout: %s" % stdout)
                    return (returncode, stdout)
                # child has failed
                else:
                    log("log_call:end command (%s) returned with code %d"
                        %(message, returncode))
                    return (returncode, stdout)
            # no : still within timeout ?
            if time.time() >= trigger:
                child.terminate()
                logger.log("log_call:end terminating command (%s) - exceeded timeout %d s"
                           %(message, timeout))
                return (-2, None)
                break
    except Exception as e:
        logger.log_exc("failed to run command %s -> %s" % (message, e))

    return (-1, None)
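A short usage sketch of the helper above, with a placeholder command:

# hypothetical call -- the command is a placeholder
returncode, stdout = log_call_read(['/usr/bin/uptime'], timeout=60)
if returncode == 0:
    logger.verbose('uptime output: %s' % stdout)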
Example #33
    def destroy(name):
        # umount .ssh directory - only if mounted
        Account.umount_ssh_dir(name)
        logger.verbose ('sliver_lxc: %s destroy'%(name))
        conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)

        containerDir = Sliver_LXC.CON_BASE_DIR + '/%s'%(name)

        try:
            # Destroy libvirt domain
            dom = conn.lookupByName(name)
        except:
            logger.verbose('sliver_lxc: Domain %s does not exist!' % name)

        try:
            dom.destroy()
        except:
            logger.verbose('sliver_lxc: Domain %s not running... continuing.' % name)

        try:
            dom.undefine()
        except:
            logger.verbose('sliver_lxc: Domain %s is not defined... continuing.' % name)

        # Remove user after destroy domain to force logout
        command = ['/usr/sbin/userdel', '-f', '-r', name]
        logger.log_call(command, timeout=15*60)

        if os.path.exists(os.path.join(containerDir,"vsys")):
            # Slivers with vsys running will fail the subvolume delete.
            # A more permanent solution may be to ensure that the vsys module
            # is called before the sliver is destroyed.
            logger.log("destroying vsys directory and restarting vsys")
            logger.log_call(["rm", "-fR", os.path.join(containerDir, "vsys")])
            logger.log_call(["/etc/init.d/vsys", "restart", ])

        # Remove rootfs of destroyed domain
        command = ['btrfs', 'subvolume', 'delete', containerDir]
        logger.log_call(command, timeout=60)

        if os.path.exists(containerDir):
            # oh no, it's still here...
            logger.log("WARNING: failed to destroy container %s" % containerDir)

        logger.verbose('sliver_libvirt: %s destroyed.'%name)
    def freezeUnits (self, var_name, freezeList):
        for (slicename, freeze) in list(freezeList.items()):
            try:
                cgroup_path = cgroups.get_cgroup_path(slicename, 'freezer')
                logger.verbose("CoreSched: setting freezer for {} to {} - path={} var={}"
                               .format(slicename,freeze, cgroup_path, var_name))
                cgroup = os.path.join(cgroup_path, var_name)
                if not cgroup:
                    logger.log("Warning: Could not spot 'freezer' cgroup file for slice {} - ignored".format(slicename))
                    break

                if glo_coresched_simulate:
                        print("F", cgroup)
                else:
                    with open(cgroup, "w") as f:
                        f.write(freeze)
            except Exception as e:
                # the cgroup probably didn't exist...
                logger.log("CoreSched: exception while setting freeze for {} ({})".format(slicename, e))
Example #35
 def getPLCDefaults(self, data, config):
     """
     Get PLC wide defaults from _default system slice.  Adds them to config class.
     """
     for slice in data.get('slivers'):
         if slice['name'] == config.PLC_SLICE_PREFIX + "_default":
             attr_dict = {}
             for attr in slice.get('attributes'):
                 attr_dict[attr['tagname']] = attr['value']
             if len(attr_dict):
                 logger.verbose(
                     "nodemanager: Found default slice overrides.\n %s" %
                     attr_dict)
                 config.OVERRIDES = attr_dict
                 return
     # NOTE: if an _default slice existed, it would have been found above and
     #           the routine would return.  Thus, if we've gotten here, then no default
     #           slice is bound to this node.
     if 'OVERRIDES' in dir(config): del config.OVERRIDES
Example #36
def run():
    """
    When run as a thread, wait for event, lock db, deep copy it, release it,
    run bwmon.GetSlivers(), then go back to waiting.
    """
    logger.verbose("bwmon: Thread started")
    while True:
        lock.wait()
        logger.verbose("bwmon: Event received.  Running.")
        database.db_lock.acquire()
        nmdbcopy = copy.deepcopy(database.db)
        database.db_lock.release()
        try:
            if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
                # class show to check if net:InitNodeLimit:bwlimit.init has run.
                sync(nmdbcopy)
            else: logger.log("bwmon: BW limits DISABLED.")
        except: logger.log_exc("bwmon failed")
        lock.clear()
Example #37
def make_tiles_for_output(dir, wn_results, forecast):
    logger.verbose("make_tiles_for_output({} {} {})".format(
        dir, wn_results, forecast))
    output_dir = wn_results[0]
    results = wn_results[1]
    layer_info_array = {}

    # Iterate through runs to get GLOBAL maximum speed
    max_speed = 0.
    for res in results:
        file_path = os.path.join(output_dir, res)
        layer_info = getLayerInfo(file_path)
        max_speed = max(max_speed, layer_info['max_speed'])

    for res in results:
        k = res.rfind("_")
        time = res[:k]
        time = time.replace("dem_", "")
        name = res.split(".")[0]
        tile_dir = os.path.join(dir, CONFIG.TILE_RASTER_FILE_NAME, name)
        if not os.path.exists(tile_dir):
            os.makedirs(tile_dir)

        file_path = os.path.join(output_dir, res)
        layer_info = getLayerInfo(file_path)
        logger.debug("LAYER INFO: {}".format(layer_info))
        ext = layer_info['extents']['4326']
        bbox = (ext['xmin'], ext['ymin'], ext['xmax'], ext['ymax'])
        proj4string = layer_info['extents'][
            layer_info["native_wkid"]]["proj4string"]
        logger.debug("Proj4String: {}".format(proj4string))
        layer_info['max_speed'] = max_speed
        layer_info_array[res] = layer_info
        render_tiles(bbox, file_path, proj4string, max_speed, tile_dir,
                     CONFIG.TILE_RASTER_MIN_LEVEL,
                     CONFIG.TILE_RASTER_MAX_LEVEL, "WindNinja")
    zipf = zipfile.ZipFile(str(os.path.join(dir, CONFIG.TILE_RASTER_ZIP_NAME)),
                           'w')
    zipdir(os.path.join(dir, CONFIG.TILE_RASTER_FILE_NAME), zipf)
    zipf.close()

    return CONFIG.TILE_RASTER_ZIP_NAME, layer_info_array
    def run(self):
        # make sure to create /etc/planetlab/virt so others can read that
        # used e.g. in vsys-scripts's sliceip
        tools.get_node_virt()
        try:
            if self.options.daemon: tools.daemon()

            # set log level
            if (self.options.verbose):
                logger.set_level(logger.LOG_VERBOSE)

            # Load /etc/planetlab/plc_config
            config = Config(self.options.config)

            try:
                other_pid = tools.pid_file()
                if other_pid != None:
                    print """There might be another instance of the node manager running as pid %d.
If this is not the case, please remove the pid file %s. -- exiting""" % (other_pid, tools.PID_FILE)
                    return
            except OSError, err:
                print "Warning while writing PID file:", err

            # load modules
            self.loaded_modules = []
            for module in self.modules:
                try:
                    m = __import__(module)
                    logger.verbose("nodemanager: triggering %s.start"%m.__name__)
                    m.start()
                    self.loaded_modules.append(m)
                except ImportError, err:
                    logger.log_exc ("ERROR while loading module %s - skipping:" % module)
                    # if we fail to load any of these, it's really no need to go on any further
                    if module in NodeManager.core_modules:
                        logger.log("FATAL : failed to load core module %s"%module)
                except AttributeError, err:
                    # triggered when module doesn't have a 'start' method
                    logger.log_exc ("ERROR while starting module %s - skipping:" % module)
                    # if we fail to load any of these, it's really no need to go on any further
                    if module in NodeManager.core_modules:
                        logger.log("FATAL : failed to start core module %s"%module)
 def write(perfDataList, timestamp, whitelistDict=None):
     '''
     main formatter function: returns a string of formatted entries
     '''
     entries = []
     for perfEntry in perfDataList:
         if (whitelistedJ2eeType(perfEntry, whitelistDict) == True):
             l.debug("Writing output record as j2eeType is whitelisted")
             formattedEntry = "{} {} {}".format(
                 formatTimeStamp(timestamp), formatTags(None),
                 formatFields(None, whitelistDict))
             entries.append(formattedEntry)
             l.debug(formattedEntry)
         else:
             l.debug(
                 "Output record not written as j2eeType: '%s' is not whitelisted"
                 % (perfEntry["tags"]))
     l.verbose("Number of rows returned: %d" % (len(entries)))
     returnedObj = "\n".join(entries)
     return returnedObj
 def trigger_test_output_test(test_file_path: str,
                              working_dir: str = "",
                              output_file_name: str = "",
                              check_for_existence: bool = False):
     command = ["pytest"]
     command += [f"{test_file_path}::{CombinationValidator.UNIT_TEST_NAME}"]
     if working_dir:
         command += ["--working_dir", working_dir]
     if output_file_name:
         command += ["--output_file_name", output_file_name]
     command = " ".join(command)
     try:
         stdout, stderr, exit_code = run_subprocess(command)
     except CalledProcessError as e:
         if e.returncode is None or e.returncode not in [
                 code for code in ExitCode
         ]:
             logger.info_error(
                 f"{CombinationValidator.__name__}: "
                 f"pytest operation failed. could not run the test.\n{e}")
             return ExitCode.INTERNAL_ERROR
         stdout = e.stdout
         stderr = e.stderr
         exit_code = e.returncode
     except Exception as ex:
         logger.info_error(
             f"{CombinationValidator.__name__}: exception thrown during pytest operation."
             f" could not run the test.\n{ex}")
         return ExitCode.INTERNAL_ERROR
     if not check_for_existence:
         if exit_code == ExitCode.OK:
             logger.verbose(
                 f"{CombinationValidator.__name__}: test '{CombinationValidator.UNIT_TEST_NAME}' passed."
             )
         else:
             logger.info_error(
                 f"{CombinationValidator.__name__}: "
                 f"test '{CombinationValidator.UNIT_TEST_NAME}' failed.")
         logger.debug(
             f"{CombinationValidator.__name__}: {stdout}\n{stderr}.")
     return exit_code
Example #41
def log_call_read(command,timeout=logger.default_timeout_minutes*60,poll=1):
    message=" ".join(command)
    logger.log("log_call: running command %s" % message)
    logger.verbose("log_call: timeout=%r s" % timeout)
    logger.verbose("log_call: poll=%r s" % poll)
    trigger=time.time()+timeout
    try:
        child = subprocess.Popen(command, bufsize=1,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)

        stdout = ""
        while True:
            # see if anything can be read within the poll interval
            (r,w,x)=select.select([child.stdout],[],[],poll)
            if r: stdout = stdout + child.stdout.read(1)
            # is process over ?
            returncode=child.poll()
            # yes
            if returncode != None:
                stdout = stdout + child.stdout.read()
                # child is done and return 0
                if returncode == 0:
                    logger.log("log_call:end command (%s) completed" % message)
                    if stdout != "":
                        logger.log("log_call:stdout: %s" % stdout)
                    return (returncode, stdout)
                # child has failed
                else:
                    logger.log("log_call:end command (%s) returned with code %d" %(message,returncode))
                    return (returncode, stdout)
            # no : still within timeout ?
            if time.time() >= trigger:
                child.terminate()
                logger.log("log_call:end terminating command (%s) - exceeded timeout %d s"%(message,timeout))
                return (-2, None)
                break
    except:
        logger.log_exc("failed to run command %s" % message)

    return (-1, None)
Example #42
def run():
    """
    When run as a thread, wait for event, lock db, deep copy it, release it,
    run bwmon.GetSlivers(), then go back to waiting.
    """
    logger.verbose("bwmon: Thread started")
    while True:
        lock.wait()
        logger.verbose("bwmon: Event received.  Running.")
        database.db_lock.acquire()
        nmdbcopy = copy.deepcopy(database.db)
        database.db_lock.release()
        try:
            if getDefaults(nmdbcopy) and len(
                    bwlimit.tc("class show dev %s" % dev_default)) > 0:
                # class show to check if net:InitNodeLimit:bwlimit.init has run.
                sync(nmdbcopy)
            else:
                logger.log("bwmon: BW limits DISABLED.")
        except:
            logger.log_exc("bwmon failed")
        lock.clear()
    def __init__(self, rec):
        self.name = rec['name']
        logger.verbose ('sliver_libvirt: %s init'%(self.name))

        # Assume the directory with the image and config files
        # are in place

        self.keys = ''
        self.rspec = {}
        self.slice_id = rec['slice_id']
        self.enabled = True
        self.conn = Sliver_Libvirt.getConnection(rec['type'])
        self.xid = bwlimit.get_xid(self.name)

        dom = None
        try:
            dom = self.conn.lookupByName(self.name)
        except:
            logger.log('sliver_libvirt: Domain %s does not exist. ' \
                       'Will try to create it again.' % (self.name))
            self.__class__.create(rec['name'], rec)
            dom = self.conn.lookupByName(self.name)
        self.dom = dom
Example #44
def replace_file_with_string (target, new_contents, chmod=None, remove_if_empty=False):
    try:
        current=file(target).read()
    except:
        current=""
    if current==new_contents:
        # if turns out to be an empty string, and remove_if_empty is set,
        # then make sure to trash the file if it exists
        if remove_if_empty and not new_contents and os.path.isfile(target):
            logger.verbose("tools.replace_file_with_string: removing file %s"%target)
            try: os.unlink(target)
            finally: return True
        return False
    # overwrite target file: create a temp in the same directory
    path=os.path.dirname(target) or '.'
    fd, name = tempfile.mkstemp('','repl',path)
    os.write(fd,new_contents)
    os.close(fd)
    if os.path.exists(target):
        os.unlink(target)
    os.rename(name,target)
    if chmod: os.chmod(target,chmod)
    return True
    def __init__(self, rec):
        self.name = rec['name']
        logger.verbose ('sliver_libvirt: {} init'.format(self.name))

        # Assume the directory with the image and config files
        # are in place

        self.keys = ''
        self.rspec = {}
        self.slice_id = rec['slice_id']
        self.enabled = True
        self.conn = Sliver_Libvirt.getConnection(rec['type'])
        self.xid = bwlimit.get_xid(self.name)

        dom = None
        try:
            dom = self.conn.lookupByName(self.name)
        except:
            logger.log('sliver_libvirt: Domain {} does not exist. ' \
                       'Will try to create it again.'.format(self.name))
            self.__class__.create(rec['name'], rec)
            dom = self.conn.lookupByName(self.name)
        self.dom = dom
Example #46
    def __init__ (self):

        parser = optparse.OptionParser()
        parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False,
                          help='run daemonized')
        parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config',
                          help='PLC configuration file')
        parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session',
                          help='API session key (or file)')
        parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period,
                          help='Polling interval (sec) - default %d'%NodeManager.default_period)
        parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random,
                          help='Range for additional random polling interval (sec) -- default %d'%NodeManager.default_random)
        parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
                          help='more verbose log')
        parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH,
                          help='Path to plugins directory')

        # NOTE: BUG the 'help' for this parser.add_option() won't list plugins from the --path argument
        parser.add_option('-m', '--module', action='store', dest='user_module', default='', help='run a single module')
        (self.options, args) = parser.parse_args()

        if len(args) != 0:
            parser.print_help()
            sys.exit(1)

        # determine the modules to be run
        self.modules = NodeManager.core_modules
        # Deal with plugins directory
        if os.path.exists(self.options.path):
            sys.path.append(self.options.path)
            plugins = [ os.path.split(os.path.splitext(x)[0])[1] for x in glob.glob( os.path.join(self.options.path,'*.py') ) ]
            self.modules += plugins
        if self.options.user_module:
            assert self.options.user_module in self.modules
            self.modules=[self.options.user_module]
            logger.verbose('nodemanager: Running single module %s'%self.options.user_module)
Example #47
    def ensure_created(self, rec):
        """Check account type is still valid.  If not, recreate sliver.
If still valid, check if running and configure/start if not."""
        logger.log_data_in_file(
            rec, "/var/lib/nodemanager/%s.rec.txt" % rec['name'],
            'raw rec captured in ensure_created', logger.LOG_VERBOSE)
        curr_class = self._get_class()
        next_class = type_acct_class[rec['type']]
        if next_class != curr_class:
            self._destroy(curr_class)
            create_sem.acquire()
            try:
                next_class.create(self.name, rec)
            finally:
                create_sem.release()
        if not isinstance(self._acct, next_class): self._acct = next_class(rec)
        logger.verbose("account.Worker.ensure_created: %s, running=%r" %
                       (self.name, self.is_running()))

        # reservation_alive is set on reservable nodes, and its value is a boolean
        if 'reservation_alive' in rec:
            # reservable nodes
            if rec['reservation_alive']:
                # this sliver has the lease, it is safe to start it
                if not self.is_running(): self.start(rec)
                else: self.configure(rec)
            else:
                # not having the lease, do not start it
                self.configure(rec)
        # usual nodes - preserve old code
        # xxx it's not clear what to do when a sliver changes type/class
        # in a reservable node
        else:
            if not self.is_running() or next_class != curr_class:
                self.start(rec)
            else:
                self.configure(rec)
Example #48
0
def replace_file_with_string(target, new_contents,
                             chmod=None, remove_if_empty=False):
    """
    Replace a target file with a new contents checks for changes: does not do
    anything if previous state was already right can handle chmod if requested
    can also remove resulting file if contents are void, if requested performs
    atomically: writes in a tmp file, which is then renamed (from sliverauth
    originally) returns True if a change occurred, or the file is deleted
    """
    try:
        with open(target) as feed:
            current = feed.read()
    except:
        current = ""
    if current == new_contents:
        # if turns out to be an empty string, and remove_if_empty is set,
        # then make sure to trash the file if it exists
        if remove_if_empty and not new_contents and os.path.isfile(target):
            logger.verbose(
                "tools.replace_file_with_string: removing file {}".format(target))
            try:
                os.unlink(target)
            finally:
                return True
        return False
    # overwrite target file: create a temp in the same directory
    path = os.path.dirname(target) or '.'
    fd, name = tempfile.mkstemp('', 'repl', path)
    os.write(fd, new_contents.encode())
    os.close(fd)
    if os.path.exists(target):
        os.unlink(target)
    shutil.move(name, target)
    if chmod:
        os.chmod(target, chmod)
    return True
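
A brief, hedged usage sketch of replace_file_with_string(), assuming the function above is in scope; the path is invented and the behaviour described follows from the code above:

target = '/tmp/replace_file_demo.conf'                       # made-up path
changed = replace_file_with_string(target, "option = 1\n", chmod=0o644)
# first call: True (file created); an identical second call: False (no change)
replace_file_with_string(target, "", remove_if_empty=True)  # truncates the file, returns True
replace_file_with_string(target, "", remove_if_empty=True)  # now already empty: unlinks it, returns True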
Example #49
0
    def configure(self, rec):
        """Write <rec['keys']> to my authorized_keys file."""
        logger.verbose('account: configuring %s' % self.name)
        new_keys = rec['keys']
        if new_keys != self.keys:
            # get the unix account info
            gid = grp.getgrnam("slices")[2]
            pw_info = pwd.getpwnam(self.name)
            uid = pw_info[2]
            pw_dir = pw_info[5]

            # write out authorized_keys file and conditionally create
            # the .ssh subdir if need be.
            dot_ssh = os.path.join(pw_dir, '.ssh')
            if not os.path.isdir(dot_ssh):
                if not os.path.isdir(pw_dir):
                    logger.verbose(
                        'account: WARNING: homedir %s does not exist for %s!' %
                        (pw_dir, self.name))
                    os.mkdir(pw_dir)
                    os.chown(pw_dir, uid, gid)
                os.mkdir(dot_ssh)

            auth_keys = os.path.join(dot_ssh, 'authorized_keys')
            tools.write_file(auth_keys, lambda f: f.write(new_keys))

            # set access permissions and ownership properly
            os.chmod(dot_ssh, 0700)
            os.chown(dot_ssh, uid, gid)
            os.chmod(auth_keys, 0600)
            os.chown(auth_keys, uid, gid)

            # set self.keys to new_keys only when all of the above ops succeed
            self.keys = new_keys

            logger.log('account: %s: installed ssh keys' % self.name)
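
In this version rec['keys'] is one string holding the entire authorized_keys contents, and the whole block is skipped when it matches the cached self.keys. A hedged sketch of how it might be driven, where acct stands for an already-initialised account object backed by an existing unix user:

rec = {'keys': 'ssh-rsa AAAA... user1\nssh-rsa BBBB... user2'}   # made-up keys
acct.configure(rec)   # creates ~user/.ssh if needed, writes authorized_keys, fixes modes and ownership
acct.configure(rec)   # no-op: rec['keys'] already equals acct.keys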
Example #50
0
def InitI2(plc, data):
    if not 'groups' in data: return

    if "Internet2" in data['groups']:
        logger.log("net: This is an Internet2 node.  Setting rules.")
        i2nodes = []
        i2nodeids = plc.GetNodeGroups(["Internet2"])[0]['node_ids']
        for node in plc.GetInterfaces({"node_id": i2nodeids}, ["ip"]):
            # Get the IPs
            i2nodes.append(node['ip'])
        # this will create the set if it doesn't already exist
        # and add IPs that don't exist in the set rather than
        # just recreating the set.
        bwlimit.exempt_init('Internet2', i2nodes)

        # set the iptables classification rule if it doesn't exist.
        cmd = '-A POSTROUTING -m set --set Internet2 dst -j CLASSIFY --set-class 0001:2000 --add-mark'
        rules = []
        ipt = os.popen("/sbin/iptables-save")
        for line in ipt.readlines(): rules.append(line.strip(" \n"))
        ipt.close()
        if cmd not in rules:
            logger.verbose("net:  Adding iptables rule for Internet2")
            os.popen("/sbin/iptables -t mangle " + cmd)
Example #52
0
def get_targets(targets):
    # parses an input of targets to get a list of all possible ips
    target_list = []

    try:
        with open(targets, 'r') as file:
            contents = file.readlines()
            for i in (contents):
                target = i.rstrip()
                target_list.append(target)
            logger.verbose('Amount of targets from input: {}'.format(logger.BLUE(str(len(target_list)))))
            return target_list
    except:
        try:
            if "/" in targets:
                try:
                    subnet = IPNetwork(targets)
                except:
                    logger.red('failed to parse')
                    quit()

                for i in subnet:
                    tmp_str = str(i)
                    last_octet = str(tmp_str.split('.')[3])
                    if last_octet == '0' or last_octet == '255':
                        pass
                    else:
                        target_list.append(str(i))
                logger.verbose('Amount of targets from input: {}'.format(logger.BLUE(str(len(target_list)))))
                return target_list
            elif "," in targets:
                ips=targets.split(',')
                for ip in ips:
                    target_list.append(ip)
                logger.verbose('Amount of targets from input: {}'.format(logger.BLUE(str(len(target_list)))))
                return target_list

            else:
                target_list.append(targets)
                logger.verbose('Amount of targets from input: {}'.format(logger.BLUE(str(len(target_list)))))
                return target_list
        except:
            logger.red('Failed to parse targets.')
            quit()
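
A quick, hedged recap of what get_targets() returns for its inline input forms (file input aside), assuming the function above is in scope; the addresses are placeholders, and IPNetwork is assumed to come from the netaddr package as used above:

get_targets('192.0.2.10')                 # -> ['192.0.2.10']
get_targets('192.0.2.10,192.0.2.11')      # -> ['192.0.2.10', '192.0.2.11']
get_targets('192.0.2.0/24')               # -> 254 addresses; the .0 and .255 hosts are skipped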
Example #53
0
    def is_running(self):
        ''' Return True if the domain is running '''
        logger.verbose('sliver_libvirt: %s is_running'%self.name)
        try:
            [state, _, _, _, _] = self.dom.info()
            if state == libvirt.VIR_DOMAIN_RUNNING:
                logger.verbose('sliver_libvirt: %s is RUNNING'%self.name)
                return True
            else:
                info = debuginfo(self.dom)
                logger.verbose('sliver_libvirt: %s is ' \
                               'NOT RUNNING...\n%s'%(self.name, info))
                return False
        except:
            logger.verbose('sliver_libvirt: UNEXPECTED ERROR in ' \
                           '%s: %s'%(self.name, sys.exc_info()[1]))
            print 'sliver_libvirt: UNEXPECTED ERROR in ' \
                  '%s: %s'%(self.name, sys.exc_info()[1])
            return False
Example #54
0
def GetSlivers(data, config = None, plc=None, fullupdate=True):
    """This function has two purposes.  One, convert GetSlivers() data
    into a more convenient format.  Two, even if no updates are coming
    in, use the GetSlivers() heartbeat as a cue to scan for expired
    slivers."""

    logger.verbose("slivermanager: Entering GetSlivers with fullupdate=%r"%fullupdate)
    for key in data.keys():
        logger.verbose('slivermanager: GetSlivers key : ' + key)

    node_id = None
    try:
        f = open('/etc/planetlab/node_id')
        try: node_id = int(f.read())
        finally: f.close()
    except: logger.log_exc("slivermanager: GetSlivers failed to read /etc/planetlab/node_id")

    if data.has_key('node_id') and data['node_id'] != node_id: return

    if data.has_key('networks'):
        for network in data['networks']:
            if network['is_primary'] and network['bwlimit'] is not None:
                DEFAULT_ALLOCATION['net_max_rate'] = network['bwlimit'] / 1000

    # Take initscripts (global) returned by API, build a hash scriptname->code
    iscripts_hash = {}
    if 'initscripts' not in data:
        logger.log_missing_data("slivermanager.GetSlivers",'initscripts')
        return
    for initscript_rec in data['initscripts']:
        logger.verbose("slivermanager: initscript: %s" % initscript_rec['name'])
        iscripts_hash[str(initscript_rec['name'])] = initscript_rec['script']

    adjustReservedSlivers (data)
    
    for sliver in data['slivers']:
        logger.verbose("slivermanager: %s: slivermanager.GetSlivers in slivers loop"%sliver['name'])
        rec = sliver.copy()
        rec.setdefault('timestamp', data['timestamp'])

        # convert attributes field to a proper dict
        attributes = {}
        for attr in rec.pop('attributes'): attributes[attr['tagname']] = attr['value']
        rec.setdefault("attributes", attributes)

        # squash keys
        keys = rec.pop('keys')
        rec.setdefault('keys', '\n'.join([key_struct['key'] for key_struct in keys]))

        ## 'Type' isn't returned by GetSlivers() for whatever reason.  We're overloading
        ## instantiation here, but I suppose it's the same thing when you think about it. -FA
        # Handle nm-controller here
        if rec['instantiation'].lower() == 'nm-controller':
            rec.setdefault('type', attributes.get('type', 'controller.Controller'))
        else:
            rec.setdefault('type', attributes.get('type', sliver_default_type))

        # set the vserver reference.  If none, set to default.
        rec.setdefault('vref', attributes.get('vref', 'default'))

        ### set initscripts; set empty rec['initscript'] if not
        # if tag 'initscript_code' is set, that's what we use
        iscode = attributes.get('initscript_code','')
        if iscode:
            rec['initscript']=iscode
        else:
            isname = attributes.get('initscript')
            if isname is not None and isname in iscripts_hash:
                rec['initscript'] = iscripts_hash[isname]
            else:
                rec['initscript'] = ''

        # set delegations, if none, set empty
        rec.setdefault('delegations', attributes.get("delegations", []))

        # extract the implied rspec
        rspec = {}
        rec['rspec'] = rspec
        for resname, default_amount in DEFAULT_ALLOCATION.iteritems():
            try:
                t = type(default_amount)
                amount = t.__new__(t, attributes[resname])
            except (KeyError, ValueError): amount = default_amount
            rspec[resname] = amount

        # add in sysctl attributes into the rspec
        for key in attributes.keys():
            if key.find("sysctl.") == 0:
                rspec[key] = attributes[key]

        # also export tags in rspec so they make it to the sliver_vs.start call
        rspec['tags']=attributes

        database.db.deliver_record(rec)
    if fullupdate: database.db.set_min_timestamp(data['timestamp'])
    # slivers are created here.
    database.db.sync()
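
Two of the conversions above are easy to check on their own: the attributes list of tagname/value dicts is flattened into a plain dict, and the keys list is squashed into a single newline-joined string. A small standalone sketch with invented values:

attribute_list = [{'tagname': 'vref', 'value': 'default'},
                  {'tagname': 'initscript', 'value': 'myscript'}]
attributes = {}
for attr in attribute_list:
    attributes[attr['tagname']] = attr['value']
# attributes == {'vref': 'default', 'initscript': 'myscript'}

key_structs = [{'key': 'ssh-rsa AAAA... user1'}, {'key': 'ssh-rsa BBBB... user2'}]
keys = '\n'.join([key_struct['key'] for key_struct in key_structs])
# keys is now a single authorized_keys-style blob, one key per line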
Example #55
0
    def GetSlivers(self, config, plc):
        """Retrieves GetSlivers at PLC and triggers callbacks defined in modules/plugins"""

        try:
            logger.log("nodemanager: Syncing w/ PLC")
            # retrieve GetSlivers from PLC
            data = plc.GetSlivers()
            # use the magic 'default' slice to retrieve system-wide defaults
            self.getPLCDefaults(data, config)
            # tweak the 'vref' attribute from GetSliceFamily
            self.setSliversVref(data)
            # dump it too, so it can be retrieved later in case of comm. failure
            self.dumpSlivers(data)
            # log it for debug purposes, no matter what verbose is
            logger.log_slivers(data)
            logger.verbose("nodemanager: Sync w/ PLC done")
            last_data = data
        except:
            logger.log_exc("nodemanager: failed in GetSlivers")
            #  XXX So some modules can at least bootstrap.
            logger.log(
                "nodemanager:  Can't contact PLC to GetSlivers().  Continuing."
            )
            data = {}
            # for modules that request it through the 'persistent_data' property
            last_data = self.loadSlivers()
        logger.log("*************************************************")
        #logger.log("we should provide these information to PEARL TEAM")
        logger.log_map({}, "******************************************")
        #wangyang,get slice map from data fetched from myplc
        slicemap = self.getslicemap(data)
        #logger.log_map(slicemap,"slicemap")
        #wangyang,get slice map from db
        slicemapdb = self.loadmap(slicemap)
        #logger.log_map(slicemapdb,"slicedb")
        #wangyang,compare two files
        slicemapdb = self.handlemap(slicemap, slicemapdb)
        #logger.log_map(slicemapdb,"dbafter compare")
        #wangyang,update to router
        slicemapdb = self.updatetoRouter(slicemapdb)
        #logger.log_map(slicemapdb,"db after update")
        #wangyang,update to router
        self.savemap(slicemapdb)
        #wangyang,write into txt
        logger.log_map(slicemapdb, "write to db")
        '''
        for sliver in last_data['slivers']:
            logger.log("sliceid is %s"%sliver['slice_id'])
            if sliver['slice_id'] > 4:
                logfile = '/var/log/slice/slice.'+sliver['name']
                #logger.logslice("slicename: %s"%sliver['name'],logfile)    
                logger.logslice("sliceid: %s"%sliver['slice_id'],logfile)
                vmid=self.createslver(sliver['slice_id']) 
                logger.log("vmid is %s"%vmid)
                logger.logmap(sliver['slice_id'],vmid)
                
                #logger.logslice("keys: %s"%sliver['keys'],logfile)
                '''
        logger.log("*************************************************")
        #  Invoke GetSlivers() functions from the callback modules
        for module in self.loaded_modules:
            logger.verbose('nodemanager: triggering %s.GetSlivers' %
                           module.__name__)
            try:
                callback = getattr(module, 'GetSlivers')
                module_data = data
                if getattr(module, 'persistent_data', False):
                    module_data = last_data
                callback(module_data, config, plc)
            except:
                logger.log_exc(
                    "nodemanager: GetSlivers failed to run callback for module %r"
                    % module)
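
The persistent_data check above is an attribute-based opt-in: a plugin that sets a module-level flag gets the last cached GetSlivers answer when PLC is unreachable, instead of the empty dict. A hedged sketch of what such a plugin could look like (module name and body are made up):

# hypothetical plugins/example_plugin.py
persistent_data = True              # opt in to receiving cached data when PLC is down

def start():
    pass

def GetSlivers(data, config=None, plc=None):
    # 'data' is either the fresh PLC answer or the last dump reloaded from disk
    for sliver in data.get('slivers', []):
        pass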
Example #56
0
    def __init__(self, name):
        self.name = name
        self.keys = ''
        logger.verbose('account: Initing account %s' % name)
Example #57
0
def render_tiles(bbox,
                 data_file,
                 proj4string,
                 max_speed,
                 tile_dir,
                 minZoom=1,
                 maxZoom=18,
                 name="unknown",
                 num_threads=NUM_THREADS,
                 tms_scheme=False):
    logger.verbose("render_tiles({} {} {} {} {} {})".format(
        bbox, data_file, tile_dir, minZoom, maxZoom, name))

    # Launch rendering threads
    queue = Queue(32)
    printLock = threading.Lock()
    renderers = {}
    for i in range(num_threads):
        renderer = RenderThread(tile_dir, data_file, proj4string, max_speed,
                                queue, printLock, maxZoom)
        render_thread = threading.Thread(target=renderer.loop)
        render_thread.start()
        renderers[i] = render_thread

    if not os.path.isdir(tile_dir):
        os.makedirs(tile_dir)

    gprj = GoogleProjection(maxZoom + 1)
    ll0 = (bbox[0], bbox[3])
    ll1 = (bbox[2], bbox[1])

    for z in range(minZoom, maxZoom + 1):
        px0 = gprj.fromLLtoPixel(ll0, z)
        px1 = gprj.fromLLtoPixel(ll1, z)
        logger.debug("\nZOOOOOOOOM: {}".format(z))
        logger.debug("PX 0: {}".format(px0))
        logger.debug("PX 1: {}".format(px1))
        logger.debug("BBOX: {}".format(bbox))
        for x in range(int(px0[0] / (SIZE_X * 1.0)),
                       int(px1[0] / (SIZE_X * 1.0)) + 1):
            # Validate x co-ordinate
            if (x < 0) or (x >= 2**z):
                continue

            # check if we have directories in place
            zoom = "%s" % z
            str_x = "%s" % x
            zx_dir = os.path.join(tile_dir, zoom, str_x)
            if not os.path.isdir(zx_dir):
                os.makedirs(zx_dir)

            for y in range(int(px0[1] / (SIZE_Y * 1.0)),
                           int(px1[1] / (SIZE_Y * 1.0)) + 1):
                # Validate y co-ordinate
                if (y < 0) or (y >= 2**z):
                    continue
                # flip y to match OSGEO TMS spec
                if tms_scheme:
                    str_y = "%s" % ((2**z - 1) - y)
                else:
                    str_y = "%s" % y

                tile_uri = os.path.join(tile_dir, zoom, str_x,
                                        '{0}.png'.format(str_y))
                # Submit tile to be rendered into the queue
                t = (name, tile_uri, x, y, z)
                try:
                    queue.put(t)
                except KeyboardInterrupt:
                    raise SystemExit("Ctrl-c detected, exiting...")

    # Signal render threads to exit by sending empty request to queue
    for i in range(num_threads):
        queue.put(None)
    # wait for pending rendering jobs to complete
    queue.join()
    for i in range(num_threads):
        renderers[i].join()
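
The nested loops above reduce to one piece of arithmetic: pixel coordinates from GoogleProjection are divided by the tile size (SIZE_X / SIZE_Y in this code, commonly 256 px) and clamped to the 0 .. 2**z - 1 indices valid at zoom z. A standalone, hedged sketch of that range computation, assuming square 256-px tiles:

TILE_SIZE = 256   # assumed; the function above parameterises this as SIZE_X / SIZE_Y

def tile_range(px_start, px_end, z, tile_size=TILE_SIZE):
    """Tile indices covered by a pixel span at zoom level z, clamped to the valid range."""
    lo = int(px_start / float(tile_size))
    hi = int(px_end / float(tile_size)) + 1
    return [t for t in range(lo, hi) if 0 <= t < 2 ** z]

# e.g. tile_range(0.0, 1024.0, 3) -> [0, 1, 2, 3, 4]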
Example #58
0
    def is_running(self):
        logger.verbose("controller: is_running:  %s" % self.name)
        return getpwnam(self.name)[6] == self.SHELL
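
The check above leans on the login shell recorded in the password database: getpwnam() yields a struct whose seventh field (index 6) is the shell, and the account counts as running when it equals the class-level SHELL constant. A hedged standalone equivalent; the expected_shell default is illustrative only:

import pwd

def has_shell(account_name, expected_shell='/bin/vsh'):
    # '/bin/vsh' is a placeholder; the real comparison uses self.SHELL
    try:
        return pwd.getpwnam(account_name).pw_shell == expected_shell
    except KeyError:
        return False   # no such unix account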