Code example #1
File: testdirectory.py  Project: fortian/python-etce
    def __init__(self, rootdir, basedir_override):

        self._rootdir = rootdir

        self._platform = Platform()

        self._testdoc = TestFileDoc(
            os.path.join(self._rootdir, TestDirectory.TESTFILENAME))

        self._merged = not self._testdoc.has_base_directory

        self._basedir = self._testdoc.base_directory

        if not basedir_override is None:
            self._basedir = basedir_override

        self._configfile = ConfigFileDoc(
            os.path.join(self._rootdir, TestDirectory.CONFIGFILENAME))

        # add the hostfile to the test directory
        # before copying it to hostfile's root nodes
        hostfile = os.path.join(self._rootdir, TestDirectory.HOSTFILENAME)

        self._verified_nodes = []
        if os.path.exists(hostfile) or os.path.isfile(hostfile):
            self._verified_nodes = self._verify_nodes_in_hostfile(hostfile)
Code example #2
File: kill.py  Project: fortian/python-etce
    def kill(self, signal=signal.SIGQUIT, sudo=True):
        p = Platform()

        my_pidfile_toks = ('etce', p.hostname())

        lockfiledir = os.path.join(
            ConfigDictionary().get('etce', 'WORK_DIRECTORY'), 'lock')

        if not os.path.isdir(lockfiledir):
            return

        pidfiles = os.listdir(lockfiledir)

        for pidfile in pidfiles:
            toks = pidfile.split('.')

            if len(toks) < 2:
                continue

            if my_pidfile_toks == (toks[0], toks[1]):
                fullpidfile = os.path.join(lockfiledir, pidfile)

                pid = p.kill(fullpidfile, signal, sudo)

                if pid:
                    print('killed process "%d" from pidfile "%s"'
                          % (pid, fullpidfile))
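The loop above keys on a pidfile naming convention whose first two dot-separated tokens are 'etce' and the local hostname. A minimal standalone sketch of that matching (the directory path and file names are assumptions for illustration, not part of the project API):

import os

def matching_pidfiles(lockfiledir, hostname):
    # collect pidfiles whose name starts with 'etce.<hostname>.'
    matches = []
    for pidfile in os.listdir(lockfiledir):
        toks = pidfile.split('.')
        if len(toks) >= 2 and (toks[0], toks[1]) == ('etce', hostname):
            matches.append(os.path.join(lockfiledir, pidfile))
    return matches

# e.g. matching_pidfiles('/tmp/etce/lock', 'node-001') might return
# ['/tmp/etce/lock/etce.node-001.mywrapper.pid']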
Code example #3
def startfield(args):
    this_hostname = Platform().hostname()

    plandoc = LXCPlanFileDoc(args.lxcplanfile)

    config = ConfigDictionary()

    workdir = config.get('etce', 'WORK_DIRECTORY')

    if not os.path.exists(workdir):
        raise LXCError('ETCE WORK_DIRECTORY "%s" not found. ' \
                       'Please create it before starting.' % workdir)

    # lockfile
    lockfilename = \
        os.path.join(plandoc.lxc_root_directory(this_hostname),
                     'etce.lxc.lock')

    if os.path.isfile(lockfilename):
        err = 'Detected an active lxc field with root at: %s. ' \
              'Run "etce-lxc stop" first.' % \
              plandoc.lxc_root_directory(this_hostname)
        raise LXCError(err)

    startlxcs(plandoc, args.writehosts, args.forcelxcroot, args.dryrun)

    if not args.dryrun:
        shutil.copy(args.lxcplanfile, lockfilename)

    other_hosts = set(plandoc.hostnames()).difference(
        ['localhost', this_hostname])

    # start containers on other hosts, if any
    if other_hosts:
        client = None
        try:
            client = ClientBuilder().build(\
                        other_hosts,
                        user=args.user,
                        port=args.port)

            # push the file and execute
            client.put(args.lxcplanfile, '.', other_hosts, doclobber=True)

            # on the destination node the netplan file gets pushed to the
            # ETCE WORK_DIRECTORY
            command = 'lxcmanager startlxcs %s writehosts=%s forcelxcroot=%s' \
                      % (os.path.basename(args.lxcplanfile),
                         args.writehosts,
                         args.forcelxcroot)

            ret = client.execute(command, other_hosts)

            for k in ret:
                print('[%s] return: %s' % (k, ret[k].retval['result']))

        finally:
            if client:
                client.close()
Code example #4
    def __init__(self, wrappername, wrapperinstance, trialargs, testargs,
                 config, testdir):
        self._trialargs = trialargs
        self._testargs = testargs
        self._config = config
        self._testdir = testdir
        self._platform = Platform()
        self._wrappername = wrappername
        self._sudo = False
        self._default_pidfilename = \
            '%s/etce.%s.%s.pid' \
            % (os.path.join(self._config.get('etce', 'WORK_DIRECTORY'), 'lock'),
               self.platform.hostname(),
               self._wrappername)

        self._description = wrapperinstance.__doc__

        # start with reserved args set here ...
        self._args = {
            'default_pidfilename': self._default_pidfilename,
            'nodename': self._testdir.nodename(),
            'nodeid': self._testdir.nodeid(),
            'testname': self._testdir.name(),
            'wrappername': self._wrappername,
            'infile': None,
            'outfile': None
        }

        # ... and the ones passed in
        self._args.update(trialargs)

        # these are the reserved args that cannot be overwritten
        self._reserved_args = set(self._args)

        # fill in the arguments registered by the wrapper
        wrapperinstance.register(self)

        storefile = os.path.join(self._trialargs['logdirectory'], 'etce.store')

        self._wrapperstore = WrapperStore(storefile)

        self._wrapperstore.update(
            {'etce': {
                'starttime': self._trialargs['starttime']
            }}, self._args['nodename'])
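The reserved-argument handling above reappears in the full WrapperContextImpl listing (code example #15), where register_argument resolves each wrapper argument with a fixed precedence: test-directory config first, then test arguments, then the wrapper's default. A small standalone sketch of that lookup order (the dictionaries are stand-ins for the config and testargs objects):

def resolve_argument(argname, defaultval, configvals, testargs):
    # precedence: config file entry, then test argument, then wrapper default
    if argname in configvals:
        return configvals[argname]
    if argname in testargs:
        return testargs[argname]
    return defaultval

# resolve_argument('duration', 1, {}, {'duration': 5}) -> 5
# resolve_argument('duration', 1, {}, {})              -> 1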
Code example #5
    def kill(self):
        p = Platform()

        my_pidfile_prefix = 'etce.%s' % p.hostname()

        lockfiledir = os.path.join(
            ConfigDictionary().get('etce', 'WORK_DIRECTORY'), 'lock')

        if not os.path.isdir(lockfiledir):
            return

        for pidfile in os.listdir(lockfiledir):
            if pidfile.startswith(my_pidfile_prefix):
                fullpidfile = os.path.join(lockfiledir, pidfile)

                pid = p.kill(fullpidfile)

                if pid:
                    print('killed process "%d" from pidfile "%s"'
                          % (pid, fullpidfile))
Code example #6
File: executer.py  Project: fortian/python-etce
    def step(self, stepname, starttime, logsubdirectory):
        wrappers = self._stepsfiledoc.getwrappers(stepname)

        hostname = Platform().hostname()

        hostdir = os.path.join(self._test.location(), hostname)

        if not os.path.exists(hostdir):
            return

        logdirectory = os.path.join(self._config.get('etce', 'WORK_DIRECTORY'),
                                    logsubdirectory, hostname)

        if not os.path.exists(logdirectory):
            os.makedirs(logdirectory)

        if wrappers:
            trialargs = {
                'logdirectory': logdirectory,
                'starttime': starttime,
                'stepname': stepname,
                'dryrun': False
            }

            wldr = WrapperLoader()

            for wrapperentry, methodname, testargs in wrappers:
                wrapperinstance = \
                    wldr.loadwrapper(wrapperentry.name,
                                     self._stepsfiledoc.getpackageprefixes())

                # ensure each wrapper is called with the testdirectory as
                # the current working directory, and with its own
                # instance of the wrapper context
                os.chdir(hostdir)

                ctx = WrapperContext(
                    WrapperContextImpl(wrapperentry.name, wrapperinstance,
                                       trialargs, testargs, self._config,
                                       self._test))

                if methodname == 'run':
                    # run calls prerun, run, postrun to encourage
                    #   pre/post condition checks
                    wrapperinstance.prerun(ctx)
                    wrapperinstance.run(ctx)
                    wrapperinstance.postrun(ctx)
                else:
                    wrapperinstance.stop(ctx)
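step() expects each loaded wrapper to expose register(), prerun()/run()/postrun(), and stop(). A hypothetical minimal wrapper illustrating that call sequence (the class name, argument name, and sleep command are made up; only the method names and the context calls are taken from the listings above and in code example #15):

class SleepWrapper(object):
    '''Illustrative wrapper: runs sleep for a configurable number of seconds.'''

    def register(self, registrar):
        # mirrors ArgRegistrar.register_argument(argname, defaultval, description)
        registrar.register_argument('duration', 1, 'seconds to sleep')

    def prerun(self, ctx):
        pass  # pre-condition checks go here

    def run(self, ctx):
        # ctx.run(command, argstr) per the WrapperContextImpl.run signature;
        # attribute access on ctx.args assumes ArgProxy exposes registered
        # arguments as attributes
        ctx.run('sleep', str(ctx.args.duration))

    def postrun(self, ctx):
        pass  # post-condition checks go here

    def stop(self, ctx):
        ctx.stop()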
Code example #7
def stopfield(args):
    workdir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    lockfilename = os.path.join(workdir, 'lxcroot', 'etce.lxc.lock')

    if not os.path.exists(lockfilename) or not os.path.isfile(lockfilename):
        raise LXCError('Lockfile "%s" not found. Quitting.' % lockfilename)

    plandoc = LXCPlanFileDoc(lockfilename)

    other_hosts = set(plandoc.hostnames()).difference(
        ['localhost', Platform().hostname()])

    # stop containers on other hosts, if any
    try:
        if other_hosts:
            client = None
            try:
                client = ClientBuilder().build(other_hosts,
                                               user=args.user,
                                               port=args.port)

                # push the file and execute
                client.put(lockfilename, '.', other_hosts, doclobber=True)

                # on the destination node the netplan file gets pushed to the
                # ETCE WORK_DIRECTORY
                command = 'lxcmanager stoplxcs %s' % os.path.basename(
                    lockfilename)

                ret = client.execute(command, other_hosts)

                for k in ret:
                    print('[%s] return: %s' % (k, ret[k].retval['result']))

            finally:
                if client:
                    client.close()
    finally:
        stoplxcs(plandoc)
Code example #8
File: testdirectory.py  Project: fortian/python-etce
class TestDirectory(object):
    TESTFILENAME = 'test.xml'
    STEPSFILENAME = 'steps.xml'
    CONFIGFILENAME = 'config.xml'
    HOSTFILENAME = 'nodefile.txt'
    DOCSUBDIRNAME = 'doc'

    def __init__(self, rootdir, basedir_override):

        self._rootdir = rootdir

        self._platform = Platform()

        self._testdoc = TestFileDoc(
            os.path.join(self._rootdir, TestDirectory.TESTFILENAME))

        self._merged = not self._testdoc.has_base_directory

        self._basedir = self._testdoc.base_directory

        if not basedir_override is None:
            self._basedir = basedir_override

        self._configfile = ConfigFileDoc(
            os.path.join(self._rootdir, TestDirectory.CONFIGFILENAME))

        # add the hostfile to the test directory
        # before copying it to hostfile's root nodes
        hostfile = os.path.join(self._rootdir, TestDirectory.HOSTFILENAME)

        self._verified_nodes = []
        if os.path.exists(hostfile) or os.path.isfile(hostfile):
            self._verified_nodes = self._verify_nodes_in_hostfile(hostfile)

    def hasconfig(self, wrappername, argname):
        return self._configfile.hasconfig(wrappername, argname)

    def getconfig(self, wrappername, argname, default):
        return self._configfile.getconfig(wrappername, argname, default)

    def location(self):
        return self._rootdir

    def info(self):
        return {'name': self.name(), 'description': self.description()}

    def name(self):
        return self._testdoc.name

    def tags(self):
        return self._testdoc.tags

    def description(self):
        return self._testdoc.description

    def overlay_names(self):
        return self._find_overlay_names()

    def stepsfile(self):
        return TestDirectory.STEPSFILENAME

    def __str__(self):
        info = self.info()
        s = '-' * len(info['name']) + '\n'
        s += info['name'] + '\n'
        s += '-' * len(info['name']) + '\n'
        s += 'location:\n\t%s\n' % self._rootdir
        s += 'description:\n\t%s\n' % info['description']
        s += 'overlays:\n'
        for p in self.overlay_names():
            s += '\t%s\n' % p
        return s

    def determine_nodenames(self):
        # Determine the nodenames defined by the test files and templates:
        #
        # 1. read the base directory and test directory and take any
        #    subdirectory that does not end with .TEMPLATE_DIRECTORY_SUFFIX to
        #    be a nodename
        #
        # 2. add all of the directory names that will be generated
        #    by template directories
        #
        # 3. remove the doc subdirectory (the doc subdirectory is ignored,
        #                                 a place for additional test
        #                                 documentation).
        #
        template_suffix = ConfigDictionary().get('etce',
                                                 'TEMPLATE_DIRECTORY_SUFFIX')

        hostnames = set([])

        # if this is already a merged test directory, ignore base directory
        # search
        if not self._merged:
            for entry in os.listdir(
                    os.path.join(self.location(), self._basedir)):
                abs_entry = os.path.join(self.location(), self._basedir, entry)

                if os.path.isdir(abs_entry):
                    if entry.split('.')[-1] == template_suffix:
                        continue
                    hostnames.update([entry])

        for entry in os.listdir(self.location()):
            abs_entry = os.path.join(self.location(), entry)

            if os.path.isdir(abs_entry):
                if entry.split('.')[-1] == template_suffix:
                    continue
                hostnames.update([entry])

        formatted_dirnames = self._testdoc.formatted_directory_names

        hostnames.update(formatted_dirnames)

        # and the doc directory
        hostnames.difference_update([TestDirectory.DOCSUBDIRNAME])

        return list(hostnames)

    def _verify_nodes_in_hostfile(self, hostfile):
        field = Field(hostfile)

        hostnames = field.leaves()

        nodenames = self.determine_nodenames()

        for nodename in nodenames:
            if nodename not in hostnames:
                errstr = 'Hostname "%s" required by test, but not ' \
                         'found in nodefile "%s". Quitting.' \
                         % (nodename, hostfile)
                raise TestDirectoryError(errstr)

        return nodenames

    def nodename_from_hostname(self, hostname):
        if hostname in self._verified_nodes:
            return hostname

        samehosts = self._find_this_host_names(self._verified_nodes)

        if len(samehosts) == 1:
            return samehosts[0]

        return None

    def nodename(self):
        return self.nodename_from_hostname(self._platform.hostname())

    def nodeid(self):
        nodename = self.nodename()

        if not nodename:
            return None

        regex = re.compile(r'(\d+)')

        match = regex.search(nodename)

        if match:
            return int(match.group(1))
        return None

    def getfile(self, name):
        for entry in os.listdir(os.path.join(self._rootdir, self.nodename())):
            if entry == name:
                if os.path.isfile(entry):
                    return os.path.join(self._rootdir, self.nodename(), entry)

        return None

    def _find_this_host_names(self, namelist):
        ''' Determine which names in namelist map to an 
            ip address on this host.
        '''
        return [
            other for other in namelist
            if self._platform.hostname_has_local_address(other)
        ]

    def _find_overlay_names(self):
        overlays = set([])

        search_dirs = [self._rootdir]

        if not self._merged:
            # push the basedirectory if this directory is not already merged
            search_dirs.insert(0, os.path.join(self.location(), self._basedir))

        for search_dir in search_dirs:
            for dirname, dirnames, filenames in os.walk(search_dir):
                if TestDirectory.DOCSUBDIRNAME in dirname.split('/'):
                    # ignore doc sub directory
                    continue
                for filename in filenames:
                    overlays.update(
                        get_file_overlays(os.path.join(dirname, filename)))

        return tuple(sorted(overlays))
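nodeid() above derives a numeric id from the first run of digits in the node name. The same convention in isolation (the node names are illustrative):

import re

def nodeid_from_name(nodename):
    # first run of digits in the name, or None if there is none
    match = re.search(r'(\d+)', nodename)
    return int(match.group(1)) if match else None

# nodeid_from_name('node-003') -> 3
# nodeid_from_name('server')   -> None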
Code example #9
File: lxcmanager.py  Project: prj8121/python-etce
    def __init__(self):
        self._platform = Platform()
Code example #10
File: lxcmanager.py  Project: prj8121/python-etce
class LXCManagerImpl(object):
    def __init__(self):
        self._platform = Platform()


    def start(self, plandoc, writehosts, dryrun=False):
        hostname = socket.gethostname().split('.')[0]
        lxcrootdir = plandoc.lxc_root_directory(hostname)
        containers = plandoc.containers(hostname)

        if not containers:
            print('No containers assigned to "%s". Skipping.' % hostname)
            return
        
        if not lxcrootdir[0] == '/':
            print('root_directory "%s" for hostname "%s" is not an absolute path. ' \
                  'Quitting.' % \
                  (lxcrootdir, hostname))
            return

        directory_level = len(lxcrootdir.split('/')) - 1
        if not directory_level >= 3:
            print('root_directory "%s" for hostname "%s" is less than 3 levels deep. ' \
                  'Quitting.' % \
                  (lxcrootdir, hostname))
            return

        allowed_roots = ('tmp', 'opt', 'home', 'var', 'mnt')
        if not lxcrootdir.split('/')[1] in allowed_roots:
            print('root_directory "%s" for hostname "%s" is not located in one of {%s} ' \
                  'directory trees. Quitting.' % \
                  (lxcrootdir, hostname, ', '.join(allowed_roots)))
            return

        if lxcrootdir is None or len(containers) == 0:
            print('No containers assigned to host %s. Quitting.' % hostname)
            return

        # delete and remake the node root
        if os.path.exists(lxcrootdir):
            print('Removing contents of "%s" directory.' % lxcrootdir)

            for subentry in os.listdir(lxcrootdir):
                entry = os.path.join(lxcrootdir, subentry)
                if os.path.isfile(entry):
                    os.remove(entry)
                elif os.path.isdir(entry):
                    shutil.rmtree(entry)
        else:
            os.makedirs(lxcrootdir)

        # set kernelparameters
        kernelparameters = plandoc.kernelparameters(hostname)
        if len(kernelparameters) > 0:
            print('Setting kernel parameters:')

            for kernelparamname,kernelparamval in kernelparameters.items():
                os.system('sysctl %s=%s' % (kernelparamname,kernelparamval))

        # bring up bridge
        if not dryrun:
            for _,bridge in plandoc.bridges(hostname).items():
                if not bridge.persistent:
                    print('Bringing up bridge: %s' % bridge.devicename)

                    self._platform.bridgeup(bridge.devicename,
                                            bridge.addifs,
                                            enablemulticastsnooping=True)

                    if not bridge.ipv4 is None:
                        self._platform.adddeviceaddress(bridge.devicename,
                                                        bridge.ipv4)

                    if not bridge.ipv6 is None:
                        self._platform.adddeviceaddress(bridge.devicename,
                                                        bridge.ipv6)


                    time.sleep(0.1)
                        
                elif not self._platform.isdeviceup(bridge.devicename):
                    raise RuntimeError('Bridge %s marked persistent is not up. '
                                       'Quitting.' % bridge.devicename)

        # write hosts file
        if not dryrun:
            if writehosts:
                self._writehosts(containers)

        # create container files
        for container in containers:
            lxc_directory = container.lxc_directory

            self._makedirs(lxc_directory)

            # make the config
            with open(os.path.join(lxc_directory, 'config'), 'w') as configf:
                configf.write(str(container))

            # make init script
            filename,initscripttext = container.initscript

            if initscripttext:
                scriptfile = os.path.join(lxc_directory, filename)

                with open(scriptfile, 'w') as sf:
                    sf.write(initscripttext)

                    os.chmod(scriptfile, 
                             stat.S_IRWXU | stat.S_IRGRP | \
                             stat.S_IXGRP | stat.S_IROTH | \
                             stat.S_IXOTH)

        if dryrun:
            print('dryrun')
        else:
            self._startnodes(containers)


    def stop(self, plandoc):
        hostname = self._platform.hostname()

        noderoot = plandoc.lxc_root_directory(hostname)

        for container in plandoc.containers(hostname):
            command = 'lxc-stop -n %s -k &> /dev/null' % container.lxc_name
            print(command)
            os.system(command)

        for _,bridge in plandoc.bridges(hostname).items():
            if not bridge.persistent:
                print('Bringing down bridge: %s' % bridge.devicename)
                self._platform.bridgedown(bridge.devicename)

        os.remove(plandoc.planfile())


    def _makedirs(self, noderoot):
        os.makedirs(noderoot)

        vardir = os.path.join(noderoot, 'var')
        os.makedirs(vardir)
        os.makedirs(os.path.join(vardir, 'run'))
        os.makedirs(os.path.join(vardir, 'log'))
        os.makedirs(os.path.join(vardir, 'lib'))

        mntdir = os.path.join(noderoot, 'mnt')
        os.makedirs(mntdir)


    def _startnodes(self, containers):
        for container in containers:
            command = 'lxc-execute -f %s/config  ' \
                      '-n %s '                     \
                      '-o %s/log '                 \
                      '-- %s/init.sh '             \
                      '2> /dev/null &' %             \
                      (container.lxc_directory,
                       container.lxc_name,
                       container.lxc_directory,
                       container.lxc_directory)

            pid,sp = etce.utils.daemonize_command(command)

            if pid == 0:
                # child
                sp.wait()
                sys.exit(0)

            time.sleep(0.1)
            

    def _waitstart(self, nodecount, lxcroot):
        numstarted = 0
        for i in range(10):
            command = 'lxc-ls -1 --active'
            numstarted = len(self._platform.runcommand(command))
            print('Waiting for lxc containers: %d of %d are running.' % \
                  (numstarted, nodecount))

            if numstarted == nodecount:
                break

            time.sleep(1)

        print('Continuing with %d of %d running lxc containers.' % \
              (numstarted, nodecount))

        
    def _writehosts(self, containers):
        opentag = '#### Start auto-generated ETCE control mappings\n'
        closetag = '#### Stop auto-generated ETCE control mappings\n'
        etcehostlines = []
        searchstate = 0
        for line in open('/etc/hosts', 'r'):
            if searchstate == 0:
                if line.startswith(opentag):
                    searchstate = 1
                else:
                    etcehostlines.append(line)   
            elif searchstate == 1:
                if line.startswith(closetag):
                    searchstate = 2
            else:
                etcehostlines.append(line)

        # strip off trailing white spaces
        etcehostlines.reverse()
        for i,line in enumerate(etcehostlines):
            if len(line.strip()) > 0:
                etcehostlines = etcehostlines[i:]
                break
        etcehostlines.reverse()

        with open('/etc/hosts', 'w') as ofd:
            for line in etcehostlines:
                ofd.write(line)

            ofd.write('\n')
            ofd.write(opentag)
            
            # ipv4
            ipv4_entries = []
            for container in containers:
                for hostentry,hostaddr in container.hosts_entries_ipv4:
                    ipv4_entries.append((hostentry,hostaddr))
            for hostentry,hostaddr in sorted(ipv4_entries):
                ofd.write('%s %s\n' % (hostaddr,hostentry))

            #ipv6 = []
            ipv6_entries = []
            for container in containers:
                for hostentry,hostaddr in container.hosts_entries_ipv6:
                    ipv6_entries.append((hostaddr,hostentry))
            for hostentry,hostaddr in sorted(ipv6_entries):
                ofd.write('%s %s\n' % (hostaddr,hostentry))

            ofd.write(closetag)
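_writehosts() above rewrites only the tagged block of /etc/hosts, preserving everything outside the open/close markers. A standalone sketch of that rewrite applied to an in-memory list of lines rather than /etc/hosts itself (the entries are illustrative; the tags match the ones used above):

OPENTAG = '#### Start auto-generated ETCE control mappings\n'
CLOSETAG = '#### Stop auto-generated ETCE control mappings\n'

def replace_managed_block(lines, entries):
    # drop any existing tagged block, keep everything else
    kept, in_block = [], False
    for line in lines:
        if line.startswith(OPENTAG):
            in_block = True
        elif in_block and line.startswith(CLOSETAG):
            in_block = False
        elif not in_block:
            kept.append(line)

    # strip trailing blank lines before appending the regenerated block
    while kept and not kept[-1].strip():
        kept.pop()

    block = ['\n', OPENTAG]
    block += ['%s %s\n' % (addr, name) for name, addr in sorted(entries)]
    block.append(CLOSETAG)
    return kept + block

# replace_managed_block(open('/etc/hosts').readlines(),
#                       [('node-001', '10.99.0.1')]) returns the new file content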
Code example #11
File: sshclient.py  Project: fenwick0neil/python-etce
    def sourceisdestination(self, host, srcfilename, dstfilename):
        if srcfilename == dstfilename:
            p = Platform()
            if p.hostname_has_local_address(host):
                return True
        return False
Code example #12
def startfield(args):
    this_hostname = Platform().hostname()

    config = ConfigDictionary()

    workdir = config.get('etce', 'WORK_DIRECTORY')

    workdir = os.getenv('WORKDIR', workdir)

    if not os.path.exists(workdir):
        raise DOCKERError('ETCE WORK_DIRECTORY "%s" not found. ' \
                       'Please create it before starting.' % workdir)

    if args.dockerplanfile:
        dockerplanfile = args.dockerplanfile
    else:
        dockerplanfile = os.path.join(workdir, 'dockerplan.xml')

    plandoc = DOCKERPlanFileDoc(dockerplanfile)

    # lockfile
    lockfilename = \
        os.path.join(workdir, 'etce.docker.lock')

    if os.path.isfile(lockfilename):
        err = 'Detected an active docker field with root at: %s. ' \
              'Run "etce-docker stop" first.' % \
              plandoc.docker_root_directory(this_hostname)
        raise DOCKERError(err)

    cidr = os.getenv('CIDR', '10.99.0.0/16')
    containers = []
    for hostname, _ in plandoc.hostnames():
        for container in plandoc.containers(hostname):
            for bridgename, interfaceparams in container.interfaces.items():
                if IPAddress(interfaceparams['ipv4']) in IPNetwork(cidr):
                    containers.append(
                        (container.docker_name, interfaceparams['ipv4']))
                    break

    ipexist = []
    for _, ip in containers:
        ipexist.append(ip)
    my_ip = ''
    for ip in IPNetwork(cidr)[1:]:
        if not str(ip) in ipexist:
            my_ip = str(ip)
            break

    my_ip = my_ip + '/' + cidr.split('/')[1]

    # write to /etc/hosts in container/machine controller all external ip
    writehosts(plandoc, containers)

    hostfile = \
        os.path.join(workdir, 'hosts')

    if not args.dryrun:
        shutil.copy(dockerplanfile, lockfilename)
        shutil.copy('/etc/hosts', hostfile)

    startdockers(plandoc, args.writehosts, args.forcedockerroot, args.dryrun)

    other_hosts = []

    for hostname, ip in plandoc.hostnames():
        if hostname not in (this_hostname, 'localhost'):
            other_hosts.append(hostname)

    # start containers on other hosts, if any
    if other_hosts:
        client = None
        try:
            client = ClientBuilder().build(\
                        other_hosts,
                        user=args.user,
                        port=args.port,
                        password=args.password)

            # push the file and execute
            client.put(dockerplanfile, '.', other_hosts, doclobber=True)

            # push the file
            client.put('/etc/hosts', '.', other_hosts, doclobber=True)

            # on the destination node the netplan file gets pushed to the
            # ETCE WORK_DIRECTORY
            command = 'dockermanager startdockers %s writehosts=%s forcedockerroot=%s' \
                      % (os.path.basename(dockerplanfile),
                         args.writehosts,
                         args.forcedockerroot)

            ret = client.execute(command, other_hosts)

            for k in ret:
                print('[%s] return: %s' % (k, ret[k].retval['result']))

        finally:
            if client:
                client.close()

                # A valid ETCE Test Directory.
                TESTDIRECTORY = os.path.join(workdir, 'pub-tdmact')

                # The output directory to place the built Test Directory.
                TESTROOT = os.path.join(
                    workdir, TESTDIRECTORY + '_' + etce.utils.timestamp())

                os.system('etce-test publish %s %s --verbose' %
                          (TESTDIRECTORY, TESTROOT))

                # A user tag to prepend to the name of each test result directory.
                TESTPREFIX = 'tdmact'
                # Run scenario order steps
                #if not args.collect:
                os.system(
                    'etce-test run --user root --policy autoadd -v --kill before --nocollect %s %s %s'
                    % (TESTPREFIX, HOSTFILE, TESTROOT))
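The free-address scan in startfield() above walks the CIDR range and takes the first address not already assigned to a container. The same idea expressed with the standard-library ipaddress module instead of netaddr (the CIDR and in-use list are illustrative):

import ipaddress

def first_free_address(cidr, in_use):
    network = ipaddress.ip_network(cidr)
    used = set(in_use)
    for host in network.hosts():
        if str(host) not in used:
            # same 'address/prefixlen' form the caller builds for my_ip
            return '%s/%d' % (host, network.prefixlen)
    return None

# first_free_address('10.99.0.0/16', ['10.99.0.1']) -> '10.99.0.2/16'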
Code example #13
    def __init__(self):
        # check root
        #if not os.geteuid() == 0:
        #    raise RuntimeError('You need to be root to perform this command.')
        self._platform = Platform()
Code example #14
class DOCKERManagerImpl(object):
    def __init__(self):
        # check root
        #if not os.geteuid() == 0:
        #    raise RuntimeError('You need to be root to perform this command.')
        self._platform = Platform()


    def start(self, plandoc, writehosts, forcedockerroot=False, dryrun=False):
        hostname = socket.gethostname().split('.')[0].lower()
        dockerrootdir = plandoc.docker_root_directory(hostname)
        containers = plandoc.containers(hostname)

        if not containers:
            print('No containers assigned to "%s". Skipping.' % hostname)
            return
        
        if not dockerrootdir[0] == '/':
            print('root_directory "%s" for hostname "%s" is not an absolute path. '
                  'Quitting.' % (dockerrootdir, hostname))
            return

        directory_level = len(dockerrootdir.split('/')) - 1
        if not directory_level >= 3:
            print('root_directory "%s" for hostname "%s" is less than 3 levels deep. '
                  'Quitting.' % (dockerrootdir, hostname))
            return

        allowed_roots = ('tmp', 'opt', 'home', 'var', 'mnt')
        if not dockerrootdir.split('/')[1] in allowed_roots:
            print('root_directory "%s" for hostname "%s" is not located in one of {%s} '
                  'directory trees. Quitting.' %
                  (dockerrootdir, hostname, ', '.join(allowed_roots)))
            return

        if dockerrootdir is None or len(containers) == 0:
            print('No containers assigned to host %s. Quitting.' % hostname)
            return

        # delete and remake the node root
        if os.path.exists(dockerrootdir):
            if forcedockerroot:
                print('Force removal of "%s" docker root directory.'
                      % dockerrootdir)
                shutil.rmtree(dockerrootdir)
            else:
                raise DOCKERError('%s docker root directory already exists, Quitting.' % dockerrootdir)

        os.makedirs(dockerrootdir)

        # set kernelparameters
        kernelparameters = plandoc.kernelparameters(hostname)
        if len(kernelparameters) > 0:
            print('Setting kernel parameters:')

            for kernelparamname,kernelparamval in kernelparameters.items():
                os.system('sysctl %s=%s' % (kernelparamname,kernelparamval))

        #vxlan tunnel
        if not dryrun:
            for _,vxlantunnel in plandoc.vxlantunnels(hostname).items():
                if not self._platform.isdeviceup('vxlan1'):
                    self._platform.runcommand('ip link add %s ' \
                                              'type vxlan id %s ' \
                                              'group 239.1.1.1 ' \
                                              'dev %s' % \
                                              (vxlantunnel.name,
                                               vxlantunnel.id,
                                               vxlantunnel.device))
                    self._platform.networkinterfaceup(vxlantunnel.name)

        # bring up bridge
        if not dryrun:
            for _,bridge in plandoc.bridges(hostname).items():
                if not bridge.persistent:
                    print('Bringing up bridge: %s' % bridge.devicename)

                    self._platform.dockerbridgeup(bridge.devicename,
                                                  bridge.subnet,
                                                  bridge.iprange,
                                                  bridge.gateway,
                                                  bridge.mtu,
                                                  bridge.addifs,
                                                  enablemulticastsnooping=True)
                    '''
                    if not bridge.ipv4 is None:
                        self._platform.adddeviceaddress(bridge.devicename,
                                                        bridge.ipv4)

                    if not bridge.ipv6 is None:
                        self._platform.adddeviceaddress(bridge.devicename,
                                                        bridge.ipv6)
                    '''

                    time.sleep(0.1)
                        
                elif not self._platform.isdeviceup(bridge.devicename):
                    raise RuntimeError('Bridge %s marked persistent is not up. '
                                       'Quitting.' % bridge.devicename)

        # write hosts file
        if not dryrun:
            if writehosts:
                self._writehosts(containers)

        # create container files
        for container in containers:
            docker_directory = container.docker_directory

            self._makedirs(docker_directory)

            # make the config
            with open(os.path.join(docker_directory, 'config'), 'w') as configf:
                configf.write(str(container))

            # make init script
            filename,initscripttext = container.initscript

            if initscripttext:
                scriptfile = os.path.join(docker_directory, filename)

                with open(scriptfile, 'w') as sf:
                    sf.write(initscripttext)

                    os.chmod(scriptfile, 
                             stat.S_IRWXU | stat.S_IRGRP | \
                             stat.S_IXGRP | stat.S_IROTH | \
                             stat.S_IXOTH)

        if dryrun:
            print('dryrun')
        else:
            self._startnodes(containers)


    def stop(self, plandoc):
        hostname = self._platform.hostname()

        noderoot = plandoc.docker_root_directory(hostname)

        for _, vxlantunnel in plandoc.vxlantunnels(hostname).items():
            if vxlantunnel.name in self._platform.getnetworkdevicenames():
                self._platform.networkinterfacedown(vxlantunnel.name)
                self._platform.networkinterfaceremove(vxlantunnel.name)

        for _,bridge in plandoc.bridges(hostname).items():

            if not bridge.persistent:
                print('Bringing down bridge: %s' % bridge.devicename)
                self._platform.dockerbridgedown(bridge.devicename)

        for container in plandoc.containers(hostname):
            command = 'docker rm -f %s &> /dev/null' % container.docker_name
            print(command)
            os.system(command)

        #os.remove(plandoc.planfile())


    def _makedirs(self, noderoot):
        os.makedirs(noderoot)


    def _startnodes(self, containers):
        for container in containers:
            image = ''
            params = ''
            for name,value in container.params:
                if name == 'image':
                    image = value
                else:
                    if '=' in name:
                        params += name + value + ' '
                    #else:
                    #    params += name + ' ' + value + ' '
            if image == '':
                raise RuntimeError('Image not defined. Quitting.')
            command = 'docker run ' \
                      '--detach ' \
                      '--tty ' \
                      '--name=%s ' \
                      '--hostname=%s ' \
                      '--cap-add=ALL ' \
                      '--privileged=true ' \
                      '--network=none ' \
                      '--volume %s ' \
                      '%s ' \
                      '%s > /dev/null' % \
                      (container.docker_name,
                       container.docker_name,
                       container.docker_directory,
                       params,
                       image)
            os.system(command)
            print(command)

            command = 'docker network disconnect -f none %s > /dev/null' % container.docker_name
            os.system(command)

            i = 0
            for bridgename, interfaceparams in container.interfaces.items():

                command = 'docker network connect --ip %s %s %s > /dev/null' % \
                           (interfaceparams['ipv4'], bridgename, container.docker_name)
                os.system(command)
                time.sleep(1)
                command = 'docker exec -t %s bash -c "ethtool -K eth%d tx off" > /dev/null' % \
                          (container.docker_name, i)
                os.system(command)
                i += 1

            

    def _waitstart(self, nodecount, dockerroot):
        numstarted = 0
        for i in range(10):
            command = 'docker ps --format "{{.Names}}" '
            numstarted = len(self._platform.runcommand(command))
            print('Waiting for docker containers: %d of %d are running.' %
                  (numstarted, nodecount))

            if numstarted == nodecount:
                break

            time.sleep(1)

        print('Continuing with %d of %d running docker containers.' %
              (numstarted, nodecount))

        
    def _writehosts(self, containers):
        opentag = '#### Start auto-generated ETCE control mappings\n'
        closetag = '#### Stop auto-generated ETCE control mappings\n'
        etcehostlines = []
        searchstate = 0
        for line in open('/etc/hosts', 'r'):
            if searchstate == 0:
                if line.startswith(opentag):
                    searchstate = 1
                else:
                    etcehostlines.append(line)   
            elif searchstate == 1:
                if line.startswith(closetag):
                    searchstate = 2
            else:
                etcehostlines.append(line)

        # strip off trailing white spaces
        etcehostlines.reverse()
        for i,line in enumerate(etcehostlines):
            if len(line.strip()) > 0:
                etcehostlines = etcehostlines[i:]
                break
        etcehostlines.reverse()

        with open('/etc/hosts', 'w') as ofd:
            for line in etcehostlines:
                ofd.write(line)

            ofd.write('\n')
            ofd.write(opentag)
            
            # ipv4
            ipv4_entries = []
            for container in containers:
                for hostentry,hostaddr in container.hosts_entries_ipv4:
                    ipv4_entries.append((hostentry,hostaddr))
            for hostentry,hostaddr in sorted(ipv4_entries):
                ofd.write('%s %s\n' % (hostaddr,hostentry))

            #ipv6 = []
            ipv6_entries = []
            for container in containers:
                for hostentry,hostaddr in container.hosts_entries_ipv6:
                    ipv6_entries.append((hostaddr,hostentry))
            for hostentry,hostaddr in sorted(ipv6_entries):
                ofd.write('%s %s\n' % (hostaddr,hostentry))

            ofd.write(closetag)
Code example #15
class WrapperContextImpl(ArgRegistrar):
    ''' WrapperContextImpl implements the WrapperContext interface.'''
    def __init__(self, wrappername, wrapperinstance, trialargs, testargs,
                 config, testdir):
        self._trialargs = trialargs
        self._testargs = testargs
        self._config = config
        self._testdir = testdir
        self._platform = Platform()
        self._wrappername = wrappername
        self._sudo = False
        self._default_pidfilename = '%s/etce.%s.%s.pid' \
                                    % (os.path.join(self._config.get('etce', 'WORK_DIRECTORY'), 'lock'),
                                       self.platform.hostname(),
                                       self._wrappername)

        self._description = wrapperinstance.__doc__

        # start with reserved args set here ...
        self._args = {
            'default_pidfilename': self._default_pidfilename,
            'nodename': self._testdir.nodename(),
            'nodeid': self._testdir.nodeid(),
            'testname': self._testdir.name(),
            'wrappername': self._wrappername,
            'infile': None,
            'outfile': None
        }

        # ... and the ones passed in
        self._args.update(trialargs)

        # these are the reserved args that cannot be overwritten
        self._reserved_args = set(self._args)

        # fill in the arguments registered by the wrapper
        wrapperinstance.register(self)

        storefile = os.path.join(self._trialargs['logdirectory'], 'etce.store')

        self._wrapperstore = WrapperStore(storefile)

        self._wrapperstore.update(
            {'etce': {
                'starttime': self._trialargs['starttime']
            }}, self._args['nodename'])

    def register_argument(self, argname, defaultval, description):
        if argname in self._reserved_args:
            raise ValueError('Wrapper "%s" attempting to register a ' \
                             'reserved argument "%s". Quitting.' % \
                             (self._args['wrappername'],
                              argname))

        if self._testdir.hasconfig(self._wrappername, argname):
            self._args[argname] = \
                self._testdir.getconfig(self._wrappername, argname, defaultval)
        elif argname in self._testargs:
            self._args[argname] = self._testargs[argname]
        else:
            self._args[argname] = defaultval

    def register_infile_name(self, name):
        self._args['infile'] = self._testdir.getfile(name)

    def register_outfile_name(self, name):
        self._args['outfile'] = os.path.join(self._trialargs['logdirectory'],
                                             name)

    def run_with_sudo(self):
        # ignore run with sudo requests when configured to do so
        if self._config.get('etce', 'IGNORE_RUN_WITH_SUDO').lower() == 'yes':
            return
        self._sudo = True

    def store(self, namevaldict):
        self._wrapperstore.update({self._args['wrappername']: namevaldict},
                                  self._args['nodename'])

    @property
    def platform(self):
        return self._platform

    @property
    def args(self):
        return ArgProxy(self._args)

    def daemonize(self,
                  command,
                  argstr,
                  stdout=None,
                  stderr=None,
                  pidfilename=None,
                  genpidfile=True,
                  pidincrement=0,
                  starttime=None,
                  extra_paths=[]):

        commandstr = self._build_commandstr(command, argstr, extra_paths)

        # print the commandstr and return on a dryrun
        if self._trialargs['dryrun']:
            print(commandstr)
            sys.stdout.flush()
            return

        # 1. call self.stop(pidfilename)
        self.stop(pidfilename)

        # run the command
        pid, subprocess = etce.utils.daemonize_command(commandstr, stdout,
                                                       stderr, starttime)

        # return on parent
        if pid > 0:
            return

        # 2. if genpidfile is True, and pidfilename is None,
        #    generate the pidfilename
        if genpidfile and pidfilename is None:
            pidfilename = self._default_pidfilename

        # 3. write the pid to pidfilename
        if genpidfile:
            with open(pidfilename, 'w') as pidfile:
                pidfile.write(str(subprocess.pid + pidincrement))

        # 4. wait on subprocess
        subprocess.wait()

        # 5. exit, do not return, because returning
        #    will cause any subsequent wrappers in this
        #    step to be rerun
        sys.exit(0)

    def run(self,
            command,
            argstr,
            stdout=None,
            stderr=None,
            pidfilename=None,
            genpidfile=True,
            pidincrement=0,
            extra_paths=[]):

        commandstr = self._build_commandstr(command, argstr, extra_paths)

        # print the commandstr and return on a dryrun
        if self._trialargs['dryrun']:
            print(commandstr)
            sys.stdout.flush()
            return

        self.stop(pidfilename)

        print(commandstr)
        sys.stdout.flush()

        stdoutfd = None
        stderrfd = None
        if not stdout is None:
            stdoutfd = open(stdout, 'w')

        if not stderr is None:
            if stdout == stderr:
                stderrfd = stdoutfd
            else:
                stderrfd = open(stderr, 'w')

        #    generate the pidfilename
        if genpidfile and pidfilename is None:
            pidfilename = self._default_pidfilename

        # create the Popen subprocess
        sp = subprocess.Popen(shlex.split(commandstr),
                              stdout=stdoutfd,
                              stderr=stderrfd)

        # write the pid to pidfilename
        if genpidfile:
            with open(pidfilename, 'w') as pidfile:
                pidfile.write(str(sp.pid + pidincrement))

        # wait on subprocess
        sp.wait()

    def stop(self, pidfilename=None, signal=signal.SIGQUIT, sudo=True):
        # use default pidfilename if None specified
        if pidfilename is None:
            pidfilename = self._default_pidfilename

        # if found a pid, kill the process and remove the file
        self._platform.kill(pidfilename, signal, sudo)

    def _build_commandstr(self, command, argstr, extra_paths):
        all_paths = os.environ['PATH'].split(':') + list(extra_paths)

        existing_paths = filter(os.path.isdir, all_paths)

        found_paths = []
        for existing_path in existing_paths:
            if command in os.listdir(existing_path):
                found_paths.append(existing_path)

        if not found_paths:
            raise WrapperError('Cannot find command "%s" in system paths {%s}. Quitting.' \
                               % (command, ','.join(all_paths)))

        commandstr = ' '.join([os.path.join(found_paths[0], command), argstr])

        # run with sudo if wrapper requested it
        if self._sudo:
            commandstr = 'sudo ' + commandstr

        return commandstr
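_build_commandstr() resolves the bare command name against PATH plus any extra_paths before prepending sudo. For reference, a close standard-library equivalent of that lookup is shutil.which, which additionally requires the execute bit (the sample calls are illustrative):

import os
import shutil

def find_command(command, extra_paths=()):
    # search the regular PATH followed by any caller-supplied directories
    search = os.pathsep.join([os.environ.get('PATH', '')] + list(extra_paths))
    return shutil.which(command, path=search)

# find_command('ls')              -> '/bin/ls' (path varies by system)
# find_command('no-such-command') -> None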
Code example #16
def stopfield(args):
    workdir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    workdir = os.getenv('WORKDIR', workdir)

    lockfilename = os.path.join(workdir, 'etce.docker.lock')

    if not os.path.exists(lockfilename) or not os.path.isfile(lockfilename):
        raise DOCKERError('Lockfile "%s" not found. Quitting.' % lockfilename)

    if args.dockerplanfile:
        dockerplanfile = args.dockerplanfile
    else:
        dockerplanfile = os.path.join(workdir, 'dockerplan.xml')

    plandoc = DOCKERPlanFileDoc(dockerplanfile)

    this_hostname = Platform().hostname()

    other_hosts = []

    for hostname, ip in plandoc.hostnames():
        if hostname not in (this_hostname, 'localhost'):
            other_hosts.append(hostname)

    # stop containers on other hosts, if any
    try:
        if other_hosts:
            if args.collect:
                client_nodes = None
                try:
                    print('Collecting results.')

                    time = 'collect_on_%s' % etce.timeutils.getstrtimenow(
                    ).split('.')[0]

                    localtestresultsdir = os.path.join(workdir, 'data', time)

                    field = Field(os.path.join(workdir, 'HOSTFILE'))

                    # root nodes host the filesystem for all of the virtual nodes attached
                    filesystemnodes = list(field.roots())

                    testdir = 'data'

                    client_nodes = ClientBuilder().build(
                        filesystemnodes,
                        user=args.user,
                        port=args.port,
                        password=args.password,
                        policy=args.policy)
                    try:

                        client_nodes.collect(testdir, localtestresultsdir,
                                             filesystemnodes)
                    except:
                        pass

                finally:
                    if client_nodes:
                        client_nodes.close()

            client = None
            try:
                client = ClientBuilder().build(other_hosts,
                                               user=args.user,
                                               port=args.port,
                                               password=args.password)

                # push the file and execute
                client.put(lockfilename, '.', other_hosts, doclobber=True)
                # on the destination node the netplan file gets pushed to the
                # ETCE WORK_DIRECTORY
                command = 'dockermanager stopdockers %s' % os.path.basename(
                    dockerplanfile)

                ret = client.execute(command, other_hosts)

                for k in ret:
                    print('[%s] return: %s' % (k, ret[k].retval['result']))
            finally:
                if client:
                    client.close()

    finally:
        #       os.system('ip link del vxlan1')
        stopdockers(plandoc)
        os.system('rm -f %s' % lockfilename)
Code example #17
class WrapperContextImpl(ArgRegistrar):
    ''' WrapperContextImpl implements the WrapperContext interface.'''
    def __init__(self, wrappername, wrapperinstance, trialargs, testargs,
                 config, testdir):
        self._trialargs = trialargs
        self._testargs = testargs
        self._config = config
        self._testdir = testdir
        self._platform = Platform()
        self._wrappername = wrappername
        self._default_pidfilename = '%s/etce.%s.%s.pid' \
                                    % (os.path.join(self._config.get('etce', 'WORK_DIRECTORY'), 'lock'),
                                       self.platform.hostname(),
                                       self._wrappername)

        self._description = wrapperinstance.__doc__

        # start with reserved args set here ...
        self._args = {
            'default_pidfilename': self._default_pidfilename,
            'nodename': self._testdir.nodename(),
            'nodeid': self._testdir.nodeid(),
            'testname': self._testdir.name(),
            'wrappername': self._wrappername,
            'infile': None,
            'outfile': None
        }

        # ... and the ones passed in
        self._args.update(trialargs)

        # these are the reserved args that cannot be overwritten
        self._reserved_args = set(self._args)

        # fill in the arguments registered by the wrapper
        wrapperinstance.register(self)

        storefile = os.path.join(self._trialargs['logdirectory'], 'etce.store')

        self._wrapperstore = WrapperStore(storefile)

        self._wrapperstore.update(
            {'etce': {
                'starttime': self._trialargs['starttime']
            }}, self._args['nodename'])

    def register_argument(self, argname, defaultval, description):
        if argname in self._reserved_args:
            raise ValueError('Wrapper "%s" attempting to register a ' \
                             'reserved argument "%s". Quitting.' % \
                             (self._args['wrappername'],
                              argname))

        if self._testdir.hasconfig(self._wrappername, argname):
            self._args[argname] = \
                self._testdir.getconfig(self._wrappername, argname, defaultval)
        elif argname in self._testargs:
            self._args[argname] = self._testargs[argname]
        else:
            self._args[argname] = defaultval

    def register_infile_name(self, name):
        self._args['infile'] = self._testdir.getfile(name)

    def register_outfile_name(self, name):
        self._args['outfile'] = os.path.join(self._trialargs['logdirectory'],
                                             name)

    def store(self, namevaldict):
        self._wrapperstore.update({self._args['wrappername']: namevaldict},
                                  self._args['nodename'])

    @property
    def platform(self):
        return self._platform

    @property
    def args(self):
        return ArgProxy(self._args)

    def daemonize(self,
                  commandstr,
                  stdout=None,
                  stderr=None,
                  pidfilename=None,
                  genpidfile=True,
                  pidincrement=0,
                  starttime=None):

        # print the commandstr and return on a dryrun
        if self._trialargs['dryrun']:
            print(commandstr)
            return

        # 1. call self.stop(pidfilename)
        self.stop(pidfilename)

        # run the command
        pid,subprocess = \
                         etce.utils.daemonize_command(commandstr,
                                                      stdout,
                                                      stderr,
                                                      starttime)

        # return on parent
        if pid > 0:
            return

        # 2. if genpidfile is True, and pidfilename is None,
        #    generate the pidfilename
        if genpidfile and pidfilename is None:
            pidfilename = self._default_pidfilename

        # 3. write the pid to pidfilename
        if genpidfile:
            with open(pidfilename, 'w') as pidfile:
                pidfile.write(str(subprocess.pid + pidincrement))

        # 4. wait on subprocess
        subprocess.wait()

        # 5. exit, do not return, because returning
        #    will cause any subsequent wrappers in this
        #    step to be rerun
        sys.exit(0)

    def run(self,
            commandstr,
            stdout=None,
            stderr=None,
            pidfilename=None,
            genpidfile=True,
            pidincrement=0):

        # print the commandstr and return on a dryrun
        if self._trialargs['dryrun']:
            print(commandstr)
            return

        self.stop(pidfilename)

        print(commandstr)

        command = shlex.split(commandstr)

        stdoutfd = None
        stderrfd = None
        if not stdout is None:
            stdoutfd = open(stdout, 'w')

        if not stderr is None:
            if stdout == stderr:
                stderrfd = stdoutfd
            else:
                stderrfd = open(stderr, 'w')

        #    generate the pidfilename
        if genpidfile and pidfilename is None:
            pidfilename = self._default_pidfilename

        # create the Popen subprocess
        sp = subprocess.Popen(command, stdout=stdoutfd, stderr=stderrfd)

        # write the pid to pidfilename
        if genpidfile:
            with open(pidfilename, 'w') as pidfile:
                pidfile.write(str(sp.pid + pidincrement))

        # wait on subprocess
        sp.wait()

    def stop(self, pidfilename=None, signal=signal.SIGQUIT, decorator=''):
        # use default pidfilename if None specified
        if pidfilename is None:
            pidfilename = self._default_pidfilename

        pid = self._platform.readpid(pidfilename)

        # if found a pid, kill the process and remove the file
        if pid:
            try:
                print('killing pid %d found in %s' % (pid, pidfilename))
                # the decorator argument (e.g. 'sudo') prefixes the kill command
                command = '%s kill -%d %d' % (decorator, signal, pid)
                sp = subprocess.Popen(shlex.split(command))
                sp.wait()
                #os.kill(pid, signal.SIGQUIT)
            except OSError as e:
                # orphaned pidfile - process already dead
                pass
            finally:
                os.remove(pidfilename)