def _read_attributes(self, templatedirelem):
        """Derive the template subdirectory name and hostname format.

        Returns a (template_subdir, hostname_format) tuple: the
        subdirectory is the template name joined with the configured
        suffix, and the hostname format falls back to the configured
        ETCE default when the element does not specify one.
        """
        config = ConfigDictionary()

        subdir = '%s.%s' % (self._name,
                            config.get('etce', 'TEMPLATE_DIRECTORY_SUFFIX'))

        fmt = templatedirelem.attrib.get(
            'hostname_format',
            config.get('etce', 'DEFAULT_ETCE_HOSTNAME_FORMAT'))

        return (subdir, fmt)
    def _read_attributes(self, templatedirelem):
        """Compute the template subdirectory and hostname format.

        For template directory foo.tpl with TEMPLATE_HOSTNUMBER_DIGITS
        value N, the default hostname format is foo-${'%0Nd' % etce_index};
        an explicit hostname_format attribute overrides it.
        """
        config = ConfigDictionary()

        template_subdir = \
            self._name + '.' + config.get('etce', 'TEMPLATE_DIRECTORY_SUFFIX')

        digits = str(config.get('etce', 'TEMPLATE_HOSTNUMBER_DIGITS'))

        default_hostname_format = \
            templatedirelem.attrib.get('name') + \
            "-${'%0" + digits + "d' % etce_index}"

        hostname_format = templatedirelem.attrib.get('hostname_format',
                                                     default_hostname_format)

        return (template_subdir, hostname_format)
# Example #3
    def kill(self, signal=signal.SIGQUIT, sudo=True):
        """Kill processes recorded in this host's ETCE lock pidfiles.

        Args:
            signal: signal number to deliver (default SIGQUIT).
            sudo: when True, deliver the signal via sudo.

        Pidfiles live in <WORK_DIRECTORY>/lock and are named
        'etce.<hostname>.<...>'; only files matching this host are
        acted on.
        """
        p = Platform()

        my_pidfile_toks = ('etce', p.hostname())

        lockfiledir = os.path.join(
            ConfigDictionary().get('etce', 'WORK_DIRECTORY'), 'lock')

        # nothing to do if no lock directory has been created yet
        if not os.path.isdir(lockfiledir):
            return

        pidfiles = os.listdir(lockfiledir)

        for pidfile in pidfiles:
            toks = pidfile.split('.')

            if len(toks) < 2:
                continue

            if my_pidfile_toks == (toks[0], toks[1]):
                fullpidfile = os.path.join(lockfiledir, pidfile)

                pid = p.kill(fullpidfile, signal, sudo)

                if pid:
                    # print() form is valid under both python 2 and 3;
                    # the original py2-only print statement is not
                    print('killed process "%d" from pidfile "%s"'
                          % (pid, fullpidfile))
# Example #4
    def __init__(self, test_directory, sub_path):
        """Record a test-directory member and classify its root entry.

        Splits sub_path into its first component (the root entry) and
        the remaining tail, resolves both against test_directory, and
        notes whether the root entry is a directory and whether it is a
        template directory (its name ends with the configured suffix).
        """
        self._test_directory = test_directory

        self._sub_path = sub_path

        self._full_name = os.path.join(test_directory, sub_path)

        toks = sub_path.split(os.path.sep)

        root, tail = toks[0], toks[1:]

        self._root_sub_entry = root

        self._root_sub_entry_absolute = os.path.join(test_directory, root)

        self._tail = os.path.sep.join(tail) if tail else ''

        self._root_sub_entry_is_dir = \
            os.path.isdir(self._root_sub_entry_absolute)

        suffix = ConfigDictionary().get('etce', 'TEMPLATE_DIRECTORY_SUFFIX')

        self._template_directory_member = \
            root.endswith('.' + suffix) and \
            os.path.isdir(os.path.join(test_directory, root))
    def __init__(self,
                 templatedirelem,
                 indices,
                 testfile_global_overlays,
                 templates_global_overlaylists):
        """Capture a template directory element's configuration.

        Args:
            templatedirelem: parsed XML element describing the template
                directory.
            indices: node indices this template expands over.
            testfile_global_overlays: overlays defined at test-file scope.
            templates_global_overlaylists: overlay lists shared by all
                templates.
        """
        self._global_overlays = testfile_global_overlays

        self._templates_global_overlaylists = templates_global_overlaylists

        template_suffix = ConfigDictionary().get('etce', 'TEMPLATE_DIRECTORY_SUFFIX')

        self._name = templatedirelem.attrib['name']

        # the on-disk directory is named '<name>.<suffix>'
        self._template_directory_name = '.'.join([self._name, template_suffix])

        self._indices = indices

        self._relative_path, \
        self._hostname_format = self._read_attributes(templatedirelem)

        # build local overlay chain
        self._template_local_overlays = {}

        for overlayelem in templatedirelem.findall('./overlay'):
            oname = overlayelem.attrib['name']

            oval = overlayelem.attrib['value']

            # optional type attribute drives string-to-value conversion
            otype = overlayelem.attrib.get('type', None)

            self._template_local_overlays[oname] = configstrtoval(oval, argtype=otype)

        self._template_local_overlaylists = \
            OverlayListChainFactory().make(templatedirelem.findall('./overlaylist'),
                                           self._indices)
# Example #6
def startfield(args):
    """Start the LXC containers described by args.lxcplanfile.

    Starts containers on the local host, records the plan file as a
    lockfile, then pushes the plan to any other hosts named in it and
    starts their containers remotely.

    Raises:
        LXCError: if WORK_DIRECTORY is missing or a lockfile from a
            previous run is present.
    """
    this_hostname = Platform().hostname()

    plandoc = LXCPlanFileDoc(args.lxcplanfile)

    config = ConfigDictionary()

    workdir = config.get('etce', 'WORK_DIRECTORY')

    if not os.path.exists(workdir):
        raise LXCError('ETCE WORK_DIRECTORY "%s" not found. ' \
                       'Please create it before starting.' % workdir)

    # an existing lockfile means a previous field is still running
    lockfilename = \
        os.path.join(plandoc.lxc_root_directory(this_hostname),
                     'etce.lxc.lock')

    if os.path.isfile(lockfilename):
        err = 'Detected an active lxc field with root at: %s. ' \
              'Run "etce-lxc stop" first.' % \
              plandoc.lxc_root_directory(this_hostname)
        raise LXCError(err)

    startlxcs(plandoc, args.writehosts, args.forcelxcroot, args.dryrun)

    if not args.dryrun:
        shutil.copy(args.lxcplanfile, lockfilename)

    other_hosts = set(plandoc.hostnames()).difference(
        ['localhost', this_hostname])

    # start containers on other hosts, if any
    if other_hosts:
        client = None
        try:
            client = ClientBuilder().build(\
                        other_hosts,
                        user=args.user,
                        port=args.port)

            # push the file and execute
            client.put(args.lxcplanfile, '.', other_hosts, doclobber=True)

            # on the destination node the netplan file gets pushed to the
            # ETCE WORK_DIRECTORY
            command = 'lxcmanager startlxcs %s writehosts=%s forcelxcroot=%s' \
                      % (os.path.basename(args.lxcplanfile),
                         args.writehosts,
                         args.forcelxcroot)

            ret = client.execute(command, other_hosts)

            for k in ret:
                # print() form is valid under both python 2 and 3;
                # the original py2-only print statement is not
                print('[%s] return: %s' % (k, ret[k].retval['result']))

        finally:
            if client:
                client.close()
# Example #7
    def __init__(self, test_directory):
        """Load the test file found in test_directory.

        Parses TestDirectory.TESTFILENAME from the given directory and
        keeps a handle to the global ETCE configuration.
        """
        self._test_directory = test_directory

        self._testdoc = TestFileDoc(
            os.path.join(test_directory, TestDirectory.TESTFILENAME))

        self._config = ConfigDictionary()
# Example #8
def untarzip(tarname, dstpath, clobber, minclobberdepth, deletetar):
    """Extract a gzipped tarfile into dstpath under WORK_DIRECTORY.

    Args:
        tarname: path to the tarfile. When it is already a directory or
            not a tarfile, it is returned unchanged.
        dstpath: destination subpath relative to the ETCE WORK_DIRECTORY;
            leading '/' and '.' characters are stripped.
        clobber: when True, remove colliding existing entries first.
        minclobberdepth: refuse to clobber when the destination directory
            is fewer than this many path components deep (safety check).
        deletetar: when True, remove the tarfile after extraction.

    Returns:
        The extraction directory, or tarname when nothing was extracted.

    Raises:
        RuntimeError: on a collision without clobber, or when the
            destination is shallower than minclobberdepth.
    """
    # already a directory? just return path
    if os.path.isdir(tarname):
        return tarname
    # not a tarfile
    if not tarfile.is_tarfile(tarname):
        return tarname

    t = tarfile.open(tarname, 'r:gz')

    # close the tarfile on every exit path - the original leaked the
    # handle when a RuntimeError was raised before extraction
    try:
        # get first level names in the tarfile
        tarsubdirs = set([name.split(os.sep)[0] for name in t.getnames()])

        # calculate the absolute destination path, rooted at WORK_DIRECTORY
        etcedir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')
        while dstpath.find('/') == 0 or dstpath.find('.') == 0:
            dstpath = dstpath[1:]
        extractdir = os.path.join(etcedir, dstpath)

        # make the extractdir if it doesn't exist
        if not os.path.exists(extractdir):
            os.makedirs(extractdir)

        # do not extract anything if ...
        targetentries = set(os.listdir(extractdir))
        collisionentries = tarsubdirs.intersection(targetentries)
        if len(collisionentries) > 0:
            # ... there is a collision ...
            if not clobber:
                firstcollision = collisionentries.pop()
                error = 'Error: directory %s already exists. ' \
                        'Quitting.' % os.path.join(extractdir, firstcollision)
                raise RuntimeError(error)
            else:
                # ... or the target directory is less than minclobber depth
                depth = sum(
                    [1 for tok in extractdir.split('/') if len(tok.strip()) > 0])
                if depth < minclobberdepth:
                    error = 'Error: target directory %s is less than ' \
                            'minclobberdepth(%d). Quitting.' % (extractdir, minclobberdepth)
                    raise RuntimeError(error)

        # remove colliding entries (clobber confirmed above), then extract
        for entry in collisionentries:
            fullentry = os.path.join(extractdir, entry)
            if os.path.isdir(fullentry):
                shutil.rmtree(fullentry)
            else:
                os.remove(fullentry)
        t.extractall(extractdir)
    finally:
        t.close()

    if deletetar:
        os.remove(tarname)

    return extractdir
    def _read_attributes(self, templatefileelem):
        """Return (hostname_format, output_file_name) for a template file.

        hostname_format falls back to the configured ETCE default and
        output_file_name falls back to the element's own name attribute.
        """
        attrib = templatefileelem.attrib

        hostname_format = attrib.get(
            'hostname_format',
            ConfigDictionary().get('etce', 'DEFAULT_ETCE_HOSTNAME_FORMAT'))

        outputfilename = attrib.get('output_file_name', attrib['name'])

        return (hostname_format, outputfilename)
# Example #10
def prepfiles(srcsubdir):
    """Tar up the named WORK_DIRECTORY subdirectory.

    Finds srcsubdir under the ETCE WORK_DIRECTORY, creates a tarfile of
    it in its parent directory, and returns the tarfile's absolute path,
    or None if the subdirectory does not exist.
    """
    etcedir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')
    srcabsdir = os.path.join(etcedir, srcsubdir)
    parentdir = os.path.dirname(srcabsdir)
    child = os.path.basename(srcabsdir)
    cwd = os.getcwd()
    try:
        os.chdir(parentdir)
        if not os.path.exists(child):
            return None
        # named 'tarname', not 'tarfile' - the original shadowed the
        # stdlib tarfile module used elsewhere in this file
        tarname = tarzip([child])
        return os.path.join(parentdir, tarname)
    finally:
        # always restore the original working directory
        os.chdir(cwd)
# Example #11
    def determine_nodenames(self):
        """Return the node names defined by the test files and templates.

        Node names are gathered from:

        1. subdirectories of the base directory and test directory whose
           names do not end with .TEMPLATE_DIRECTORY_SUFFIX,
        2. the directory names that will be generated by template
           directories,

        with the doc subdirectory (reserved for extra test documentation)
        removed from the result.
        """
        template_suffix = ConfigDictionary().get('etce',
                                                 'TEMPLATE_DIRECTORY_SUFFIX')

        def non_template_subdirs(parent):
            # names of non-template subdirectories directly under parent
            found = set()
            for entry in os.listdir(parent):
                if not os.path.isdir(os.path.join(parent, entry)):
                    continue
                if entry.split('.')[-1] == template_suffix:
                    continue
                found.add(entry)
            return found

        hostnames = set()

        # a merged test directory has no separate base directory to search
        if not self._merged:
            hostnames |= non_template_subdirs(
                os.path.join(self.location(), self._basedir))

        hostnames |= non_template_subdirs(self.location())

        # add directory names generated by template directories
        hostnames |= set(self._testdoc.formatted_directory_names)

        # drop the reserved doc directory
        hostnames.discard(TestDirectory.DOCSUBDIRNAME)

        return list(hostnames)
# Example #12
    def kill(self):
        """Kill processes recorded in this host's ETCE lock pidfiles.

        Scans <WORK_DIRECTORY>/lock for pidfiles named
        'etce.<hostname>...' and kills the pid recorded in each.
        """
        p = Platform()

        my_pidfile_prefix = 'etce.%s' % p.hostname()

        lockfiledir = os.path.join(
            ConfigDictionary().get('etce', 'WORK_DIRECTORY'), 'lock')

        # nothing to do if no lock directory has been created yet
        if not os.path.isdir(lockfiledir):
            return

        for pidfile in os.listdir(lockfiledir):
            if pidfile.startswith(my_pidfile_prefix):
                fullpidfile = os.path.join(lockfiledir, pidfile)

                pid = p.kill(fullpidfile)

                if pid:
                    # print() form is valid under both python 2 and 3;
                    # the original py2-only print statement is not
                    print('killed process "%d" from pidfile "%s"'
                          % (pid, fullpidfile))
# Example #13
def stopfield(args):
    """Stop a running LXC field.

    Reads the lockfile written at field start, stops containers on any
    remote hosts named in the plan, then stops the local containers.

    Raises:
        LXCError: if no lockfile is found (no active field).
    """
    workdir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    lockfilename = os.path.join(workdir, 'lxcroot', 'etce.lxc.lock')

    if not os.path.exists(lockfilename) or not os.path.isfile(lockfilename):
        raise LXCError('Lockfile "%s" not found. Quitting.' % lockfilename)

    # the lockfile is a copy of the plan file used to start the field
    plandoc = LXCPlanFileDoc(lockfilename)

    other_hosts = set(plandoc.hostnames()).difference(
        ['localhost', Platform().hostname()])

    # stop containers on other hosts, if any
    try:
        if other_hosts:
            client = None
            try:
                client = ClientBuilder().build(other_hosts,
                                               user=args.user,
                                               port=args.port)

                # push the file and execute
                client.put(lockfilename, '.', other_hosts, doclobber=True)

                # on the destination node the netplan file gets pushed to the
                # ETCE WORK_DIRECTORY
                command = 'lxcmanager stoplxcs %s' % os.path.basename(
                    lockfilename)

                ret = client.execute(command, other_hosts)

                for k in ret:
                    print('[%s] return: %s' % (k, ret[k].retval['result']))

            finally:
                if client:
                    client.close()
    finally:
        # always stop the local containers, even if the remote stop failed
        stoplxcs(plandoc)
# Example #14
    def run(self, starttime, templatesubdir, trialsubdir):
        """Publish the test templates and verify the working directories.

        Instantiates the template files into the current_test directory,
        recording overlays to the trial directory, then checks that the
        lock and trial directories exist.
        """
        etcedir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

        lockdir = os.path.join(etcedir, 'lock')

        templatedir = os.path.join(etcedir, templatesubdir)

        testdefdir = os.path.join(etcedir, 'current_test')

        trialdir = os.path.join(etcedir, trialsubdir)

        # instantiate the template files and write overlays
        Publisher(templatedir).publish(
            publishdir=testdefdir,
            logdir=trialdir,
            runtime_overlays={'etce_install_path': testdefdir},
            overwrite_existing_publishdir=True)

        self._checkdir(lockdir)

        self._checkdir(trialdir)
# Example #15
 def __init__(self):
     """Cache a handle to the global ETCE configuration."""
     self._config = ConfigDictionary()
# Example #16
    def __init__(self, hosts, **kwargs):
        """Open an ssh connection to every host in hosts.

        Keyword args:
            user: login user; overrides any ~/.ssh/config entry.
            port: ssh port; overrides any ~/.ssh/config entry.
            envfile: optional environment file name, stored for later use.

        Per-host user, port and identity files default to the values in
        ~/.ssh/config (when present), then to the current user's home
        directory name and port 22.

        Raises:
            FieldConnectionError: when a connection cannot be
                established to a host.
        """
        etce.fieldclient.FieldClient.__init__(self, hosts)

        self._connection_dict = {}

        self._execute_threads = []

        user = kwargs.get('user', None)

        port = kwargs.get('port', None)

        key_filenames = None

        self._envfile = kwargs.get('envfile', None)

        self._config = ConfigDictionary()

        ssh_config_file = os.path.expanduser('~/.ssh/config')

        ssh_config = None

        if os.path.exists(ssh_config_file):
            ssh_config = paramiko.SSHConfig()
            # close the config file when done - the original leaked the
            # handle from a bare open() call
            with open(ssh_config_file) as config_fd:
                ssh_config.parse(config_fd)

        for host in hosts:
            host_config = None
            if ssh_config:
                host_config = ssh_config.lookup(host)

            # defaults: current user, standard ssh port, no identity files
            host_user = os.path.basename(os.path.expanduser('~'))
            host_port = 22
            host_key_filenames = []

            if user:
                host_user = user
            elif host_config:
                host_user = host_config.get('user', host_user)

            if port:
                host_port = port
            elif host_config:
                host_port = host_config.get('port', host_port)

            if key_filenames:
                host_key_filenames = key_filenames
            elif host_config:
                host_key_filenames = host_config.get('identityfile', host_key_filenames)

            try:
                client = paramiko.SSHClient()

                client.set_missing_host_key_policy(paramiko.client.RejectPolicy())

                client.load_system_host_keys()

                self._connection_dict[host] = client

                self._connection_dict[host].connect(hostname=host,
                                                    username=host_user,
                                                    port=int(host_port),
                                                    key_filename=host_key_filenames,
                                                    allow_agent=True)

            except socket.gaierror as ge:
                # name resolution failure
                message = '%s "%s". Quitting.' % (ge.strerror, host)
                raise FieldConnectionError(message)

            except paramiko.ssh_exception.NoValidConnectionsError as e:
                raise FieldConnectionError('Unable to connect to host "%s". Quitting.' % host)

            except Exception as e:
                message = 'Unable to connect to host "%s" (%s). Quitting.' % (host, str(e))
                raise FieldConnectionError(message)
def stopfield(args):
    """Stop a running docker field.

    Optionally collects test results from the filesystem nodes, stops
    containers on remote hosts named in the plan, then stops the local
    containers and removes the lockfile.

    Raises:
        DOCKERError: if no lockfile is found (no active field).
    """
    workdir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    # WORKDIR environment variable overrides the configured value
    workdir = os.getenv('WORKDIR', workdir)

    lockfilename = os.path.join(workdir, 'etce.docker.lock')

    if not os.path.exists(lockfilename) or not os.path.isfile(lockfilename):
        raise DOCKERError('Lockfile "%s" not found. Quitting.' % lockfilename)

    if args.dockerplanfile:
        dockerplanfile = args.dockerplanfile
    else:
        dockerplanfile = os.path.join(workdir, 'dockerplan.xml')

    plandoc = DOCKERPlanFileDoc(dockerplanfile)

    this_hostname = Platform().hostname()

    # remote hosts are everything except this host and 'localhost'.
    # the original compared against (this_hostname and 'localhost'),
    # which only ever excluded 'localhost'.
    other_hosts = []

    for hostname, ip in plandoc.hostnames():
        if hostname not in (this_hostname, 'localhost'):
            other_hosts.append(hostname)

    # stop containers on other hosts, if any
    try:
        if other_hosts:
            if args.collect:
                client_nodes = None
                try:
                    print('Collecting results.')

                    time = 'collect_on_%s' % etce.timeutils.getstrtimenow(
                    ).split('.')[0]

                    localtestresultsdir = os.path.join(workdir, 'data', time)

                    field = Field(os.path.join(workdir, 'HOSTFILE'))

                    # root nodes host the filesystem for all of the virtual nodes attached
                    filesystemnodes = list(field.roots())

                    testdir = 'data'

                    client_nodes = ClientBuilder().build(
                        filesystemnodes,
                        user=args.user,
                        port=args.port,
                        password=args.password,
                        policy=args.policy)
                    try:
                        client_nodes.collect(testdir, localtestresultsdir,
                                             filesystemnodes)
                    except Exception as e:
                        # collection is best effort - report and continue
                        # instead of silently swallowing the failure
                        print('Warning: result collection failed (%s).' % e)

                finally:
                    if client_nodes:
                        client_nodes.close()

            client = None
            try:
                client = ClientBuilder().build(other_hosts,
                                               user=args.user,
                                               port=args.port,
                                               password=args.password)

                # push the file and execute
                client.put(lockfilename, '.', other_hosts, doclobber=True)
                # on the destination node the netplan file gets pushed to the
                # ETCE WORK_DIRECTORY
                command = 'dockermanager stopdockers %s' % os.path.basename(
                    dockerplanfile)

                ret = client.execute(command, other_hosts)

                for k in ret:
                    print('[%s] return: %s' % (k, ret[k].retval['result']))
            finally:
                if client:
                    client.close()

    finally:
        #       os.system('ip link del vxlan1')
        stopdockers(plandoc)
        os.system('rm -f %s' % lockfilename)
def startfield(args):
    """Start the docker containers described by the docker plan file.

    Starts containers locally, records the plan file as a lockfile,
    then pushes the plan and /etc/hosts to other hosts named in the
    plan and starts their containers remotely.

    Raises:
        DOCKERError: if WORK_DIRECTORY is missing or a lockfile from a
            previous run is present.
    """
    this_hostname = Platform().hostname()

    config = ConfigDictionary()

    workdir = config.get('etce', 'WORK_DIRECTORY')

    # WORKDIR environment variable overrides the configured value
    workdir = os.getenv('WORKDIR', workdir)

    if not os.path.exists(workdir):
        raise DOCKERError('ETCE WORK_DIRECTORY "%s" not found. ' \
                       'Please create it before starting.' % workdir)

    if args.dockerplanfile:
        dockerplanfile = args.dockerplanfile
    else:
        dockerplanfile = os.path.join(workdir, 'dockerplan.xml')

    plandoc = DOCKERPlanFileDoc(dockerplanfile)

    # lockfile
    lockfilename = \
        os.path.join(workdir, 'etce.docker.lock')

    if os.path.isfile(lockfilename):
        err = 'Detected an active docker field with root at: %s. ' \
              'Run "etce-docker stop" first.' % \
              plandoc.docker_root_directory(this_hostname)
        raise DOCKERError(err)

    # collect (container name, ipv4) pairs for containers whose address
    # falls inside the control network CIDR
    cidr = os.getenv('CIDR', '10.99.0.0/16')
    containers = []
    for hostname, _ in plandoc.hostnames():
        for container in plandoc.containers(hostname):
            for bridgename, interfaceparams in container.interfaces.items():
                if IPAddress(interfaceparams['ipv4']) in IPNetwork(cidr):
                    containers.append(
                        (container.docker_name, interfaceparams['ipv4']))
                    break

        # NOTE(review): this reset sits inside the hostname loop (re-run
        # every iteration) but ipexist is only consumed after the loop -
        # the indentation looks unintentional; confirm.
        ipexist = []
    for _, ip in containers:
        ipexist.append(ip)
    # pick the first address in the CIDR not already used by a container
    my_ip = ''
    for ip in IPNetwork(cidr)[1:]:
        if not str(ip) in ipexist:
            my_ip = str(ip)
            break

    my_ip = my_ip + '/' + cidr.split('/')[1]

    # write to /etc/hosts in container/machine controller all external ip
    writehosts(plandoc, containers)

    hostfile = \
        os.path.join(workdir, 'hosts')

    if not args.dryrun:
        shutil.copy(dockerplanfile, lockfilename)
        shutil.copy('/etc/hosts', hostfile)

    startdockers(plandoc, args.writehosts, args.forcedockerroot, args.dryrun)

    other_hosts = []

    # NOTE(review): (this_hostname and 'localhost') evaluates to
    # 'localhost' whenever this_hostname is non-empty, so this condition
    # only excludes 'localhost', not this host - probably intended
    # "hostname not in (this_hostname, 'localhost')"; confirm.
    for hostname, ip in plandoc.hostnames():
        if hostname != (this_hostname and 'localhost'):
            other_hosts.append(hostname)

    # start containers on other hosts, if any
    if other_hosts:
        client = None
        try:
            client = ClientBuilder().build(\
                        other_hosts,
                        user=args.user,
                        port=args.port,
                        password=args.password)

            # push the file and execute
            client.put(dockerplanfile, '.', other_hosts, doclobber=True)

            # push the file
            client.put('/etc/hosts', '.', other_hosts, doclobber=True)

            # on the destination node the netplan file gets pushed to the
            # ETCE WORK_DIRECTORY
            command = 'dockermanager startdockers %s writehosts=%s forcedockerroot=%s' \
                      % (os.path.basename(dockerplanfile),
                         args.writehosts,
                         args.forcedockerroot)

            ret = client.execute(command, other_hosts)

            for k in ret:
                print '[%s] return: %s' % (k, ret[k].retval['result'])

        finally:
            if client:
                client.close()

                # NOTE(review): everything below runs inside finally, and
                # only when a client exists; it references HOSTFILE, which
                # is not defined in this function. It appears to be a
                # fragment merged in from another script - confirm before
                # relying on it.
                # A valid ETCE Test Directory.
                TESTDIRECTORY = os.path.join(workdir, 'pub-tdmact')

                # The output directory to place the built Test Directory.
                TESTROOT = os.path.join(
                    workdir, TESTDIRECTORY + '_' + etce.utils.timestamp())

                os.system('etce-test publish %s %s --verbose' %
                          (TESTDIRECTORY, TESTROOT))

                # A user tag to prepend to the name of each test result directory.
                TESTPREFIX = 'tdmact'
                # Run scenario order steps
                #if not args.collect:
                os.system(
                    'etce-test run --user root --policy autoadd -v --kill before --nocollect %s %s %s'
                    % (TESTPREFIX, HOSTFILE, TESTROOT))
    def _parseplan(self, lxcplanfile):
        """Parse an lxc plan file into per-host container descriptions.

        Reads container templates, then for each host its kernel
        parameters, bridges and containers, expanding each container
        element over its lxc_indices with per-index overlays.

        Args:
            lxcplanfile: path to the lxc plan XML file.

        Returns:
            Tuple (hostnames, kernelparameters, bridges, containers,
            rootdirectories); the last four are dicts keyed by hostname.

        Raises:
            ValueError: when a containertemplate names an undeclared
                parent, or 'localhost' is used alongside other hosts.
        """
        lxcplanelem = self.parse(lxcplanfile)

        kernelparameters = {}

        containertemplates = {}

        rootdirectories = {}

        lxcplanelems = \
            lxcplanelem.findall('./containertemplates/containertemplate')

        # templates must be declared before any template that names them
        # as parent - they are resolved in document order
        for containertemplateelem in lxcplanelems:
            containertemplate_name = containertemplateelem.attrib['name']

            containertemplate_parent_name = \
                containertemplateelem.attrib.get('parent', None)

            containertemplate_parent = None

            if containertemplate_parent_name:
                if not containertemplate_parent_name in containertemplates:
                    errmsg = 'parent "%s" of containertemplate "%s" not ' \
                             'previously listed. Quitting.' % \
                             (containertemplate_parent_name,
                              containertemplate_name)
                    raise ValueError(errmsg)

                containertemplate_parent = \
                    containertemplates[containertemplate_parent_name]

            containertemplates[containertemplate_name] = \
                ContainerTemplate(containertemplateelem,
                                  containertemplate_parent)

        hostelems = lxcplanelem.findall('./hosts/host')

        bridges = {}

        containers = {}

        hostnames = []

        for hostelem in hostelems:
            hostname = hostelem.attrib.get('hostname')

            hostnames.append(hostname)

            # 'localhost' is permitted as a catchall hostname to mean the
            # local machine only when one host is specified in the file
            if hostname == 'localhost':
                if len(hostelems) > 1:
                    error = '"localhost" hostname only permitted when one ' \
                            'host is specified. Quitting'
                    raise ValueError(error)

            # kernel params
            kernelparameters[hostname] = {}

            for paramelem in hostelem.findall('./kernelparameters/parameter'):
                kernelparameters[hostname][paramelem.attrib['name']] = \
                    paramelem.attrib['value']

            # bridges (explicit)
            bridges[hostname] = {}

            for bridgeelem in hostelem.findall('./bridges/bridge'):
                bridge = Bridge(bridgeelem)

                bridges[hostname][bridge.name] = bridge

            containers[hostname] = []

            params = []

            containerselem = hostelem.findall('./containers')[0]

            # all containers on a host live under <WORK_DIRECTORY>/lxcroot
            root_directory = \
                os.path.join(ConfigDictionary().get('etce', 'WORK_DIRECTORY'), 'lxcroot')

            rootdirectories[hostname] = root_directory

            # ensure no repeated lxc_indices
            alllxcids = set([])

            for containerelem in hostelem.findall('./containers/container'):
                containerlxcids = etce.utils.nodestr_to_nodelist(
                    str(containerelem.attrib['lxc_indices']))

                repeatedids = alllxcids.intersection(containerlxcids)

                assert len(repeatedids) == 0, \
                    'Found repeated lxcid(s): {%s}. Quitting.' % \
                    ','.join([ str(nid) for nid in list(repeatedids) ])

                alllxcids.update(containerlxcids)

            # Create containers from container elems
            for containerelem in hostelem.findall('./containers/container'):
                templatename = containerelem.attrib.get('template', None)

                template = containertemplates.get(templatename, None)

                lxcids = etce.utils.nodestr_to_nodelist(
                    str(containerelem.attrib['lxc_indices']))

                # fetch the overlays, use etce file values as default
                overlays = ConfigDictionary().asdict()['overlays']

                for overlayelem in containerelem.findall('./overlays/overlay'):
                    oname = overlayelem.attrib['name']

                    ovalue = overlayelem.attrib['value']

                    overlays[oname] = etce.utils.configstrtoval(ovalue)

                # fetch the overlaylists
                overlaylists = {}

                for overlaylistelem in containerelem.findall(
                        './overlays/overlaylist'):
                    oname = overlaylistelem.attrib['name']

                    separator = overlaylistelem.attrib.get('separator', ',')

                    ovalues = overlaylistelem.attrib['values'].split(separator)

                    overlaylists[oname] = ovalues

                # treat all values for each name as an int if possible,
                # else all strings
                for oname, ovals in overlaylists.items():
                    converted_vals = []
                    try:
                        converted_vals = [
                            etce.utils.configstrtoval(oval) for oval in ovals
                        ]

                        overlaylists[oname] = converted_vals
                    except ValueError:
                        # leave as strings
                        pass

                # Why must a default value be supplied here when
                # schema declares this attribute with a default value?
                for i, lxcid in enumerate(lxcids):
                    # start with overlays
                    lxcoverlays = copy.copy(overlays)

                    # then add list items for this node
                    for oname, ovals in overlaylists.items():
                        lxcoverlays[oname] = ovals[i]

                    # then lxcindex, lxc_name and lxc_directory (cannot be overwritten)
                    lxcoverlays.update({'lxc_index': lxcid})

                    lxcoverlays.update({
                        'lxc_name':
                        format_string(containerelem.attrib['lxc_name'],
                                      lxcoverlays)
                    })

                    lxcoverlays.update({
                        'lxc_directory':
                        os.path.join(root_directory, lxcoverlays['lxc_name'])
                    })

                    containers[hostname].append(
                        Container(containerelem, lxcoverlays, params, template,
                                  bridges[hostname], hostname))

            # Roll over containers to get names of implicit bridges added
            # from the container interface bridge names and augment
            # the bridges list
            for container in containers[hostname]:
                for iname, iparams in container.interfaces.items():
                    if not iname in bridges[hostname]:
                        bridges[hostname][iname] = BridgeImplicit(iname)

        return hostnames, kernelparameters, bridges, containers, rootdirectories
# Example #20
 def __init__(self):
     """Load the current test directory, its steps file and the config."""
     self._test = TestDirectory(os.getcwd(), None)
     self._stepsfiledoc = StepsFileDoc(self._test.stepsfile())
     self._config = ConfigDictionary()
# Example #21
    def __init__(self, hosts, **kwargs):
        """Open an authenticated SSH connection to every host in hosts.

        Positional args:
            hosts: iterable of hostnames to connect to.

        Keyword args (all optional, read from kwargs):
            user:    login name; overrides any ~/.ssh/config "User" entry.
            port:    ssh port; overrides any ~/.ssh/config "Port" entry
                     (default 22).
            policy:  unknown-host-key policy - 'reject' (default),
                     'warning' or 'autoadd'.
            sshkey:  private key file; absolute path, or a filename
                     relative to ~/.ssh.
            envfile: stored for later use by the connection.

        Raises FieldConnectionError when a key file is missing, no key
        can be found for a host, or the connection attempt fails.
        """
        etce.fieldclient.FieldClient.__init__(self, hosts)

        self._connection_dict = {}

        self._execute_threads = []

        # ssh authentication is revised (5/7/2019):
        #
        # As tested against paramiko 1.16
        #
        # User must specify the ssh key file to use for authentication. They
        # can specify the key file explicitly with the sshkey parameter -
        # if the filename is not absolute, it is assumed to be a file located
        # in ~/.ssh. If sshkey is None, try to determine the key file from
        # ~/.ssh/config. If that also fails, check for the default ssh rsa
        # key ~/.ssh/id_rsa and attempt to use that.
        #
        # paramiko also provides a paramiko.agent.Agent class for
        # querying a running ssh-agent for its loaded keys. The agent
        # can be used:
        #
        #   1. by calling connect with allow_agent = True (the default)
        #   2. by calling Agent().get_keys() and passing to connect as pkey
        #
        # In the first case, the connect call selects the first key found
        # in the running agent and prompts for a passphrase - without indicating
        # the key it is prompting for. In the second case, the only identifying
        # information that can be obtained from an agent returned key object is
        # its md5 fingerprint - which is correct but not convenient for
        # helping the user select and identify the agent key to use. For these
        # reasons, ignore the agent for authentication and make the user identify
        # the key file(s) to use - preferably via their .ssh/config file.

        user = kwargs.get('user', None)

        port = kwargs.get('port', None)

        policystr = kwargs.get('policy', 'reject')

        sshkey = kwargs.get('sshkey', None)

        user_specified_key_file = None

        if sshkey:
            if sshkey[0] == '/':
                user_specified_key_file = sshkey
            else:
                # relative names are resolved against ~/.ssh
                user_specified_key_file = os.path.expanduser(
                    os.path.join('~/.ssh', sshkey))

            if not os.path.exists(user_specified_key_file):
                raise FieldConnectionError(
                    'sshkey "%s" doesn\'t exist. Quitting.' % \
                    user_specified_key_file)

        self._envfile = kwargs.get('envfile', None)

        self._config = ConfigDictionary()

        ssh_config_file = os.path.expanduser('~/.ssh/config')

        ssh_config = None

        if os.path.exists(ssh_config_file):
            ssh_config = paramiko.SSHConfig()
            # use a context manager so the config file handle is closed
            # instead of leaked
            with open(ssh_config_file) as config_fd:
                ssh_config.parse(config_fd)

        # cache of key file -> loaded RSAKey so each passphrase is only
        # prompted for once across all hosts
        authenticated_keys = {}

        policy = RejectPolicy

        if policystr == 'warning':
            policy = WarningPolicy
        elif policystr == 'autoadd':
            policy = AutoAddPolicy

        policy = self._set_unknown_hosts_policy(hosts, port, ssh_config,
                                                policy)

        for host in hosts:
            host_config = None

            if ssh_config:
                host_config = ssh_config.lookup(host)

            # precedence for user/port: explicit kwarg, then ~/.ssh/config,
            # then the local login name / port 22
            host_user = os.path.basename(os.path.expanduser('~'))

            if user:
                host_user = user
            elif host_config:
                host_user = host_config.get('user', host_user)

            host_port = 22

            if port:
                host_port = port
            elif host_config:
                host_port = host_config.get('port', host_port)

            host_key_filenames = []

            if user_specified_key_file:
                host_key_filenames = [user_specified_key_file]
            elif host_config:
                host_key_filenames = host_config.get('identityfile',
                                                     host_key_filenames)

            if not host_key_filenames:
                # last resort: the default rsa key
                default_rsa_keyfile = os.path.join(os.path.expanduser('~'),
                                                   '.ssh', 'id_rsa')
                if os.path.exists(default_rsa_keyfile) and os.path.isfile(
                        default_rsa_keyfile):
                    host_key_filenames = [default_rsa_keyfile]
                else:
                    message = 'Unable to find an RSA SSH key associated with host "%s". '\
                              'Either:\n\n' \
                              ' 1) specify a key using the "sshkey" option\n' \
                              ' 2) add a "Host" rule to your ~/.ssh/config file identifying the key\n' \
                              ' 3) create a default RSA key ~/.ssh/id_rsa.\n\n' \
                              'Quitting.' % host
                    raise FieldConnectionError(message)

            try:
                pkey = None

                # NOTE(review): the unconditional break below means only the
                # first candidate key file is ever tried - confirm whether
                # fall-through to later identityfile entries is intended.
                for host_key_file in host_key_filenames:
                    if host_key_file in authenticated_keys:
                        pkey = authenticated_keys[host_key_file]
                    else:
                        pkey = None
                        try:
                            # Assume key is not passphrase protected first
                            pkey = RSAKey.from_private_key_file(
                                host_key_file, None)
                        except PasswordRequiredException:
                            # if that fails, prompt for passphrase
                            pkey = RSAKey.from_private_key_file(
                                host_key_file,
                                getpass.getpass('Enter passphrase for %s: ' %
                                                host_key_file))

                    authenticated_keys[host_key_file] = pkey

                    break

                if not pkey:
                    # no trailing comma here: it previously turned message
                    # into a one-element tuple
                    message = 'Unable to connect to host "%s", cannot authenticate. ' \
                              'Quitting.' % host
                    raise FieldConnectionError(message)

                client = paramiko.SSHClient()

                client.load_system_host_keys()

                client.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))

                client.set_missing_host_key_policy(policy())

                # agent deliberately disabled - see the authentication
                # comment above
                client.connect(hostname=host,
                               username=host_user,
                               port=int(host_port),
                               pkey=pkey,
                               allow_agent=False)

                self._connection_dict[host] = client

            except socket.gaierror as ge:
                message = '%s "%s". Quitting.' % (ge.strerror, host)
                raise FieldConnectionError(message)

            except paramiko.ssh_exception.NoValidConnectionsError as e:
                raise FieldConnectionError('Unable to connect to host "%s", ' \
                                           'NoValidConnectionsError. Quitting.' % host)

            except Exception as e:
                raise FieldConnectionError(e)