def startfield(args):
    this_hostname = Platform().hostname()

    plandoc = LXCPlanFileDoc(args.lxcplanfile)

    config = ConfigDictionary()

    workdir = config.get('etce', 'WORK_DIRECTORY')

    if not os.path.exists(workdir):
        raise LXCError('ETCE WORK_DIRECTORY "%s" not found. '
                       'Please create it before starting.' % workdir)

    # lockfile
    lockfilename = \
        os.path.join(plandoc.lxc_root_directory(this_hostname),
                     'etce.lxc.lock')

    if os.path.isfile(lockfilename):
        err = 'Detected an active lxc field with root at: %s. ' \
              'Run "etce-lxc stop" first.' % \
              plandoc.lxc_root_directory(this_hostname)
        raise LXCError(err)

    startlxcs(plandoc, args.writehosts, args.forcelxcroot, args.dryrun)

    if not args.dryrun:
        shutil.copy(args.lxcplanfile, lockfilename)

    other_hosts = set(plandoc.hostnames()).difference(
        ['localhost', this_hostname])

    # start containers on other hosts, if any
    if other_hosts:
        client = None

        try:
            client = ClientBuilder().build(other_hosts,
                                           user=args.user,
                                           port=args.port)

            # push the file and execute
            client.put(args.lxcplanfile, '.', other_hosts, doclobber=True)

            # on the destination node the netplan file gets pushed to the
            # ETCE WORK_DIRECTORY
            command = 'lxcmanager startlxcs %s writehosts=%s forcelxcroot=%s' \
                      % (os.path.basename(args.lxcplanfile),
                         args.writehosts,
                         args.forcelxcroot)

            ret = client.execute(command, other_hosts)

            for k in ret:
                print('[%s] return: %s' % (k, ret[k].retval['result']))
        finally:
            if client:
                client.close()
def _read_attributes(self, templatedirelem):
    template_subdir = '.'.join([self._name,
                                ConfigDictionary().get('etce', 'TEMPLATE_DIRECTORY_SUFFIX')])

    default_hostname_format = ConfigDictionary().get('etce', 'DEFAULT_ETCE_HOSTNAME_FORMAT')

    hostname_format = templatedirelem.attrib.get('hostname_format',
                                                 default_hostname_format)

    return (template_subdir, hostname_format)
def __init__(self, test_directory):
    self._test_directory = test_directory

    test_filename_abs = os.path.join(self._test_directory,
                                     TestDirectory.TESTFILENAME)

    self._testdoc = TestFileDoc(test_filename_abs)

    self._config = ConfigDictionary()
def _read_attributes(self, templatedirelem):
    template_subdir = '.'.join([self._name,
                                ConfigDictionary().get('etce', 'TEMPLATE_DIRECTORY_SUFFIX')])

    # for template directory foo.tpl and TEMPLATE_HOSTNUMBER_DIGITS value N,
    # the default hostname format is
    #     foo-${'%0Nd' % etce_index}
    default_hostname_format = templatedirelem.attrib.get('name') + \
        "-${'%0" + \
        str(ConfigDictionary().get('etce', 'TEMPLATE_HOSTNUMBER_DIGITS')) + \
        "d' % etce_index}"

    hostname_format = templatedirelem.attrib.get('hostname_format',
                                                 default_hostname_format)

    return (template_subdir, hostname_format)
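# A minimal sketch of how the default format above expands. The digit width
# and index are hypothetical values; the real substitution is performed by
# the project's template engine at publish time.
digits = 3                                  # assumed TEMPLATE_HOSTNUMBER_DIGITS
fmt = "node-${'%0" + str(digits) + "d' % etce_index}"
# fmt == "node-${'%03d' % etce_index}"; for etce_index = 7 the engine
# renders the embedded expression to:
hostname = 'node-' + format(7, '0%dd' % digits)
assert hostname == 'node-007'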
def __init__(self, test_directory, sub_path):
    self._test_directory = test_directory
    self._sub_path = sub_path
    self._full_name = os.path.join(self._test_directory, self._sub_path)

    sub_path_toks = sub_path.split(os.path.sep)

    self._root_sub_entry = sub_path_toks[0]
    self._root_sub_entry_absolute = os.path.join(test_directory,
                                                 self._root_sub_entry)

    self._tail = ''
    if len(sub_path_toks) > 1:
        self._tail = os.path.sep.join(sub_path_toks[1:])

    self._root_sub_entry_is_dir = os.path.isdir(self._root_sub_entry_absolute)

    suffix = ConfigDictionary().get('etce', 'TEMPLATE_DIRECTORY_SUFFIX')

    self._template_directory_member = \
        self._root_sub_entry.endswith('.' + suffix) and \
        os.path.isdir(os.path.join(test_directory, self._root_sub_entry))
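# A quick stdlib-only illustration of the split performed above
# (the sub_path value is hypothetical):
import os
sub_path = os.path.join('node-001.tpl', 'conf', 'app.xml')
toks = sub_path.split(os.path.sep)
root_sub_entry = toks[0]                   # 'node-001.tpl'
tail = os.path.sep.join(toks[1:])          # 'conf/app.xml'
assert (root_sub_entry, tail) == ('node-001.tpl', os.path.join('conf', 'app.xml'))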
def __init__(self, templatedirelem, indices, testfile_global_overlays,
             templates_global_overlaylists):
    self._global_overlays = testfile_global_overlays

    self._templates_global_overlaylists = templates_global_overlaylists

    template_suffix = ConfigDictionary().get('etce', 'TEMPLATE_DIRECTORY_SUFFIX')

    self._name = templatedirelem.attrib['name']

    self._template_directory_name = '.'.join([self._name, template_suffix])

    self._indices = indices

    self._relative_path, \
    self._hostname_format = self._read_attributes(templatedirelem)

    # build local overlay chain
    self._template_local_overlays = {}

    for overlayelem in templatedirelem.findall('./overlay'):
        oname = overlayelem.attrib['name']
        oval = overlayelem.attrib['value']
        otype = overlayelem.attrib.get('type', None)

        self._template_local_overlays[oname] = configstrtoval(oval, argtype=otype)

    self._template_local_overlaylists = \
        OverlayListChainFactory().make(templatedirelem.findall('./overlaylist'),
                                       self._indices)
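# configstrtoval is a project helper; the stand-in below is my minimal
# sketch of the behavior relied on here and in the overlaylist handling
# elsewhere in this section (assumption: ints where possible, otherwise
# the raw string, unless a type is forced).
def configstrtoval_sketch(val, argtype=None):
    if argtype == 'str':
        return val
    try:
        return int(val)
    except ValueError:
        return val

assert configstrtoval_sketch('42') == 42
assert configstrtoval_sketch('eth0') == 'eth0'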
def kill(self, signal=signal.SIGQUIT, sudo=True):
    p = Platform()

    my_pidfile_toks = ('etce', p.hostname())

    lockfiledir = os.path.join(ConfigDictionary().get('etce', 'WORK_DIRECTORY'),
                               'lock')

    if not os.path.isdir(lockfiledir):
        return

    pidfiles = os.listdir(lockfiledir)

    for pidfile in pidfiles:
        toks = pidfile.split('.')

        if len(toks) < 2:
            continue

        if my_pidfile_toks == (toks[0], toks[1]):
            fullpidfile = os.path.join(lockfiledir, pidfile)

            pid = p.kill(fullpidfile, signal, sudo)

            if pid:
                print('killed process "%d" from pidfile "%s"'
                      % (pid, fullpidfile))
def untarzip(tarname, dstpath, clobber, minclobberdepth, deletetar):
    # already a directory? just return path
    if os.path.isdir(tarname):
        return tarname

    # not a tarfile
    if not tarfile.is_tarfile(tarname):
        return tarname

    # get first level names in the tarfile
    t = tarfile.open(tarname, 'r:gz')

    tarsubdirs = set([name.split(os.sep)[0] for name in t.getnames()])

    # calculate the absolute destination path, rooted at WORK_DIRECTORY
    etcedir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    while dstpath.find('/') == 0 or dstpath.find('.') == 0:
        dstpath = dstpath[1:]

    extractdir = os.path.join(etcedir, dstpath)

    # make the extractdir if it doesn't exist
    if not os.path.exists(extractdir):
        os.makedirs(extractdir)

    # do not extract anything if ...
    targetentries = set(os.listdir(extractdir))

    collisionentries = tarsubdirs.intersection(targetentries)

    if len(collisionentries) > 0:
        # ... there is a collision ...
        if not clobber:
            firstcollision = collisionentries.pop()
            error = 'Error: directory %s already exists. ' \
                    'Quitting.' % os.path.join(extractdir, firstcollision)
            raise RuntimeError(error)
        else:
            # ... or the target directory is less than minclobberdepth
            depth = sum([1 for tok in extractdir.split('/')
                         if len(tok.strip()) > 0])

            if depth < minclobberdepth:
                error = 'Error: target directory %s is less than ' \
                        'minclobberdepth(%d). Quitting.' % (extractdir,
                                                            minclobberdepth)
                raise RuntimeError(error)

    try:
        for entry in collisionentries:
            fullentry = os.path.join(extractdir, entry)

            if os.path.isdir(fullentry):
                shutil.rmtree(fullentry)
            else:
                os.remove(fullentry)

        t.extractall(extractdir)
    finally:
        t.close()

    if deletetar:
        os.remove(tarname)

    return extractdir
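# The minclobberdepth guard above counts non-empty path components; a small
# stdlib-only illustration (the path is a hypothetical example):
extractdir = '/opt/etce/work/data'
depth = sum(1 for tok in extractdir.split('/') if tok.strip())
assert depth == 4   # '/opt' alone would have depth 1, so a minclobberdepth
                    # of 2 refuses to clobber anything that shallow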
class Executer(object):
    def __init__(self):
        self._test = TestDirectory(os.getcwd(), None)
        self._stepsfiledoc = StepsFileDoc(self._test.stepsfile())
        self._config = ConfigDictionary()

    def step(self, stepname, starttime, logsubdirectory):
        wrappers = self._stepsfiledoc.getwrappers(stepname)

        hostname = Platform().hostname()

        hostdir = os.path.join(self._test.location(), hostname)

        if not os.path.exists(hostdir):
            return

        logdirectory = os.path.join(self._config.get('etce', 'WORK_DIRECTORY'),
                                    logsubdirectory,
                                    hostname)

        if not os.path.exists(logdirectory):
            os.makedirs(logdirectory)

        if wrappers:
            trialargs = {'logdirectory': logdirectory,
                         'starttime': starttime,
                         'stepname': stepname,
                         'dryrun': False}

            wldr = WrapperLoader()

            for wrapperentry, methodname, testargs in wrappers:
                wrapperinstance = \
                    wldr.loadwrapper(wrapperentry.name,
                                     self._stepsfiledoc.getpackageprefixes())

                # ensure each wrapper is called with the testdirectory as
                # the current working directory, and with its own
                # instance of the wrapper context
                os.chdir(hostdir)

                ctx = WrapperContext(
                    WrapperContextImpl(wrapperentry.name,
                                       wrapperinstance,
                                       trialargs,
                                       testargs,
                                       self._config,
                                       self._test))

                if methodname == 'run':
                    # run calls prerun, run, postrun to encourage
                    # pre/post condition checks
                    wrapperinstance.prerun(ctx)
                    wrapperinstance.run(ctx)
                    wrapperinstance.postrun(ctx)
                else:
                    wrapperinstance.stop(ctx)
def _read_attributes(self, templatefileelem):
    default_hostname_format = ConfigDictionary().get('etce', 'DEFAULT_ETCE_HOSTNAME_FORMAT')

    hostname_format = \
        templatefileelem.attrib.get('hostname_format', default_hostname_format)

    outputfilename = \
        templatefileelem.attrib.get('output_file_name',
                                    templatefileelem.attrib['name'])

    return (hostname_format, outputfilename)
def prepfiles(srcsubdir):
    # find the named subdir, tar it up and return its absolute path,
    # or None if the path doesn't exist
    etcedir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    srcabsdir = os.path.join(etcedir, srcsubdir)

    parentdir = os.path.dirname(srcabsdir)

    child = os.path.basename(srcabsdir)

    cwd = os.getcwd()

    try:
        os.chdir(parentdir)

        if not os.path.exists(child):
            return None

        tarfile = tarzip([child])

        return os.path.join(parentdir, tarfile)
    finally:
        os.chdir(cwd)
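# tarzip is a project helper; this is my minimal stand-in under the
# assumption that it writes <name>.tgz as a gzipped tar in the current
# directory and returns the archive's file name (the naming convention
# is an assumption, not confirmed by the source).
import tarfile as _tarfile

def tarzip_sketch(entries):
    tarname = entries[0] + '.tgz'          # assumed naming convention
    with _tarfile.open(tarname, 'w:gz') as tf:
        for entry in entries:
            tf.add(entry)
    return tarname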
def determine_nodenames(self):
    # Determine the nodenames defined by the test files and templates:
    #
    # 1. read the base directory and test directory and take any
    #    subdirectory that does not end with .TEMPLATE_DIRECTORY_SUFFIX to
    #    be a nodename
    #
    # 2. add all of the directory names that will be generated
    #    by template directories
    #
    # 3. remove the doc subdirectory (the doc subdirectory is ignored -
    #    it is a place for additional test documentation)
    #
    template_suffix = ConfigDictionary().get('etce', 'TEMPLATE_DIRECTORY_SUFFIX')

    hostnames = set([])

    # if this is already a merged test directory, ignore base directory
    # search
    if not self._merged:
        for entry in os.listdir(os.path.join(self.location(), self._basedir)):
            abs_entry = os.path.join(self.location(), self._basedir, entry)

            if os.path.isdir(abs_entry):
                if entry.split('.')[-1] == template_suffix:
                    continue
                hostnames.update([entry])

    for entry in os.listdir(self.location()):
        abs_entry = os.path.join(self.location(), entry)

        if os.path.isdir(abs_entry):
            if entry.split('.')[-1] == template_suffix:
                continue
            hostnames.update([entry])

    formatted_dirnames = self._testdoc.formatted_directory_names

    hostnames.update(formatted_dirnames)

    # and remove the doc directory
    hostnames.difference_update([TestDirectory.DOCSUBDIRNAME])

    return list(hostnames)
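# The suffix test above keeps plain directories and skips template
# directories; a stdlib-only illustration with hypothetical entries:
template_suffix = 'tpl'
entries = ['node-001', 'node.tpl', 'doc', 'host2']
nodenames = [e for e in entries if e.split('.')[-1] != template_suffix]
assert nodenames == ['node-001', 'doc', 'host2']   # 'doc' is removed later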
def kill(self):
    p = Platform()

    my_pidfile_prefix = 'etce.%s' % p.hostname()

    lockfiledir = os.path.join(ConfigDictionary().get('etce', 'WORK_DIRECTORY'),
                               'lock')

    if not os.path.isdir(lockfiledir):
        return

    for pidfile in os.listdir(lockfiledir):
        if pidfile.startswith(my_pidfile_prefix):
            fullpidfile = os.path.join(lockfiledir, pidfile)

            pid = p.kill(fullpidfile)

            if pid:
                print('killed process "%d" from pidfile "%s"'
                      % (pid, fullpidfile))
def stopfield(args):
    workdir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    lockfilename = os.path.join(workdir, 'lxcroot', 'etce.lxc.lock')

    if not os.path.exists(lockfilename) or not os.path.isfile(lockfilename):
        raise LXCError('Lockfile "%s" not found. Quitting.' % lockfilename)

    plandoc = LXCPlanFileDoc(lockfilename)

    other_hosts = set(plandoc.hostnames()).difference(
        ['localhost', Platform().hostname()])

    # stop containers on other hosts, if any
    try:
        if other_hosts:
            client = None

            try:
                client = ClientBuilder().build(other_hosts,
                                               user=args.user,
                                               port=args.port)

                # push the file and execute
                client.put(lockfilename, '.', other_hosts, doclobber=True)

                # on the destination node the netplan file gets pushed to the
                # ETCE WORK_DIRECTORY
                command = 'lxcmanager stoplxcs %s' % os.path.basename(lockfilename)

                ret = client.execute(command, other_hosts)

                for k in ret:
                    print('[%s] return: %s' % (k, ret[k].retval['result']))
            finally:
                if client:
                    client.close()
    finally:
        stoplxcs(plandoc)
def run(self, starttime, templatesubdir, trialsubdir):
    etcedir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    lockdir = os.path.join(etcedir, 'lock')

    templatedir = os.path.join(etcedir, templatesubdir)

    testdefdir = os.path.join(etcedir, 'current_test')

    trialdir = os.path.join(etcedir, trialsubdir)

    # instantiate the template files and write overlays
    runtime_overlays = {'etce_install_path': testdefdir}

    publisher = Publisher(templatedir)

    publisher.publish(publishdir=testdefdir,
                      logdir=trialdir,
                      runtime_overlays=runtime_overlays,
                      overwrite_existing_publishdir=True)

    self._checkdir(lockdir)

    self._checkdir(trialdir)
def _parseplan(self, lxcplanfile):
    lxcplanelem = self.parse(lxcplanfile)

    kernelparameters = {}
    containertemplates = {}
    rootdirectories = {}

    lxcplanelems = \
        lxcplanelem.findall('./containertemplates/containertemplate')

    for containertemplateelem in lxcplanelems:
        containertemplate_name = containertemplateelem.attrib['name']

        containertemplate_parent_name = \
            containertemplateelem.attrib.get('parent', None)

        containertemplate_parent = None

        if containertemplate_parent_name:
            if not containertemplate_parent_name in containertemplates:
                errmsg = 'parent "%s" of containertemplate "%s" not ' \
                         'previously listed. Quitting.' % \
                         (containertemplate_parent_name,
                          containertemplate_name)
                raise ValueError(errmsg)

            containertemplate_parent = \
                containertemplates[containertemplate_parent_name]

        containertemplates[containertemplate_name] = \
            ContainerTemplate(containertemplateelem, containertemplate_parent)

    hostelems = lxcplanelem.findall('./hosts/host')

    bridges = {}
    containers = {}
    hostnames = []

    for hostelem in hostelems:
        hostname = hostelem.attrib.get('hostname')

        hostnames.append(hostname)

        # 'localhost' is permitted as a catchall hostname to mean the
        # local machine only when one host is specified in the file
        if hostname == 'localhost':
            if len(hostelems) > 1:
                error = '"localhost" hostname only permitted when one ' \
                        'host is specified. Quitting.'
                raise ValueError(error)

        # kernel params
        kernelparameters[hostname] = {}

        for paramelem in hostelem.findall('./kernelparameters/parameter'):
            kernelparameters[hostname][paramelem.attrib['name']] = \
                paramelem.attrib['value']

        # bridges (explicit)
        bridges[hostname] = {}

        for bridgeelem in hostelem.findall('./bridges/bridge'):
            bridge = Bridge(bridgeelem)
            bridges[hostname][bridge.name] = bridge

        containers[hostname] = []

        params = []

        containerselem = hostelem.findall('./containers')[0]

        root_directory = \
            os.path.join(ConfigDictionary().get('etce', 'WORK_DIRECTORY'),
                         'lxcroot')

        rootdirectories[hostname] = root_directory

        # ensure no repeated lxc_indices
        alllxcids = set([])

        for containerelem in hostelem.findall('./containers/container'):
            containerlxcids = etce.utils.nodestr_to_nodelist(
                str(containerelem.attrib['lxc_indices']))

            repeatedids = alllxcids.intersection(containerlxcids)

            assert len(repeatedids) == 0, \
                'Found repeated lxcid(s): {%s}. Quitting.' % \
                ','.join([str(nid) for nid in list(repeatedids)])

            alllxcids.update(containerlxcids)

        # Create containers from container elems
        for containerelem in hostelem.findall('./containers/container'):
            templatename = containerelem.attrib.get('template', None)

            template = containertemplates.get(templatename, None)

            lxcids = etce.utils.nodestr_to_nodelist(
                str(containerelem.attrib['lxc_indices']))

            # fetch the overlays, use etce file values as default
            overlays = ConfigDictionary().asdict()['overlays']

            for overlayelem in containerelem.findall('./overlays/overlay'):
                oname = overlayelem.attrib['name']
                ovalue = overlayelem.attrib['value']
                overlays[oname] = etce.utils.configstrtoval(ovalue)

            # fetch the overlaylists
            overlaylists = {}

            for overlaylistelem in containerelem.findall('./overlays/overlaylist'):
                oname = overlaylistelem.attrib['name']
                separator = overlaylistelem.attrib.get('separator', ',')
                ovalues = overlaylistelem.attrib['values'].split(separator)
                overlaylists[oname] = ovalues

            # treat all values for each name as an int if possible,
            # else all strings
            for oname, ovals in overlaylists.items():
                converted_vals = []

                try:
                    converted_vals = [etce.utils.configstrtoval(oval)
                                      for oval in ovals]
                    overlaylists[oname] = converted_vals
                except ValueError:
                    # leave as strings
                    pass

            # Why must a default value be supplied here when the
            # schema declares this attribute with a default value?
            for i, lxcid in enumerate(lxcids):
                # start with overlays
                lxcoverlays = copy.copy(overlays)

                # then add list items for this node
                for oname, ovals in overlaylists.items():
                    lxcoverlays[oname] = ovals[i]

                # then lxc_index, lxc_name and lxc_directory (cannot be overwritten)
                lxcoverlays.update({'lxc_index': lxcid})
                lxcoverlays.update({'lxc_name':
                                    format_string(containerelem.attrib['lxc_name'],
                                                  lxcoverlays)})
                lxcoverlays.update({'lxc_directory':
                                    os.path.join(root_directory,
                                                 lxcoverlays['lxc_name'])})

                containers[hostname].append(
                    Container(containerelem,
                              lxcoverlays,
                              params,
                              template,
                              bridges[hostname],
                              hostname))

        # Roll over containers to get names of implicit bridges added
        # from the container interface bridge names and augment
        # the bridges list
        for container in containers[hostname]:
            for iname, iparams in container.interfaces.items():
                if not iname in bridges[hostname]:
                    bridges[hostname][iname] = BridgeImplicit(iname)

    return hostnames, kernelparameters, bridges, containers, rootdirectories
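# nodestr_to_nodelist is a project helper; a minimal stand-in, under my
# assumption that the lxc_indices attribute is a comma-separated list of
# ids and inclusive ranges such as "1-3,5":
def nodestr_to_nodelist_sketch(nodestr):
    ids = []
    for tok in nodestr.split(','):
        if '-' in tok:
            lo, hi = (int(x) for x in tok.split('-'))
            ids.extend(range(lo, hi + 1))
        else:
            ids.append(int(tok))
    return ids

assert nodestr_to_nodelist_sketch('1-3,5') == [1, 2, 3, 5]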
class WrapperLoader(object):
    def __init__(self):
        self._config = ConfigDictionary()

    def wrapperpaths(self):
        return self._config.get('etce', 'WRAPPER_DIRECTORY').split(':')

    def loadwrappers(self, wrapperpath):
        wrapperinstances = {}

        for cwd, dirnames, filenames in os.walk(wrapperpath):
            for wrapperfile in filenames:
                try:
                    wrapperfile = os.path.join(cwd, wrapperfile.split('.')[0])

                    fullwrappername = os.path.relpath(wrapperfile, wrapperpath)

                    wrapper = self._load_module(fullwrappername, None)

                    if wrapper is not None:
                        basename = wrapper.__name__.split('/')[-1]

                        candidateclassname = basename.upper()

                        for key in wrapper.__dict__:
                            if key.upper() == candidateclassname:
                                candidateclass = wrapper.__dict__[key]

                                if callable(candidateclass):
                                    key = fullwrappername.replace(os.sep, '.')
                                    wrapperinstances[key] = (wrapperpath,
                                                             candidateclass())
                except:
                    continue

        return wrapperinstances

    def loadwrapper(self, wrappername, packageprefixfilter=(None,)):
        wrapper = None

        for packagename in packageprefixfilter:
            wrapper = self._load_module(wrappername, packagename)

            if wrapper is not None:
                basename = wrapper.__name__.split('/')[-1]

                candidateclassname = basename.upper()

                for key in wrapper.__dict__:
                    if key.upper() == candidateclassname:
                        candidateclass = wrapper.__dict__[key]

                        if callable(candidateclass):
                            return candidateclass()

        message = 'No wrapper "%s" found' % wrappername
        raise RuntimeError(message)

    def _load_module(self, wrappername, packageprefix):
        wrapper = None

        if packageprefix:
            wrappername = packageprefix + '.' + wrappername

        etcewrapper = wrappername.replace('.', os.sep)

        try:
            f, pathname, description = \
                imp.find_module(etcewrapper, self.wrapperpaths())
            wrapper = imp.load_module(etcewrapper, f, pathname, description)
        except:
            pass

        return wrapper
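# The lookup convention above: a wrapper module is expected to define a
# class whose uppercased name matches the module's uppercased basename
# (e.g. class Iperf in iperf.py). A toy stdlib-only illustration with a
# hypothetical module dictionary:
module_basename = 'iperf'
module_dict = {'Iperf': type('Iperf', (), {}), 'helper': lambda: None}
matches = [v for k, v in module_dict.items()
           if k.upper() == module_basename.upper() and callable(v)]
assert matches and matches[0].__name__ == 'Iperf'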
def stopfield(args):
    workdir = ConfigDictionary().get('etce', 'WORK_DIRECTORY')

    workdir = os.getenv('WORKDIR', workdir)

    lockfilename = os.path.join(workdir, 'etce.docker.lock')

    if not os.path.exists(lockfilename) or not os.path.isfile(lockfilename):
        raise DOCKERError('Lockfile "%s" not found. Quitting.' % lockfilename)

    if args.dockerplanfile:
        dockerplanfile = args.dockerplanfile
    else:
        dockerplanfile = os.path.join(workdir, 'dockerplan.xml')

    plandoc = DOCKERPlanFileDoc(dockerplanfile)

    this_hostname = Platform().hostname()

    other_hosts = []

    for hostname, ip in plandoc.hostnames():
        if hostname not in (this_hostname, 'localhost'):
            other_hosts.append(hostname)

    # stop containers on other hosts, if any
    try:
        if other_hosts:
            if args.collect:
                client_nodes = None

                try:
                    print('Collecting results.')

                    time = 'collect_on_%s' % etce.timeutils.getstrtimenow().split('.')[0]

                    localtestresultsdir = os.path.join(workdir, 'data', time)

                    field = Field(os.path.join(workdir, 'HOSTFILE'))

                    # root nodes host the filesystem for all of the
                    # attached virtual nodes
                    filesystemnodes = list(field.roots())

                    testdir = 'data'

                    client_nodes = ClientBuilder().build(filesystemnodes,
                                                         user=args.user,
                                                         port=args.port,
                                                         password=args.password,
                                                         policy=args.policy)

                    try:
                        client_nodes.collect(testdir,
                                             localtestresultsdir,
                                             filesystemnodes)
                    except:
                        pass
                finally:
                    if client_nodes:
                        client_nodes.close()

            client = None

            try:
                client = ClientBuilder().build(other_hosts,
                                               user=args.user,
                                               port=args.port,
                                               password=args.password)

                # push the file and execute
                client.put(lockfilename, '.', other_hosts, doclobber=True)

                # on the destination node the netplan file gets pushed to the
                # ETCE WORK_DIRECTORY
                command = 'dockermanager stopdockers %s' % os.path.basename(dockerplanfile)

                ret = client.execute(command, other_hosts)

                for k in ret:
                    print('[%s] return: %s' % (k, ret[k].retval['result']))
            finally:
                if client:
                    client.close()
    finally:
        # os.system('ip link del vxlan1')
        stopdockers(plandoc)
        os.system('rm -f %s' % lockfilename)
def __init__(self):
    self._config = ConfigDictionary()
def startfield(args):
    this_hostname = Platform().hostname()

    config = ConfigDictionary()

    workdir = config.get('etce', 'WORK_DIRECTORY')

    workdir = os.getenv('WORKDIR', workdir)

    if not os.path.exists(workdir):
        raise DOCKERError('ETCE WORK_DIRECTORY "%s" not found. '
                          'Please create it before starting.' % workdir)

    if args.dockerplanfile:
        dockerplanfile = args.dockerplanfile
    else:
        dockerplanfile = os.path.join(workdir, 'dockerplan.xml')

    plandoc = DOCKERPlanFileDoc(dockerplanfile)

    # lockfile
    lockfilename = os.path.join(workdir, 'etce.docker.lock')

    if os.path.isfile(lockfilename):
        err = 'Detected an active docker field with root at: %s. ' \
              'Run "etce-docker stop" first.' % \
              plandoc.docker_root_directory(this_hostname)
        raise DOCKERError(err)

    cidr = os.getenv('CIDR', '10.99.0.0/16')

    containers = []

    for hostname, _ in plandoc.hostnames():
        for container in plandoc.containers(hostname):
            for bridgename, interfaceparams in container.interfaces.items():
                if IPAddress(interfaceparams['ipv4']) in IPNetwork(cidr):
                    containers.append((container.docker_name,
                                       interfaceparams['ipv4']))
                    break

    ipexist = []

    for _, ip in containers:
        ipexist.append(ip)

    my_ip = ''

    for ip in IPNetwork(cidr)[1:]:
        if not str(ip) in ipexist:
            my_ip = str(ip)
            break

    my_ip = my_ip + '/' + cidr.split('/')[1]

    # write all external container ips to /etc/hosts on the
    # controller container/machine
    writehosts(plandoc, containers)

    hostfile = os.path.join(workdir, 'hosts')

    if not args.dryrun:
        shutil.copy(dockerplanfile, lockfilename)
        shutil.copy('/etc/hosts', hostfile)

    startdockers(plandoc, args.writehosts, args.forcedockerroot, args.dryrun)

    other_hosts = []

    for hostname, ip in plandoc.hostnames():
        if hostname not in (this_hostname, 'localhost'):
            other_hosts.append(hostname)

    # start containers on other hosts, if any
    if other_hosts:
        client = None

        try:
            client = ClientBuilder().build(other_hosts,
                                           user=args.user,
                                           port=args.port,
                                           password=args.password)

            # push the plan file and execute
            client.put(dockerplanfile, '.', other_hosts, doclobber=True)

            # push the hosts file
            client.put('/etc/hosts', '.', other_hosts, doclobber=True)

            # on the destination node the netplan file gets pushed to the
            # ETCE WORK_DIRECTORY
            command = 'dockermanager startdockers %s writehosts=%s forcedockerroot=%s' \
                      % (os.path.basename(dockerplanfile),
                         args.writehosts,
                         args.forcedockerroot)

            ret = client.execute(command, other_hosts)

            for k in ret:
                print('[%s] return: %s' % (k, ret[k].retval['result']))
        finally:
            if client:
                client.close()

    # A valid ETCE Test Directory.
    TESTDIRECTORY = os.path.join(workdir, 'pub-tdmact')

    # The output directory to place the built Test Directory.
    TESTROOT = os.path.join(workdir,
                            TESTDIRECTORY + '_' + etce.utils.timestamp())

    os.system('etce-test publish %s %s --verbose' % (TESTDIRECTORY, TESTROOT))

    # A user tag to prepend to the name of each test result directory.
    TESTPREFIX = 'tdmact'

    # Run scenario order steps
    #if not args.collect:
    os.system('etce-test run --user root --policy autoadd -v --kill before '
              '--nocollect %s %s %s' % (TESTPREFIX, HOSTFILE, TESTROOT))
class SSHClient(etce.fieldclient.FieldClient):
    RETURNVALUE_OPEN_DEMARCATOR = '***********ETCESSH_RETURN_VALUE_START********************'
    RETURNVALUE_CLOSE_DEMARCATOR = '***********ETCESSH_RETURN_VALUE_STOP********************'

    def __init__(self, hosts, **kwargs):
        etce.fieldclient.FieldClient.__init__(self, hosts)

        self._connection_dict = {}

        self._execute_threads = []

        # ssh authentication is revised (5/7/2019):
        #
        # As tested against paramiko 1.16
        #
        # User must specify the ssh key file to use for authentication. They
        # can specify the key file explicitly with the sshkey parameter -
        # if the filename is not absolute, it is assumed to be a file located
        # in ~/.ssh. If sshkey is None, try to determine the key file from
        # ~/.ssh/config. If that also fails, check for the default ssh rsa
        # key ~/.ssh/id_rsa and attempt to use that.
        #
        # paramiko also provides a paramiko.agent.Agent class for
        # querying a running ssh-agent for its loaded keys. The agent
        # can be used:
        #
        # 1. by calling connect with allow_agent = True (the default)
        # 2. by calling Agent().get_keys() and passing to connect as pkey
        #
        # In the first case, the connect call selects the first key found
        # in the running agent and prompts for a passphrase - without indicating
        # the key it is prompting for. In the second case, the only identifying
        # information that can be obtained from an agent returned key object is
        # its md5 fingerprint - which is correct but not convenient for
        # helping the user select and identify the agent key to use. For these
        # reasons, ignore the agent for authentication and make the user identify
        # the key file(s) to use - preferably via their .ssh/config file.
        user = kwargs.get('user', None)

        port = kwargs.get('port', None)

        policystr = kwargs.get('policy', 'reject')

        sshkey = kwargs.get('sshkey', None)

        user_specified_key_file = None

        if sshkey:
            if sshkey[0] == '/':
                user_specified_key_file = sshkey
            else:
                user_specified_key_file = os.path.expanduser(
                    os.path.join('~/.ssh', sshkey))

            if not os.path.exists(user_specified_key_file):
                raise FieldConnectionError(
                    'sshkey "%s" doesn\'t exist. Quitting.' %
                    user_specified_key_file)

        self._envfile = kwargs.get('envfile', None)

        self._config = ConfigDictionary()

        ssh_config_file = os.path.expanduser('~/.ssh/config')

        ssh_config = None

        if os.path.exists(ssh_config_file):
            ssh_config = paramiko.SSHConfig()
            ssh_config.parse(open(ssh_config_file))

        authenticated_keys = {}

        policy = RejectPolicy

        if policystr == 'warning':
            policy = WarningPolicy
        elif policystr == 'autoadd':
            policy = AutoAddPolicy

        policy = self._set_unknown_hosts_policy(hosts, port, ssh_config, policy)

        for host in hosts:
            host_config = None

            if ssh_config:
                host_config = ssh_config.lookup(host)

            host_user = os.path.basename(os.path.expanduser('~'))

            if user:
                host_user = user
            elif host_config:
                host_user = host_config.get('user', host_user)

            host_port = 22

            if port:
                host_port = port
            elif host_config:
                host_port = host_config.get('port', host_port)

            host_key_filenames = []

            if user_specified_key_file:
                host_key_filenames = [user_specified_key_file]
            elif host_config:
                host_key_filenames = host_config.get('identityfile',
                                                     host_key_filenames)

            if not host_key_filenames:
                default_rsa_keyfile = os.path.join(os.path.expanduser('~'),
                                                   '.ssh',
                                                   'id_rsa')

                if os.path.exists(default_rsa_keyfile) and os.path.isfile(default_rsa_keyfile):
                    host_key_filenames = [default_rsa_keyfile]
                else:
                    message = 'Unable to find an RSA SSH key associated with host "%s". ' \
                              'Either:\n\n' \
                              ' 1) specify a key using the "sshkey" option\n' \
                              ' 2) add a "Host" rule to your ~/.ssh/config file identifying the key\n' \
                              ' 3) create a default RSA key ~/.ssh/id_rsa.\n\n' \
                              'Quitting.' % host
                    raise FieldConnectionError(message)

            try:
                pkey = None

                for host_key_file in host_key_filenames:
                    if host_key_file in authenticated_keys:
                        pkey = authenticated_keys[host_key_file]
                    else:
                        pkey = None

                        try:
                            # Assume key is not passphrase protected first
                            pkey = RSAKey.from_private_key_file(host_key_file, None)
                        except PasswordRequiredException as pre:
                            # if that fails, prompt for passphrase
                            pkey = RSAKey.from_private_key_file(
                                host_key_file,
                                getpass.getpass('Enter passphrase for %s: ' % host_key_file))

                        authenticated_keys[host_key_file] = pkey

                    break

                if not pkey:
                    message = 'Unable to connect to host "%s", cannot authenticate. ' \
                              'Quitting.' % host
                    raise FieldConnectionError(message)

                client = paramiko.SSHClient()

                client.load_system_host_keys()

                client.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))

                client.set_missing_host_key_policy(policy())

                client.connect(hostname=host,
                               username=host_user,
                               port=int(host_port),
                               pkey=pkey,
                               allow_agent=False)

                self._connection_dict[host] = client
            except socket.gaierror as ge:
                message = '%s "%s". Quitting.' % (ge.strerror, host)
                raise FieldConnectionError(message)
            except paramiko.ssh_exception.NoValidConnectionsError as e:
                raise FieldConnectionError('Unable to connect to host "%s", '
                                           'NoValidConnectionsError. Quitting.' % host)
            except Exception as e:
                raise FieldConnectionError(e)

    def sourceisdestination(self, host, srcfilename, dstfilename):
        if srcfilename == dstfilename:
            p = Platform()

            if p.hostname_has_local_address(host):
                return True

        return False

    def put(self, localsrc, remotedst, hosts, doclobber=False, minclobberdepth=2):
        # this is intended to work like 'cp -R src dstdir' where src
        # can be a file name or directory name (relative or absolute path)
        # and destination is always a directory. dstdir can be relative
        # or absolute also, but it is rooted at WORK_DIRECTORY on the receiving
        # nodes. Examples:
        #
        # src is a file
        #   src=foo.txt, dst='/': moves foo.txt to WORK_DIRECTORY/foo.txt
        #   src=/home/bar/foo.txt, dst='/': moves foo.txt to WORK_DIRECTORY/foo.txt
        # src is a directory
        #   src=./foo/bar, dst='bar': moves bar to WORK_DIRECTORY/bar/bar
        #   src=/opt/foo/bar, dst='bar': moves bar to WORK_DIRECTORY/bar/bar
        #
        remotesubdir = self._normalize_remotedst(remotedst)

        srcdir, srcbase = self._normalize_split_localsubdir(localsrc)

        if not os.path.exists(localsrc):
            raise RuntimeError('Error: "%s" doesn\'t exist. Quitting.' % srcbase)

        srctar = ''

        cwd = os.getcwd()

        try:
            # move to directory containing the src
            if len(srcdir) > 0:
                os.chdir(srcdir)

            # eliminate cases where src and dst are same path on same host

            # this is the local directory that we are putting
            tmppath = os.getcwd()
            abssrc = os.path.join(tmppath, srcbase)

            # this is where this node would resolve the put location if it
            # were a receiver
            etcedir = self._config.get('etce', 'WORK_DIRECTORY')
            tmpsubdir = remotesubdir
            if tmpsubdir == '.':
                tmpsubdir = ''
            absdst = os.path.join(etcedir, tmpsubdir, srcbase)

            dsthosts = []

            # only move when not same host and same directory
            for host in hosts:
                if self.sourceisdestination(host, abssrc, absdst):
                    print('Skipping host "%s". Source and destination are the same.'
                          % host)
                    continue

                dsthosts.append(host)

            # continue if we have at least one non self host
            if len(dsthosts) == 0:
                return

            # first step, move the tar file to remote /tmp
            srctar = etce.utils.tarzip([srcbase])

            abssrctar = os.path.join(os.getcwd(), srctar)

            absdsttar = os.path.join('/tmp', srctar)

            threads = []

            for host in dsthosts:
                # create name of tar file on destination
                if host in self._connection_dict:
                    threads.append(PutThread(self._connection_dict[host],
                                             abssrctar,
                                             absdsttar,
                                             host))

            for t in threads:
                t.start()

            for t in threads:
                t.join()

            # now extract tarfile on dst hosts to the output path
            deletetar = True

            command = 'utils untarzip %s %s %s %d %s' % (absdsttar,
                                                         remotesubdir,
                                                         str(doclobber),
                                                         minclobberdepth,
                                                         str(deletetar))

            self.execute(command, hosts)
        finally:
            if os.path.exists(srctar):
                os.remove(srctar)

            os.chdir(cwd)

    def interrupt(self):
        for thread in self._execute_threads:
            thread.interrupt()

    def execute(self, commandstr, hosts, workingdir=None):
        # execute an etce command over ssh
        self._execute_threads = []

        fullcommandstr = ''

        if self._envfile is not None:
            fullcommandstr += '. %s; ' % self._envfile

        fullcommandstr += 'etce-field-exec '

        if not workingdir is None:
            fullcommandstr += '--cwd %s ' % workingdir

        fullcommandstr += commandstr

        for host in hosts:
            host_fullcommandstr = 'export HOSTNAME=%s; ' % host + fullcommandstr

            if host in self._connection_dict:
                self._execute_threads.append(
                    ExecuteThread(self._connection_dict[host],
                                  host_fullcommandstr,
                                  host))

        # start the threads
        for t in self._execute_threads:
            t.start()

        # collect the return objects and monitor for exception
        returnobjs = {}

        exception = False

        keyboard_interrupt = False

        for t in self._execute_threads:
            # cycle on join to allow keyboard interrupts
            # to occur immediately
            while t.isAlive():
                t.join(5.0)

            returnobjs[t.name] = t.returnobject()

            if returnobjs[t.name].retval['isexception']:
                exception = True
            elif returnobjs[t.name].keyboard_interrupt:
                keyboard_interrupt = True

        # raise an exception if any return object is an exception
        if exception:
            raise ETCEExecuteException(returnobjs)

        if keyboard_interrupt:
            raise KeyboardInterrupt()

        # return in error free case
        return returnobjs

    def collect(self, remotesrc, localdstdir, hosts):
        if len(hosts) == 0:
            print(' Warning: no hosts.')
            return

        remotesubdir = self._normalize_remotesrc(remotesrc)

        srchosts = []

        # make the destination if it does not exist
        if not os.path.exists(localdstdir) or not os.path.isdir(localdstdir):
            print('Warning: local directory "%s" does not exist. Will attempt to make.'
                  % localdstdir)

            os.makedirs(localdstdir)

            srchosts = hosts
        else:
            # eliminate cases where src and dst are same path on same host

            # abssrc is where the transfer would come from if this host is
            # among the remote hosts
            etcedir = self._config.get('etce', 'WORK_DIRECTORY')
            abssrc = os.path.join(etcedir, remotesubdir)

            # figure out absolute name of local destination
            cwd = os.getcwd()
            os.chdir(localdstdir)
            absroot = os.getcwd()
            os.chdir(cwd)
            absdst = os.path.join(absroot, os.path.basename(remotesubdir))

            for host in hosts:
                if self.sourceisdestination(host, abssrc, absdst):
                    print(' Skipping host "%s". Source and destination are the same.'
                          % host)
                    continue

                srchosts.append(host)

        if not srchosts:
            # No hosts to pull files from
            return

        # prep the items to fetch - tar them up and get their names
        retvals = self.execute('utils prepfiles %s' % remotesrc, srchosts)

        tarfiles = {}

        for host in retvals:
            if retvals[host].retval['result'] is not None:
                tarfiles[host] = retvals[host].retval['result']

        if not tarfiles:
            print(' Warning: no files to transfer.')
            return

        # Retrieve and extract data from each remote host
        removers = []

        for host, tfile in tarfiles.items():
            host_is_local = False

            if os.path.exists(tfile):
                # ignore file if it is already on local machine
                host_is_local = True

            getter = GetThread(self._connection_dict[host],
                               tfile,
                               os.path.join('/tmp', os.path.basename(tfile)),
                               host)

            getter.start()

            getter.join()

            ltf = os.path.join('/tmp', os.path.basename(tfile))

            if not os.path.exists(ltf) or not os.path.isfile(ltf):
                raise RuntimeError('%s does not exist' % ltf)

            tf = None

            try:
                command = 'etce-field-exec platform rmfile %s' % tfile

                if self._envfile is not None:
                    command = '. %s; %s' % (self._envfile, command)

                # also set up a thread to remove the tarfile on remotes
                removers.append(ExecuteThread(self._connection_dict[host],
                                              command,
                                              host))

                absolute_localdstdir = localdstdir

                if not absolute_localdstdir[0] == '/':
                    absolute_localdstdir = os.path.join(localdstdir, remotesrc)

                if host_is_local and os.path.exists(absolute_localdstdir):
                    print('Skipping collection from local host "%s".' % host)
                else:
                    print('Collecting files from host "%s" to "%s".'
                          % (host, localdstdir))

                    tf = tarfile.open(ltf, 'r:gz')

                    tf.extractall(localdstdir)

                    tf.close()
            finally:
                os.remove(ltf)

        # execute the remove threads
        for t in removers:
            t.start()

        # collect the return objects and monitor for exception
        returnobjs = {}

        exception = False

        for t in removers:
            t.join()

            returnobjs[t.name] = t.returnobject()

            if returnobjs[t.name].retval['isexception']:
                exception = True

        # raise an exception if any return object is an exception
        if exception:
            raise ETCEExecuteException(returnobjs)

    def _normalize_remotesrc(self, remotesubdir):
        subdirre = re.compile(r'\w+(?:/\w*)*')

        if not subdirre.match(remotesubdir):
            raise ValueError('Error: %s is not a valid source' % remotesubdir)

        tmpsubdir = remotesubdir

        while tmpsubdir.rfind('/') == (len(tmpsubdir) - 1):
            tmpsubdir = tmpsubdir[:-1]

        return tmpsubdir

    def _normalize_remotedst(self, remotesubdir):
        subdir = remotesubdir.strip()

        if len(subdir) == 0 or subdir == '.':
            return '.'

        subdirre = re.compile(r'(?:\./)?(\w+/?)+')

        match = subdirre.match(subdir)

        if not match:
            raise ValueError('Error: %s is not a valid destination' % remotesubdir)

        if subdir[-1] == '/':
            subdir = subdir[:-1]

        if subdir[0] == '.':
            subdir = subdir[2:]

        # '..' not permitted in destination
        if '..' in subdir.split('/'):
            raise ValueError('Error: ".." not permitted in destination path')

        # for simplicity, disallow '.' in remotedst also
        if '.' in subdir.split('/'):
            raise ValueError('Error: "." not permitted in multi-level destination path')

        return subdir

    def _normalize_split_localsubdir(self, localsubdir):
        srcbase = os.path.basename(localsubdir)

        srcdir = os.path.dirname(localsubdir)

        # disallow . or .. as srcbase or in srcdir
        if srcbase == '..' or srcbase == '.':
            raise ValueError('Error: src cannot be ".." or "."')

        if len(srcbase) == 0:
            raise ValueError('Error: No source specified')

        if '..' in srcdir.split('/'):
            raise ValueError('Error: ".." not permitted in src')

        if '.' in srcdir.split('/'):
            raise ValueError('Error: "." not permitted in src')

        return (srcdir, srcbase)

    def _set_unknown_hosts_policy(self, hosts, port, ssh_config, policy):
        known_hosts_filename = os.path.expanduser('~/.ssh/known_hosts')

        if not os.path.exists(known_hosts_filename) or \
           not os.path.isfile(known_hosts_filename):
            raise FieldConnectionError(
                'Error: ~/.ssh/known_hosts file does not exist, '
                'please create it.')

        all_host_keys = paramiko.util.load_host_keys(known_hosts_filename)

        # build list of hosts that don't have an ssh-rsa entry in known_hosts
        unknown_hosts = []

        for host in hosts:
            host_config = None

            if ssh_config:
                host_config = ssh_config.lookup(host)

            host_port = 22

            if port:
                host_port = port
            elif host_config:
                host_port = host_config.get('port', host_port)

            # try host and [host]:port as keys to check in known_hosts as
            # the format depends on ssh version
            keys_to_check = set([host, '[%s]:%d' % (host, int(host_port))])

            found_keys = keys_to_check.intersection(set(all_host_keys.keys()))

            if not found_keys:
                unknown_hosts.append(host)
            else:
                host_keys = all_host_keys.get(sorted(found_keys)[0], None)

                rsakey = host_keys.get('ssh-rsa', None)

                if not rsakey:
                    unknown_hosts.append(host)

        # if we found an unknown host and we're configured to reject,
        # ask the user for permission to add
        if unknown_hosts and (policy == RejectPolicy):
            unknown_hosts_str = '{' + ', '.join(sorted(unknown_hosts)) + '}'

            response = raw_input('Unknown hosts: %s. Add to known_hosts (Y/N) [N]? '
                                 % unknown_hosts_str)

            if not response.upper() == 'Y':
                print('Quitting.', file=sys.stderr)
                exit(1)

            return AutoAddPolicy

        return policy

    def close(self):
        for host in self._connection_dict:
            self._connection_dict[host].close()
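# The known_hosts lookup above checks two entry spellings because the file
# format varies with the ssh version; a tiny stdlib illustration with
# hypothetical host and port values:
host, host_port = 'node-001', 222
keys_to_check = {host, '[%s]:%d' % (host, int(host_port))}
assert keys_to_check == {'node-001', '[node-001]:222'}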
class Publisher(object):
    def __init__(self, test_directory):
        self._test_directory = test_directory

        test_filename_abs = os.path.join(self._test_directory,
                                         TestDirectory.TESTFILENAME)

        self._testdoc = TestFileDoc(test_filename_abs)

        self._config = ConfigDictionary()

    def merge_with_base(self, mergedir, absbasedir_override=None, extrafiles=[]):
        '''
        Merge the files from the base directory (if there is one), the
        test directory and the extrafiles to the merge directory.
        '''
        if not mergedir:
            raise ValueError('A merge directory must be specified in merging '
                             'a test directory with its base directory. '
                             'Quitting.')

        # Quit if the merge directory already exists. This also handles the
        # case where mergedir is the same as self._test_directory
        if os.path.exists(mergedir):
            print('Merge directory "%s" already exists, skipping merge.' % mergedir,
                  file=sys.stderr)
            return

        srcdirs = [self._test_directory]

        # choose the base directory for the merge
        base_directory = None

        if absbasedir_override:
            base_directory = absbasedir_override
        elif self._testdoc.has_base_directory:
            # test.xml file base directory is permitted to be relative or absolute
            if self._testdoc.base_directory[0] == os.path.sep:
                base_directory = self._testdoc.base_directory
            else:
                base_directory = os.path.join(self._test_directory,
                                              self._testdoc.base_directory)
        else:
            # merge devolves to copying the test directory to the merge directory
            pass

        # check that the base directory exists
        if base_directory:
            if os.path.isdir(base_directory):
                srcdirs.insert(0, base_directory)
            else:
                errstr = 'In merging test directory "%s", cannot find base directory "%s". Quitting.' % \
                         (self._test_directory, base_directory)
                raise ValueError(errstr)

        # move the files in the base directory, then the test directory
        for srcdir in srcdirs:
            if srcdir[-1] == '/':
                srcdir = srcdir[:-1]

            subfiles = self._get_subfiles(srcdir)

            for subfile in subfiles:
                srcfile = os.path.join(srcdir, subfile)

                dstfile = os.path.join(mergedir, subfile)

                dirname = os.path.dirname(dstfile)

                if not os.path.exists(dirname):
                    os.makedirs(dirname)

                if subfile == TestDirectory.TESTFILENAME:
                    self._testdoc.rewrite_without_base_directory(dstfile)
                else:
                    shutil.copyfile(srcfile, dstfile)

        self._move_extra_files(extrafiles, mergedir)

        # check for the corner case where a template directory is specified
        # in the test file but is empty. Issue a warning, but move
        # it to the merge directory anyway.
        self._warn_on_empty_template_directory(srcdirs, mergedir)

    def _move_extra_files(self, extrafiles, dstdir):
        for srcfile, dstfile in extrafiles:
            dstfile = os.path.join(dstdir, dstfile)

            dirname = os.path.dirname(dstfile)

            if not os.path.exists(dirname):
                os.makedirs(dirname)

            shutil.copyfile(srcfile, dstfile)

    def _warn_on_empty_template_directory(self, srcdirs, mergedir):
        template_directory_names = self._testdoc.template_directory_names

        nonexistent_template_directories = set([])

        empty_template_directories = set([])

        for template_directory_name in template_directory_names:
            empty = True

            exists = False

            for srcdir in srcdirs:
                dir_to_test = os.path.join(srcdir, template_directory_name)

                if not os.path.exists(dir_to_test):
                    continue

                exists = True

                if os.listdir(dir_to_test):
                    empty = False

            if not exists:
                nonexistent_template_directories.update([template_directory_name])

            if empty:
                empty_template_directories.update([template_directory_name])

        if nonexistent_template_directories:
            errstr = 'Missing template directories {%s}. Quitting.' % \
                     ', '.join(list(nonexistent_template_directories))
            raise ValueError(errstr)

        for empty_template_directory in empty_template_directories:
            print('Warning: template directory "%s" is empty.'
                  % empty_template_directory,
                  file=sys.stderr)

            os.makedirs(os.path.join(mergedir, empty_template_directory))

    def publish(self, publishdir, logdir=None, absbasedir_override=None,
                runtime_overlays={}, extrafiles=[],
                overwrite_existing_publishdir=False):
        '''Publish the directory described by the testdirectory and its
        test.xml file to the destination directory.

        1. First instantiate template directories and files named by test.xml.
        2. Move other files from the test directory to the destination
           (filling in overlays).
        3. Move extra files, specified by the caller, to the destination.

        publish combines the files from the test directory and the (optional)
        base directory to the destination.
        '''
        srcdirs = [self._test_directory]

        if absbasedir_override:
            srcdirs.insert(0, absbasedir_override)
        elif self._testdoc.has_base_directory:
            # test.xml file base directory is permitted to be relative or absolute
            if self._testdoc.base_directory[0] == os.path.sep:
                srcdirs.insert(0, self._testdoc.base_directory)
            else:
                srcdirs.insert(0,
                               os.path.join(self._test_directory,
                                            self._testdoc.base_directory))

        templates = self._testdoc.templates

        subdirectory_map = {}

        for srcdir in srcdirs:
            subdirectory_map.update(self._build_subdirectory_map(srcdir))

        subdirectory_map = self._prune_unused_template_directories(subdirectory_map)

        etce_config_overlays, env_overlays = self._get_host_and_env_overlays()

        testfile_global_overlays = self._testdoc.global_overlays(subdirectory_map)

        print()
        print('Publishing %s to %s' % (self._testdoc.name, publishdir))

        if os.path.exists(publishdir):
            if overwrite_existing_publishdir:
                shutil.rmtree(publishdir)
            else:
                errstr = 'ERROR: destination dir "%s" already exists.' % publishdir
                raise ValueError(errstr)

        os.makedirs(publishdir)

        # move template files
        self._instantiate_templates(templates,
                                    runtime_overlays,
                                    env_overlays,
                                    etce_config_overlays,
                                    publishdir,
                                    subdirectory_map,
                                    logdir)

        # and then the remaining files
        self._move(subdirectory_map,
                   runtime_overlays,
                   env_overlays,
                   testfile_global_overlays,
                   etce_config_overlays,
                   publishdir,
                   logdir)

        self._move_extra_files(extrafiles, publishdir)

    def _get_host_and_env_overlays(self):
        # Assemble overlays from
        # 1. etce.conf
        etce_config_overlays = {}

        for k, v in self._config.items('overlays'):
            etce_config_overlays[k] = etce.utils.configstrtoval(v)

        # 2. overlays set by environment variables, identified by
        #    the etce.conf ENV_OVERLAYS_ALLOW entry
        env_overlays = {}

        env_overlays_allow = self._config.get('etce', 'ENV_OVERLAYS_ALLOW', '')

        if len(env_overlays_allow):
            for overlay in env_overlays_allow.split(':'):
                if overlay in os.environ:
                    env_overlays[overlay] = etce.utils.configstrtoval(os.environ[overlay])

        return (etce_config_overlays, env_overlays)

    def _prune_unused_template_directories(self, subdirectory_map):
        directory_templates_used_by_test = self._testdoc.template_directory_names

        all_template_directory_keys = set([entry.root_sub_entry
                                           for entry in subdirectory_map.values()
                                           if entry.template_directory_member])

        directory_templates_not_used_by_test = \
            all_template_directory_keys.difference(directory_templates_used_by_test)

        rmpaths = []

        for unused in directory_templates_not_used_by_test:
            for subpath in subdirectory_map:
                if subpath.startswith(unused + '/'):
                    rmpaths.append(subpath)

        for rmpath in rmpaths:
            subdirectory_map.pop(rmpath)

        return subdirectory_map

    def _move(self, subdirectory_map, runtime_overlays, env_overlays,
              testfile_global_overlays, etce_config_overlays, publishdir, logdir):
        skipfiles = (TestDirectory.CONFIGFILENAME,
                     TestDirectory.HOSTFILENAME)

        omitdirs = (TestDirectory.DOCSUBDIRNAME,)

        for relname, entry in subdirectory_map.items():
            if entry.root_sub_entry in omitdirs:
                continue

            # full path to the first level entry
            first_level_entry_abs = entry.root_sub_entry_absolute

            reserved_overlays = {}

            # first_level_entry is a nodename if it is a directory
            if entry.root_sub_entry_is_dir:
                reserved_overlays = {'etce_hostname': entry.root_sub_entry}

                if logdir:
                    reserved_overlays['etce_log_path'] = \
                        os.path.join(logdir, entry.root_sub_entry)

            # for a non-template file, overlay maps are searched in precedence:
            #
            # 1. reserved overlays
            # 2. runtime overlays (passed in by user or calling function)
            # 3. overlays set by environment variable
            # 4. overlays set in the test.xml file that apply to all files (at
            #    the top level)
            # 5. overlays set in the etce.conf "overlays" section - default
            #    values
            overlays = ChainMap(reserved_overlays,
                                runtime_overlays,
                                env_overlays,
                                testfile_global_overlays,
                                etce_config_overlays)

            fulldstfile = os.path.join(publishdir, relname)

            dstfiledir = os.path.dirname(fulldstfile)

            if not os.path.exists(dstfiledir):
                os.makedirs(dstfiledir)

            if relname == TestDirectory.TESTFILENAME:
                self._testdoc.rewrite_without_overlays_and_templates(fulldstfile)
            elif relname in skipfiles:
                shutil.copyfile(entry.full_name, fulldstfile)
            else:
                format_file(entry.full_name, fulldstfile, overlays)

    def _instantiate_templates(self, templates, runtime_overlays, env_overlays,
                               etce_config_overlays, publishdir,
                               subdirectory_map, logdir):
        for template in templates:
            subdirectory_map = template.instantiate(subdirectory_map,
                                                    publishdir,
                                                    logdir,
                                                    runtime_overlays,
                                                    env_overlays,
                                                    etce_config_overlays)

    def _get_subfiles(self, directory):
        files = []

        for dirname, dirnames, filenames in os.walk(directory):
            for filename in filenames:
                fullpath = os.path.join(dirname, filename)

                relpath = os.path.relpath(fullpath, directory)

                files.append(relpath)

        return files

    def _build_subdirectory_map(self, directory):
        subfiles = {}

        for dirname, dirnames, filenames in os.walk(directory):
            for filename in filenames:
                fullpath = os.path.join(dirname, filename)

                relpath = os.path.relpath(fullpath, directory)

                subfiles[relpath] = TestDirectoryEntry(directory, relpath)

        return subfiles
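# The overlay precedence in _move is exactly collections.ChainMap semantics:
# the first map containing a key wins. A stdlib-only illustration with
# hypothetical overlay values:
from collections import ChainMap
reserved = {'etce_hostname': 'node-001'}
runtime = {'an_overlay': 'runtime'}
defaults = {'an_overlay': 'default', 'other': 1}
overlays = ChainMap(reserved, runtime, defaults)
assert overlays['an_overlay'] == 'runtime'   # runtime shadows the default
assert overlays['other'] == 1                # falls through to etce.conf defaults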
class SSHClient(etce.fieldclient.FieldClient): RETURNVALUE_OPEN_DEMARCATOR='***********ETCESSH_RETURN_VALUE_START********************' RETURNVALUE_CLOSE_DEMARCATOR='***********ETCESSH_RETURN_VALUE_STOP********************' def __init__(self, hosts, **kwargs): etce.fieldclient.FieldClient.__init__(self, hosts) self._connection_dict = {} self._execute_threads = [] user = kwargs.get('user', None) port = kwargs.get('port', None) key_filenames = None self._envfile = kwargs.get('envfile', None) self._config = ConfigDictionary() ssh_config_file = os.path.expanduser('~/.ssh/config') ssh_config = None if os.path.exists(ssh_config_file): ssh_config = paramiko.SSHConfig() ssh_config.parse(open(ssh_config_file)) for host in hosts: host_config = None if ssh_config: host_config = ssh_config.lookup(host) host_user = os.path.basename(os.path.expanduser('~')) host_port = 22 host_key_filenames = [] if user: host_user = user elif host_config: host_user = host_config.get('user', host_user) if port: host_port = port elif host_config: host_port = host_config.get('port', host_port) if key_filenames: host_key_filenames = key_filenames elif host_config: host_key_filenames = host_config.get('identityfile', host_key_filenames) try: client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.client.RejectPolicy()) client.load_system_host_keys() self._connection_dict[host] = client self._connection_dict[host].connect(hostname=host, username=host_user, port=int(host_port), key_filename=host_key_filenames, allow_agent=True) except socket.gaierror as ge: message = '%s "%s". Quitting.' % (ge.strerror, host) raise FieldConnectionError(message) except paramiko.ssh_exception.NoValidConnectionsError as e: raise FieldConnectionError('Unable to connect to host "%s". Quitting.' % host) except Exception as e: message = 'Unable to connect to host "%s" (%s). Quitting.' % (host, str(e)) raise FieldConnectionError(message) def sourceisdestination(self, host, srcfilename, dstfilename): if srcfilename == dstfilename: p = Platform() if p.hostname_has_local_address(host): return True return False def put(self, localsrc, remotedst, hosts, doclobber=False, minclobberdepth=2): # this is intended to work like 'cp -R src dstdir' where src # can be a file name or directory name (relative or absolute path) # and destination is always a directory. dstdir can be relative # or absolute also, but it is rooted at WORK_DIRECTORY on the receiving # nodes. Examples: # # src is a file # src=foo.txt, dst='/': moves foo.txt to WORK_DIRECTORY/foo.txt # src=/home/bar/foo.txt, dst='/': moves foo.txt to WORK_DIRECTORY/foo.txt # src is a directory # src=./foo/bar, dst='bar': moves bar to WORK_DIRECTORY/bar/bar # src=/opt/foo/bar, dst='bar': moves bar to WORK_DIRECTORY/bar/bar # remotesubdir = self._normalize_remotedst(remotedst) srcdir,srcbase = self._normalize_split_localsubdir(localsrc) if not os.path.exists(localsrc): raise RuntimeError('Error: "%s" doesn\'t exist. Quitting.' 
% srcbase) srctar = '' cwd = os.getcwd() try: # move to directory containing the src if len(srcdir) > 0: os.chdir(srcdir) # eliminate cases where src and dst are same path on same host # this is local directory that we are putting tmppath = os.getcwd() abssrc = os.path.join(tmppath, srcbase) # this is where this node would resolve the put location if it # were a receiver etcedir = self._config.get('etce', 'WORK_DIRECTORY') tmpsubdir = remotesubdir if tmpsubdir == '.': tmpsubdir = '' absdst = os.path.join(etcedir, tmpsubdir, srcbase) dsthosts = [] # only move when not same host and same directory for host in hosts: if self.sourceisdestination(host, abssrc, absdst): print 'Skipping host "%s". Source and destination are the same.' % host continue dsthosts.append(host) # continue if we have at least one non self host if len(dsthosts) == 0: return # first step, move the tar file to remote /tmp srctar = etce.utils.tarzip([srcbase]) abssrctar = os.path.join(os.getcwd(), srctar) absdsttar = os.path.join('/tmp', srctar) threads = [] for host in dsthosts: # create name of tar file on destination if host in self._connection_dict: threads.append(PutThread(self._connection_dict[host], abssrctar, absdsttar, host)) for t in threads: t.start() for t in threads: t.join() # now extract tarfile on dst hosts to the output path deletetar = True command = 'utils untarzip %s %s %s %d %s' % (absdsttar, remotesubdir, str(doclobber), minclobberdepth, str(deletetar)) self.execute(command, hosts) finally: if os.path.exists(srctar): os.remove(srctar) os.chdir(cwd) def interrupt(self): for thread in self._execute_threads: thread.interrupt() def execute(self, commandstr, hosts, workingdir=None): # execute an etce command over ssh self._execute_threads = [] fullcommandstr = '' if self._envfile is not None: fullcommandstr += '. %s; ' % self._envfile fullcommandstr += 'etce-field-exec ' if not workingdir is None: fullcommandstr += '--cwd %s ' % workingdir fullcommandstr += commandstr for host in hosts: host_fullcommandstr = 'export HOSTNAME=%s; ' % host + fullcommandstr if host in self._connection_dict: self._execute_threads.append(ExecuteThread(self._connection_dict[host], host_fullcommandstr, host)) # start the threads for t in self._execute_threads: t.start() # collect the return objects and monitor for exception returnobjs = {} exception = False keyboard_interrupt = False for t in self._execute_threads: # cycle on join to allow keyboard interrupts # to occur immediately while t.isAlive(): t.join(5.0) returnobjs[t.name] = t.returnobject() if returnobjs[t.name].retval['isexception']: exception = True elif returnobjs[t.name].keyboard_interrupt: keyboard_interrupt = True # raise an exception if any return object is an exception if exception: raise ETCEExecuteException(returnobjs) if keyboard_interrupt: raise KeyboardInterrupt() # return in error free case return returnobjs def collect(self, remotesrc, localdstdir, hosts): print 'Collecting files from hosts "%s" to "%s."' % (', '.join(hosts), localdstdir) if len(hosts) == 0: print ' Warning: no hosts.' return remotesubdir = self._normalize_remotesrc(remotesrc) srchosts = [] # make the destination if it does not exist if not os.path.exists(localdstdir) or not os.path.isdir(localdstdir): print 'Warning: local directory "%s" does not exist. Will attempt to make.' 
% \ localdstdir os.makedirs(localdstdir) srchosts = hosts else: # eliminate cases where src and dst are same path on same host # abssrc is where transfer would come from is this host is # among the remote hosts etcedir = self._config.get('etce', 'WORK_DIRECTORY') abssrc = os.path.join(etcedir, remotesubdir) # figure out absolute name of local destination cwd = os.getcwd() os.chdir(localdstdir) absroot = os.getcwd() os.chdir(cwd) absdst = os.path.join(absroot, os.path.basename(remotesubdir)) for host in hosts: if self.sourceisdestination(host, abssrc, absdst): print ' Skipping host "%s". Source and destination are the same.' % host continue srchosts.append(host) if not srchosts: # No hosts to pull files from return # prep the items to fetch - tar them up and get their names retvals = self.execute('utils prepfiles %s' % remotesrc, srchosts) tarfiles = {} for host in retvals: if retvals[host].retval['result'] is not None: tarfiles[host] = retvals[host].retval['result'] if not tarfiles: print ' Warning: no files to transfer.' return # Create GetThread for the hosts that have a tarfile to transfer threads = [ GetThread(self._connection_dict[host], tfile, os.path.join('/tmp',os.path.basename(tfile)), host) for host,tfile in tarfiles.items() ] for t in threads: t.start() for t in threads: t.join() # extract the tarfiles to the dst directory threads = [] for h,tfile in tarfiles.items(): ltf = os.path.join('/tmp', os.path.basename(tfile)) if not os.path.exists(ltf) or not os.path.isfile(ltf): raise RuntimeError('%s does not exist' % ltf) tf = None try: tf = tarfile.open(ltf, 'r:gz') tf.extractall(localdstdir) command = 'etce-field-exec platform rmfile %s' % tfile if self._envfile is not None: command = '. %s; %s' % (self._envfile, command) # also set up a thread to remove the tarfile on remotes threads.append(ExecuteThread(self._connection_dict[host], command, host)) finally: if not tf is None: tf.close() os.remove(ltf) # execute the remove threads for t in threads: t.start() # collect the return objects and monitor for exception returnobjs = {} exception = False for t in threads: t.join() returnobjs[t.name] = t.returnobject() if returnobjs[t.name].retval['isexception']: exception = True # raise an exception if any return object is an exception if exception: raise ETCEExecuteException(returnobjs) def _normalize_remotesrc(self, remotesubdir): subdirre = re.compile(r'\w+(?:/\w*)*') if not subdirre.match(remotesubdir): raise ValueError('Error: %s is not a valid source' % remotesubdir) tmpsubdir = remotesubdir while tmpsubdir.rfind('/') == (len(tmpsubdir) - 1): tmpsubdir = tmpsubdir[:-1] return tmpsubdir def _normalize_remotedst(self, remotesubdir): subdir = remotesubdir.strip() if len(subdir) == 0 or subdir == '.': return '.' subdirre = re.compile(r'(?:\./)?(\w+/?)+') match = subdirre.match(subdir) if not match: raise ValueError('Error: %s is not a valid destination' % remotesubdir) if subdir[-1] == '/': subdir == subdir[:-1] if subdir[0] == '.': subdir = subdir[2:] # '..' not permitted in destination if '..' in subdir.split('/'): raise ValueError('Error: ".." not permitted in destination path') # for simplicity, disallow '.' in remotedst also if '.' in subdir.split('/'): raise ValueError('Error: "." not permitted in multi-level destination path') return subdir def _normalize_split_localsubdir(self, localsubdir): srcbase = os.path.basename(localsubdir) srcdir = os.path.dirname(localsubdir) # disallow . or .. as srcbase or in srcdir if srcbase == '..' 
def _normalize_split_localsubdir(self, localsubdir):
    srcbase = os.path.basename(localsubdir)

    srcdir = os.path.dirname(localsubdir)

    # disallow '.' or '..' as srcbase or in srcdir
    if srcbase == '..' or srcbase == '.':
        raise ValueError('Error: src cannot be ".." or "."')

    if len(srcbase) == 0:
        raise ValueError('Error: No source specified')

    if '..' in srcdir.split('/'):
        raise ValueError('Error: ".." not permitted in src')

    if '.' in srcdir.split('/'):
        raise ValueError('Error: "." not permitted in src')

    return srcdir, srcbase


def close(self):
    for host in self._connection_dict:
        self._connection_dict[host].close()
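# Illustrative usage sketch (not part of the original source): the
# put/execute/collect/close lifecycle against two hypothetical hosts.
# ClientBuilder and the put() signature follow their use elsewhere in
# the codebase; the hostnames and paths here are made up.
#
#   client = ClientBuilder().build(['node-001', 'node-002'], user='etce', port=22)
#   try:
#       # push a local directory to each host's ETCE WORK_DIRECTORY
#       client.put('experiment', 'data', ['node-001', 'node-002'], doclobber=True)
#       # run an etce command on every host
#       client.execute('utils hello', ['node-001', 'node-002'])
#       # pull results back to a local directory
#       client.collect('data/results', '/tmp/results', ['node-001', 'node-002'])
#   finally:
#       client.close()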
def __init__(self, hosts, **kwargs):
    etce.fieldclient.FieldClient.__init__(self, hosts)

    self._connection_dict = {}

    self._execute_threads = []

    # ssh authentication as revised (5/7/2019), tested against paramiko 1.16:
    #
    # The user must specify the ssh key file to use for authentication. They
    # can specify the key file explicitly with the sshkey parameter - if the
    # filename is not absolute, it is assumed to be a file located in
    # ~/.ssh. If sshkey is None, try to determine the key file from
    # ~/.ssh/config. If that also fails, check for the default ssh rsa key
    # ~/.ssh/id_rsa and attempt to use that.
    #
    # paramiko also provides a paramiko.agent.Agent class for querying a
    # running ssh-agent for its loaded keys. The agent can be used:
    #
    # 1. by calling connect with allow_agent=True (the default)
    # 2. by calling Agent().get_keys() and passing the result to connect
    #    as pkey
    #
    # In the first case, the connect call selects the first key found in
    # the running agent and prompts for a passphrase - without indicating
    # which key it is prompting for. In the second case, the only
    # identifying information that can be obtained from an agent-returned
    # key object is its md5 fingerprint - which is correct but not
    # convenient for helping the user select and identify the agent key to
    # use. For these reasons, ignore the agent for authentication and make
    # the user identify the key file(s) to use - preferably via their
    # ~/.ssh/config file.
    user = kwargs.get('user', None)

    port = kwargs.get('port', None)

    policystr = kwargs.get('policy', 'reject')

    sshkey = kwargs.get('sshkey', None)

    user_specified_key_file = None

    if sshkey:
        if sshkey[0] == '/':
            user_specified_key_file = sshkey
        else:
            user_specified_key_file = os.path.expanduser(
                os.path.join('~/.ssh', sshkey))

        if not os.path.exists(user_specified_key_file):
            raise FieldConnectionError(
                'sshkey "%s" doesn\'t exist. Quitting.' % user_specified_key_file)

    self._envfile = kwargs.get('envfile', None)

    self._config = ConfigDictionary()

    ssh_config_file = os.path.expanduser('~/.ssh/config')

    ssh_config = None

    if os.path.exists(ssh_config_file):
        ssh_config = paramiko.SSHConfig()
        ssh_config.parse(open(ssh_config_file))

    authenticated_keys = {}

    policy = RejectPolicy
    if policystr == 'warning':
        policy = WarningPolicy
    elif policystr == 'autoadd':
        policy = AutoAddPolicy

    policy = self._set_unknown_hosts_policy(hosts, port, ssh_config, policy)

    for host in hosts:
        host_config = None

        if ssh_config:
            host_config = ssh_config.lookup(host)

        host_user = os.path.basename(os.path.expanduser('~'))

        if user:
            host_user = user
        elif host_config:
            host_user = host_config.get('user', host_user)

        host_port = 22

        if port:
            host_port = port
        elif host_config:
            host_port = host_config.get('port', host_port)

        host_key_filenames = []

        if user_specified_key_file:
            host_key_filenames = [user_specified_key_file]
        elif host_config:
            host_key_filenames = host_config.get('identityfile', host_key_filenames)

        if not host_key_filenames:
            default_rsa_keyfile = os.path.join(os.path.expanduser('~'),
                                               '.ssh',
                                               'id_rsa')

            if os.path.exists(default_rsa_keyfile) and os.path.isfile(default_rsa_keyfile):
                host_key_filenames = [default_rsa_keyfile]
            else:
                message = 'Unable to find an RSA SSH key associated with host "%s". ' \
                          'Either:\n\n' \
                          '  1) specify a key using the "sshkey" option\n' \
                          '  2) add a "Host" rule to your ~/.ssh/config file identifying the key\n' \
                          '  3) create a default RSA key ~/.ssh/id_rsa.\n\n' \
                          'Quitting.' % host

                raise FieldConnectionError(message)
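        # Summary comment (added for clarity; not in the original source):
        # the per-host values above resolve in this order - explicit
        # arguments to __init__ win, then a matching ~/.ssh/config Host
        # entry, then the defaults: the current login name, port 22 and
        # ~/.ssh/id_rsa.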
        try:
            pkey = None

            for host_key_file in host_key_filenames:
                if host_key_file in authenticated_keys:
                    pkey = authenticated_keys[host_key_file]
                else:
                    pkey = None

                    try:
                        # assume the key is not passphrase protected first
                        pkey = RSAKey.from_private_key_file(host_key_file, None)
                    except PasswordRequiredException:
                        # if that fails, prompt for the passphrase
                        pkey = RSAKey.from_private_key_file(
                            host_key_file,
                            getpass.getpass('Enter passphrase for %s: ' % host_key_file))

                    authenticated_keys[host_key_file] = pkey

                break

            if not pkey:
                message = 'Unable to connect to host "%s", cannot authenticate. ' \
                          'Quitting.' % host

                raise FieldConnectionError(message)

            client = paramiko.SSHClient()

            client.load_system_host_keys()

            client.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))

            client.set_missing_host_key_policy(policy())

            client.connect(hostname=host,
                           username=host_user,
                           port=int(host_port),
                           pkey=pkey,
                           allow_agent=False)

            self._connection_dict[host] = client
        except socket.gaierror as ge:
            message = '%s "%s". Quitting.' % (ge.strerror, host)
            raise FieldConnectionError(message)
        except paramiko.ssh_exception.NoValidConnectionsError:
            raise FieldConnectionError('Unable to connect to host "%s", '
                                       'NoValidConnectionsError. Quitting.' % host)
        except Exception as e:
            raise FieldConnectionError(e)
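# Illustrative sketch (not part of the original source): the key-loading
# pattern used in __init__ above, isolated as a standalone helper. It tries
# the key without a passphrase first and only prompts when paramiko reports
# one is required. The helper name is hypothetical.
def _sketch_load_rsa_key(host_key_file):
    import getpass
    from paramiko import RSAKey
    from paramiko.ssh_exception import PasswordRequiredException

    try:
        # most keys in automated test setups are not passphrase protected
        return RSAKey.from_private_key_file(host_key_file, None)
    except PasswordRequiredException:
        # fall back to an interactive prompt for protected keys
        return RSAKey.from_private_key_file(
            host_key_file,
            getpass.getpass('Enter passphrase for %s: ' % host_key_file))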