def configure_orchestra(s):
    '''
    Create the appropriate profiles and system configurations to the orchestra
    server for the system we are going to provision.
    '''
    cdebug('Enter MetalProvisioner::configure_orchestra')
    if not s.quiet:
        print('Configure the Orchestra server')

    # Provisioner configuration for this target: per-series and per-arch
    # decorations plus the orchestra server's host name.
    o = Configuration['systems'][s.target]['provisioner']
    o_series = o['series'][s.series]
    o_arch = o['arch'][s.arch]

    # Cobbler object names: distro, kickstart/preseed file and the repo list
    # (the repo list is passed as one single-quoted argument).
    distro = '%s%s-%s' % (s.series, o_series['server distro decoration'], o_arch)
    kickstart = path.join(s.kickstarts_root, 'kt-%s.preseed' % (o_series['preseed']))
    repos = "'%s-%s %s-%s-security'" % (s.series, o_arch, s.series, o_arch)
    server = o['server']
    t = Configuration['systems'][s.target]

    # Remove any stale profile first, then re-create the profile and the
    # system record (keyed on the target's MAC address) on the orchestra
    # server via ssh.
    if not s.quiet:
        print('    remove profile \'%s\'' % s.name)
    ssh('%s@%s' % ('kernel', server), 'sudo cobbler profile remove --name=%s' % (s.name))
    if not s.quiet:
        print('    adding profile \'%s\' with series: %s arch: %s' % (s.name, s.series, s.arch))
    ssh('%s@%s' % ('kernel', server), 'sudo cobbler profile add --name=%s --distro=%s --kickstart=%s --repos=%s' % (s.name, distro, kickstart, repos))
    if not s.quiet:
        print('    adding system \'%s\'' % (s.name))
    ssh('%s@%s' % ('kernel', server), 'sudo cobbler system add --name=%s --profile=%s --hostname=%s --mac=%s' % (s.name, s.name, s.name, t['mac address']))
    cdebug('Leave MetalProvisioner::configure_orchestra')
def verify_xen_target(s):
    '''
    Verify that the target actually came up under the Xen hypervisor by
    looking for the Domain-0 domain in the toolstack's domain list.
    Returns True when Domain-0 was found, False otherwise (always False
    on lucid, which we cannot verify).
    '''
    center('Metal::verify_xen_target')
    retval = False
    s.progress('Verifying Xen install')
    if s.series == 'lucid':
        cdebug("Can't do lucid")
    else:
        # precise still uses the old 'xm' toolstack; later series use 'xl'.
        list_cmd = 'sudo xm list' if s.series == 'precise' else 'sudo xl list'
        result, output = s.ssh(list_cmd)
        retval = any('Domain-0' in entry.strip() for entry in output)
    if not retval:
        error("")
        error("Failed to find the Domain-0 domain.")
        error("")
    cleave('Metal::verify_xen_target (%s)' % retval)
    return retval
def __init__(s, server, target, series, arch):
    '''
    Record the provisioning parameters for this Cobbler instance.
    '''
    cdebug('Enter Cobbler::__init__')
    # NOTE(review): the 'server' parameter is accepted but never stored or
    # used here — presumably the server name is re-read from Configuration
    # by the methods that need it; confirm whether this argument is dead.
    s.target = target
    s.series = series
    s.arch = arch
    cdebug('Leave Cobbler::__init__')
def series(s, results):
    '''
    Work out which Ubuntu series a test-results dictionary belongs to.

    Preference order:
      1. an explicit 'series' attribute in the results (may be hand added
         where this algorithm does not work),
      2. a series *name* embedded in the kernel version string (backports),
      3. a '~<version>' marker in the kernel version string (utopic onward),
      4. a lookup of the kernel's major.minor version in the Ubuntu db.

    Returns the series name, or "unknown" when nothing matched.
    '''
    center(s.__class__.__name__ + '.series')
    retval = "unknown"
    try:
        # First see if the series is in the results. This might have been done by hand
        # to add a series where this algorithm doesn't work.
        #
        retval = results['attributes']['series']
    except KeyError:
        kv = ''
        try:
            kv = results['attributes']['kernel']
            m = Debian.version_rc.match(kv)
            if m:
                for series in Ubuntu.index_by_series_name:
                    if series in kv:
                        cdebug("        is backport kernel", 'blue')
                        # If the series is in the kernel version string, it is most likely
                        # a "backport" kernel and we should use the series in the version
                        #
                        retval = series
                        break
                if retval == 'unknown':
                    # Starting with Utopic we are adding the series version to the kernel version and not
                    # the series name.
                    #
                    for k in Ubuntu.db:
                        if '~%s' % k in kv:
                            cdebug("        is backport kernel", 'blue')
                            retval = Ubuntu.db[k]['name']
                            break
                if retval == 'unknown':
                    # What a hack ... '2.6' kernels are all treated as 2.6.32;
                    # otherwise only major and minor matter for determining
                    # the series, so fake a '.0' micro for the lookup.
                    if '2.6' == m.group(1):
                        version = '2.6.32'
                    else:
                        version = '%s.0' % (m.group(1))
                    retval = s.ubuntu.lookup(version)['name']
            else:
                print("    ** WARNING: The kernel version string found in the results data did not match the regex.")
        except KeyError:
            # NOTE(review): this handler re-reads results['attributes']['kernel'],
            # which is itself the key that may have been missing.
            print("    ** WARNING: The kernel version (%s) did not match up with any Ubuntu series." % (results['attributes']['kernel']))
    cleave(s.__class__.__name__ + '.series')
    return retval
def store_results(self, data):
    '''
    Serialize *data* as pretty-printed JSON into 'results.json' inside the
    repository's current results directory.
    '''
    center("TestResultsRepository.store_results")
    results_path = path.join(self.results_dir, 'results.json')
    cdebug('destdir: "%s"' % results_path)
    serialized = json.dumps(data, sort_keys=True, indent=4)
    with open(results_path, 'w') as f:
        f.write(serialized)
    cleave("TestResultsRepository.store_results")
def images(s):
    '''
    Return the simplestreams catalog entries for the newest daily cloud
    image that matches this object's cloud/series/region filters.  Returns
    an empty list when nothing matched or the mirror could not be read.
    '''
    center(s.__class__.__name__ + '.images')
    retval = []

    # Get daily streams, change for releases
    # NOTE(review): 'path' here shadows the module-level os.path import for
    # the remainder of this method.
    (mirror_url, path) = util.path_from_mirror_url('https://cloud-images.ubuntu.com/daily/streams/v1/index.sjson', None)
    cdebug('    mirror_url: %s' % mirror_url)
    cdebug('    path: %s' % path)
    smirror = mirrors.UrlMirrorReader(mirror_url)

    # Change the content_id to find results for other clouds or for release images
    fl = []
    fl.append('content_id=com.ubuntu.cloud:daily:%s' % s.cloud)
    if s.series is not None:
        fl.append('release=' + s.series)
    if s.region is not None:
        fl.append('region=' + s.region)
    filter_list = filters.get_filters(fl)
    cdebug('    fl: %s' % fl)
    tmirror = FilterMirror(config={'filters': filter_list})
    try:
        tmirror.sync(smirror, path)
        try:
            # Find the latest version
            for i in tmirror.json_entries:
                # cdebug(i)
                cdebug(i['version_name'])
            versions = [item['version_name'] for item in tmirror.json_entries]
            versions = sorted(list(set(versions)))
            cdebug(versions)
            latest = versions[-1]  # IndexError here means no entry matched the filters
            items = [i for i in tmirror.json_entries if i['version_name'] == latest]
            for item in items:
                retval.append(item)
        except IndexError:
            pass
        # # Print a list of the regions represented in the filtered list
        # # as an example of extracting a list of unique keys from all items
        # regions = set([item['region'] for item in tmirror.json_entries])
        # regions = sorted(list(regions))
        # print('Regions: %s' % regions)
    except IOError:
        pass
    cleave(s.__class__.__name__ + '.images')
    return retval
def initialize_results_dir(self, dirname):
    '''
    Create a fresh, empty results directory named *dirname* under the
    repository root and remember it as self.results_dir.

    Any pre-existing directory with the same name is removed first, so the
    caller always receives an empty directory.  Returns the directory path.
    '''
    center("TestResultsRepository.initialize_results_dir")
    self.results_dir = path.join(self.cfg['repository_root'], dirname)
    if path.exists(self.results_dir):
        cdebug("%s exists.\n" % (self.results_dir))
        rmtree(self.results_dir)
    else:
        cdebug("%s does not exist.\n" % (self.results_dir))
    # Always (re)create the directory — it was either just removed or absent.
    makedirs(self.results_dir)
    cleave("TestResultsRepository.initialize_results_dir")
    return self.results_dir
def __init__(s):
    '''
    Open the test-results repository and the Ubuntu release database.
    Raises Exit (after logging the error) if the repository is unusable.
    '''
    center(s.__class__.__name__ + '.__init__')
    try:
        s.trr = TestResultsRepository(rc='test-results.rc')
        s.ubuntu = Ubuntu()
    except TestResultsRepositoryError as e:
        error(e.msg)
        cdebug("Leave Digest.initialize")
        cleave(s.__class__.__name__ + '.__init__')
        raise Exit()
    cleave(s.__class__.__name__ + '.__init__')
def wait_for_target(s, progress=None, timeout=30):
    '''
    Wait for the remote system to come up far enough that we can start
    talking (ssh) to it.

    progress : optional progress message to display before waiting.
    timeout  : number of *minutes* to keep trying before giving up.

    Raises ErrorExit when the timeout expires, and re-raises unexpected
    ShellErrors from the probe command.
    '''
    center('Base::wait_for_system_ex')
    if progress:
        s.progress(progress)

    start = datetime.utcnow()
    cinfo('Starting waiting for \'%s\' at %s' % (s.target, start))

    # Keep spinning until we either timeout or we get back some output from 'uname -vr'
    #
    while True:
        try:
            result, output = s.ssh('uname -vr', additional_ssh_options="-o BatchMode=yes -o LogLevel=quiet")
            if result == 0 and len(output) > 0:
                cdebug("exit result is 0")
                break
        except ShellError as e:
            if 'port 22: Connection refused' in e.output:
                # Just ignore this, we know that we can connect to the remote host
                # otherwise we wouldn't have been able to reboot it.
                #
                print("** Encountered 'Connection refused'")
            else:
                print("Something else bad happened")
                # Fix: label matched to center() above so trace nesting stays balanced.
                cleave('Base::wait_for_system_ex')
                raise

        now = datetime.utcnow()
        delta = now - start
        # Fix: timedelta.seconds wraps at one day; total_seconds() is the
        # real elapsed time, so long waits cannot silently never time out.
        if delta.total_seconds() > timeout * 60:
            cinfo('Timed out at: %s' % now)
            raise ErrorExit('The specified timeout (%d) was reached while waiting for the target system (%s) to come back up.' % (timeout, s.target))

        sleep(60)
        cinfo('Checking at: %s' % datetime.utcnow())
    cleave('Base::wait_for_system_ex')
def cdu(s, outlet):
    '''
    Open an interactive (pexpect) ssh session to the CDU/PDU controlling
    the given outlet of this target, and return the spawned session.
    '''
    center('PDU.cdu')
    retval = None
    # Older CDU firmware (everything but the trusty entry) only speaks
    # legacy key-exchange and host-key algorithms.
    if s.series in ['trusty']:
        ssh_options = '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=quiet'
    else:
        ssh_options = '-o KexAlgorithms=+diffie-hellman-group1-sha1 -o HostKeyAlgorithms=+ssh-dss -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=quiet'
    cmd = 'ssh -o MACs=hmac-sha1 %s enablement@%s' % (ssh_options, s.__systems[s.target][outlet])
    cdebug('cmd: %s' % cmd)
    retval = pexpect.spawn(cmd, timeout=60)
    # NOTE(review): the line below looks like a redaction artifact — the
    # '******' almost certainly replaced a password send step (e.g.
    # expect('Password: '); sendline(<password>); expect('Switched CDU: '))
    # and is not valid Python as written.  Restore from the original source.
    retval.expect('Password: '******'Switched CDU: ')
    cleave('PDU.cdu')
    return retval
def cycle_power(s):
    '''
    Has the smarts on how to go about turning the power off to a server
    remotely and then turning it back on. In some cases, multiple ports
    must be power cycled.
    '''
    cdebug('Enter MetalProvisioner::cycle_power')
    if not s.quiet:
        print('Cycling the power on \'%s\'' % (s.target))
    t = Configuration['systems'][s.target]
    if 'ipmi' in t:
        # IPMI capable system: drive the BMC directly with ipmitool.
        for state in ['off', 'on']:
            cmd = "ipmitool -H %s -I lanplus -U %s -P %s power %s" % (t['ipmi']['ip'], t['ipmi']['username'], t['ipmi']['passwd'], state)
            print(cmd)
            result, output = sh(cmd, ignore_result=False)
            if state == 'off':
                sleep(120)  # Some of the systems want a little delay
                            # between being powered off and then back on.
    else:
        # Power cycle the system so it will netboot and install
        #
        server = t['provisioning']['server']
        for state in ['off', 'on']:
            # A system can be fed by several PSUs; switch every one that has
            # a CDU address configured.
            for psu in t['cdu']:
                if psu['ip'] != '':
                    try:
                        ssh('%s@%s' % ('kernel', server), 'fence_cdu -a %s -l kernel -p K3rn3! -n %s -o %s' % (psu['ip'], psu['port'], state), quiet=True)
                    except ShellError as e:
                        # Sometimes the call to the orchestra server will time-out (not sure why), just
                        # wait a minute and try again.
                        #
                        sleep(120)
                        if not s.quiet:
                            print('    Initial power cycle attempt failed, trying a second time.')
                        ssh('%s@%s' % ('kernel', server), 'fence_cdu -a %s -l kernel -p K3rn3! -n %s -o %s' % (psu['ip'], psu['port'], state), quiet=True)
            if state == 'off':
                sleep(120)  # Some of the systems want a little delay
                            # between being powered off and then back on.
    cdebug('Leave MetalProvisioner::cycle_power')
def enable_snappy_client_live_kernel_patching(s):
    '''
    Install, wire up and enable the canonical-livepatch snap on the target,
    then verify the livepatch kernel module actually loaded.

    Raises ErrorExit if the snap cannot be installed, the livepatch key
    cannot be enabled, or the module never shows up in lsmod.
    '''
    center("Base::enable_snappy_client_live_kernel_patching")
    s.progress('Enabling Live Kernel Snap Client Patching')

    # teach snapd about the proxys
    s.ssh('cp /etc/environment /tmp/environment.tmp')
    s.ssh('\'echo http_proxy=\"http://squid.internal:3128\" >> /tmp/environment.tmp\'')
    s.ssh('\'echo https_proxy=\"http://squid.internal:3128\" >> /tmp/environment.tmp\'')
    s.ssh('sudo cp /tmp/environment.tmp /etc/environment')
    s.ssh('sudo service snapd restart')

    # grab livepatch client from snap store
    (result, output) = s.ssh('sudo snap install --beta canonical-livepatch')
    if result != 0:
        raise ErrorExit("Failed to install canonical-livepatch snap")

    # Ensure all interfaces are connected
    s.ssh('sudo snap connect canonical-livepatch:kernel-module-control ubuntu-core:kernel-module-control')
    s.ssh('sudo snap connect canonical-livepatch:hardware-observe ubuntu-core:hardware-observe')
    s.ssh('sudo snap connect canonical-livepatch:system-observe ubuntu-core:system-observe')
    s.ssh('sudo snap connect canonical-livepatch:network-control ubuntu-core:network-control')
    s.ssh('sudo service snap.canonical-livepatch.canonical-livepatchd restart')

    # Get the auth key and enable it
    key = Configuration['systems'][s.raw_target]['livepatch key']
    (result, output) = s.ssh('sudo canonical-livepatch enable %s' % key)
    if result != 0:
        raise ErrorExit("Failed to enable canonical-livepatch key for %s" % s.raw_target)

    # Give the daemon a minute to apply a patch, then confirm the module landed.
    sleep(60)
    (result, output) = s.ssh('sudo canonical-livepatch status --verbose')
    (result, output) = s.ssh('lsmod')
    found_module = False
    for l in output:
        cdebug(l)
        if 'livepatch' in l:
            found_module = True
            break
    # Fix: idiomatic truthiness test instead of '== False'.
    if not found_module:
        raise ErrorExit("Could not find livepatch kernel module")
    cleave("Base::enable_snappy_client_live_kernel_patching")
def __init__(s, target, series, arch):
    '''
    Build the provisioning backend for the given target.

    Every key of the target's provisioner configuration is copied onto this
    object as an attribute (s.type, s.server, s.creds, ...), then the
    matching backend object is instantiated as s.server: MAAS, JuJu, or
    None for anything else (bare-metal/cobbler provisioning).
    '''
    center("PS::__init__")
    sp = Configuration['systems'][target]['provisioner']
    p = Configuration[sp]
    for k in p:
        cdebug("++ %16s : %s" % (k, p[k]))
        setattr(s, k, p[k])
    if s.type == "maas":
        # Some systems/arches require a non-'generic' sub-arch. That information is specified
        # in the configuration information.
        #
        sub_arch = Configuration['systems'][target].get('sub-arch', 'generic')
        # Note: s.server is replaced here — it was the server *name* from the
        # config, and becomes the MAAS backend object.
        s.server = MAAS(s.server, s.creds, target, p['domain'], series, arch, flavour=sub_arch, api=s.api)
    elif s.type == "juju":
        domain = Configuration['systems'][target]['domain']
        cloud = Configuration['systems'][target]['cloud']
        s.server = JuJu(cloud, target, series, domain=domain)
    else:
        s.server = None
    cleave("PS::__init__")
def create(s, instance_name, series, region='us-west1', instance_type='unknown'):
    '''
    Create a cloud instance running the given series and wait until it is
    reachable over ssh.  Returns 0 on success, 1 when the create command
    failed.

    NOTE(review): instance_type is currently accepted but unused.
    '''
    center(s.__class__.__name__ + '.create')
    cdebug('    instance_name: %s' % instance_name)
    cdebug('           series: %s' % series)
    cdebug('           region: %s' % region)
    retval = 0
    s.instance_name = instance_name
    s.series = series
    # r = '-'.join(region.split('-')[0:2])
    # images = CloudImages(s.cloud, series=series, region=r).images
    try:
        cmd = 'instances create %s --zone %s --network "default" --no-restart-on-failure --image-project ubuntu-os-cloud --image %s' % (s.instance_name, region, s.images[series])
        result, response = s.sh(cmd)
        # The status line that starts with the instance name carries the
        # external address in its fifth column.
        for l in response:
            if l.startswith(s.instance_name):
                fields = l.split()
                s.target = fields[4]
        if s.target is not None:
            s.wait_for_target()
    except ShellError as e:
        retval = 1
        for l in e.output:
            # Fix: the original called l.strip() and threw the result away,
            # then printed the un-stripped line.
            print(l.strip())
    cleave(s.__class__.__name__ + '.create')
    return retval
def install_specific_kernel_version(s):
    '''
    Install the exact kernel version this run was asked for (s.kernel) and
    purge every other versioned kernel package so the requested one boots.

    Raises ErrorExit when the deb URLs for the requested version cannot be
    found, or when s.kernel does not look like a kernel version at all.
    '''
    center("Base::install_specific_kernel_version")
    s.progress('Installing Specific Kernel Version')
    kd = KernelDebs(s.kernel, s.series, s.arch, s.flavour)
    urls = kd.get_urls()
    if urls:
        for url in urls:
            cdebug(url, 'magenta')
            # Pull them down
            #
            s.ssh('wget -r -A .deb -e robots=off -nv -l1 --no-directories %s' % url, quiet=True)

        # Install them
        #
        s.ssh('sudo dpkg -i *.deb', ignore_result=True)

        # Remove all other kernels
        #
        purge_list = []
        # Fix: raw string with escaped dots — the old pattern let '.' match
        # any character.  Also guard against a non-matching version string,
        # which previously crashed with AttributeError on m.group().
        m = re.search(r'(\d+\.\d+\.\d+-\d+).*', s.kernel)
        if m is None:
            raise ErrorExit('Unable to parse the specified kernel version (%s)' % s.kernel)
        target = m.group(1)
        cdebug('target: %s' % target, 'cyan')
        result, output = s.ssh('dpkg -l \\\'linux-*\\\'', quiet=True)
        for l in output:
            if not l.startswith('ii'):
                continue
            if 'linux-headers' not in l and 'linux-image' not in l:
                continue  # Only interested in kernel packages
            if target in l:
                continue  # Ignore lines that contain the kernel version we are specifically targeting
            info = l.split()
            package = info[1]
            # Versioned packages only — skip metapackages such as 'linux-image-generic'.
            if any(char.isdigit() for char in package):
                purge_list.append(package)

        cdebug('Kernel packages to be purged:')
        for p in purge_list:
            cdebug('    %s' % p)
        for p in purge_list:
            s.ssh('sudo apt-get purge --yes %s' % p, quiet=True)
        s.ssh('sudo update-grub', quiet=True)
    else:
        # Fix: corrected 'spcified' typo in the error message.
        raise ErrorExit('Failed to get the urls for the specified kernel version (%s)' % s.kernel)
    cleave("Base::install_specific_kernel_version")
def verify_hwe_target(s):
    '''
    Confirm that the kernel running on the target belongs to the HWE series
    we asked for (s.hwe_series).  Returns True/False.
    '''
    center('Target::verify_hwe_target')
    retval = True
    s.progress('Verifying HWE install')

    # Are we running the series correct kernel?
    #
    cdebug('Verifying hwe kernel:')
    if retval:
        retval = False
        kv = None
        result, kernel = s.ssh(r'uname -vr', additional_ssh_options="-o LogLevel=quiet")
        for line in kernel:
            line = line.strip()
            cdebug('uname -vr : ' + line)
            # Fix: 'aded' typo — the ssh host-key warning line was never
            # actually skipped (compare the correctly spelled check in
            # verify_target).
            if 'Warning: Permanently added' in line:
                continue
            if line == '':
                continue
            # HWE kernels carry a '#<build>~<series>' ABI marker in uname.
            m = re.search(r'(\d+\.\d+\.\d+)-\d+-.* #(\d+)~\S+-Ubuntu.*', line)
            if m:
                kv = m.group(1)
                cdebug('kernel version : ' + kv)
        if kv is not None:
            installed_series = Ubuntu().lookup(kv)['name']
            if installed_series == s.hwe_series:
                retval = True
            else:
                error("")
                error("*** ERROR:")
                # Fix: report the series we actually compared against
                # (s.hwe_series); the old message printed s.series.
                error("    Was expecting the target to be (%s) but found it to be (%s) instead." % (s.hwe_series, installed_series))
                error("")
        else:
            error("")
            error("*** ERROR:")
            error("    Unable to find the kernel version in any line.")
            error("")
            for line in kernel:
                line = line.strip()
                error("    line: %s" % line)
    cleave('Target::verify_hwe_target (%s)' % retval)
    return retval
def sh(cmd, timeout=None, ignore_result=False, quiet=False):
    '''
    Run a shell command, collecting combined stdout/stderr via a daemon
    reader thread so this thread never blocks on the pipe.

    cmd           : command string handed to the shell (shell=True).
    timeout       : optional seconds to wait for the reader thread; on
                    expiry the process is terminated and ShellTimeoutError
                    is raised.
    ignore_result : when False a non-zero exit status raises ShellError.
    quiet         : handed to enqueue_output to suppress echoing.

    Returns (returncode, list of decoded output lines).
    '''
    center("sh")
    cdebug('           cmd : \'%s\'' % cmd)
    cdebug('         quiet : %s' % quiet)
    cdebug(' ignore_result : %s' % ignore_result)
    out = []
    # The reader thread (enqueue_output) pushes raw lines from the pipe onto
    # the queue; we drain it with non-blocking gets below.
    p = Popen(cmd, stdout=PIPE, stderr=STDOUT, bufsize=1, shell=True)
    q = Queue()
    t = Thread(target=enqueue_output, args=(p.stdout, q, quiet))
    t.daemon = True  # thread dies with the program
    t.start()

    if timeout is not None:
        t.join(timeout)
        if t.is_alive():
            p.terminate()
            cleave("sh")
            raise ShellTimeoutError(cmd, timeout)

    # Drain the queue once a second while the process is still running ...
    while p.poll() is None:
        # read line without blocking
        try:
            line = q.get_nowait()
        except Empty:
            pass
        else:  # got line
            out.append(line.decode('utf-8'))
        sleep(1)

    # ... then pick up anything queued between the last poll and exit.
    while True:
        try:
            line = q.get_nowait()
        except Empty:
            break
        else:  # got line
            out.append(line.decode('utf-8'))

    if not ignore_result:
        if p.returncode != 0:
            cleave("sh")
            raise ShellError(cmd, p.returncode, out)
    cleave("sh")
    return p.returncode, out
def install_xen(s):
    '''
    Configure the remote system as a xen host.
    '''
    center("    Enter Base::install_xen")
    s.progress('Installing Xen Kernel')
    if s.series == 'lucid':
        # Lucid is not supported for xen testing at all.
        cdebug("Can't do lucid")
    elif s.series == 'precise':
        cdebug("Doing it the hard way")
        # Do it the hard way
        #
        s.ssh('sudo apt-get update', ignore_result=True)
        s.ssh('sudo apt-get install --yes xen-hypervisor', ignore_result=True)
        # Make the Xen hypervisor the default grub entry, disable apparmor,
        # and pin dom0 memory/vcpus.  The heavy escaping is because each sed
        # expression travels through ssh's remote shell.
        s.ssh(r'sudo sed -i \'s/GRUB_DEFAULT=.*\\+/GRUB_DEFAULT=\"Xen 4.1-amd64\"/\' /etc/default/grub')
        s.ssh(r'sudo sed -i \'s/GRUB_CMDLINE_LINUX=.*\\+/GRUB_CMDLINE_LINUX=\"apparmor=0\"/\' /etc/default/grub')
        s.ssh(r'sudo sed -i \'s/GRUB_CMDLINE_LINUX_DEFAULT=\"\"/GRUB_CMDLINE_LINUX_DEFAULT=\"\"\\nGRUB_CMDLINE_XEN=\""dom0_mem=1G,max:1G dom0_max_vcpus=1\""/\' /etc/default/grub')
        s.ssh('sudo update-grub')
    else:
        cdebug("Doing it the easy way")
        # Later series: the hypervisor package sets up grub itself.
        s.ssh('sudo apt-get update', ignore_result=True)
        s.ssh('sudo apt-get install --yes xen-hypervisor-amd64', ignore_result=True)
    cleave("    Leave Base::install_xen")
def verify_target(s):
    '''
    Confirm that the target system has installed what was supposed to be
    installed. If we asked for one series but another is on the system, fail.

    Three checks run in sequence, each gated on the previous one passing:
    the distro series (lsb_release), the architecture (uname -p) and the
    kernel version (uname -vr) against s.series, s.arch and
    s.required_kernel_version.  Returns True only if all checks pass.
    '''
    center('Target::verify_target')
    retval = False
    s.progress('Verifying base install')

    cdebug('Verifying series:')
    result, codename = s.ssh(r'lsb_release --codename')
    for line in codename:
        line = line.strip()
        if line.startswith('Codename:'):
            cdebug('lsb_release --codename : ' + line)
            print('      series: ' + line.replace('Codename:', '').strip())
            if s.series not in line:
                error("")
                error("*** ERROR:")
                error("    Was expecting the target to be (%s) but found it to be (%s) instead." % (s.series, line.replace('Codename:\t', '')))
                error("")
            else:
                retval = True

    # Verify we installed the arch we intended to install
    #
    cdebug('Verifying arch:')
    if retval:
        retval = False
        installed_arch = 'unknown'
        result, processor = s.ssh(r'uname -p')
        for line in processor:
            line = line.strip()
            cdebug('uname -p : ' + line)
            # Fix: 'aded' typo — the host-key warning line was never skipped.
            if 'Warning: Permanently added' in line:
                continue
            if line == '':
                continue
            # Map `uname -p` processor names onto Debian arch names.
            if line == 'x86_64':
                installed_arch = 'amd64'
            elif line == 'i686':
                installed_arch = 'i386'
            elif line == 'athlon':
                installed_arch = 'i386'
            elif line == 'aarch64':
                installed_arch = 'arm64'
            else:
                installed_arch = line
        print('        arch: ' + installed_arch)
        if s.arch == installed_arch:
            retval = True
        # Special case for Power8 (ppc64el)
        #
        elif s.arch == 'ppc64el' and installed_arch == 'ppc64le':
            retval = True
        else:
            error("")
            error("*** ERROR:")
            error("    Was expecting the target to be (%s) but found it to be (%s) instead." % (s.arch, installed_arch))
            error("")

    # Are we running the series correct kernel?
    #
    cdebug('Verifying kernel:')
    if retval:
        retval = False
        kv = None
        # Fix: initialise installed_kernel so a non-matching 'uname -vr'
        # cannot raise NameError at the required_kernel_version check below.
        installed_kernel = None
        result, kernel = s.ssh(r'uname -vr', additional_ssh_options="-o LogLevel=quiet")
        for line in kernel:
            line = line.strip()
            print('uname -vr : ' + line)
            if 'Warning: Permanently added' in line:
                continue
            if line == '':
                continue
            # Fix: raw strings with escaped dots for both patterns.
            m = re.search(r'(\d+\.\d+\.\d+)-\d+-.* #(\d+[~a-z\d.]*)-Ubuntu.*', line)
            if m:
                kv = m.group(1)
                cdebug('kernel version : ' + kv)
            m = re.search(r'(\d+\.\d+\.\d+-\d+)-.* #(\d+[~a-z\d.]*)-Ubuntu.*', line)
            if m:
                installed_kernel = '%s.%s' % (m.group(1), m.group(2))
                cdebug('installed kernel version : ' + installed_kernel)
                print('      kernel: ' + installed_kernel)
                retval = True
        if kv is not None:
            installed_series = Ubuntu().lookup(kv)['name']
            if installed_series == s.series:
                retval = True
            else:
                error("")
                error("*** ERROR:")
                error("    Was expecting the target to be (%s) but found it to be (%s) instead." % (s.series, installed_series))
                error("")
        else:
            error("")
            error("*** ERROR:")
            error("    Unable to find the kernel version in any line.")
            error("")

        if s.required_kernel_version is not None:
            if installed_kernel == s.required_kernel_version:
                retval = True
            else:
                retval = False
                error("")
                error("*** ERROR:")
                error("    Was expecting the target kernel version to be (%s) but found it to be (%s) instead." % (s.required_kernel_version, installed_kernel))
                error("")
        else:
            error("")
            error("*** ERROR:")
            error("    Required kernel version is None")
            error("")

    cleave('Target::verify_target (%s)' % retval)
    return retval
def ssh(cls, target, cmd, user, additional_ssh_options='', quiet=False, ignore_result=False):
    '''
    Run a command on a remote target via ssh, logging everything via cdebug.

    target : host (or ssh alias) to connect to.
    cmd    : the command line to run remotely.
    user   : remote user; when falsy the bare target is used.
    additional_ssh_options : appended to the class-wide ssh options.
    quiet / ignore_result  : passed straight through to sh().

    Returns (result, output) from sh().  NOTE: when sh() raises ShellError
    it is logged and the defaults (0, '') are returned, so callers that
    care about failures should pass ignore_result accordingly.
    '''
    center("Enter Shell::ssh")
    cdebug('                 target : \'%s\'' % target)
    cdebug('                    cmd : \'%s\'' % cmd)
    cdebug('                   user : \'%s\'' % user)
    cdebug(' additional_ssh_options : \'%s\'' % additional_ssh_options)
    cdebug('                  quiet : %s' % quiet)
    cdebug('          ignore_result : %s' % ignore_result)
    ssh_options = cls.ssh_options + ' ' + additional_ssh_options
    if user:
        ssh_cmd = 'ssh %s %s@%s %s' % (ssh_options, user, target, cmd)
    else:
        ssh_cmd = 'ssh %s %s %s' % (ssh_options, target, cmd)
    result = 0
    output = ''
    cdebug("    ssh_cmd : '%s'" % ssh_cmd)
    if cls._dry_run:
        cdebug('[dry-run] %s' % (ssh_cmd))
    else:
        try:
            result, output = sh(ssh_cmd, quiet=quiet, ignore_result=ignore_result)
            if type(output) is list and len(output) > 0:
                for l in output:
                    if '\n' in l:
                        for o in l.split('\n'):
                            # Fix: log each split segment ('o'); the original
                            # logged the whole un-split line once per segment.
                            cdebug(o.rstrip())
                    else:
                        cdebug(l.rstrip())
            else:
                cdebug(output)
        except ShellError as e:
            cdebug('---------------------------------------------------------------------------------------------------------', 'blue')
            cdebug('ShellError Raised', 'red')
            for l in e.output:
                cdebug(l.rstrip())
            cdebug('---------------------------------------------------------------------------------------------------------', 'blue')
    cleave("Shell::ssh")
    return result, output
def per_suite_results(s, data, who='sru'):
    # Take the cumulative results dictionary and build a test-suite focused dictionary from
    # it.
    #
    # Input shape : data[who][series][kver][flavour] -> list of test runs.
    # Output shape: retval[series][kver][flavour][arch][suite] ->
    #               {'link', 'failed', 'run', 'skipped'}, where counts from
    #               multiple runs of the same suite/arch are accumulated.
    center(s.__class__.__name__ + '.per_suite_results')
    cdebug('--------------------------------------------------')
    retval = {}  # suite results
    cdebug('Building test-suite focused dictionary')
    try:
        for series in data[who]:
            cdebug('    series: %s' % series)
            if series not in retval:
                retval[series] = {}
            for kver in data[who][series]:
                cdebug('        kver: %s' % kver)
                for flavour in data[who][series][kver]:
                    cdebug('            flavour: %s' % flavour)
                    if kver not in retval[series]:
                        retval[series][kver] = {}
                    if flavour not in retval[series][kver]:
                        retval[series][kver][flavour] = {}
                    for test_run in data[who][series][kver][flavour]:
                        attributes = test_run['attributes']

                        # decode the arch
                        #
                        arch = s.arch_from_proc(attributes['platform']['proc'])
                        cdebug('            arch: %s' % arch)
                        if arch not in retval[series][kver][flavour]:
                            retval[series][kver][flavour][arch] = {}

                        results = test_run['results']
                        for k in results['suites']:
                            suite = k['name'].replace('autotest.', '')
                            # Relative link to the per-suite results page;
                            # non-'sru' owners get their name in the path.
                            if who == 'sru':
                                link = '%s-%s/%s-%s-%s-index.html' % (kver, flavour, suite, kver, arch)
                            else:
                                link = '%s-%s/%s-%s-%s-%s-index.html' % (kver, flavour, who, suite, kver, arch)
                            if suite not in retval[series][kver][flavour][arch]:
                                # First sighting of this suite on this arch.
                                if suite == 'qrt_apparmor':
                                    cdebug("                %s : new (%s); ran: %d; failed: %d" % (suite, arch, k['tests run'], k['tests failed']), 'green')
                                retval[series][kver][flavour][arch][suite] = {}
                                retval[series][kver][flavour][arch][suite]['link'] = link
                                retval[series][kver][flavour][arch][suite]['failed'] = k['tests failed']
                                retval[series][kver][flavour][arch][suite]['run'] = k['tests run']
                                retval[series][kver][flavour][arch][suite]['skipped'] = k['tests skipped']
                            else:
                                # Seen before: accumulate counters (the link
                                # from the first run is kept).
                                if suite == 'qrt_apparmor':
                                    cdebug("                %s : adding (%s); ran: %d; failed: %d" % (suite, arch, k['tests run'], k['tests failed']), 'green')
                                retval[series][kver][flavour][arch][suite]['failed'] += k['tests failed']
                                retval[series][kver][flavour][arch][suite]['run'] += k['tests run']
                                retval[series][kver][flavour][arch][suite]['skipped'] += k['tests skipped']
    except KeyError:
        # NOTE(review): any KeyError anywhere in the walk silently ends the
        # whole build — confirm this best-effort behaviour is intended.
        pass
    cleave(s.__class__.__name__ + '.per_suite_results')
    return retval
def collect_results(s):
    '''
    Build a dictionary of all of the test results.

    dict:
        who:
            series:
                kernel-version:
                    flavour: [test results]
    '''
    center(s.__class__.__name__ + '.collect_results')
    data = {}
    for tr in s.trr.test_runs:
        try:
            cdebug('tr: %s' % tr)
            results = s.trr.results(tr)
        except TestResultsRepositoryError as e:
            error(e.msg)
            continue

        cdebug("Processing: %s" % (tr), 'green')
        kernel_version = results['attributes']['kernel']
        try:
            flavour = results['attributes']['kernel-flavour']
        except KeyError:
            cdebug("no kernel-flavour", 'cyan')
            flavour = 'generic'
        series = s.series(results)
        cdebug("     kernel: %s" % (kernel_version))
        # Fix: corrected 'falvour' typo in both debug messages.
        if flavour == 'lowlatency':
            cdebug("    flavour: %s --------------------------------------------------------------------------------------" % (flavour))
        else:
            cdebug("    flavour: %s" % (flavour))
        cdebug("     series: %s" % (series))

        # If the kernel used was a "custom" kernel, one that someone on the kernel
        # team built themselves or a mainline build kernel, we want that information
        #
        job = results['attributes']['environ']['JOB_NAME']
        who = None
        if '_for_' in job:
            (junk, who) = job.split('_for_')
            cdebug("        who: %s" % (who))
        else:
            who = 'sru'  # 'sru' is just a general 'catch all' this lumps everything
                         # that wasn't done by someone specifically into a single
                         # bucket.
        if who == 'kernel':
            who = 'sru'
        results['attributes']['series'] = series

        # "Fixup" the timestamp to be what we want it to look like on the web page
        #
        ts = string_to_date(results['attributes']['timestamp'])
        results['attributes']['timestamp'] = ts.strftime("%Y-%m-%d %H:%M")

        # The primary key is who.
        #
        if who not in data:
            data[who] = {}
            cdebug('        who first seen (%s)' % who, 'white')

        # The second key is the series.
        #
        if series not in data[who]:
            data[who][series] = {}
            cdebug('        series first seen (%s)' % series, 'white')

        # The third key for 'data' is the kernel version.
        #
        if kernel_version not in data[who][series]:
            data[who][series][kernel_version] = {}
            cdebug('        version (%s) first seen (%s %s)' % (kernel_version, who, series), 'white')

        # The fourth key for 'data' is the flavour
        #
        if flavour not in data[who][series][kernel_version]:
            data[who][series][kernel_version][flavour] = []
            cdebug('        flavour first seen (%s)' % flavour, 'white')

        data[who][series][kernel_version][flavour].append(results)
    cleave(s.__class__.__name__ + '.collect_results')
    return data
def e2d(self, node):
    '''
    Recursively convert a DOM element's children into a dictionary.

    Non-empty text children become {'text': [lines]} (replacing anything
    built so far); element children are grouped by tag name into lists of
    recursively converted dicts, with each child's XML attributes merged
    into its dict.  Returns None for a node with no children.
    '''
    center("x2dict.e2d")
    retval = None
    child = node.firstChild
    if not child:
        cdebug('No child nodes\n')
        cleave("x2dict.e2d")
        return None
    retval = {}
    text = ''
    while child is not None:
        if child.nodeType == Node.TEXT_NODE:
            cdebug('nodeType: Node.TEXT_NODE\n')
            cdebug('data: \'%s\'\n' % child.data.strip())
            text = child.data.strip()
            if text != '':
                retval = {'text': text.split('\n')}
        elif child.nodeType == Node.ELEMENT_NODE:
            cdebug('tagName: %s\n' % child.tagName)
            cdebug('nodeType: Node.ELEMENT_NODE\n')
            if child.tagName not in retval:
                cdebug('Creating retval[%s] list.\n' % (child.tagName))
                retval[child.tagName] = []
            neo = self.e2d(child)
            # Fix: hasAttributes is a *method*; the bare attribute reference
            # was always truthy, so this guard never actually checked for
            # attributes.  (Side effect of the bug: childless, attribute-less
            # elements were normalised from None to {} — with the fix they
            # are appended as None, which matches the intended guard.)
            if child.hasAttributes():
                if neo is None:
                    neo = {}
                for a in child.attributes.keys():
                    cdebug("attributes[%s] = %s\n" % (a, child.attributes[a].value))
                    neo[a] = child.attributes[a].value
            retval[child.tagName].append(neo)
        child = child.nextSibling
    cleave("x2dict.e2d")
    return retval
def provision(s):
    '''
    Provision the target: push the Cobbler profile and system records to the
    orchestra server, then power cycle the box so it netboots and installs.
    '''
    cdebug('    Enter Cobbler::provision')
    s.configure_orchestra()
    s.cycle_power()
    cdebug('    Leave Cobbler::provision')