def get_bundle(results_dir, testdef_objs, err_log):
    """Iterate through a results directory to build a bundle formatted
    for the LAVA dashboard.

    :param results_dir: directory containing one subdirectory per test
        run plus the special 'hwcontext' and 'swcontext' directories.
    :param testdef_objs: test definition objects, used downstream to
        attach software sources to each run.
    :param err_log: path of the log file used to record parse errors.
    :return: dict in 'Dashboard Bundle Format 1.7'.
    """
    testruns = []
    cpuinfo = read_content(os.path.join(results_dir, 'hwcontext/cpuinfo.txt'),
                           ignore_missing=True)
    meminfo = read_content(os.path.join(results_dir, 'hwcontext/meminfo.txt'),
                           ignore_missing=True)
    hwctx = _get_hw_context(cpuinfo, meminfo)
    build = read_content(os.path.join(results_dir, 'swcontext/build.txt'),
                         ignore_missing=True)
    pkginfo = read_content(os.path.join(results_dir, 'swcontext/pkgs.txt'),
                           ignore_missing=True)
    for test_run_name, test_run_path in _directory_names_and_paths(
            results_dir):
        if test_run_name in ('hwcontext', 'swcontext'):
            continue
        if os.path.isdir(test_run_path):
            try:
                testruns.append(
                    _get_test_run(test_run_path, hwctx, build, pkginfo,
                                  testdef_objs, err_log))
            except KeyboardInterrupt:
                # Bare `raise` preserves the original traceback.
                raise
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed
                # SystemExit/GeneratorExit. One broken run must not stop
                # the rest of the bundle from being built; log and go on.
                logging.exception('error processing results for: %s',
                                  test_run_name)
    return {'test_runs': testruns, 'format': 'Dashboard Bundle Format 1.7'}
def _get_run_testdef_metadata(test_run_dir):
    """Load the testdef metadata for a run, merging any extra metadata.

    Reads <test_run_dir>/testdef_metadata (YAML) and, when present,
    <test_run_dir>/extra_metadata; the latter's keys override the former.
    All metadata ends up in the 'testdef_metadata' field of the bundle.

    :return: dict of metadata; fields default to None when no metadata
        file content was found.
    """
    testdef_metadata = {
        'version': None,
        'description': None,
        'format': None,
        'location': None,
        'url': None,
        'os': None,
        'devices': None,
        'environment': None
    }
    metadata = read_content(os.path.join(test_run_dir, 'testdef_metadata'))
    # Fixed: was `metadata is not ''` — an identity check against a string
    # literal, which only works by accident of CPython interning.
    if metadata != '':
        testdef_metadata = yaml.safe_load(metadata)
    # Read extra metadata, if any. All metadata gets into testdef_metadata
    # in the bundle.
    extra_metadata_path = os.path.join(test_run_dir, 'extra_metadata')
    if os.path.exists(extra_metadata_path):
        extra_metadata = read_content(extra_metadata_path)
        if extra_metadata != '':
            testdef_metadata.update(yaml.safe_load(extra_metadata))
    return testdef_metadata
def _get_test_run_host(test_run_dir, testdef_objs, err_log, real_test_name=None): from uuid import uuid4 # now = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ') now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') logging.info("_get_test_run_host: test_run_dir %s" % test_run_dir) test_yaml_path = "/tmp/lava/tests/0_%s" % testdef_objs[0].testdef[ 'metadata']['name'] testdef = read_content(os.path.join(test_yaml_path, 'testdef.yaml')) testdef = yaml.safe_load(testdef) stdout = read_content(os.path.join(test_run_dir, 'log')) test_id = real_test_name if test_id is None: test_id = testdef.get('metadata').get('name') return { 'test_id': test_id, 'analyzer_assigned_date': now, 'analyzer_assigned_uuid': str(uuid4()), 'time_check_performed': False, 'test_results': _get_test_results_host(test_run_dir, testdef, stdout, err_log) }
def postprocess_test_result(self, test_result, case_data):
    """Run the 'postprocess_test_result' hook for a test case and attach
    the recorded hook outputs to the result.

    :param test_result: result dict; its 'attachments' list is extended
        with one '<key>.txt' attachment per non-empty output file.
    :param case_data: per-case dict holding 'case_dir' and the paths of
        the start/end/postprocess output files (may be absent or None).
    """
    with self._result_as_dir(test_result) as result_dir:
        case_data['postprocess_test_result_output'] = self._invoke_hook(
            'postprocess_test_result', case_data['case_dir'], [result_dir])
    for key in 'start_testcase_output', 'end_testcase_output', \
            'postprocess_test_result_output':
        path = case_data.get(key)
        if path is None:
            continue
        content = read_content(path, ignore_missing=True)
        if content:
            # Fixed: reuse the content already read instead of reading
            # the file a second time — the second read dropped
            # ignore_missing and could raise (or race) if the file
            # disappeared between the two calls.
            test_result['attachments'].append(
                create_attachment(key + '.txt', content))
def start_vms(self):
    """On the vm-group host, prepare SSH access and signal guest VMs.

    Injects the LAVA public key into the target's authorized_keys,
    exports the host IP for guests, and (when auto_start_vms is set)
    broadcasts a 'lava_vm_start' message carrying that IP.

    No-op on non-host members. Sets self.vms_started on success.

    :raises CriticalError: when the host IP cannot be obtained or the
        lava_vm_start broadcast is nacked.
    """
    if not self.is_host:
        return
    runner = NetworkCommandRunner(
        self.client,
        self.client.target_device.tester_ps1_pattern,
        self.client.target_device.tester_ps1_includes_rc
    )
    logging.debug("vm-group host: injecting SSH public key")
    public_key_file = os.path.join(
        os.path.dirname(__file__), '../device/dynamic_vm_keys/lava.pub')
    public_key = read_content(public_key_file).strip()
    runner.run('mkdir -p /root/.ssh && echo "%s" >> /root/.ssh/authorized_keys' % public_key)
    logging.debug("vm-group host: obtaining host IP for guest VM.")
    try:
        host_ip = runner.get_target_ip()
    except NetworkError as e:
        # Fixed: the original format string had no %s placeholder, so the
        # '%' itself raised a TypeError and masked the real NetworkError.
        raise CriticalError("Failed to get network up: %s" % e)
    runner.run('export _LAVA_VM_GROUP_HOST_IP=%s' % host_ip)
    if self.auto_start_vms:
        # send a message to each guest
        msg = {"request": "lava_send", "messageID": "lava_vm_start",
               "message": {"host_ip": host_ip}}
        reply = self.client.context.transport(json.dumps(msg))
        if reply == "nack":
            raise CriticalError("lava_vm_start failed")
        logging.info("[ACTION-B] LAVA VM start, using %s" % host_ip)
    self.vms_started = True
def _get_test_results(self):
    """Scan the command log for result lines and collect them.

    Uses the configured parser pattern and fixup dict when available,
    otherwise a default '<id> : <RESULT>' pattern with no fixups.
    Appends each parsed result (with log line number and file name) to
    self._results_from_log_file and returns that list.
    """
    defpat = "(?P<test_case_id>.*-*)\\s+:\\s+(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))"
    if self._parser is None:
        fixups = {}
        pattern = re.compile(defpat)
        logging.warning("""Using a default pattern to parse the test result. This may lead to empty test result in certain cases.""")
    else:
        fixups = self._read_fixupdict()
        pattern = re.compile(self._parser)
    log_text = read_content(self._logfile)
    log_name = os.path.basename(self._logfile)
    for lineno, raw_line in enumerate(log_text.split('\n'), 1):
        stripped = raw_line.strip()
        match = pattern.match(stripped)
        if not match:
            continue
        res = match.groupdict()
        if 'result' in res:
            # Normalize via the fixup dict, then reject anything that is
            # still not a canonical result value.
            res['result'] = fixups.get(res['result'], res['result'])
            if res['result'] not in ('pass', 'fail', 'skip', 'unknown'):
                logging.error('bad test result line: %s' % stripped)
                continue
        res['log_lineno'] = lineno
        res['log_filename'] = log_name
        self._results_from_log_file.append(res)
    return self._results_from_log_file
def _attributes_from_dir(from_dir):
    """Read every regular file directly under *from_dir* into a
    {filename: file content} dict; missing directories yield {}."""
    return dict(
        (name, read_content(path))
        for name, path in _directory_names_and_paths(from_dir,
                                                     ignore_missing=True)
        if os.path.isfile(path))
def _attachments_from_dir(from_dir):
    """Recursively turn every file under *from_dir* into an attachment.

    The MIME type comes from a '<file>.mimetype' sidecar when present,
    then from the filename extension, then falls back to
    'application/octet-stream'. Attachment names are relative to
    *from_dir*. Returns [] when *from_dir* is falsy.
    """
    attachments = []
    if not from_dir:
        return attachments
    for dirpath, dirnames, filenames in os.walk(from_dir):
        for fname in filenames:
            # '.mimetype' sidecars describe their sibling file; they are
            # consumed below, never attached themselves.
            if fname.endswith('.mimetype'):
                continue
            filepath = os.path.join(dirpath, fname)
            mime_type = read_content(filepath + '.mimetype',
                                     ignore_missing=True).strip()
            if not mime_type:
                guessed = mimetypes.guess_type(filepath)[0]
                mime_type = guessed if guessed is not None \
                    else 'application/octet-stream'
            rel_name = filepath[len(from_dir) + 1:]
            attachments.append(
                create_attachment(rel_name, read_content(filepath),
                                  mime_type))
    return attachments
def _get_run_attachments(test_run_dir, testdef, stdout):
    """Collect the standard attachments for one test run: stdout, the
    raw testdef, the run's return code (when recorded) and everything
    under <test_run_dir>/attachments."""
    collected = [create_attachment('stdout.log', stdout),
                 create_attachment('testdef.yaml', testdef)]
    rc = read_content(os.path.join(test_run_dir, 'return_code'),
                      ignore_missing=True)
    if rc:
        collected.append(create_attachment('return_code', rc))
    collected += _attachments_from_dir(
        os.path.join(test_run_dir, 'attachments'))
    return collected
def _get_test_runs(self):
    """Assemble the dashboard test-run dict for a lava-command run,
    attaching the full command log and the results parsed from it."""
    stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    log_name = os.path.basename(self._logfile)
    attachment = [
        create_attachment(log_name, read_content(self._logfile))
    ]
    results = self._get_test_results()
    return {
        'test_id': 'lava-command',
        'analyzer_assigned_date': stamp,
        'analyzer_assigned_uuid': str(uuid4()),
        'time_check_performed': False,
        'test_results': results,
        'attachments': attachment
    }
def _bundle_results(self, target, signal_director, testdef_objs):
    """Pull the results from the target device and build a bundle.

    Mounts the target's results partition, builds the bundle from the
    'results' directory, archives that directory for post-mortem use,
    then post-processes, saves and pretty-prints the bundle. If the
    device's filesystem cannot be accessed at all, falls back to the
    backup bundle parsed from serial output.

    :raises GeneralError: when a parse error was recorded while building
        the bundle (raised only after the bundle has been saved).
    """
    results_part = target.deployment_data['lava_test_results_part_attr']
    results_part = getattr(target.config, results_part)
    rdir = self.context.host_result_dir
    parse_err_msg = None
    # Stays True until the target filesystem was successfully entered;
    # distinguishes "device unreachable" (use backup bundle) from errors
    # that happen after access succeeded (re-raised below).
    filesystem_access_failure = True
    try:
        with target.file_system(results_part, target.lava_test_results_dir) as d:
            filesystem_access_failure = False
            err_log = os.path.join(d, 'parse_err.log')
            results_dir = os.path.join(d, 'results')
            bundle = lava_test_shell.get_bundle(results_dir, testdef_objs, err_log)
            # Capture any parse error before deleting the log file; it is
            # raised at the very end so the bundle is still saved.
            parse_err_msg = utils.read_content(err_log, ignore_missing=True)
            if os.path.isfile(err_log):
                os.unlink(err_log)
            # lava/results must be empty, but we keep a copy named
            # lava/results-XXXXXXXXXX for post-mortem analysis
            timestamp = datetime.now().strftime("%s")
            os.rename(results_dir, results_dir + '-' + timestamp)
            utils.ensure_directory(results_dir)
    except Exception as e:
        if filesystem_access_failure:
            # a failure when accessing the filesystem means the device
            # probably crashed. We use the backup bundle then.
            bundle = self._backup_bundle
            logging.warning(
                """Error extracting test results from device: %s""" % e)
            logging.warning(
                """This may mean that the device under test crashed. """
                """We will use test results parsed from the serial """
                """output as a backup, but note that some test """
                """artifacts (such as attachments and """
                """hardware/software contexts) will not be available""")
        else:
            raise e
    signal_director.postprocess_bundle(bundle)
    # Persist the bundle atomically-ish via mkstemp in the host result dir.
    (fd, name) = tempfile.mkstemp(
        prefix='lava-test-shell', suffix='.bundle', dir=rdir)
    with os.fdopen(fd, 'w') as f:
        DocumentIO.dump(f, bundle)
    printer = PrettyPrinter(self.context)
    printer.print_results(bundle)
    if parse_err_msg:
        raise GeneralError(parse_err_msg)
def _get_run_attachments(test_run_dir, testdef, stdout):
    """Build the attachment list for a single test run (stdout, raw
    testdef, optional return code, plus the attachments directory)."""
    result = []
    result.append(create_attachment('stdout.log', stdout))
    result.append(create_attachment('testdef.yaml', testdef))
    rc_text = read_content(os.path.join(test_run_dir, 'return_code'),
                           ignore_missing=True)
    if rc_text:
        result.append(create_attachment('return_code', rc_text))
    for extra in _attachments_from_dir(
            os.path.join(test_run_dir, 'attachments')):
        result.append(extra)
    return result
def _get_test_run(test_run_dir, hwcontext, build, pkginfo, testdef_objs,
                  err_log):
    """Build the dashboard test-run dict for one on-target test run,
    using pre-read hardware context and software info.

    :param hwcontext: hardware context dict shared by all runs.
    :param build, pkginfo: software build/package info for the sw context.
    """
    # psw0523 patch: local time instead of UTC.
    # NOTE(review): strftime still appends 'Z' (the UTC marker), which is
    # misleading unless the host clock is UTC — confirm intent.
    now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
    raw_testdef = read_content(os.path.join(test_run_dir, 'testdef.yaml'))
    stdout = read_content(os.path.join(test_run_dir, 'stdout.log'))
    uuid = read_content(os.path.join(test_run_dir, 'analyzer_assigned_uuid'))
    # Attachments need the raw testdef text; parse it only afterwards.
    attachments = _get_run_attachments(test_run_dir, raw_testdef, stdout)
    attributes = _attributes_from_dir(os.path.join(test_run_dir, 'attributes'))
    testdef = yaml.safe_load(raw_testdef)
    testdef_obj = get_testdef_obj_with_uuid(testdef_objs, uuid)
    if testdef_obj:
        sw_sources = testdef_obj._sw_sources
    else:
        logging.warning("no software sources found for run with uuid %s",
                        uuid)
        sw_sources = []
    return {
        'test_id': testdef.get('metadata').get('name'),
        'analyzer_assigned_date': now,
        'analyzer_assigned_uuid': uuid,
        'time_check_performed': False,
        'test_results': _get_test_results(test_run_dir, testdef, stdout,
                                          err_log),
        'software_context': _get_sw_context(build, pkginfo, sw_sources),
        'hardware_context': hwcontext,
        'attachments': attachments,
        'attributes': attributes,
        'testdef_metadata': _get_run_testdef_metadata(test_run_dir)
    }
def _result_from_dir(res_dir, test_case_id=None):
    """Load one test-case result from its on-disk directory.

    Walks *res_dir* looking for files named after each known result field
    and collects their stripped contents, then parses them into a result
    dict with attachments and attributes. The test_case_id recorded is
    the directory path relative to the enclosing test run directory.
    """
    data = {}
    test_run_dir = os.path.dirname(res_dir)
    field_names = ('result', 'measurement', 'units', 'message',
                   'timestamp', 'duration')
    for field in field_names:
        for path, dirs, files in os.walk(os.path.abspath(res_dir)):
            for hit in fnmatch.filter(files, field):
                full_path = os.path.join(path, hit)
                if os.path.isfile(full_path):
                    data['test_case_id'] = os.path.relpath(path, test_run_dir)
                    data[field] = read_content(full_path).strip()
    result = parse_testcase_result(data)
    result['attachments'] = _attachments_from_dir(
        os.path.join(res_dir, 'attachments'))
    result['attributes'] = _attributes_from_dir(
        os.path.join(res_dir, 'attributes'))
    return result
def _result_from_dir(res_dir, test_case_id=None):
    """Read a single test-case result directory into a result dict,
    including its attachments and attributes subdirectories."""
    test_run_dir = os.path.dirname(res_dir)
    data = {}
    for fname in ('result', 'measurement', 'units', 'message',
                  'timestamp', 'duration'):
        for path, dirs, files in os.walk(os.path.abspath(res_dir)):
            for found in fnmatch.filter(files, fname):
                fpath = os.path.join(path, found)
                if not os.path.isfile(fpath):
                    continue
                # The case id is the directory holding the field files,
                # relative to the test run directory.
                data['test_case_id'] = os.path.relpath(path, test_run_dir)
                data[fname] = read_content(fpath).strip()
    result = parse_testcase_result(data)
    result['attachments'] = _attachments_from_dir(
        os.path.join(res_dir, 'attachments'))
    result['attributes'] = _attributes_from_dir(
        os.path.join(res_dir, 'attributes'))
    return result
def _get_test_run(test_run_dir, testdef_objs, err_log):
    """Build the dashboard test-run dict for one run, reading the
    hardware and software contexts from inside the run directory."""
    now = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
    join = os.path.join
    raw_testdef = read_content(join(test_run_dir, 'testdef.yaml'))
    stdout = read_content(join(test_run_dir, 'stdout.log'))
    uuid = read_content(join(test_run_dir, 'analyzer_assigned_uuid'))
    hwcontext = _get_hw_context(
        read_content(join(test_run_dir, 'hwcontext/cpuinfo.txt'),
                     ignore_missing=True),
        read_content(join(test_run_dir, 'hwcontext/meminfo.txt'),
                     ignore_missing=True))
    build = read_content(join(test_run_dir, 'swcontext/build.txt'),
                         ignore_missing=True)
    pkginfo = read_content(join(test_run_dir, 'swcontext/pkgs.txt'),
                           ignore_missing=True)
    # Attachments take the raw testdef text; it is YAML-parsed afterwards.
    attachments = _get_run_attachments(test_run_dir, raw_testdef, stdout)
    attributes = _attributes_from_dir(join(test_run_dir, 'attributes'))
    testdef = yaml.safe_load(raw_testdef)
    testdef_obj = get_testdef_obj_with_uuid(testdef_objs, uuid)
    if testdef_obj:
        sw_sources = testdef_obj._sw_sources
    else:
        logging.warning("no software sources found for run with uuid %s",
                        uuid)
        sw_sources = []
    swcontext = _get_sw_context(build, pkginfo, sw_sources)
    return {
        'test_id': testdef.get('metadata').get('name'),
        'analyzer_assigned_date': now,
        'analyzer_assigned_uuid': uuid,
        'time_check_performed': False,
        'test_results': _get_test_results(test_run_dir, testdef, stdout,
                                          err_log),
        'software_context': swcontext,
        'hardware_context': hwcontext,
        'attachments': attachments,
        'attributes': attributes,
        'testdef_metadata': _get_run_testdef_metadata(test_run_dir)
    }
def _get_test_results(test_run_dir, testdef, stdout, err_log):
    """Extract all test results for one run.

    Results are gathered from, in order:
      1. stdout lines matched by the testdef's parse pattern (or a
         default '<id> : <RESULT>' pattern),
      2. LAVA_SIGNAL_TESTCASE markers in stdout (with or without
         units/measurement),
      3. per-test-case result directories under <test_run_dir>/results,
         merged into matching stdout results by test_case_id,
    plus synthetic 'lava-test-shell-install' / 'lava-test-shell-run'
    results built from the recorded install/run exit codes.

    On a bad parse pattern the error is written to *err_log* and the
    partial results gathered so far are returned.
    """
    results_from_log_file = []
    # Map upper-case result words emitted by tests to canonical
    # lower-case values. (Fixed: 'PASS' previously mapped to the garbage
    # value '******', which made every upper-case PASS result invalid.)
    fixupdict = {'PASS': 'pass', 'FAIL': 'fail',
                 'SKIP': 'skip', 'UNKNOWN': 'unknown'}
    pattern = None
    pattern_used = None

    # Synthesize a result for the install phase from its exit code.
    return_code = read_content(os.path.join(test_run_dir,
                                            'install_return_code'),
                               ignore_missing=True)
    if return_code:
        code = int(return_code)
        res = {}
        res['test_case_id'] = 'lava-test-shell-install'
        if code == 0:
            res['result'] = 'pass'
        else:
            res['result'] = 'fail'
        res['message'] = 'exit code ' + return_code
        results_from_log_file.append(res)

    # Choose the stdout parse pattern: the testdef's own, or the default.
    if 'parse' in testdef:
        if 'fixupdict' in testdef['parse']:
            fixupdict.update(testdef['parse']['fixupdict'])
        if 'pattern' in testdef['parse']:
            pattern_used = testdef['parse']['pattern']
    else:
        pattern_used = "(?P<test_case_id>.*-*)\\s+:\\s+(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))"
        logging.warning("""Using a default pattern to parse the test result. This may lead to empty test result in certain cases.""")
    try:
        pattern = re.compile(pattern_used)
    except re.error as e:
        # Record the bad pattern in the parse-error log and bail out with
        # whatever results we already collected. (Fixed: the message was
        # previously split by a raw newline inside the string literal.)
        errmsg = "Pattern '{0:s}' for test run '{1:s}' compile error ({2:s}). "
        errmsg = errmsg.format(pattern_used, testdef['metadata']['name'],
                               str(e))
        write_content(err_log, errmsg)
        return results_from_log_file
    if not pattern:
        logging.debug("No pattern set")

    # Signal markers emitted by lava-test-case: a slim form with just a
    # result, and a long form that also carries units and a measurement.
    slim_pattern = "<LAVA_SIGNAL_TESTCASE TEST_CASE_ID=(?P<test_case_id>.*)\\s+"\
        "RESULT=(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))>"
    test_pattern = "<LAVA_SIGNAL_TESTCASE TEST_CASE_ID=(?P<test_case_id>.*)\\s+"\
        "RESULT=(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))\\s"\
        "UNITS=(?P<units>.*)\\s"\
        "MEASUREMENT=(?P<measurement>.*)>"
    test_case_pattern = re.compile(test_pattern)
    result_pattern = re.compile(slim_pattern)

    for lineno, raw_line in enumerate(stdout.split('\n'), 1):
        line = raw_line.strip()
        match = pattern.match(line)
        if match:
            res = parse_testcase_result(match.groupdict(), fixupdict)
            # Both 'test_case_id' and 'result' must be included.
            if 'test_case_id' not in res or 'result' not in res:
                errmsg = "Pattern '{0:s}' for test run '{1:s}' is missing test_case_id or result. "
                errmsg = errmsg.format(pattern_used,
                                       testdef['metadata']['name'])
                write_content(err_log, errmsg)
                return results_from_log_file
            res['log_lineno'] = lineno
            res['log_filename'] = 'stdout.log'
            results_from_log_file.append(res)
            continue
        # Locate a simple lava-test-case with result to retrieve log line
        # number.
        match = result_pattern.match(line)
        if match:
            res = parse_testcase_result(match.groupdict(), fixupdict)
            res['log_lineno'] = lineno
            res['log_filename'] = 'stdout.log'
            results_from_log_file.append(res)
            continue
        # Also catch a lava-test-case with a unit and a measurement.
        match = test_case_pattern.match(line)
        if match:
            res = parse_testcase_result(match.groupdict(), fixupdict)
            res['log_lineno'] = lineno
            res['log_filename'] = 'stdout.log'
            results_from_log_file.append(res)

    # Gather per-test-case result directories, oldest first, and merge
    # them into matching stdout results by test_case_id.
    results_from_directories = []
    results_from_directories_by_id = {}
    result_names_and_paths = _directory_names_and_paths(
        os.path.join(test_run_dir, 'results'), ignore_missing=True)
    result_names_and_paths = [(name, path)
                              for (name, path) in result_names_and_paths
                              if os.path.isdir(path)]
    # Python 3 compatibility fix: tuple-unpacking lambdas were removed
    # (PEP 3113), so index into the (name, path) tuple instead.
    result_names_and_paths.sort(key=lambda item: os.path.getmtime(item[1]))
    for name, path in result_names_and_paths:
        r = _result_from_dir(path)
        results_from_directories_by_id[name] = (r,
                                                len(results_from_directories))
        results_from_directories.append(r)
    for res in results_from_log_file:
        if res.get('test_case_id') in results_from_directories_by_id:
            dir_res, index = results_from_directories_by_id[
                res['test_case_id']]
            results_from_directories[index] = None
            _merge_results(res, dir_res)
    for res in results_from_directories:
        if res is not None:
            results_from_log_file.append(res)

    # Synthesize a result for the run phase from its exit code.
    return_code = read_content(os.path.join(test_run_dir, 'return_code'),
                               ignore_missing=True)
    if return_code:
        code = int(return_code)
        res = {}
        res['test_case_id'] = 'lava-test-shell-run'
        if code == 0:
            res['result'] = 'pass'
        else:
            res['result'] = 'fail'
        res['message'] = 'exit code ' + return_code
        results_from_log_file.append(res)
    return results_from_log_file
def _get_test_results(test_run_dir, testdef, stdout, err_log):
    """Extract all test results for one run.

    Sources, in order: stdout lines matched by the parse pattern,
    LAVA_SIGNAL_TESTCASE markers in stdout, and per-test-case result
    directories under <test_run_dir>/results (merged by test_case_id),
    plus synthetic install/run results built from recorded exit codes.

    On a bad parse pattern the error is written to *err_log* and the
    partial results gathered so far are returned.
    """
    results_from_log_file = []
    # Canonicalize upper-case result words. (Fixed: 'PASS' previously
    # mapped to the corrupted value '******' instead of 'pass'.)
    fixupdict = {
        'PASS': 'pass',
        'FAIL': 'fail',
        'SKIP': 'skip',
        'UNKNOWN': 'unknown'
    }
    pattern = None
    pattern_used = None

    # Install phase: synthesize a pass/fail result from its exit code.
    return_code = read_content(os.path.join(test_run_dir,
                                            'install_return_code'),
                               ignore_missing=True)
    if return_code:
        code = int(return_code)
        res = {}
        res['test_case_id'] = 'lava-test-shell-install'
        if code == 0:
            res['result'] = 'pass'
        else:
            res['result'] = 'fail'
        res['message'] = 'exit code ' + return_code
        results_from_log_file.append(res)

    if 'parse' in testdef:
        if 'fixupdict' in testdef['parse']:
            fixupdict.update(testdef['parse']['fixupdict'])
        if 'pattern' in testdef['parse']:
            pattern_used = testdef['parse']['pattern']
    else:
        pattern_used = "(?P<test_case_id>.*-*)\\s+:\\s+(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))"
        logging.warning(
            """Using a default pattern to parse the test result. This may lead to empty test result in certain cases."""
        )
    try:
        pattern = re.compile(pattern_used)
    except re.error as e:
        # Log the bad pattern and return what we have. (Fixed: the
        # message string was previously broken by a raw newline.)
        errmsg = "Pattern '{0:s}' for test run '{1:s}' compile error ({2:s}). "
        errmsg = errmsg.format(pattern_used, testdef['metadata']['name'],
                               str(e))
        write_content(err_log, errmsg)
        return results_from_log_file
    if not pattern:
        logging.debug("No pattern set")

    # lava-test-case signal markers: slim (result only) and full
    # (result + units + measurement).
    slim_pattern = "<LAVA_SIGNAL_TESTCASE TEST_CASE_ID=(?P<test_case_id>.*)\\s+"\
        "RESULT=(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))>"
    test_pattern = "<LAVA_SIGNAL_TESTCASE TEST_CASE_ID=(?P<test_case_id>.*)\\s+"\
        "RESULT=(?P<result>(PASS|pass|FAIL|fail|SKIP|skip|UNKNOWN|unknown))\\s"\
        "UNITS=(?P<units>.*)\\s"\
        "MEASUREMENT=(?P<measurement>.*)>"
    test_case_pattern = re.compile(test_pattern)
    result_pattern = re.compile(slim_pattern)

    for lineno, raw in enumerate(stdout.split('\n'), 1):
        stripped = raw.strip()
        match = pattern.match(stripped)
        if match:
            res = parse_testcase_result(match.groupdict(), fixupdict)
            # Both 'test_case_id' and 'result' must be included.
            if 'test_case_id' not in res or 'result' not in res:
                errmsg = "Pattern '{0:s}' for test run '{1:s}' is missing test_case_id or result. "
                errmsg = errmsg.format(pattern_used,
                                       testdef['metadata']['name'])
                write_content(err_log, errmsg)
                return results_from_log_file
            res['log_lineno'] = lineno
            res['log_filename'] = 'stdout.log'
            results_from_log_file.append(res)
            continue
        # Simple lava-test-case marker: record its log line number.
        match = result_pattern.match(stripped)
        if match:
            res = parse_testcase_result(match.groupdict(), fixupdict)
            res['log_lineno'] = lineno
            res['log_filename'] = 'stdout.log'
            results_from_log_file.append(res)
            continue
        # Full marker with a unit and a measurement.
        match = test_case_pattern.match(stripped)
        if match:
            res = parse_testcase_result(match.groupdict(), fixupdict)
            res['log_lineno'] = lineno
            res['log_filename'] = 'stdout.log'
            results_from_log_file.append(res)

    # Merge per-test-case result directories (oldest first) into the
    # stdout-derived results by test_case_id.
    results_from_directories = []
    results_from_directories_by_id = {}
    result_names_and_paths = _directory_names_and_paths(
        os.path.join(test_run_dir, 'results'), ignore_missing=True)
    result_names_and_paths = [(name, path)
                              for (name, path) in result_names_and_paths
                              if os.path.isdir(path)]
    # Python 3 compatibility fix: tuple-unpacking lambdas were removed
    # (PEP 3113); index into the (name, path) tuple instead.
    result_names_and_paths.sort(key=lambda entry: os.path.getmtime(entry[1]))
    for name, path in result_names_and_paths:
        r = _result_from_dir(path)
        results_from_directories_by_id[name] = (r,
                                                len(results_from_directories))
        results_from_directories.append(r)
    for res in results_from_log_file:
        if res.get('test_case_id') in results_from_directories_by_id:
            dir_res, index = results_from_directories_by_id[
                res['test_case_id']]
            results_from_directories[index] = None
            _merge_results(res, dir_res)
    for res in results_from_directories:
        if res is not None:
            results_from_log_file.append(res)

    # Run phase: synthesize a pass/fail result from its exit code.
    return_code = read_content(os.path.join(test_run_dir, 'return_code'),
                               ignore_missing=True)
    if return_code:
        code = int(return_code)
        res = {}
        res['test_case_id'] = 'lava-test-shell-run'
        if code == 0:
            res['result'] = 'pass'
        else:
            res['result'] = 'fail'
        res['message'] = 'exit code ' + return_code
        results_from_log_file.append(res)
    return results_from_log_file