def check_log_for_fails( path_to_log, testname, exe ):
    """
    Scan a test's log file for Catch2-style failure summaries.

    Normal logs are expected to have in last line:
        "All tests passed (11 assertions in 1 test case)"
    Tests that have failures, however, will show:
        "test cases: 1 | 1 failed
         assertions: 9 | 6 passed | 3 failed"

    :param path_to_log: path to the log file, or None if no log was captured
    :param testname: display name of the test, used in error output
    :param exe: path to the executable that produced the log
    :return: True if any failed test cases were detected; False otherwise
    """
    if path_to_log is None:
        return False
    for ctx in grep( r'^test cases:\s*(\d+) \|\s*(\d+) (passed|failed)', path_to_log ):
        match = ctx['match']
        n_cases = int( match.group( 1 ))
        n_passed = int( match.group( 2 ))
        if match.group( 3 ) == 'failed':
            # Line was of the form "test cases: 1 | 1 failed" -- the second
            # number counts failures, so derive the pass count from it
            n_passed = n_cases - n_passed
        if n_passed >= n_cases:
            continue
        if n_cases == 1 or n_passed == 0:
            desc = 'failed'
        else:
            desc = f'{n_cases - n_passed} of {n_cases} failed'
        if log.is_verbose_on():
            # Verbose mode: dump the whole log inline for easier debugging
            log.e( log.red + testname + log.reset + ': ' + desc )
            log.i( 'Executable:', exe )
            log.i( 'Log: >>>' )
            log.out()
            cat( path_to_log )
            log.out( '<<<' )
        else:
            # Terse mode: just point at the log file
            log.e( log.red + testname + log.reset + ': ' + desc + '; see ' + path_to_log )
        return True
    return False
def check_log_for_fails(path_to_log, testname, configuration=None, repetition=1):
    """
    Scan a test's log file for Catch2-style failure summaries.

    Normal logs are expected to have in last line:
        "All tests passed (11 assertions in 1 test case)"
    Tests that have failures, however, will show:
        "test cases: 1 | 1 failed
         assertions: 9 | 6 passed | 3 failed"
    We make sure we look at the log written by the last run of the test by ignoring
    anything before the last line with "----------TEST-SEPARATOR----------" that
    separates between 2 separate runs of the test.

    :param path_to_log: path to the log file, or None if no log was captured
    :param testname: display name of the test, used in error output
    :param configuration: optional configuration the test ran with (passed to configuration_str)
    :param repetition: repetition number of this run (passed to configuration_str)
    :return: True if the last run recorded in the log had failed test cases
    """
    if path_to_log is None:
        return False
    results = None
    for ctx in file.grep( r'^test cases:\s*(\d+) \|\s*(\d+) (passed|failed)|^----------TEST-SEPARATOR----------$',
                          path_to_log ):
        m = ctx['match']
        if m.group(1) is None:
            # The separator alternative matched: it contains no capture groups, so
            # group(1) is None. (Previously this compared m.string to the separator
            # literal, but re.Match.string is the entire searched string -- it can
            # carry trailing whitespace/newline, making the comparison silently
            # fail and stale results from an earlier run be reported.)
            results = None
        else:
            results = m
    if not results:
        return False
    total = int(results.group(1))
    passed = int(results.group(2))
    if results.group(3) == 'failed':
        # "test cases: 1 | 1 failed" -- the second number counts failures
        passed = total - passed
    if passed < total:
        if total == 1 or passed == 0:
            desc = 'failed'
        else:
            desc = str(total - passed) + ' of ' + str(total) + ' failed'
        if log.is_verbose_on():
            # Verbose mode: dump the whole log inline for easier debugging
            log.e(log.red + testname + log.reset + ': '
                  + configuration_str(configuration, repetition, suffix=' ') + desc)
            log.i('Log: >>>')
            log.out()
            file.cat(path_to_log)
            log.out('<<<')
        else:
            # Terse mode: just point at the log file
            log.e(log.red + testname + log.reset + ': '
                  + configuration_str(configuration, repetition, suffix=' ') + desc
                  + '; see ' + path_to_log)
        return True
    return False
# License: Apache 2.0. See LICENSE file in root directory. # Copyright(c) 2021 Intel Corporation. All Rights Reserved. # we want this test to run first so that all tests run with updated FW versions, so we give it priority 0 #test:priority 0 #test:device each(L500*) #test:device each(D400*) import pyrealsense2 as rs, sys, os, subprocess from rspy import devices, log, test, file, repo if not devices.acroname: log.i("No Acroname library found; skipping device FW update") sys.exit(0) # Following will throw if no acroname module is found from rspy import acroname try: devices.acroname.discover() except acroname.NoneFoundError as e: log.f(e) # Remove acroname -- we're likely running inside run-unit-tests in which case the # acroname hub is likely already connected-to from there and we'll get an error # thrown ('failed to connect to acroname (result=11)'). We do not need it -- just # needed to verify it is available above... devices.acroname = None def send_hardware_monitor_command(device, command): command_input = [] # array of uint_8t # Parsing the command to array of unsigned integers(size should be < 8bits)
    :param test: The test (of class type Test) we're interested in
    """
    for configuration in test.config.configurations:
        try:
            serial_numbers = devices.by_configuration( configuration )
        except RuntimeError as e:
            # With an acroname hub present this is a hard error; without one it's
            # only a warning (the devices may simply not be connected)
            if devices.acroname:
                log.e( log.red + test.name + log.reset + ': ' + str(e) )
            else:
                log.w( log.yellow + test.name + log.reset + ': ' + str(e) )
            continue
        yield configuration, serial_numbers


log.i( 'Logs in:', logdir )


def test_wrapper( test, configuration = None ):
    """
    Run a single test, with progress output.

    :param test: the test (of class type Test) to run
    :param configuration: optional device configuration the test is run against;
                          included in the progress line when present
    """
    global n_tests
    n_tests += 1
    # Progress lines are suppressed in debug mode unless color output is on
    if not log.is_debug_on() or log.is_color_on():
        if configuration:
            log.progress( '[' + ' '.join( configuration ) + ']', test.name, '...' )
        else:
            log.progress( test.name, '...' )
    test.run_test()


# Run all tests
if pyrs:
    sys.path.append( pyrs_path )
from rspy import devices
# When only listing tags/tests, no live devices are needed
list_only = list_tags or list_tests
if not list_only:
    if pyrs:
        sys.path.insert( 1, pyrs_path )  # Make sure we pick up the right pyrealsense2!
    from rspy import devices
    devices.query()
    devices.map_unknown_ports()
    #
    # Under Travis, we'll have no devices and no acroname
    skip_live_tests = len(devices.all()) == 0 and not devices.acroname
    #
    if not skip_live_tests:
        if not to_stdout:
            log.i('Logs in:', libci.logdir)
        # Device "exceptions" (exclusions) are loaded from a spec file, if present;
        # missing or disabled (no_exceptions) leaves exceptions as None
        exceptions = None
        if not no_exceptions and os.path.isfile(libci.exceptionsfile):
            try:
                log.d('loading device exceptions from:', libci.exceptionsfile)
                log.debug_indent()
                exceptions = devices.load_specs_from_file( libci.exceptionsfile)
                exceptions = devices.expand_specs(exceptions)
                log.d('==>', exceptions)
            finally:
                # Always undo the indent, even if loading/expanding threw
                log.debug_unindent()
#
log.reset_errors()
available_tags = set()
    sys.exit(1)

    # NOTE(review): this loop belongs to an enclosing function whose def is outside
    # this view; it compares two dotted FW versions component by component and
    # returns True when the bundled version is newer than the current one.
    for curr, bundled in zip(current_fw_digits, bundled_fw_digits):
        if int(bundled) > int(curr):
            return True
        if int(bundled) < int(curr):
            return False
    # All components equal -> bundled is not newer
    return False


def pretty_fw_version(fw_version_as_string):
    """
    Return the dotted version string with leading zeros removed from each
    component, e.g. "05.12.07.100" -> "5.12.7.100"
    """
    return '.'.join([str(int(c)) for c in fw_version_as_string.split('.')])


# The Acroname hub is required here; without it this script is a no-op
if not devices.acroname:
    log.i("No Acroname library found; skipping device FW update")
    sys.exit(0)
# Following will throw if no acroname module is found
from rspy import acroname
try:
    devices.acroname.discover()
except acroname.NoneFoundError as e:
    log.e(e)
    sys.exit(1)
# Remove acroname -- we're likely running inside run-unit-tests in which case the
# acroname hub is likely already connected-to from there and we'll get an error
# thrown ('failed to connect to acroname (result=11)'). We do not need it -- just
# needed to verify it is available above...
devices.acroname = None

# this script is in unit-tests directory