def _get_usb_location(physical_port):
    """
    Helper method to get windows USB location from registry
    """
    if not physical_port:
        return None
    # physical port example:
    #   \\?\usb#vid_8086&pid_0b07&mi_00#6&8bfcab3&0&0000#{e5323777-f976-4f5b-9b55-b94699c46e44}\global
    #
    re_result = re.match(r'.*\\(.*)#vid_(.*)&pid_(.*)(?:&mi_(.*))?#(.*)#', physical_port, flags=re.IGNORECASE)
    dev_type = re_result.group(1)
    vid = re_result.group(2)
    pid = re_result.group(3)
    mi = re_result.group(4)
    unique_identifier = re_result.group(5)
    #
    import winreg
    if mi:
        registry_path = r"SYSTEM\CurrentControlSet\Enum\{}\VID_{}&PID_{}&MI_{}\{}".format(
            dev_type, vid, pid, mi, unique_identifier)
    else:
        registry_path = r"SYSTEM\CurrentControlSet\Enum\{}\VID_{}&PID_{}\{}".format(
            dev_type, vid, pid, unique_identifier)
    try:
        reg_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, registry_path)
    except FileNotFoundError:
        log.e('Could not find registry key for port:', registry_path)
        log.e('    usb location:', physical_port)
        return None
    result = winreg.QueryValueEx(reg_key, "LocationInformation")
    # location example: 0000.0014.0000.016.003.004.003.000.000
    # and, for T265: Port_#0002.Hub_#0006
    return result[0]
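# A quick standalone check, for illustration only, that the parsing regex above accepts the
# example physical-port string from the comment; it uses nothing but the standard re module
# and is not part of the module itself:
import re
example_port = r'\\?\usb#vid_8086&pid_0b07&mi_00#6&8bfcab3&0&0000#{e5323777-f976-4f5b-9b55-b94699c46e44}\global'
m = re.match(r'.*\\(.*)#vid_(.*)&pid_(.*)(?:&mi_(.*))?#(.*)#', example_port, flags=re.IGNORECASE)
assert m is not None
print(m.group(1), m.group(2))   # expected: "usb 8086" (device type and VID)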
def check_log_for_fails( path_to_log, testname, exe ):
    # Normal logs are expected to have in last line:
    #     "All tests passed (11 assertions in 1 test case)"
    # Tests that have failures, however, will show:
    #     "test cases: 1 | 1 failed
    #      assertions: 9 | 6 passed | 3 failed"
    if path_to_log is None:
        return False
    for ctx in grep( r'^test cases:\s*(\d+) \|\s*(\d+) (passed|failed)', path_to_log ):
        m = ctx['match']
        total = int(m.group(1))
        passed = int(m.group(2))
        if m.group(3) == 'failed':
            # "test cases: 1 | 1 failed"
            passed = total - passed
        if passed < total:
            if total == 1 or passed == 0:
                desc = 'failed'
            else:
                desc = str(total - passed) + ' of ' + str(total) + ' failed'
            if log.is_verbose_on():
                log.e( log.red + testname + log.reset + ': ' + desc )
                log.i( 'Executable:', exe )
                log.i( 'Log: >>>' )
                log.out()
                cat( path_to_log )
                log.out( '<<<' )
            else:
                log.e( log.red + testname + log.reset + ': ' + desc + '; see ' + path_to_log )
            return True
    return False
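# Illustrative only: the kind of summary line the grep pattern above is built to catch,
# checked here with plain re instead of the grep() helper:
import re
summary = 'test cases: 1 | 1 failed'
m = re.match(r'^test cases:\s*(\d+) \|\s*(\d+) (passed|failed)', summary)
print(m.group(1), m.group(2), m.group(3))   # expected: "1 1 failed"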
def run_test( self ):
    log_path = self.get_log()
    try:
        subprocess_run( self.command, stdout=log_path )
    except FileNotFoundError:
        log.e( log.red + self.name + log.reset + ': executable not found! (' + self.exe + ')' )
    except subprocess.CalledProcessError as cpe:
        if not check_log_for_fails( log_path, self.name, self.exe ):
            # An unexpected error occurred
            log.e( log.red + self.name + log.reset + ': exited with non-zero value! (' + str(cpe.returncode) + ')' )
def check_log_for_fails(path_to_log, testname, configuration=None, repetition=1):
    # Normal logs are expected to have in the last line:
    #     "All tests passed (11 assertions in 1 test case)"
    # Tests that have failures, however, will show:
    #     "test cases: 1 | 1 failed
    #      assertions: 9 | 6 passed | 3 failed"
    # We make sure we look at the log written by the last run of the test by ignoring anything before the last
    # "----...----" line that separates two runs of the test
    if path_to_log is None:
        return False
    results = None
    for ctx in file.grep(r'^test cases:\s*(\d+) \|\s*(\d+) (passed|failed)|^----------TEST-SEPARATOR----------$',
                         path_to_log):
        m = ctx['match']
        if m.string == "----------TEST-SEPARATOR----------":
            results = None
        else:
            results = m
    if not results:
        return False
    total = int(results.group(1))
    passed = int(results.group(2))
    if results.group(3) == 'failed':
        # "test cases: 1 | 1 failed"
        passed = total - passed
    if passed < total:
        if total == 1 or passed == 0:
            desc = 'failed'
        else:
            desc = str(total - passed) + ' of ' + str(total) + ' failed'
        if log.is_verbose_on():
            log.e(log.red + testname + log.reset + ': ' + configuration_str(configuration, repetition, suffix=' ') + desc)
            log.i('Log: >>>')
            log.out()
            file.cat(path_to_log)
            log.out('<<<')
        else:
            log.e(log.red + testname + log.reset + ': ' + configuration_str(configuration, repetition, suffix=' ')
                  + desc + '; see ' + path_to_log)
        return True
    return False
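# Illustrative only: the shape of a log containing two runs of the same test; the logic above
# ignores everything before the last separator line, so only the final "All tests passed"
# summary counts here:
#
#     test cases: 1 | 1 failed
#     assertions: 9 | 6 passed | 3 failed
#     ----------TEST-SEPARATOR----------
#     All tests passed (11 assertions in 1 test case)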
def query(monitor_changes=True):
    """
    Start a new LRS context, and collect all devices
    :param monitor_changes: If True, devices will update dynamically as they are removed/added
    """
    global rs
    if not rs:
        return
    #
    # Before we can start a context and query devices, we need to enable all the ports
    # on the acroname, if any:
    if acroname:
        if not acroname.hub:
            acroname.connect()  # MAY THROW!
        acroname.enable_ports(sleep_on_change=5)  # make sure all connected!
    #
    # Get all devices, and store by serial-number
    global _device_by_sn, _context, _port_to_sn
    _context = rs.context()
    _device_by_sn = dict()
    try:
        log.d('discovering devices ...')
        log.debug_indent()
        for retry in range(3):
            try:
                devices = _context.query_devices()
                break
            except RuntimeError as e:
                log.d('FAILED to query devices:', e)
                if retry > 1:
                    log.e('FAILED to query devices', retry + 1, 'times!')
                    raise
                else:
                    time.sleep(1)
        for dev in devices:
            # The FW update ID is always available, it seems, and is the ASIC serial number
            # whereas the Serial Number is the OPTIC serial number and is only available in
            # non-recovery devices. So we use the former...
            sn = dev.get_info(rs.camera_info.firmware_update_id)
            device = Device(sn, dev)
            _device_by_sn[sn] = device
            log.d('... port {}:'.format(device.port is None and '?' or device.port), sn, dev)
    finally:
        log.debug_unindent()
    #
    if monitor_changes:
        _context.set_devices_changed_callback(_device_change_callback)
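# Hypothetical usage, assuming this module is imported as 'devices' the way the other
# snippets in this section do; enabled() is the helper referenced by map_unknown_ports()
# further below:
#
#     from rspy import devices
#     devices.query(monitor_changes=False)
#     for sn in devices.enabled():
#         print(sn)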
def has_newer_fw(current_fw, bundled_fw):
    """
    :param current_fw: current FW version of a device
    :param bundled_fw: bundled FW version of the same device
    :return: True if the bundled version is newer than the current one
    """
    current_fw_digits = current_fw.split('.')
    bundled_fw_digits = bundled_fw.split('.')
    if len(current_fw_digits) != len(bundled_fw_digits):
        log.e("Either the device's FW (", current_fw, ") or the bundled FW (", bundled_fw,
              ") was of an invalid format")
        sys.exit(1)
    for curr, bundled in zip(current_fw_digits, bundled_fw_digits):
        if int(bundled) > int(curr):
            return True
        if int(bundled) < int(curr):
            return False
    return False
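# Hypothetical sanity checks (the version strings are made up for illustration only):
assert has_newer_fw('5.12.7.100', '5.12.8.50')          # bundled 8 > current 7 in the third field
assert not has_newer_fw('5.13.0.50', '5.12.9.80')       # current version is already newer
assert not has_newer_fw('5.12.7.100', '5.12.7.100')     # identical versions are not "newer"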
def devices_by_test_config( test ):
    """
    Yield <configuration,serial-numbers> pairs for each valid configuration under which the
    test should run.

    The <configuration> is a list of ('test:device') designations, e.g. ['L500*', 'D415'].
    The <serial-numbers> is a set of device serial-numbers that fit this configuration.

    :param test: The test (of class type Test) we're interested in
    """
    for configuration in test.config.configurations:
        try:
            serial_numbers = devices.by_configuration( configuration )
        except RuntimeError as e:
            if devices.acroname:
                log.e( log.red + test.name + log.reset + ': ' + str(e) )
            else:
                log.w( log.yellow + test.name + log.reset + ': ' + str(e) )
            continue
        yield configuration, serial_numbers
def __init__( self, sn, dev ):
    self._sn = sn
    self._dev = dev
    self._name = None
    if dev.supports( rs.camera_info.name ):
        self._name = dev.get_info( rs.camera_info.name )
    self._product_line = None
    if dev.supports( rs.camera_info.product_line ):
        self._product_line = dev.get_info( rs.camera_info.product_line )
    self._physical_port = dev.supports( rs.camera_info.physical_port ) and dev.get_info( rs.camera_info.physical_port ) or None
    self._usb_location = _get_usb_location( self._physical_port )
    self._port = None
    if acroname:
        try:
            self._port = _get_port_by_loc( self._usb_location )
        except Exception as e:
            log.e( 'Failed to get device port:', e )
            log.d( '    physical port is', self._physical_port )
            log.d( '    USB location is', self._usb_location )
    self._removed = False
def test_wrapper(test, configuration=None, repetition=1):
    global n_tests, rslog
    n_tests += 1
    #
    if not log.is_debug_on() or log.is_color_on():
        log.progress( configuration_str(configuration, repetition, suffix=' ') + test.name, '...')
    #
    log_path = test.get_log()
    #
    opts = set()
    if rslog:
        opts.add('--rslog')
    try:
        test.run_test(configuration=configuration, log_path=log_path, opts=opts)
    except FileNotFoundError as e:
        log.e( log.red + test.name + log.reset + ':',
               str(e) + configuration_str(configuration, repetition, prefix=' '))
    except subprocess.TimeoutExpired:
        log.e( log.red + test.name + log.reset + ':',
               configuration_str(configuration, repetition, suffix=' ') + 'timed out')
    except subprocess.CalledProcessError as cpe:
        if not check_log_for_fails(log_path, test.name, configuration, repetition):
            # An unexpected error occurred
            log.e( log.red + test.name + log.reset + ':',
                   configuration_str(configuration, repetition, suffix=' ')
                   + 'exited with non-zero value (' + str(cpe.returncode) + ')')
def __init__( self, source, line_prefix ):
    """
    :param source: The path to the text file
    :param line_prefix: A regex to denote a directive (must be first thing in a line), which
        will be immediately followed by the directive itself and optional arguments
    """
    TestConfig.__init__(self)

    # Parse the python
    regex = r'^' + line_prefix + r'(\S+)((?:\s+\S+)*?)\s*(?:#\s*(.*))?$'
    for context in grep( regex, source ):
        match = context['match']
        directive = match.group(1)
        params = [s for s in context['match'].group(2).split()]
        comment = match.group(3)
        if directive == 'device':
            #log.d( '    configuration:', params )
            if not params:
                log.e( source + '+' + str(context['index']) + ': device directive with no devices listed' )
            else:
                self._configurations.append( params )
        elif directive == 'priority':
            if len(params) == 1 and params[0].isdigit():
                self._priority = int( params[0] )
            else:
                log.e( source + '+' + str(context['index']) + ': priority directive with invalid parameters:', params )
        elif directive == 'tag':
            self._tags.update(params)
        else:
            log.e( source + '+' + str(context['index']) + ': invalid directive "' + directive + '"; ignoring' )
def process_cpp(dir, builddir):
    global regex, required_tags, list_only, available_tags, tests_and_tags
    found = []
    shareds = []
    statics = []
    if regex:
        pattern = re.compile(regex)
    for f in file.find(dir, r'(^|/)test-.*\.cpp$'):
        testdir = os.path.splitext(f)[0]            # "log/internal/test-all"  <-  "log/internal/test-all.cpp"
        testparent = os.path.dirname(testdir)       # "log/internal"
        # We need the project name unique: keep the path but make it nicer:
        testname = 'test-' + testparent.replace('/', '-') + '-' + os.path.basename(testdir)[5:]   # "test-log-internal-all"
        if regex and not pattern.search(testname):
            continue

        if required_tags or list_tags:
            config = libci.TestConfigFromCpp(dir + os.sep + f)
            if not all(tag in config.tags for tag in required_tags):
                continue
            available_tags.update(config.tags)
            if list_tests:
                tests_and_tags[testname] = config.tags

        if testname not in tests_and_tags:
            tests_and_tags[testname] = None

        if list_only:
            continue

        # Each CMakeLists.txt sits in its own directory
        os.makedirs(builddir + '/' + testdir, exist_ok=True)   # "build/log/internal/test-all"
        # Build the list of files we want in the project:
        # At a minimum, we have the original file, plus any common files
        filelist = [dir + '/' + f, '${ELPP_FILES}', '${CATCH_FILES}']
        # Add any "" includes specified in the .cpp that we can find
        includes = find_includes(dir + '/' + f)
        # Add any files explicitly listed in the .cpp itself, like this:
        #         //#cmake:add-file <filename>
        # Any files listed are relative to $dir
        shared = False
        static = False
        for context in file.grep(r'^//#cmake:\s*', dir + '/' + f):
            m = context['match']
            index = context['index']
            cmd, *rest = context['line'][m.end():].split()
            if cmd == 'add-file':
                for additional_file in rest:
                    files = additional_file
                    if not os.path.isabs(additional_file):
                        files = dir + '/' + testparent + '/' + additional_file
                    files = glob(files)
                    if not files:
                        log.e(f + '+' + str(index) + ': no files match "' + additional_file + '"')
                    for abs_file in files:
                        abs_file = os.path.normpath(abs_file)
                        abs_file = abs_file.replace('\\', '/')
                        if not os.path.exists(abs_file):
                            log.e(f + '+' + str(index) + ': file not found "' + additional_file + '"')
                        log.d('   add file:', abs_file)
                        filelist.append(abs_file)
                        if os.path.splitext(abs_file)[1] == '.cpp':
                            # Add any "" includes specified in the .cpp that we can find
                            includes |= find_includes(abs_file)
            elif cmd == 'static!':
                if len(rest):
                    log.e(f + '+' + str(index) + ': unexpected arguments past \'' + cmd + '\'')
                elif shared:
                    log.e(f + '+' + str(index) + ': \'' + cmd + '\' mutually exclusive with \'shared!\'')
                else:
                    static = True
            elif cmd == 'shared!':
                if len(rest):
                    log.e(f + '+' + str(index) + ': unexpected arguments past \'' + cmd + '\'')
                elif static:
                    log.e(f + '+' + str(index) + ': \'' + cmd + '\' mutually exclusive with \'static!\'')
                else:
                    shared = True
            else:
                log.e(f + '+' + str(index) + ': unknown cmd \'' + cmd
                      + '\' (should be \'add-file\', \'static!\', or \'shared!\')')
        for include in includes:
            filelist.append(include)

        generate_cmake(builddir, testdir, testname, filelist)
        if static:
            statics.append(testdir)
        elif shared:
            shareds.append(testdir)
        else:
            found.append(testdir)
    return found, shareds, statics
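# Illustrative only: the kind of lines process_cpp() scans for inside a test-*.cpp file;
# the added file name below is a made-up placeholder:
#
#     //#cmake:add-file ../some-helper.cpp
#     //#cmake:static!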
def abort():
    log.e( "Aborting test" )
    sys.exit( 1 )
    linux = True
else:
    linux = False

# Parse command-line:
try:
    opts, args = getopt.getopt(sys.argv[1:], 'hvqr:st:',
                               longopts=['help', 'verbose', 'debug', 'quiet', 'regex=', 'stdout', 'tag=',
                                         'list-tags', 'list-tests', 'no-exceptions', 'context=', 'repeat=',
                                         'config=', 'no-reset', 'rslog'])
except getopt.GetoptError as err:
    log.e(err)  # something like "option -a not recognized"
    usage()

regex = None
to_stdout = False
required_tags = []
list_tags = False
list_tests = False
no_exceptions = False
context = None
repeat = 1
forced_configurations = None
no_reset = False
rslog = False

for opt, arg in opts:
    if opt in ('-h', '--help'):
        usage()
    print(' with what tags it has')
    sys.exit(2)

regex = None
required_tags = []
list_tags = False
list_tests = False

# parse command-line:
try:
    opts, args = getopt.getopt(sys.argv[1:], 'hr:t:',
                               longopts=['help', 'regex=', 'tag=', 'list-tags', 'list-tests'])
except getopt.GetoptError as err:
    log.e(err)  # something like "option -a not recognized"
    usage()

for opt, arg in opts:
    if opt in ('-h', '--help'):
        usage()
    elif opt in ('-r', '--regex'):
        regex = arg
    elif opt in ('-t', '--tag'):
        required_tags.append(arg)
    elif opt == '--list-tags':
        list_tags = True
    elif opt == '--list-tests':
        list_tests = True

if len(args) != 2:
    usage()
def __init__( self, source, line_prefix ):
    """
    :param source: The path to the text file
    :param line_prefix: A regex to denote a directive (must be first thing in a line), which
        will be immediately followed by the directive itself and optional arguments
    """
    TestConfig.__init__(self)

    # Parse the python
    regex = r'^' + line_prefix + r'(\S+)((?:\s+\S+)*?)\s*(?:#\s*(.*))?$'
    for context in file.grep( regex, source ):
        match = context['match']
        directive = match.group(1)
        text_params = match.group(2).strip()
        params = [s for s in text_params.split()]
        comment = match.group(3)
        if directive == 'device':
            #log.d( '    configuration:', params )
            if not params:
                log.e( source + '+' + str(context['index']) + ': device directive with no devices listed' )
            elif 'each' in text_params.lower() and len(params) > 1:
                log.e( source + '+' + str(context['index']) + ': each() cannot be used in combination with other specs', params )
            elif 'each' in text_params.lower() and not re.fullmatch( r'each\(.+\)', text_params, re.IGNORECASE ):
                log.e( source + '+' + str(context['index']) + ': invalid \'each\' syntax:', params )
            else:
                self._configurations.append( params )
        elif directive == 'priority':
            if len(params) == 1 and params[0].isdigit():
                self._priority = int( params[0] )
            else:
                log.e( source + '+' + str(context['index']) + ': priority directive with invalid parameters:', params )
        elif directive == 'timeout':
            if len(params) == 1 and params[0].isdigit():
                self._timeout = int( params[0] )
            else:
                log.e( source + '+' + str(context['index']) + ': timeout directive with invalid parameters:', params )
        elif directive == 'tag':
            self._tags.update(params)
        elif directive == 'flag':
            self._flags.update( params )
        else:
            log.e( source + '+' + str(context['index']) + ': invalid directive "' + directive + '"; ignoring' )
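# Illustrative directive lines this parser picks up from a test script, assuming the caller
# passes '#test:' as line_prefix (the same prefix shown in the derive_config_from_text()
# comment below); the device specs and values are placeholders:
#
#     #test:device L500* D400*
#     #test:device each(D400*)
#     #test:priority 0
#     #test:timeout 300
#     #test:tag fw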
def map_unknown_ports():
    """
    Fill in unknown ports in devices by enabling one port at a time, finding out which device is there.
    """
    if not acroname:
        return
    global _device_by_sn
    devices_with_unknown_ports = [ device for device in _device_by_sn.values() if device.port is None ]
    if not devices_with_unknown_ports:
        return
    #
    ports = acroname.ports()
    known_ports = [ device.port for device in _device_by_sn.values() if device.port is not None ]
    unknown_ports = [port for port in ports if port not in known_ports]
    try:
        log.d('mapping unknown ports', unknown_ports, '...')
        log.debug_indent()
        #log.d( "active ports:", ports )
        #log.d( "- known ports:", known_ports )
        #log.d( "= unknown ports:", unknown_ports )
        #
        for known_port in known_ports:
            if known_port not in ports:
                log.e("A device was found on port", known_port, "but the port is not reported as used by Acroname!")
        #
        if len(unknown_ports) == 1:
            device = devices_with_unknown_ports[0]
            log.d('... port', unknown_ports[0], 'has', device.handle)
            device._port = unknown_ports[0]
            return
        #
        acroname.disable_ports(ports)
        wait_until_all_ports_disabled()
        #
        # Enable one port at a time to try and find what device is connected to it
        n_identified_ports = 0
        for port in unknown_ports:
            #
            log.d('enabling port', port)
            acroname.enable_ports([port], disable_other_ports=True)
            sn = None
            for retry in range(5):
                if len(enabled()) == 1:
                    sn = list(enabled())[0]
                    break
                time.sleep(1)
            if not sn:
                log.d('could not recognize device in port', port)
            else:
                device = _device_by_sn.get(sn)
                if device:
                    log.d('... port', port, 'has', device.handle)
                    device._port = port
                    n_identified_ports += 1
                    if len(devices_with_unknown_ports) == n_identified_ports:
                        #log.d( 'no more devices; stopping' )
                        break
                else:
                    log.w("Device with serial number", sn, "was found in port", port, "but was not in context")
            acroname.disable_ports([port])
            wait_until_all_ports_disabled()
    finally:
        log.debug_unindent()
def derive_config_from_text(self, source, line_prefix):
    # Configuration is made up of directives:
    #     #test:<directive>[:[!]<context>] <param>*
    # If a context is not specified, the directive always applies. Any directive with a context
    # will only get applied if we're running under the context it specifies (! means not, so
    # !nightly means when not under nightly).
    regex = r'^' + line_prefix
    regex += r'([^\s:]+)'           # 1: directive
    regex += r'(?::(\S+))?'         # 2: optional context
    regex += r'((?:\s+\S+)*?)'      # 3: params
    regex += r'\s*(?:#\s*(.*))?$'   # 4: optional comment
    for line in file.grep(regex, source):
        match = line['match']
        directive = match.group(1)
        directive_context = match.group(2)
        text_params = match.group(3).strip()
        params = [s for s in text_params.split()]
        comment = match.group(4)
        if directive_context:
            not_context = directive_context.startswith('!')
            if not_context:
                directive_context = directive_context[1:]
            # not_context | directive_ctx==context | RESULT
            # ----------- | ---------------------- | ------
            #      0      |           0            | IGNORE
            #      0      |           1            |  USE
            #      1      |           0            |  USE
            #      1      |           1            | IGNORE
            if not_context == (directive_context == self.context):
                # log.d( "directive", line['line'], "ignored because of context mismatch with running context",
                #        self.context )
                continue
        if directive == 'device':
            # log.d( '    configuration:', params )
            if not params:
                log.e(source + '+' + str(line['index']) + ': device directive with no devices listed')
            elif 'each' in text_params.lower() and len(params) > 1:
                log.e(source + '+' + str(line['index']) + ': each() cannot be used in combination with other specs',
                      params)
            elif 'each' in text_params.lower() and not re.fullmatch(r'each\(.+\)', text_params, re.IGNORECASE):
                log.e(source + '+' + str(line['index']) + ': invalid \'each\' syntax:', params)
            else:
                self._configurations.append(params)
        elif directive == 'priority':
            if len(params) == 1 and params[0].isdigit():
                self._priority = int(params[0])
            else:
                log.e(source + '+' + str(line['index']) + ': priority directive with invalid parameters:', params)
        elif directive == 'timeout':
            if len(params) == 1 and params[0].isdigit():
                self._timeout = int(params[0])
            else:
                log.e(source + '+' + str(line['index']) + ': timeout directive with invalid parameters:', params)
        elif directive == 'tag':
            self._tags.update(map(str.lower, params))  # tags are case-insensitive
        elif directive == 'flag':
            self._flags.update(params)
        elif directive == 'donotrun':
            if params:
                log.e(source + '+' + str(line['index']) + ': donotrun directive should not have parameters:', params)
            self._donotrun = True
        else:
            log.e(source + '+' + str(line['index']) + ': invalid directive "' + directive + '"; ignoring')
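# Illustrative only: the same directives with the optional [!]<context> qualifier described
# in the comment at the top of the function; the context names are placeholders:
#
#     #test:donotrun:windows       # applies only when running under the 'windows' context
#     #test:donotrun:!nightly      # applies unless running under the 'nightly' context
#     #test:timeout:nightly 500    # larger timeout only under the 'nightly' context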
librealsense = os.path.dirname(current_dir)

# function for checking if a file is an executable
def is_executable(path_to_test):
    if linux:
        return os.access(path_to_test, os.X_OK)
    else:
        return path_to_test.endswith('.exe')

# Parse command-line:
try:
    opts, args = getopt.getopt( sys.argv[1:], 'hvqr:st:',
                                longopts=['help', 'verbose', 'debug', 'quiet', 'regex=', 'stdout', 'tag'] )
except getopt.GetoptError as err:
    log.e( err )  # something like "option -a not recognized"
    usage()

regex = None
to_stdout = False
tag = None
for opt, arg in opts:
    if opt in ('-h', '--help'):
        usage()
    elif opt in ('-v', '--verbose'):
        log.verbose_on()
    elif opt in ('-q', '--quiet'):
        log.quiet_on()
    elif opt in ('-r', '--regex'):
        regex = arg
    elif opt in ('-s', '--stdout'):
        to_stdout = True
def pretty_fw_version(fw_version_as_string):
    """ return a version with leading zeros removed from each component """
    return '.'.join([str(int(c)) for c in fw_version_as_string.split('.')])


if not devices.acroname:
    log.i("No Acroname library found; skipping device FW update")
    sys.exit(0)
# Following will throw if no acroname module is found
from rspy import acroname
try:
    devices.acroname.discover()
except acroname.NoneFoundError as e:
    log.e(e)
    sys.exit(1)
# Remove acroname -- we're likely running inside run-unit-tests in which case the
# acroname hub is likely already connected-to from there and we'll get an error
# thrown ('failed to connect to acroname (result=11)'). We do not need it -- just
# needed to verify it is available above...
devices.acroname = None

# this script is in unit-tests directory
librealsense = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# common/fw/firmware-version.h contains the bundled FW versions for all product lines
fw_versions_file = os.path.join(librealsense, 'common', 'fw', 'firmware-version.h')
if not os.path.isfile(fw_versions_file):
    log.e("Expected to find a file containing FW versions at", fw_versions_file, ", but the file was not found")
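# A hedged sketch, not the script's actual continuation: one way the helpers above could be
# combined to decide whether a device needs an update. The function name, the regex over the
# header, and the example version strings are illustrative assumptions only.
import re

def _needs_update_example(current_fw, fw_versions_path):
    with open(fw_versions_path) as f:
        header = f.read()
    m = re.search(r'"(\d+\.\d+\.\d+\.\d+)"', header)   # grab some quoted x.y.z.w version string
    if not m:
        return False
    bundled_fw = pretty_fw_version(m.group(1))         # e.g. '05.13.00.0050' -> '5.13.0.50'
    return has_newer_fw(pretty_fw_version(current_fw), bundled_fw)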