def lastgrep(file, expr, ignore=None, include=None, encoding=None, returnMatch=False, flags=0):
    """Search for matches to a regular expression in the last line of an input file,
    returning true if a match occurs.

    :param file: The full path to the input file
    :param expr: The regular expression (uncompiled) to search for in the last line of the input file
    :param ignore: A list of regular expressions which remove entries in the input file contents before making the grep
    :param include: A list of regular expressions used to select lines from the input file contents to use in the grep
    :param encoding: Specifies the encoding to be used for opening the file, or None for default.
    :param returnMatch: If True, return the regex match object (or None) instead of a boolean.
    :param flags: Zero or more ``re`` flags to use when compiling the expression.
    :return: success (True / False), unless returnMatch=True in which case the match object (or None) is returned
    :rtype: bool
    :raises FileNotFoundException: Raised if the input file does not exist
    """
    # None defaults avoid the shared-mutable-default-argument pitfall; behavior is unchanged for callers
    ignore = [] if ignore is None else ignore
    include = [] if include is None else include

    if not pathexists(file):
        raise FileNotFoundException("unable to find file \"%s\"" % (file))

    with openfile(file, 'r', encoding=encoding) as f:
        contents = f.readlines()

    # pre-process: drop ignored lines, then keep only included lines (if include is non-empty)
    contents = trimContents(contents, ignore, exclude=True, flags=flags)
    contents = trimContents(contents, include, exclude=False, flags=flags)

    logContents("Contents of %s after pre-processing;" % os.path.basename(file), contents)
    if len(contents) > 0:
        # only the final (post-filtering) line is grepped
        line = contents[len(contents)-1]
        regexpr = re.compile(expr, flags=flags)
        result = regexpr.search(line)
        if result is not None:
            return result if returnMatch else True
    return None if returnMatch else False
def cleanup(self, **kwargs):
    """Log a summary of the collected test output files and archive/publish them;
    if nothing was collected, just log a debug message."""
    destDir = self.destDir
    if pathexists(destDir):
        log.info('Collected %s test output files to directory: %s',
            '{:}'.format(self.collectedFileCount),
            os.path.normpath(fromLongPathSafe(destDir)))
        self.archiveAndPublish()
    else:
        log.debug('No matching output files were found for collection directory: %s',
            os.path.normpath(destDir))
def cleanup(self, **kwargs):
    """Combine the collected Python coverage data files and produce XML and HTML
    coverage reports in the destination directory, then archive/publish them.

    Does nothing (other than an info log line) if no coverage files were generated.
    Runs ``python -m coverage combine`` / ``xml`` / ``html`` as subprocesses.
    """
    pythonCoverageDir = self.destDir
    # the base class is expected to have absolutized this config property
    assert os.path.isabs(pythonCoverageDir)
    pythonCoverageDir = os.path.normpath(fromLongPathSafe(pythonCoverageDir))
    if not pathexists(pythonCoverageDir):
        log.info('No Python coverage files were generated.')
        return

    log.info('Preparing Python coverage report in: %s', pythonCoverageDir)

    # merge the per-process coverage data files into one database; abort on failure
    # since the xml/html steps below depend on it. disableCoverage=True so this
    # subprocess is not itself instrumented for coverage
    self.runner.startPython(['-m', 'coverage', 'combine'], abortOnError=True,
        workingDir=pythonCoverageDir,
        stdouterr=pythonCoverageDir + '/python-coverage-combine',
        disableCoverage=True,
        onError=lambda process: 'Failed to combine Python code coverage data: %s' % self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True) or self.runner.logFileContents(process.stderr, maxLines=0))

    # produces coverage.xml in a standard format that is useful to code coverage tools
    self.runner.startPython(['-m', 'coverage', 'xml'], abortOnError=False,
        workingDir=pythonCoverageDir,
        stdouterr=pythonCoverageDir + '/python-coverage-xml',
        disableCoverage=True,
        onError=lambda process: self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True) or self.runner.logFileContents(process.stderr, maxLines=0))

    # human-readable HTML report, with any user-configured extra arguments appended
    self.runner.startPython(['-m', 'coverage', 'html', '-d', toLongPathSafe(pythonCoverageDir + '/htmlcov')] + self.getCoverageArgsList(),
        abortOnError=False,
        workingDir=pythonCoverageDir,
        stdouterr=pythonCoverageDir + '/python-coverage-html',
        disableCoverage=True,
        onError=lambda process: self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True) or self.runner.logFileContents(process.stderr, maxLines=0))

    # to avoid confusion, remove any zero byte out/err files from the above
    for p in os.listdir(pythonCoverageDir):
        p = os.path.join(pythonCoverageDir, p)
        if p.endswith(('.out', '.err')) and os.path.getsize(p) == 0: os.remove(p)

    self.archiveAndPublish()
def getmatches(file, regexpr, ignores=None, encoding=None, flags=0, mappers=None, returnFirstOnly=False):
    """Look for matches on a regular expression in an input file, return a sequence of the matches
    (or if returnFirstOnly=True, just the first).

    :param file: The full path to the input file
    :param regexpr: The regular expression used to search for matches
    :param ignores: A list of regexes which will cause matches to be discarded. These are applied *after* any mappers.
    :param encoding: Specifies the encoding to be used for opening the file, or None for default.
    :param flags: Zero or more ``re`` flags to use when compiling the expressions.
    :param mappers: A list of lambdas or generator functions used to pre-process the file's lines before looking for matches.
    :param returnFirstOnly: If True, stops reading the file as soon as the first match is found and returns it.
    :return: A list of the match objects, or the match object or None if returnFirstOnly is True
    :rtype: list
    :raises FileNotFoundException: Raised if the input file does not exist
    """
    matches = []
    rexp = re.compile(regexpr, flags=flags)

    log.debug("Looking for expression \"%s\" in input file %s" % (regexpr, file))

    # it's easy to pass in a str by mistake and we definitely don't want to be
    # ignoring lines containing any letter from that string!
    if isinstance(ignores, str): ignores = [ignores]
    ignores = [re.compile(i, flags=flags) for i in (ignores or [])]

    # None default avoids the mutable-default-argument pitfall; None behaves like no mappers
    mappers = mappers or []

    if not pathexists(file):
        raise FileNotFoundException("unable to find file \"%s\"" % (file))

    with openfile(file, 'r', encoding=encoding) as f:
        for l in applyMappers(f, mappers):
            match = rexp.search(l)
            if match is not None:
                # discard any match whose (mapped) line also matches an ignore expression
                if any(i.search(l) for i in ignores): continue

                log.debug(("Found match for line: %s" % l).rstrip())
                if returnFirstOnly is True: return match
                matches.append(match)
    if returnFirstOnly is True: return None
    return matches
def filecopy(src, dst):
    """Copy source file to a destination file.

    :param src: Full path to the source filename
    :param dst: Full path the destination filename
    :raises FileNotFoundException: Raised if the source file does not exist
    """
    if pathexists(src):
        shutil.copyfile(toLongPathSafe(src), toLongPathSafe(dst))
    else:
        raise FileNotFoundException("unable to find file %s" % (os.path.basename(src)))
def collectPath(self, testObj, path, **kwargs):
    """Copy a single test output file into the collection directory, expanding the
    configured output pattern and picking a fresh @UNIQUE@ number so nothing is overwritten."""
    basename, extension = os.path.splitext(os.path.basename(path))

    # substitute the pattern tokens one at a time for readability
    expanded = self.outputPattern.replace('@TESTID@', str(testObj))
    expanded = expanded.replace('@FILENAME@', basename)
    expanded = expanded.replace('.@FILENAME_EXT@', extension)
    collectdest = toLongPathSafe(os.path.join(self.destDir, expanded))

    # find the lowest @UNIQUE@ number that doesn't collide with an existing file
    unique = 1
    while pathexists(collectdest.replace('@UNIQUE@', '%d' % (unique))):
        unique += 1
    collectdest = collectdest.replace('@UNIQUE@', '%d' % (unique))

    mkdir(os.path.dirname(collectdest))
    shutil.copyfile(toLongPathSafe(path.replace('/', os.sep)), collectdest)
    self.collectedFileCount += 1
def orderedgrep(file, exprList, encoding=None, flags=0):
    """Search for ordered matches to a set of regular expressions in an input file,
    returning None on success, and on failure a string indicating which expression was not found.

    The ordered grep method will only pass if matches to the set of regular expressions
    in the expression list occur in the input file in the order they appear in the
    expression list. Matches to the regular expressions do not have to be across sequential
    lines in the input file, only in the correct order. For example, for a file with contents ::

        A is for apple
        B is for book
        C is for cat
        D is for dog

    an expression list of ["^A.*$", "^C.*$", "^D.*$"] will succeed, whilst an expression
    list of ["^A.*$", "^C.$", "^B.$"] will fail.

    :param file: The full path to the input file
    :param exprList: A list of regular expressions (uncompiled) to search for in the input file
    :param encoding: Specifies the encoding to be used for opening the file, or None for default.
    :param flags: Zero or more ``re`` flags to use when compiling the expressions.
    :return: None on success, or on failure the string expression that was not found
        (with an indicator of its index in the array).
    :rtype: string
    :raises FileNotFoundException: Raised if the input file does not exist
    """
    # reversed shallow copy lets us pop() the next expression cheaply from the end;
    # deepcopy is unnecessary since the expressions are immutable strings, and we
    # avoid shadowing the 'list' builtin as the previous implementation did
    remaining = list(exprList)
    remaining.reverse()

    expr, exprIndex = remaining.pop(), 1
    regexpr = re.compile(expr, flags=flags)

    if not pathexists(file):
        raise FileNotFoundException('unable to find file "%s"' % (file))  # pragma: no cover

    with openfile(file, 'r', encoding=encoding) as f:
        for line in f:
            if regexpr.search(line) is not None:
                if len(remaining) == 0: return None  # success - found them all
                # advance to the next expression; matching resumes on subsequent lines
                expr, exprIndex = remaining.pop(), exprIndex + 1
                regexpr = re.compile(expr, flags=flags)

    return '#%d: %s' % (exprIndex, expr)  # the expression we were trying to match
def filegrep(file, expr, ignores=None, returnMatch=False, encoding=None, flags=0, mappers=None):
    """Search for matches to a regular expression in an input file, returning true if a match occurs.

    :param file: The full path to the input file
    :param expr: The regular expression (uncompiled) to search for in the input file
    :param ignores: Optional list of regular expression strings to ignore when searching file.
    :param returnMatch: return the regex match object instead of a simple boolean
    :param encoding: Specifies the encoding to be used for opening the file, or None for default.
    :param flags: Zero or more ``re`` flags to use when compiling the expressions.
    :param mappers: Mappers to pre-process the file.
    :return: success (True / False), unless returnMatch=True in which case it returns
        the regex match object (or None if not matched)
    :rtype: bool
    :raises FileNotFoundException: Raised if the input file does not exist
    """
    if not pathexists(file):
        raise FileNotFoundException("unable to find file \"%s\"" % (file))

    with openfile(file, 'r', encoding=encoding) as f:
        if log.isEnabledFor(logging.DEBUG):
            # read eagerly only when debugging, so the full contents can be logged
            contents = f.readlines()
            logContents("Contents of %s;" % os.path.basename(file), contents)
        else:
            # otherwise iterate lazily over the file object
            contents = f

        ignores = [re.compile(i, flags=flags) for i in (ignores or [])]
        regexpr = re.compile(expr, flags=flags)

        # None default avoids the mutable-default-argument pitfall;
        # None entries within the list are also tolerated and dropped
        mappers = [m for m in (mappers or []) if m is not None]

        for line in contents:
            # pre-process the line through each mapper; a None result discards the line
            for mapper in mappers:
                line = mapper(line)
                if line is None: break
            if line is None: continue

            m = regexpr.search(line)
            if m is not None:
                if not any(i.search(line) for i in ignores):
                    return m if returnMatch else True
        return None if returnMatch else False
def getmatches(file, regexpr, ignores=None, encoding=None, flags=0, mappers=None):
    """Look for matches on a regular expression in an input file, return a sequence of the matches.

    :param file: The full path to the input file
    :param regexpr: The regular expression used to search for matches
    :param ignores: A list of regexes which will cause matches to be discarded
    :param encoding: Specifies the encoding to be used for opening the file, or None for default.
    :param flags: Zero or more ``re`` flags to use when compiling the expressions.
    :param mappers: A list of callables used to pre-process each line before matching;
        a mapper returning None discards the line.
    :return: A list of the match objects
    :rtype: list
    :raises FileNotFoundException: Raised if the input file does not exist
    """
    matches = []
    # bug fix: compile with the caller-supplied flags; previously this was hard-coded
    # to flags=0, silently ignoring the flags parameter for the main expression
    # (while still applying it to the ignores below)
    rexp = re.compile(regexpr, flags=flags)

    log.debug("Looking for expression \"%s\" in input file %s" %(regexpr, file))

    ignores = [re.compile(i, flags=flags) for i in (ignores or [])]

    # None default avoids the mutable-default-argument pitfall;
    # None entries within the list are also tolerated and dropped
    mappers = [m for m in (mappers or []) if m is not None]

    if not pathexists(file):
        raise FileNotFoundException("unable to find file \"%s\"" % (file))

    with openfile(file, 'r', encoding=encoding) as f:
        for l in f:
            # pre-process the line through each mapper; a None result discards the line
            for mapper in mappers:
                l = mapper(l)
                if l is None: break
            if l is None: continue

            match = rexp.search(l)
            if match is not None:
                # discard any match whose line also matches an ignore expression
                if any(i.search(l) for i in ignores): continue

                log.debug(("Found match for line: %s" % l).rstrip())
                matches.append(match)
    return matches
def setup(self, numTests=0, cycles=1, xargs=None, threads=0, testoutdir=u'', runner=None, **kwargs):
    """Validate the plugin's configured properties and prepare a clean destination
    directory before the test run starts.

    Resolves destDir relative to the parent of the runner's output directory,
    deletes any previous contents, and pre-compiles the include/exclude regexes.

    :param runner: The runner instance; its ``output`` directory's parent is used
        to resolve relative destDir values.
    :raises UserError: If an unknown plugin property was configured.
    :raises Exception: If destDir/fileIncludesRegex are unset, or destDir resolves
        to the test root directory (detected via pysysproject.xml).
    """
    # reject any configured property that doesn't correspond to a class attribute
    for k in self.pluginProperties:
        if not hasattr(type(self), k):
            raise UserError('Unknown property "%s" for %s' % (k, self))

    self.runner = runner
    if not self.destDir: raise Exception('Cannot set destDir to ""')
    if not self.fileIncludesRegex:
        raise Exception('fileIncludesRegex must be specified for %s' % type(self).__name__)

    self.destDir = os.path.normpath(os.path.join(runner.output + '/..', self.destDir))
    # presence of a project file marks the test root dir, which we must never delete
    if pathexists(self.destDir + os.sep + 'pysysproject.xml'):
        raise Exception('Cannot set destDir to testRootDir')

    # the code below assumes (for long path safe logic) this includes correct slashes (if any)
    self.outputPattern = self.outputPattern.replace('/', os.sep).replace('\\', os.sep)

    if self.destArchive: self.destArchive = os.path.join(self.destDir, self.destArchive)

    if os.path.exists(self.destDir):
        deletedir(self.destDir)  # remove any existing archives (but not if this dir seems to have other stuff in it!)

    def prepRegex(exp):
        # compile a configured regex string, or return None if it's empty/unset
        if not exp: return None
        if not exp.endswith('$'):
            exp = exp + '$'  # by default require regex to match up to the end to avoid common mistakes
        return re.compile(exp)

    self.fileExcludesRegex = prepRegex(self.fileExcludesRegex)
    self.fileIncludesRegex = prepRegex(self.fileIncludesRegex)

    self.collectedFileCount = 0
def filediff(file1, file2, ignore=[], sort=True, replacementList=[], include=[], unifiedDiffOutput=None, encoding=None, stripWhitespace=True, flags=0):
    """Perform a file comparison between two (preprocessed) input files, returning true if the files are equivalent.

    The method reads in the files and loads the contents of each as a list of strings. The two files are
    said to be equal if the two lists are equal. The method allows for preprocessing of the string lists
    to trim down their contents prior to the comparison being performed. Preprocessing is either to remove
    entries from the lists which match any entry in a set of regular expressions, include only lines which
    match any entry in a set of regular expressions, replace certain keywords in the string values of each
    list with a set value (e.g. to replace time stamps etc), or to sort the lists before the comparison
    (e.g. where determinism may not exist). Verbose logging of the method occurs at DEBUG level showing the
    contents of the processed lists prior to the comparison being performed.

    :param file1: The full path to the first file to use in the comparison
    :param file2: The full path to the second file to use in the comparison, typically a reference file
    :param ignore: A list of regular expressions which remove entries in the input file contents before making the comparison
    :param sort: Boolean to sort the input file contents before making the comparison
    :param replacementList: A list of tuples (key, value) where matches to key are replaced with value in the input file contents before making the comparison
    :param stripWhitespace: If True, every line has leading and trailing whitespace stripped before comparison,
        which means indentation differences and whether the file ends with a blank line do not affect the outcome.
        If False, only newline characters are stripped.
    :param include: A list of regular expressions used to select lines from the input file contents to use in the comparison
    :param unifiedDiffOutput: If specified, indicates the full path of a file to which unified diff output will be written, if the diff fails.
    :param encoding: Specifies the encoding to be used for opening the file, or None for default.
    :return: success (True / False)
    :rtype: boolean
    :raises FileNotFoundException: Raised if either of the files do not exist
    """
    for file in file1, file2:
        if not pathexists(file):
            raise FileNotFoundException("unable to find file \"%s\"" % file)
    else:
        # NOTE: for-else - since the loop has no break, this clause always runs once
        # both files have been validated to exist

        stripchars = None if stripWhitespace else '\r\n'  # None means all whitespace

        with openfile(file1, 'r', encoding=encoding) as f:
            list1 = [i.strip(stripchars) for i in f]

        with openfile(file2, 'r', encoding=encoding) as f:
            list2 = [i.strip(stripchars) for i in f]

        # pre-process both sides identically: filter, replace, optionally sort
        list1 = trimContents(list1, ignore, exclude=True, flags=flags)
        list2 = trimContents(list2, ignore, exclude=True, flags=flags)
        list1 = trimContents(list1, include, exclude=False, flags=flags)
        list2 = trimContents(list2, include, exclude=False, flags=flags)
        list1 = replace(list1, replacementList, flags=flags)
        list2 = replace(list2, replacementList, flags=flags)
        if sort:
            list1.sort()
            list2.sort()

        logContents("Contents of %s after pre-processing;" % os.path.basename(file1), list1)
        logContents("Contents of %s after pre-processing;" % os.path.basename(file2), list2)
        if not list1 and not list2:
            # maybe this should be an exception... it's probably not what was intended
            log.warning('File comparison pre-processing has filtered out all lines from the files to be diffed, please check if this is intended: %s, %s',
                os.path.basename(file1), os.path.basename(file2))

        if list1 != list2:
            log.debug("Unified diff between pre-processed input files;")
            # difflib needs lines with trailing newlines
            l1 = []
            l2 = []
            for i in list1: l1.append("%s\n" % i)
            for i in list2: l2.append("%s\n" % i)

            file1display = file1
            file2display = file2
            try:
                commonprefix = os.path.commonprefix([file1display, file2display])
            except ValueError:
                pass
            else:
                if commonprefix:
                    # heuristic to give a longer prefix than just basename (to distinguish reference+output files with same basename)
                    file1display = file1display[len(commonprefix):]
                    file2display = file2display[len(commonprefix):]

            # nb: have to switch 1 and 2 around to get the right diff for a typical output,ref file pair
            diff = ''.join(difflib.unified_diff(l2, l1,
                fromfile='%s (%d lines)' % (file2display, len(l2)),
                tofile='%s (%d lines)' % (file1display, len(l1)),
                ))
            if unifiedDiffOutput:
                with openfile(unifiedDiffOutput, 'w', encoding=encoding) as f:
                    f.write(diff)
            for line in diff.split('\n'): log.debug("  %s", line)

        if list1 == list2: return True
    return False
def cleanup(self, **kwargs):
    """Merge the collected JaCoCo coverage files and generate XML and HTML Java coverage
    reports using the JaCoCo CLI, then archive/publish the results.

    Does nothing (other than an info log line) if no ``.javacoverage`` files were generated.
    The report step is skipped if no application classpath was configured.
    """
    java = pysysjava.javaplugin.JavaPlugin()
    java.setup(self.runner)

    coverageDestDir = self.destDir
    assert os.path.isabs(coverageDestDir)  # The base class is responsible for absolutizing this config property
    coverageDestDir = os.path.normpath(fromLongPathSafe(coverageDestDir))
    if not pathexists(coverageDestDir):
        log.info('No Java coverage files were generated.')
        return

    log.info('Preparing Java coverage report in: %s', coverageDestDir)

    cliJar = safeGlob(self.jacocoDir + '/*jacoco*cli*.jar', expected='==1', name='JaCoCo CLI jar (from the jacocoDir)')

    # merge all per-process .javacoverage files into a single .exec database;
    # abort on failure since the report generation below depends on it
    coveragefiles = [f for f in os.listdir(coverageDestDir) if f.endswith('.javacoverage')]
    java.startJava(cliJar, ['merge'] + coveragefiles + ['--destfile', 'jacoco-merged-java-coverage.exec'],
        abortOnError=True,
        workingDir=coverageDestDir,
        stdouterr=coverageDestDir + '/java-coverage-merge',
        disableCoverage=True,
        onError=lambda process: 'Failed to merge Java code coverage data: %s' % self.runner.getExprFromFile(process.stderr, '.+', returnAll=True)[-1] or self.runner.logFileContents(process.stderr, maxLines=0))
    # the individual files are no longer needed once merged
    for f in coveragefiles:
        os.remove(toLongPathSafe(coverageDestDir + os.sep + f))

    classpath = java.toClasspathList(self.classpath)
    if not classpath:
        log.info('No Java report will be generated as no classpath was specified')
    else:
        log.debug('Application classpath for the coverage report is: \n%s',
            '\n'.join("  cp #%-2d : %s%s" % (i + 1, pathelement, '' if os.path.exists(pathelement) else ' (does not exist!)') for i, pathelement in enumerate(classpath)))

        sourceDirs = java.toClasspathList(self.sourceDirs)  # not really a classpath, but for consistency, parse it the same way

        args = []
        for x in classpath: args.extend(['--classfiles', x])
        for x in sourceDirs: args.extend(['--sourcefiles', x])

        if sourceDirs:
            # warn (rather than debug) if any configured source dir is missing
            (log.warn if any(not os.path.exists(p) for p in sourceDirs) else log.debug)(
                'Java source directories for the coverage report are: \n%s',
                '\n'.join("  dir #%-2d : %s%s" % (i + 1, pathelement, '' if os.path.exists(pathelement) else ' (does not exist!)') for i, pathelement in enumerate(sourceDirs)))
        else:
            log.info('No source directories were provided so the coverage HTML report will not include line-by-line highlighted source files')

        # generate both the XML (machine-readable) and HTML (human-readable) reports
        java.startJava(cliJar, ['report', 'jacoco-merged-java-coverage.exec', '--xml', 'java-coverage.xml', '--html', '.'] + java._splitShellArgs(self.reportArgs) + args,
            abortOnError=True,
            workingDir=coverageDestDir,
            stdouterr=coverageDestDir + '/java-coverage-report',
            disableCoverage=True,
            onError=lambda process: 'Failed to create Java code coverage report: %s' % self.runner.getExprFromFile(process.stderr, '.+', returnAll=True)[-1] or self.runner.logFileContents(process.stderr, maxLines=0))

    # to avoid confusion, remove any zero byte out/err files from the above
    for p in os.listdir(coverageDestDir):
        p = os.path.join(coverageDestDir, p)
        if p.endswith(('.out', '.err')) and os.path.getsize(p) == 0: os.remove(p)

    try:
        self.archiveAndPublish()
    except PermissionError:  # pragma: no cover - can occur transiently on Windows due to file system locking
        time.sleep(5.0)
        self.archiveAndPublish()
def getTemplates(self):
    """Collect and validate the test-maker templates applicable to this directory.

    Starts from the built-in and project-level templates, then walks from the project
    root down to (and including) the parent directory, merging in templates from each
    ``pysysdirconfig.xml`` found on the way; deeper templates take priority over (and
    are listed before) those defined higher up. Returns the resulting list of template
    dicts, or None if directory-config processing is disabled for this location.
    """
    project = self.project
    projectroot = os.path.normpath(os.path.dirname(project.projectFile))

    dir = self.parentDir
    DIR_CONFIG_DESCRIPTOR = 'pysysdirconfig.xml'
    if not project.projectFile or not dir.startswith(projectroot):
        log.debug('Project file does not exist under "%s" so processing of %s files is disabled', dir, DIR_CONFIG_DESCRIPTOR)
        return None

    from pysys.config.descriptor import _XMLDescriptorParser  # uses a non-public API, so please don't copy this into your own test maker

    # load any descriptors between the project dir up to (AND including) the dir we'll be walking
    searchdirsuffix = dir[len(projectroot) + 1:].split(os.sep) if len(dir) > len(projectroot) else []

    DEFAULT_DESCRIPTOR = _XMLDescriptorParser.DEFAULT_DESCRIPTOR

    def expandAndValidateTemplate(t, defaults):
        # expand property references and dir-config defaults into template dict t,
        # validating the name, copy globs and replacement regexes as we go
        source = t.get('source', '<unknown source>')
        if defaults is None: defaults = DEFAULT_DESCRIPTOR

        if t['name'].lower().replace('_', '').replace(' ', '') != t['name']:
            raise UserError(  # enforce this to make them easy to type on cmd line, and consistent
                "Invalid template name \"%s\" - must be lowercase and use hyphens not underscores/spaces for separating words, in \"%s\"" % (t['name'], source))

        source = t.get('source', None)
        if t['mkdir'] is None:
            # default dirs to create: output+reference, plus input unless it's the
            # special "use test dir if no Input dir" sentinel value
            t['mkdir'] = [defaults.output, defaults.reference]
            if defaults.input not in ['!Input_dir_if_present_else_testDir!', '!INPUT_DIR_IF_PRESENT_ELSE_TEST_DIR!']:
                t['mkdir'].append(defaults.input)
        t['testOutputDir'] = defaults.output

        # resolve copy patterns relative to the descriptor that defined this template
        t['copy'] = [os.path.normpath(os.path.join(os.path.dirname(source) if source else '', project.expandProperties(x).strip())) for x in t['copy']]
        copy = []
        for c in t['copy']:
            globbed = glob.glob(c)
            if not globbed:
                raise UserError('Cannot find any file or directory "%s" in maker template "%s" of "%s"' % (c, t['name'], source))
            copy.extend(globbed)
        t['copy'] = copy

        t['replace'] = [(r1, project.expandProperties(r2)) for (r1, r2) in t['replace']]
        for r1, r2 in t['replace']:
            # fail fast on an invalid regex rather than at substitution time
            try:
                re.compile(r1)
            except Exception as ex:
                raise UserError('Invalid replacement regular expression "%s" in maker template "%s" of "%s": %s' % (r1, t['name'], source, ex))

        return t

    # start with the built-ins and project
    templates = [expandAndValidateTemplate(t, project._defaultDirConfig) for t in self.__PYSYS_DEFAULT_TEMPLATES]
    if project._defaultDirConfig:
        templates = [expandAndValidateTemplate(t, project._defaultDirConfig) for t in project._defaultDirConfig._makeTestTemplates] + templates

    parentDirDefaults = None
    for i in range(len(searchdirsuffix) + 1):  # up to AND including dir
        if i == 0:
            currentdir = projectroot
        else:
            currentdir = projectroot + os.sep + os.sep.join(searchdirsuffix[:i])

        if pathexists(currentdir + os.sep + DIR_CONFIG_DESCRIPTOR):
            parentDirDefaults = _XMLDescriptorParser.parse(currentdir + os.sep + DIR_CONFIG_DESCRIPTOR, parentDirDefaults=parentDirDefaults, istest=False, project=project)
            newtemplates = [expandAndValidateTemplate(t, parentDirDefaults) for t in parentDirDefaults._makeTestTemplates]
            log.debug('Loaded directory configuration descriptor from %s: \n%s', currentdir, parentDirDefaults)

            # Add in existing templates from higher levels, but de-dup'd, giving priority to the latest defined template, and also putting the latest ones at the top of the list
            # for increased prominence
            for deftmpl in templates:
                if not any(tmpl['name'] == deftmpl['name'] for tmpl in newtemplates):
                    newtemplates.append(deftmpl)
            templates = newtemplates

    log.debug('Loaded templates: \n%s', json.dumps(templates, indent='  '))
    return templates
def loadDescriptors(self, dir, **kwargs):
    """Find all descriptors located under the specified directory, and return them as a list.

    Subclasses may change the returned descriptors and/or add additional
    instances of their own to the list after calling the super implementation::

        descriptors = super(CustomDescriptorLoader, self).loadDescriptors(dir, **kwargs)
        ...
        return descriptors

    :param dir: The parent directory to search for runnable tests.

    :return: List of L{pysys.xml.descriptor.TestDescriptor} objects
        which could be selected for execution.

        If a test can be run in multiple modes there must be a single descriptor
        for it in the list returned from this method. Each multi-mode
        descriptor is later expanded out into separate mode-specific
        descriptors (at the same time as descriptor filtering based on
        command line arguments, and addition of project-level
        execution-order), before the final list is sorted and passed to
        L{pysys.baserunner.BaseRunner}.

        The order of the returned list is random, so the caller is responsible
        for sorting this list to ensure deterministic behaviour.

    :rtype: list
    :raises UserError: Raised if no testcases can be found.
    """
    assert not kwargs, 'reserved for future use: %s' % kwargs.keys()
    assert self.project, 'project must be specified'
    assert dir, 'dir must be specified'
    assert os.path.isabs(dir), 'dir must be an absolute path: %s' % dir

    project = self.project

    descriptors = []
    ignoreSet = set(OSWALK_IGNORES)

    # the filenames that identify a test descriptor; configurable via project property
    descriptorSet = set([s.strip() for s in project.getProperty('pysysTestDescriptorFileNames', default=','.join(DEFAULT_DESCRIPTOR)).split(',')])

    assert project.projectFile != None
    log = logging.getLogger('pysys.launcher')

    # although it's highly unlikely, if any test paths did slip outside the Windows 256 char limit,
    # it would be very dangerous to skip them (which is what os.walk does unless passed a \\?\ path),
    # so must use long-path-safe - but need to re-encode from unicode string back to bytestring in Python 2
    i18n_reencode = locale.getpreferredencoding() if PY2 and isinstance(dir, str) else None

    dir = toLongPathSafe(os.path.normpath(dir))
    assert os.path.exists(dir), dir  # sanity check
    if project.projectFile:
        projectroot = toLongPathSafe(os.path.normpath(os.path.dirname(project.projectFile)))

    DIR_CONFIG_DESCRIPTOR = 'pysysdirconfig.xml'
    if not project.projectFile or not dir.startswith(projectroot):
        dirconfigs = None
        log.debug('Project file does not exist under "%s" so processing of %s files is disabled', dir, DIR_CONFIG_DESCRIPTOR)
    else:
        # find directory config descriptors between the project root and the testcase
        # dirs. We deliberately use project dir not current working dir since
        # we don't want descriptors to be loaded differently depending on where the
        # tests are run from (i.e. should be independent of cwd).
        dirconfigs = {}

        # load any descriptors between the project dir up to (but not including) the dir we'll be walking
        searchdirsuffix = dir[len(projectroot) + 1:].split(os.sep) if len(dir) > len(projectroot) else []
        currentconfig = None
        for i in range(len(searchdirsuffix)):  # up to but not including dir
            if i == 0:
                currentdir = projectroot
            else:
                currentdir = projectroot + os.sep + os.sep.join(searchdirsuffix[:i])
            if pathexists(currentdir + os.sep + DIR_CONFIG_DESCRIPTOR):
                currentconfig = self._parseTestDescriptor(currentdir + os.sep + DIR_CONFIG_DESCRIPTOR, parentDirDefaults=currentconfig, isDirConfig=True)
                log.debug('Loaded directory configuration descriptor from %s: \n%s', currentdir, currentconfig)
        # this is the top-level directory that will be checked below
        dirconfigs[os.path.dirname(dir)] = currentconfig

    for root, dirs, files in os.walk(toLongPathSafe(dir)):
        # a pysysignore file prunes this whole subtree from the search
        ignorematch = next((f for f in files if (f == '.pysysignore' or f == 'pysysignore')), None)
        if ignorematch:
            log.debug('Skipping directory %s due to ignore file %s', root, ignorematch)
            del dirs[:]
            continue

        parentconfig = None
        if dirconfigs is not None:
            # inherit the dir config stashed for this directory's parent, then
            # layer on any dir config descriptor present in this directory itself
            parentconfig = dirconfigs[os.path.dirname(root)]
            if next((f for f in files if (f == DIR_CONFIG_DESCRIPTOR)), None):
                parentconfig = self._parseTestDescriptor(root + os.sep + DIR_CONFIG_DESCRIPTOR, parentDirDefaults=parentconfig, isDirConfig=True)
                log.debug('Loaded directory configuration descriptor from %s: \n%s', root, parentconfig)

        # allow subclasses to modify descriptors list and/or avoid processing
        # subdirectories
        if self._handleSubDirectory(root, dirs, files, descriptors, parentDirDefaults=parentconfig):
            del dirs[:]
            continue

        intersection = descriptorSet & set(files)
        if intersection:
            descriptorfile = fromLongPathSafe(os.path.join(root, intersection.pop()))
            # PY2 gets messed up if we start passing unicode rather than byte str objects here,
            # as it proliferates to all strings in each test
            if i18n_reencode is not None: descriptorfile = descriptorfile.encode(i18n_reencode)

            try:
                parsed = self._parseTestDescriptor(descriptorfile, parentDirDefaults=parentconfig)
                if parsed:
                    descriptors.append(parsed)
            except UserError:
                raise  # no stack trace needed, will already include descriptorfile name
            except Exception as e:
                log.info('Failed to read descriptor: ', exc_info=True)
                raise Exception("Error reading descriptor file '%s': %s - %s" % (descriptorfile, e.__class__.__name__, e))

            # if this is a test dir, it never makes sense to look at sub directories
            del dirs[:]
            continue

        for ignore in (ignoreSet & set(dirs)):
            dirs.remove(ignore)

        if dirconfigs is not None and len(dirs) > 0:
            # stash it for when we navigate down to subdirectories
            # only add to dict if we're continuing to process children
            dirconfigs[root] = parentconfig

    return descriptors