def reportResult(self, testobj, value, resultKey, unit, toleranceStdDevs=None, resultDetails=None):
	"""Append a single performance result to the shared JSON run summary file.
	
	The first result written to a given summary file also emits the opening JSON 
	preamble (the runDetails object and the start of the results array); every 
	subsequent result is appended as a comma-separated entry. The closing 
	tokens are written elsewhere at cleanup time.
	
	:param testobj: The test object reporting this result.
	:param value: The measured value.
	:param resultKey: The unique key identifying this result.
	:param unit: The unit object (stringified for output; its biggerIsBetter flag is recorded).
	:param toleranceStdDevs: Optional tolerance in standard deviations.
	:param resultDetails: Optional dict of extra details; an empty dict is written if None.
	"""
	summaryPath = self.getRunSummaryFile(testobj)
	mkdir(os.path.dirname(summaryPath))
	# build the JSON record up front; one object is dumped per reported result
	record = {
		'resultKey': resultKey,
		'value': value,
		'unit': str(unit),
		'biggerIsBetter': unit.biggerIsBetter,
		'samples': 1,
		'stdDev': 0,
		'toleranceStdDevs': toleranceStdDevs,
		'testId': testobj.descriptor.id,
		'resultDetails': resultDetails or {},
	}
	with self._lock:
		isNewFile = not os.path.exists(toLongPathSafe(summaryPath))
		with io.open(toLongPathSafe(summaryPath), 'a', encoding='utf-8') as out:
			if isNewFile:
				testobj.log.info('Creating performance summary log file at: %s', os.path.normpath(summaryPath))
				out.write('{"runDetails": ')
				json.dump(self.getRunDetails(testobj), out)
				out.write(', "results":[\n')
			else:
				out.write(',\n')
			json.dump(record, out)
		self.__summaryFilesWritten.add(summaryPath)
def archiveAndPublish(self):
	"""
	Generate an archive of the destDir (if configured) and publish artifacts (if configured). 
	
	Called by default as part of `cleanup()`. 
	"""
	if self.destArchive:
		mkdir(os.path.dirname(toLongPathSafe(self.destArchive)))
		with zipfile.ZipFile(toLongPathSafe(self.destArchive), 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as archive:
			rootlen = len(self.destDir)
			for base, dirs, files in os.walk(self.destDir):
				for f in files:
					# never add the archive file to itself
					if os.path.normpath(os.path.join(base, f)) == os.path.normpath(self.destArchive): continue
					fn = os.path.join(base, f)
					# rootlen doesn't include the path separator after destDir, so strip the 
					# leading '/' to ensure member names are relative paths within the zip
					archive.write(fn, fn[rootlen:].replace('\\', '/').lstrip('/'))
	if self.publishArtifactDirCategory:
		self.runner.publishArtifact(self.destDir, self.publishArtifactDirCategory)
	if self.publishArtifactArchiveCategory and self.destArchive:
		self.runner.publishArtifact(self.destArchive, self.publishArtifactArchiveCategory)
def unzip(zfilename, replace=False, binary=False):
	"""Unzip a .gz archive and write the contents to disk.

	Deprecated - replace ``unzip(gzfilename, binary=True)`` with ``self.unpackArchive(gzfilename, gzfilename[:-3])``

	The method will unpack a file of the form C{file.data.gz} to C{file.data}, removing the 
	archive file in the process if the C{replace} input parameter is set to true. By default 
	the unpacked archive is treated as non-binary data, unless the binary input parameter is 
	set to true.

	:param zfilename: The full path to the archive file.
	:param replace: Boolean flag to indicate if the archive file should be removed after unpacking.
	:param binary: Boolean flag to indicate if the unzipped file should be written as binary. 
		The default value of False indicates that on some platforms newline characters will 
		be converted to the operating system default.
	:raises pysys.exceptions.FileNotFoundException: Raised if the archive file does not exist.
	:raises pysys.exceptions.IncorrectFileTypeException: Raised if the archive file does not have a .gz extension.
	"""
	if not os.path.exists(zfilename):
		raise FileNotFoundException("unable to find file %s" % (os.path.basename(zfilename)))

	# requires a real '.gz' suffix (a bare "gz" filename is also rejected)
	if not zfilename.endswith('.gz'):
		raise IncorrectFileTypeException("file does not have a .gz extension")

	# the unpacked filename is simply the archive path minus the '.gz' suffix
	uzfilename = zfilename[:-3]

	# must read and write in binary in all cases, since we don't know for
	# certain what encoding it's in and want to avoid corrupting
	# non-newline characters
	with gzip.GzipFile(toLongPathSafe(zfilename), 'rb', 9) as zfile:
		with open(toLongPathSafe(uzfilename), 'wb') as uzfile:
			if binary:
				# do an efficient block-by-block copy, in case it's large
				shutil.copyfileobj(zfile, uzfile)
			else:
				# non-binary means fix newlines.
				# for compatibility with pre-1.3 PySys this is currently
				# implemented for basic cases and only on windows.
				buffer = zfile.read()
				if PLATFORM == 'win32':
					buffer = buffer.replace(b'\n', b'\r\n')
				uzfile.write(buffer)

	if replace:
		try:
			os.remove(zfilename)
		except OSError:
			pass
def filecopy(src, dst):
	"""Copy source file to a destination file.

	:param src: Full path to the source filename
	:param dst: Full path the destination filename
	:raises FileNotFoundException: Raised if the source file does not exist
	"""
	# guard clause: refuse to copy a missing source
	if not pathexists(src):
		raise FileNotFoundException("unable to find file %s" % (os.path.basename(src)))
	sourcePath = toLongPathSafe(src)
	destPath = toLongPathSafe(dst)
	shutil.copyfile(sourcePath, destPath)
def collectPath(self, testObj, path, **kwargs):
	"""Copy the given file into the collection destination directory, naming it 
	according to the configured outputPattern.
	
	The pattern's @TESTID@, @FILENAME@ and @FILENAME_EXT@ placeholders are 
	substituted, and @UNIQUE@ is replaced with the first integer (from 1) that 
	does not collide with an existing collected file.
	"""
	baseName, extension = os.path.splitext(os.path.basename(path))
	substituted = self.outputPattern \
		.replace('@TESTID@', str(testObj)) \
		.replace('@FILENAME@', baseName) \
		.replace('.@FILENAME_EXT@', extension)
	template = toLongPathSafe(os.path.join(self.destDir, substituted))
	# find the first @UNIQUE@ index whose destination doesn't already exist
	unique = 1
	while pathexists(template.replace('@UNIQUE@', '%d' % (unique))):
		unique += 1
	target = template.replace('@UNIQUE@', '%d' % (unique))
	mkdir(os.path.dirname(target))
	shutil.copyfile(toLongPathSafe(path.replace('/', os.sep)), target)
	self.collectedFileCount += 1
def recordResult(self, formatted, testobj):
	"""Record results to the performance summary file.

	Writes the pre-formatted result line both to a per-test 
	performance_results.csv (for convenient triaging) and to the shared 
	run summary file, creating either file with a header line on first use.

	:param formatted: the formatted string to write
	:param testobj: object reference to the calling test
	"""
	encoding = None if PY2 else 'utf-8'
	# per-test file in the test output directory
	localPath = testobj.output + '/performance_results.csv'
	if not os.path.exists(localPath):
		with openfile(localPath, 'w', encoding=encoding) as f:
			f.write(self.getRunHeader())
	with openfile(localPath, 'a', encoding=encoding) as f:
		f.write(formatted)
	# now the shared summary file for the whole run
	summaryPath = self.getRunSummaryFile(testobj)
	mkdir(os.path.dirname(summaryPath))
	with self._lock:
		needsHeader = not os.path.exists(toLongPathSafe(summaryPath))
		with openfile(summaryPath, 'a', encoding=encoding) as f:
			if needsHeader:
				testobj.log.info('Creating performance summary log file at: %s', os.path.normpath(summaryPath))
				f.write(self.getRunHeader())
			f.write(formatted)
		self.__summaryFilesWritten.add(summaryPath)
def setup(self, numTests=0, cycles=1, xargs=None, threads=0, testoutdir=u'', runner=None, **kwargs):
	"""Prepare this writer at the start of the run: resolve destDir, compile the 
	include/exclude regexes, initialize the size/count budgets, and install a 
	publishArtifact hook on the runner that fans out to all ArtifactPublisher writers.
	"""
	self.runner = runner
	if not self.destDir:
		raise Exception('Cannot set destDir to ""')
	resolved = self.destDir.replace('@OUTDIR@', os.path.basename(runner.outsubdir))
	self.destDir = toLongPathSafe(os.path.normpath(os.path.join(runner.project.root, resolved)))
	# remove any existing archives (but not if this dir seems to have other stuff in it!)
	if os.path.exists(self.destDir):
		if all(f.endswith(('.txt', '.zip')) for f in os.listdir(self.destDir)):
			deletedir(self.destDir)

	self.archiveAtEndOfRun = str(self.archiveAtEndOfRun).lower() == 'true'
	self.fileExcludesRegex = None if not self.fileExcludesRegex else re.compile(self.fileExcludesRegex)
	self.fileIncludesRegex = None if not self.fileIncludesRegex else re.compile(self.fileIncludesRegex)
	self.maxArchiveSizeMB = float(self.maxArchiveSizeMB)
	self.maxArchives = int(self.maxArchives)
	self.__totalBytesRemaining = int(float(self.maxTotalSizeMB) * 1024 * 1024)

	if self.archiveAtEndOfRun:
		self.queuedInstructions = []

	self.skippedTests = []
	self.archivesCreated = 0

	self.__artifactWriters = [w for w in self.runner.writers if isinstance(w, ArtifactPublisher)]

	def pub(path, category):
		# normalize to a non-long-path, forward-slash form before publishing
		normalized = fromLongPathSafe(path).replace('\\', '/')
		for writer in self.__artifactWriters:
			writer.publishArtifact(normalized, category)
	self.runner.publishArtifact = pub
def archiveAndPublish(self):
	"""
	Generate an archive of the destDir (if configured) and publish artifacts (if configured). 
	
	Called by default as part of `cleanup()`. Individual files that cannot be 
	added to the archive (even after one retry) are replaced by a 
	``.pysyserror.txt`` placeholder member rather than aborting the archive.
	"""
	if self.destArchive:
		mkdir(os.path.dirname(toLongPathSafe(self.destArchive)))
		with zipfile.ZipFile(toLongPathSafe(self.destArchive), 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as archive:
			rootlen = len(self.destDir)
			for base, dirs, files in os.walk(self.destDir):
				for f in files:
					# never add the archive file to itself
					if os.path.normpath(os.path.join(base, f)) == os.path.normpath(self.destArchive): continue
					fn = os.path.join(base, f)
					# member names are destDir-relative, with forward slashes and no leading '/'
					destname = fn[rootlen:].replace('\\', '/').lstrip('/')
					try:
						try:
							archive.write(fn, destname)
						except PermissionError: # pragma: no cover - can happen on windows due to file system locking issues
							# retry once after a short delay in case the lock was transient
							time.sleep(5.0)
							archive.write(fn, destname)
					except Exception as ex: # pragma: no cover
						# Deal with failures (even after retry) - don't abort the whole archive
						# (e.g. a locked .err file in coverage output dir doesn't matter)
						log.warning('Could not write file to archive %s: "%s" - %s: %s', os.path.basename(self.destArchive), fn, ex.__class__.__name__, ex)
						# leave a placeholder member so it's obvious which file is missing and why
						archive.writestr(destname + '.pysyserror.txt', '!!! PySys could not write this file to the archive - %s: %s' % (ex.__class__.__name__, ex))
	if self.publishArtifactDirCategory:
		self.runner.publishArtifact(self.destDir, self.publishArtifactDirCategory)
	if self.publishArtifactArchiveCategory and self.destArchive:
		self.runner.publishArtifact(self.destArchive, self.publishArtifactArchiveCategory)
def openfile(path, mode='r', encoding=None, errors=None, **kwargs):
	"""
	Open the specified file, following the default ``open()`` semantics for 
	this Python version unless an encoding is explicitly specified, in which 
	case a file stream yielding (unicode) character strings is always returned. 
	
	Specifically: on Python 3 a character stream is returned unless a binary 
	mode was requested (in which case a byte stream is returned); on Python 2 
	a unicode character stream is returned only when an encoding was 
	explicitly given, otherwise the stream yields "str" bytes objects.
	
	:param path: The path to open; must be an absolute path. Even on Windows 
		this path can be long (e.g. more than the usual 256 character Windows limit). 
	:param mode: The file mode, e.g. 'r' for reading, 'wb' for binary writing. 
	:param encoding: The encoding used to translate between the bytes of the 
		file and the characters of the returned stream; must be None if the 
		mode specifies binary. 
	:param errors: Optional string specifying how encoding/decoding errors 
		are handled, such as 'strict', 'ignore', 'replace'; see the io module 
		documentation. Ignored when falling back to the Python 2 open() 
		built-in with bytes mode, which does not support it. 
	:param kwargs: Any additional args to be passed to open() or io.open(). 
	:return: A file stream of either unicode characters or binary bytes; close 
		it when no longer required. 
	"""
	# sanity check to avoid accidentally creating files in cwd rather than test output directory
	assert path
	assert os.path.isabs(path), path
	if encoding:
		__log.debug('Opening file using encoding=%s: %s', encoding, path)

	from pysys.utils.fileutils import toLongPathSafe # import here to avoid circular dependency
	path = toLongPathSafe(path, onlyIfNeeded=True)

	useIOModule = bool(encoding) or (not PY2)
	if not useIOModule:
		# Python 2 with no encoding: plain bytes-oriented built-in open
		return open(path, mode=mode, **kwargs)
	if encoding:
		assert 'b' not in mode, 'cannot open file %s with binary mode %s as an encoding was specified' % (path, mode)
	return io.open(path, mode=mode, encoding=encoding, errors=errors, **kwargs)
def _writeXMLDocument(self, document, testObj, **kwargs):
	"""Serialize the given DOM document and write it to a per-test XML report file in outputDir.
	
	The filename includes the cycle number when running more than one cycle.
	"""
	testId = testObj.descriptor.id
	if self.cycles > 1:
		reportName = 'TEST-%s.%s.xml' % (testId, self.cycle + 1)
	else:
		reportName = 'TEST-%s.xml' % (testId)
	reportPath = toLongPathSafe(os.path.join(self.outputDir, reportName))
	with io.open(reportPath, 'wb') as fp:
		fp.write(self._serializeXMLDocumentToBytes(document))
def load(src):
	""" Read the runDetails and results from the specified .csv file on disk.
	
	:param str src: The path to read. 
	:returns: A new `CSVPerformanceFile` instance. 
	
	.. versionadded:: 2.1
	"""
	with io.open(toLongPathSafe(src), 'r', encoding='utf-8') as f:
		contents = f.read()
	return CSVPerformanceFile(contents, name=src)
def cleanup(self, **kwargs):
	# Combines the per-test Python coverage data files, generates XML and HTML reports, 
	# then archives/publishes the coverage directory.
	pythonCoverageDir = self.destDir
	assert os.path.isabs(pythonCoverageDir)
	pythonCoverageDir = os.path.normpath(fromLongPathSafe(pythonCoverageDir))
	if not pathexists(pythonCoverageDir):
		log.info('No Python coverage files were generated.')
		return

	log.info('Preparing Python coverage report in: %s', pythonCoverageDir)

	# merge the individual data files into one; a failure here aborts since nothing else can proceed
	self.runner.startPython(['-m', 'coverage', 'combine'], abortOnError=True,
		workingDir=pythonCoverageDir, stdouterr=pythonCoverageDir + '/python-coverage-combine',
		disableCoverage=True,
		onError=lambda process: 'Failed to combine Python code coverage data: %s' % self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True) or self.runner.logFileContents(process.stderr, maxLines=0))

	# produces coverage.xml in a standard format that is useful to code coverage tools
	self.runner.startPython(['-m', 'coverage', 'xml'], abortOnError=False,
		workingDir=pythonCoverageDir, stdouterr=pythonCoverageDir + '/python-coverage-xml',
		disableCoverage=True,
		onError=lambda process: self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True) or self.runner.logFileContents(process.stderr, maxLines=0))

	# generate the human-readable HTML report; best-effort (abortOnError=False)
	self.runner.startPython(['-m', 'coverage', 'html', '-d', toLongPathSafe(pythonCoverageDir + '/htmlcov')] + self.getCoverageArgsList(),
		abortOnError=False,
		workingDir=pythonCoverageDir, stdouterr=pythonCoverageDir + '/python-coverage-html',
		disableCoverage=True,
		onError=lambda process: self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True) or self.runner.logFileContents(process.stderr, maxLines=0))

	# to avoid confusion, remove any zero byte out/err files from the above
	for p in os.listdir(pythonCoverageDir):
		p = os.path.join(pythonCoverageDir, p)
		if p.endswith(('.out', '.err')) and os.path.getsize(p) == 0:
			os.remove(p)

	self.archiveAndPublish()
def setup(self, numTests=0, cycles=1, xargs=None, threads=0, testoutdir=u'', runner=None, **kwargs):
	"""Validate the configured plugin properties and prepare this writer for the run.
	
	Resolves destDir (cleaning out any previous archive directory), compiles 
	the include/exclude regexes, initializes the size budget and parses/validates 
	the includeNonFailureOutcomes list.
	"""
	# fail fast on any configured property name that doesn't exist on this class
	for propertyName in self.pluginProperties:
		if not hasattr(type(self), propertyName):
			raise UserError('Unknown property "%s" for %s' % (propertyName, self))

	self.runner = runner
	if not self.destDir:
		raise Exception('Cannot set destDir to ""')
	# avoid double-expanding (which could mess up ${$} escapes), but if using default value we need to expand it
	if self.destDir == TestOutputArchiveWriter.destDir:
		self.destDir = runner.project.expandProperties(self.destDir)
	self.destDir = toLongPathSafe(os.path.normpath(os.path.join(runner.output + '/..', self.destDir)))
	# remove any existing archives (but not if this dir seems to have other stuff in it!)
	if os.path.exists(self.destDir):
		if all(f.endswith(('.txt', '.zip')) for f in os.listdir(self.destDir)):
			deletedir(self.destDir)

	self.fileExcludesRegex = None if not self.fileExcludesRegex else re.compile(self.fileExcludesRegex)
	self.fileIncludesRegex = None if not self.fileIncludesRegex else re.compile(self.fileIncludesRegex)
	self.__totalBytesRemaining = int(float(self.maxTotalSizeMB) * 1024 * 1024)

	if self.archiveAtEndOfRun:
		self.queuedInstructions = []

	self.skippedTests = []
	self.archivesCreated = 0

	# '*' means every outcome; otherwise a comma-separated list of display names (case-insensitive)
	if self.includeNonFailureOutcomes == '*':
		parsedOutcomes = [str(o) for o in OUTCOMES]
	else:
		parsedOutcomes = []
		for item in self.includeNonFailureOutcomes.split(','):
			item = item.strip()
			if item:
				parsedOutcomes.append(item.upper())
	self.includeNonFailureOutcomes = parsedOutcomes
	validNames = {str(outcome) for outcome in OUTCOMES}
	for o in self.includeNonFailureOutcomes:
		if o not in validNames:
			raise UserError('Unknown outcome display name "%s" in includeNonFailureOutcomes' % o)
def recordResult(self, formatted, testobj):
	"""Record results to the performance summary file.

	Writes the pre-formatted result line both to a per-test 
	performance_results.csv (for convenient triaging) and to the shared 
	run summary file, creating either file with a header line on first use.

	:meta private:

	:param formatted: the formatted string to write
	:param testobj: object reference to the calling test
	"""
	encoding = 'utf-8'

	def runHeader():
		# prefer the 2.0+ getRunHeader(testobj) signature, falling back to the older no-arg form
		try:
			return self.getRunHeader(testobj)
		except Exception: # pragma: no cover - for pre-2.0 signature
			return self.getRunHeader()

	# per-test file in the test output directory
	localPath = testobj.output + '/performance_results.csv'
	if not os.path.exists(localPath):
		with io.open(toLongPathSafe(localPath), 'w', encoding=encoding) as f:
			f.write(runHeader())
	with io.open(toLongPathSafe(localPath), 'a', encoding=encoding) as f:
		f.write(formatted)

	# now the shared summary file for the whole run
	summaryPath = self.getRunSummaryFile(testobj)
	mkdir(os.path.dirname(summaryPath))
	with self._lock:
		needsHeader = not os.path.exists(toLongPathSafe(summaryPath))
		with io.open(toLongPathSafe(summaryPath), 'a', encoding=encoding) as f:
			if needsHeader:
				testobj.log.info('Creating performance summary log file at: %s', os.path.normpath(summaryPath))
				f.write(runHeader())
			f.write(formatted)
		self.__summaryFilesWritten.add(summaryPath)
def cleanup(self):
	"""Finalize every summary file written during the run by appending the 
	closing JSON tokens, logging its location and optionally publishing it 
	as an artifact.
	"""
	with self._lock:
		if not self.__summaryFilesWritten:
			return
		for summaryPath in sorted(self.__summaryFilesWritten):
			# terminate the results array and the top-level object opened by the first write
			with io.open(toLongPathSafe(summaryPath), 'a', encoding='utf-8') as f:
				f.write('\n]}\n')
			displayPath = os.path.normpath(summaryPath).replace(os.path.normpath(self.project.testRootDir), '').lstrip('/\\')
			log.info('Performance results were written to: %s', displayPath)
			if self.publishArtifactCategory:
				self.runner.publishArtifact(summaryPath, self.publishArtifactCategory)
def dump(self, dest):
	""" Dump the runDetails and results from this object to a CSV at the 
	specified location. 
	
	Any existing file is overwritten. 
	
	:param str dest: The destination path or file handle to write to. 
	
	.. versionadded:: 2.1
	"""
	if isinstance(dest, str):
		# given a path: open it ourselves and recurse with the file handle
		with io.open(toLongPathSafe(dest), 'w', encoding='utf-8') as fileHandle:
			return self.dump(fileHandle)
	dest.write(self.makeCSVHeaderLine(self.runDetails))
	for result in self.results:
		dest.write(self.toCSVLine(result) + '\n')
def cleanup(self, **kwargs):
	# Merges the per-test .javacoverage data files using the JaCoCo CLI, generates 
	# XML+HTML coverage reports (if a classpath was configured), then archives/publishes.
	java = pysysjava.javaplugin.JavaPlugin()
	java.setup(self.runner)
	coverageDestDir = self.destDir
	assert os.path.isabs(coverageDestDir) # The base class is responsible for absolutizing this config property
	coverageDestDir = os.path.normpath(fromLongPathSafe(coverageDestDir))
	if not pathexists(coverageDestDir):
		log.info('No Java coverage files were generated.')
		return

	log.info('Preparing Java coverage report in: %s', coverageDestDir)

	cliJar = safeGlob(self.jacocoDir + '/*jacoco*cli*.jar', expected='==1', name='JaCoCo CLI jar (from the jacocoDir)')

	coveragefiles = [f for f in os.listdir(coverageDestDir) if f.endswith('.javacoverage')]
	# merge all per-test data files into a single .exec file; failure aborts since nothing else can proceed
	java.startJava(cliJar, ['merge'] + coveragefiles + ['--destfile', 'jacoco-merged-java-coverage.exec'],
		abortOnError=True,
		workingDir=coverageDestDir, stdouterr=coverageDestDir + '/java-coverage-merge',
		disableCoverage=True,
		onError=lambda process: 'Failed to merge Java code coverage data: %s' % self.runner.getExprFromFile(process.stderr, '.+', returnAll=True)[-1] or self.runner.logFileContents(process.stderr, maxLines=0))
	# the individual data files are no longer needed once merged
	for f in coveragefiles:
		os.remove(toLongPathSafe(coverageDestDir + os.sep + f))

	classpath = java.toClasspathList(self.classpath)
	if not classpath:
		log.info('No Java report will be generated as no classpath was specified')
	else:
		log.debug('Application classpath for the coverage report is: \n%s',
			'\n'.join(" cp #%-2d : %s%s" % (i + 1, pathelement, '' if os.path.exists(pathelement) else ' (does not exist!)')
				for i, pathelement in enumerate(classpath)))
		sourceDirs = java.toClasspathList(self.sourceDirs) # not really a classpath, but for consistency, parse it the same way
		args = []
		for x in classpath:
			args.extend(['--classfiles', x])
		for x in sourceDirs:
			args.extend(['--sourcefiles', x])
		if sourceDirs:
			# warn (rather than debug) if any of the given source dirs are missing
			(log.warn if any(not os.path.exists(p) for p in sourceDirs) else log.debug)(
				'Java source directories for the coverage report are: \n%s',
				'\n'.join(" dir #%-2d : %s%s" % (i + 1, pathelement, '' if os.path.exists(pathelement) else ' (does not exist!)')
					for i, pathelement in enumerate(sourceDirs)))
		else:
			log.info('No source directories were provided so the coverage HTML report will not include line-by-line highlighted source files')

		java.startJava(cliJar, ['report', 'jacoco-merged-java-coverage.exec', '--xml', 'java-coverage.xml', '--html', '.'] + java._splitShellArgs(self.reportArgs) + args,
			abortOnError=True,
			workingDir=coverageDestDir, stdouterr=coverageDestDir + '/java-coverage-report',
			disableCoverage=True,
			onError=lambda process: 'Failed to create Java code coverage report: %s' % self.runner.getExprFromFile(process.stderr, '.+', returnAll=True)[-1] or self.runner.logFileContents(process.stderr, maxLines=0))

	# to avoid confusion, remove any zero byte out/err files from the above
	for p in os.listdir(coverageDestDir):
		p = os.path.join(coverageDestDir, p)
		if p.endswith(('.out', '.err')) and os.path.getsize(p) == 0:
			os.remove(p)

	try:
		self.archiveAndPublish()
	except PermissionError: # pragma: no cover - can occur transiently on Windows due to file system locking
		time.sleep(5.0)
		self.archiveAndPublish()
def loadDescriptors(self, dir, **kwargs):
	"""Find all descriptors located under the specified directory, and 
	return them as a list.
	
	Subclasses may change the returned descriptors and/or add additional 
	instances of their own to the list after calling the super implementation::
	
	  descriptors = super(CustomDescriptorLoader, self).loadDescriptors(dir, **kwargs)
	  ...
	  return descriptors
	
	:param dir: The parent directory to search for runnable tests. 
	
	:return: List of L{pysys.xml.descriptor.TestDescriptor} objects 
		which could be selected for execution. 
		
		If a test can be run in multiple modes there must be a single descriptor 
		for it in the list returned from this method. Each multi-mode 
		descriptor is later expanded out into separate mode-specific 
		descriptors (at the same time as descriptor filtering based on 
		command line arguments, and addition of project-level 
		execution-order), before the final list is sorted and passed to 
		L{pysys.baserunner.BaseRunner}. 
		
		The order of the returned list is random, so the caller is responsible 
		for sorting this list to ensure deterministic behaviour. 
	
	:rtype: list
	:raises UserError: Raised if no testcases can be found.
	"""
	assert not kwargs, 'reserved for future use: %s' % kwargs.keys()
	assert self.project, 'project must be specified'
	assert dir, 'dir must be specified'
	assert os.path.isabs(dir), 'dir must be an absolute path: %s' % dir

	project = self.project

	descriptors = []
	ignoreSet = set(OSWALK_IGNORES)

	# the filenames that identify a test descriptor, configurable via project property
	descriptorSet = set([s.strip() for s in project.getProperty('pysysTestDescriptorFileNames', default=','.join(DEFAULT_DESCRIPTOR)).split(',')])

	assert project.projectFile != None
	log = logging.getLogger('pysys.launcher')

	# although it's highly unlikely, if any test paths did slip outside the Windows 256 char limit, 
	# it would be very dangerous to skip them (which is what os.walk does unless passed a \\?\ path), 
	# so must use long-path-safe - but need to re-encode from unicode string back to bytestring in Python 2
	i18n_reencode = locale.getpreferredencoding() if PY2 and isinstance(dir, str) else None

	dir = toLongPathSafe(os.path.normpath(dir))
	assert os.path.exists(dir), dir # sanity check
	if project.projectFile:
		projectroot = toLongPathSafe(os.path.normpath(os.path.dirname(project.projectFile)))

	DIR_CONFIG_DESCRIPTOR = 'pysysdirconfig.xml'
	if not project.projectFile or not dir.startswith(projectroot):
		dirconfigs = None
		log.debug('Project file does not exist under "%s" so processing of %s files is disabled', dir, DIR_CONFIG_DESCRIPTOR)
	else:
		# find directory config descriptors between the project root and the testcase 
		# dirs. We deliberately use project dir not current working dir since 
		# we don't want descriptors to be loaded differently depending on where the 
		# tests are run from (i.e. should be independent of cwd).
		dirconfigs = {}

		# load any descriptors between the project dir up to (but not including) the dir we'll be walking
		searchdirsuffix = dir[len(projectroot) + 1:].split(os.sep) if len(dir) > len(projectroot) else []
		currentconfig = None
		for i in range(len(searchdirsuffix)): # up to but not including dir
			if i == 0:
				currentdir = projectroot
			else:
				currentdir = projectroot + os.sep + os.sep.join(searchdirsuffix[:i])
			if pathexists(currentdir + os.sep + DIR_CONFIG_DESCRIPTOR):
				currentconfig = self._parseTestDescriptor(currentdir + os.sep + DIR_CONFIG_DESCRIPTOR, parentDirDefaults=currentconfig, isDirConfig=True)
				log.debug('Loaded directory configuration descriptor from %s: \n%s', currentdir, currentconfig)
		# this is the top-level directory that will be checked below
		dirconfigs[os.path.dirname(dir)] = currentconfig

	for root, dirs, files in os.walk(toLongPathSafe(dir)):
		# a .pysysignore/pysysignore file prunes this whole subtree from the search
		ignorematch = next((f for f in files if (f == '.pysysignore' or f == 'pysysignore')), None)
		if ignorematch:
			log.debug('Skipping directory %s due to ignore file %s', root, ignorematch)
			del dirs[:]
			continue

		parentconfig = None
		if dirconfigs is not None:
			# inherit the config stashed by our parent directory's iteration, then overlay any local dir config
			parentconfig = dirconfigs[os.path.dirname(root)]
			if next((f for f in files if (f == DIR_CONFIG_DESCRIPTOR)), None):
				parentconfig = self._parseTestDescriptor(root + os.sep + DIR_CONFIG_DESCRIPTOR, parentDirDefaults=parentconfig, isDirConfig=True)
				log.debug('Loaded directory configuration descriptor from %s: \n%s', root, parentconfig)

		# allow subclasses to modify descriptors list and/or avoid processing 
		# subdirectories
		if self._handleSubDirectory(root, dirs, files, descriptors, parentDirDefaults=parentconfig):
			del dirs[:]
			continue

		intersection = descriptorSet & set(files)
		if intersection:
			descriptorfile = fromLongPathSafe(os.path.join(root, intersection.pop()))
			# PY2 gets messed up if we start passing unicode rather than byte str objects here, 
			# as it proliferates to all strings in each test
			if i18n_reencode is not None:
				descriptorfile = descriptorfile.encode(i18n_reencode)

			try:
				parsed = self._parseTestDescriptor(descriptorfile, parentDirDefaults=parentconfig)
				if parsed:
					descriptors.append(parsed)
			except UserError:
				raise # no stack trace needed, will already include descriptorfile name
			except Exception as e:
				log.info('Failed to read descriptor: ', exc_info=True)
				raise Exception("Error reading descriptor file '%s': %s - %s" % (descriptorfile, e.__class__.__name__, e))

			# if this is a test dir, it never makes sense to look at sub directories
			del dirs[:]
			continue

		for ignore in (ignoreSet & set(dirs)):
			dirs.remove(ignore)

		if dirconfigs is not None and len(dirs) > 0:
			# stash it for when we navigate down to subdirectories
			# only add to dict if we're continuing to process children
			dirconfigs[root] = parentconfig

	return descriptors
def setup(self, **kwargs):
	# Creates the DOM for the test output summary and writes to logfile.
	self.numTests = kwargs["numTests"] if "numTests" in kwargs else 0
	self.logfile = os.path.join(self.outputDir or kwargs['runner'].output + '/..', self.logfile)
	try:
		self.fp = io.open(toLongPathSafe(self.logfile), "wb")

		impl = getDOMImplementation()
		self.document = impl.createDocument(None, "pysyslog", None)
		if self.stylesheet:
			# attach the configured XSL stylesheet processing instruction before the root element
			stylesheet = self.document.createProcessingInstruction("xml-stylesheet", "href=\"%s\" type=\"text/xsl\"" % (self.stylesheet))
			self.document.insertBefore(stylesheet, self.document.childNodes[0])

		# create the root and add in the status, number of tests and number completed
		self.rootElement = self.document.documentElement
		self.statusAttribute = self.document.createAttribute("status")
		self.statusAttribute.value = "running"
		self.rootElement.setAttributeNode(self.statusAttribute)
		self.completedAttribute = self.document.createAttribute("completed")
		self.completedAttribute.value = "%s/%s" % (self.numResults, self.numTests)
		self.rootElement.setAttributeNode(self.completedAttribute)

		# add the data node
		element = self.document.createElement("timestamp")
		element.appendChild(self.document.createTextNode(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
		self.rootElement.appendChild(element)

		# add the platform node
		element = self.document.createElement("platform")
		element.appendChild(self.document.createTextNode(PLATFORM))
		self.rootElement.appendChild(element)

		# add the test host node
		element = self.document.createElement("host")
		element.appendChild(self.document.createTextNode(HOSTNAME))
		self.rootElement.appendChild(element)

		# add the test root node (the project root dir as a URL)
		element = self.document.createElement("root")
		element.appendChild(self.document.createTextNode(self.__pathToURL(kwargs['runner'].project.root)))
		self.rootElement.appendChild(element)

		# add the extra params nodes
		element = self.document.createElement("xargs")
		if "xargs" in kwargs:
			for key in list(kwargs["xargs"].keys()):
				childelement = self.document.createElement("xarg")
				nameAttribute = self.document.createAttribute("name")
				valueAttribute = self.document.createAttribute("value")
				nameAttribute.value = key
				valueAttribute.value = kwargs["xargs"][key].__str__()
				childelement.setAttributeNode(nameAttribute)
				childelement.setAttributeNode(valueAttribute)
				element.appendChild(childelement)
		self.rootElement.appendChild(element)

		# write the file out
		self._writeXMLDocument()
	except Exception:
		# writers must not bring down the run; log the problem and carry on
		log.info("caught %s in XMLResultsWriter: %s", sys.exc_info()[0], sys.exc_info()[1], exc_info=1)
def tryDeserializePerformanceFile(path):
	"""Attempt to deserialize the given performance file.
	
	Returns a `PerformanceRunData` built from the file's runDetails and results 
	if it is a .json file, or None if the extension is not recognized by this reader.
	"""
	if not path.endswith('.json'):
		return None
	with io.open(toLongPathSafe(path), encoding='utf-8') as f:
		parsed = json.load(f)
	return PerformanceRunData(path, parsed['runDetails'], parsed['results'])
def _archiveTestOutputDir(self, id, outputDir, **kwargs):
	""" Creates an archive for the specified test, unless doing so would 
	violate the configured limits (e.g. maxArchives). 
	
	:param str id: The testId (plus a cycle suffix if it's a multi-cycle run). 
	:param str outputDir: The path of the test output dir. 
	"""
	# lazily create the destination dir on the first archive
	if self.archivesCreated == 0:
		mkdir(self.destDir)

	if self.archivesCreated == self.maxArchives:
		self.skippedTests.append(outputDir)
		log.debug('Skipping archiving for %s as maxArchives limit is reached', id)
		return
	if self.__totalBytesRemaining < 500:
		self.skippedTests.append(outputDir)
		log.debug('Skipping archiving for %s as maxTotalMB limit is reached', id)
		return
	self.archivesCreated += 1

	try:
		outputDir = toLongPathSafe(outputDir)
		skippedFiles = []

		# this is performance-critical so worth caching these
		fileExcludesRegex = self.fileExcludesRegex
		fileIncludesRegex = self.fileIncludesRegex
		isPurgableFile = self.runner.isPurgableFile

		# per-archive budget, capped by whatever remains of the whole-run budget
		bytesRemaining = min(int(self.maxArchiveSizeMB * 1024 * 1024), self.__totalBytesRemaining)
		triedTmpZipFile = False

		zippath, myzip = self._newArchive(id)
		filesInZip = 0
		with myzip:
			rootlen = len(outputDir) + 1 # include the slash

			for base, dirs, files in os.walk(outputDir):
				# Just the files, don't bother with the directories for now
				files.sort(key=lambda fn: [fn != 'run.log', fn]) # be deterministic, and put run.log first

				for f in files:
					fn = os.path.join(base, f)
					if fileExcludesRegex is not None and fileExcludesRegex.search(fn.replace('\\', '/')):
						skippedFiles.append(fn)
						continue
					if fileIncludesRegex is not None and not fileIncludesRegex.search(fn.replace('\\', '/')):
						skippedFiles.append(fn)
						continue

					fileSize = os.path.getsize(fn)
					if fileSize == 0:
						# Since (if not waiting until end) this gets called before testComplete has had a chance 
						# to clean things up, skip the files that it would have deleted. Don't bother listing 
						# these in skippedFiles since user won't be expecting them anyway
						continue

					if bytesRemaining < 500:
						skippedFiles.append(fn)
						continue

					try:
						if fileSize > bytesRemaining:
							if triedTmpZipFile:
								# to save effort, don't keep trying once we're close - from now on only attempt small files
								skippedFiles.append(fn)
								continue
							triedTmpZipFile = True
							# Only way to know if it'll fit is to try compressing it
							log.debug('File size of %s might push the archive above the limit; creating a temp zip to check', fn)
							tmpname, tmpzip = self._newArchive(id + '.tmp')
							try:
								with tmpzip:
									tmpzip.write(fn, 'tmp')
									compressedSize = tmpzip.getinfo('tmp').compress_size
									if compressedSize > bytesRemaining:
										log.debug('Skipping file as compressed size of %s bytes exceeds remaining limit of %s bytes: %s', compressedSize, bytesRemaining, fn)
										skippedFiles.append(fn)
										continue
							finally:
								os.remove(tmpname)

						# Here's where we actually add it to the real archive
						memberName = fn[rootlen:].replace('\\', '/')
						myzip.write(fn, memberName)
					except Exception as ex: # might happen due to file locking or similar
						log.warning('Failed to add output file "%s" to archive: %s', fn, ex)
						skippedFiles.append(fn)
						continue
					filesInZip += 1
					bytesRemaining -= myzip.getinfo(memberName).compress_size

			if skippedFiles and fileIncludesRegex is None: # keep the archive clean if there's an explicit include
				# record what was left out so the user can tell the archive is partial
				skippedFilesStr = os.linesep.join([fromLongPathSafe(f) for f in skippedFiles])
				skippedFilesStr = skippedFilesStr.encode('utf-8')
				myzip.writestr('__pysys_skipped_archive_files.txt', skippedFilesStr)

		if filesInZip == 0:
			# don't leave empty zips around
			log.debug('No files added to zip so deleting: %s', zippath)
			self.archivesCreated -= 1
			os.remove(zippath)
			return

		self.__totalBytesRemaining -= os.path.getsize(zippath)
		self.runner.publishArtifact(zippath, 'TestOutputArchive')

	except Exception:
		self.skippedTests.append(outputDir)
		raise
def perfReportsToolMain(args):
    """Command-line entry point for the perfreportstool utility.

    Supports two subcommands:

    - ``aggregate``: combines the specified .csv file(s) (or directories of
      them) into a single CSV written to stdout, with one row per resultKey
      whose 'value' is the mean of all values for that resultKey and whose
      'stdDev' holds the standard deviation.
    - ``compare``: prints a comparison from each listed performance file to
      the final one in the list.

    :param args: The command line arguments, excluding the program name
        (i.e. ``sys.argv[1:]``).
    :raises SystemExit: With exit code 1 if usage help is requested or the
        arguments are invalid.
    """
    USAGE = """
perfreportstool.py aggregate PATH1 PATH2... > aggregated.csv
perfreportstool.py compare PATH_GLOB1 PATH_GLOB2...

where  PATH is a .csv file or directory of .csv files
       GLOB_PATH is a path to a file or files, optionally containing by * and ** globs

The aggregate command combines the specifies CSVfile(s) to form a single file
with one row for each resultKey, with the 'value' equal to the mean of all
values for that resultKey and the 'stdDev' updated with the standard deviation.
This can also be used with one or more .csv file to aggregate results from
multiple cycles.

The compare command prints a comparison from each listed performance file to
the final one in the list. Note that the format of the output may change at
any time, and it is not intended for machine parsing.

"""
    # could later add support for automatically comparing files

    # Fix: check '-h' in args (not sys.argv) so help works consistently when this
    # function is invoked programmatically with an explicit argument list,
    # matching how '--help' and the other arguments are already handled.
    if '-h' in args or '--help' in args or len(args) < 2 or args[0] not in ['aggregate', 'compare']:
        sys.stderr.write(USAGE)
        sys.exit(1)

    cmd = args[0]

    # send log output to stderr to avoid interfering with output we might be redirecting to a file
    logging.basicConfig(format='%(levelname)s: %(message)s', stream=sys.stderr,
        level=getattr(logging, os.getenv('PYSYS_LOG_LEVEL', 'INFO').upper()))

    if cmd == 'aggregate':
        # Expand each argument to the list of .csv files it denotes
        paths = []
        for p in args[1:]:
            if os.path.isfile(p):
                paths.append(p)
            elif os.path.isdir(p):
                for (dirpath, dirnames, filenames) in os.walk(p):
                    for f in sorted(filenames):  # sorted for deterministic ordering
                        if f.endswith('.csv'):
                            paths.append(dirpath + '/' + f)
            else:
                raise Exception('Cannot find file: %s' % p)
        if not paths:
            raise Exception('No .csv files found')

        files = []
        for p in paths:
            with io.open(toLongPathSafe(os.path.abspath(p)), encoding='utf-8') as f:
                files.append(CSVPerformanceFile(f.read()))

        # use a distinct name for the aggregated result rather than reusing 'f',
        # which above refers to a file handle / filename
        aggregated = CSVPerformanceFile.aggregate(files)
        aggregated.dump(sys.stdout)
    elif cmd == 'compare':
        paths = args[1:]

        from pysys.config.project import Project
        project = Project.findAndLoadProject()

        # Can't easily get these classes from project without replicating the
        # logic to instantiate them, which would be error prone
        performanceReporterClasses = [CSVPerformanceReporter, JSONPerformanceReporter]

        gen = PerformanceComparisonGenerator(performanceReporterClasses)
        files = gen.loadFiles(baselineBaseDir=project.testRootDir, paths=paths)
        gen.logComparisons(files)
def parseArgs(self, args, printXOptions=None):
    """Parse the launcher command line arguments, configure log levels, load
    the project, and build the sorted list of test descriptors to run.

    :param args: The raw command line arguments (excluding the program name).
    :param printXOptions: Forwarded to ``self.printUsage`` when -h/--help is given.
    :return: Tuple of (record, purge, cycle, runnermode, threads, outsubdir,
        descriptors, userOptions).
    :raises SystemExit: With exit code 1 on any invalid argument.
    """
    # add any default args first; shlex.split does a great job of providing consistent parsing from str->list,
    # but need to avoid mangling \'s on windows; since this env var will be different for each OS no need for consistent win+unix behaviour
    if os.getenv('PYSYS_DEFAULT_ARGS', ''):
        log.info('Using PYSYS_DEFAULT_ARGS = %s' % os.environ['PYSYS_DEFAULT_ARGS'])
        args = shlex.split(os.environ['PYSYS_DEFAULT_ARGS'].replace(os.sep, os.sep * 2 if os.sep == '\\' else os.sep)) + args

    printLogsDefault = PrintLogs.ALL
    if '--ci' in args:
        # to ensure identical behaviour, set these as if on the command line
        # (printLogs we don't set here since we use the printLogsDefault mechanism to allow it to be overridden
        # by CI writers and/or the command line; note that setting --mode=ALL would be incorrect if
        # supportMultipleModesPerRun=false but that's a legacy options so we raise an exception later if this happened)
        args = ['--purge', '--record', '-j0', '--type=auto', '--mode=ALL', '-XcodeCoverage'] + args
        printLogsDefault = PrintLogs.FAILURES

    try:
        optlist, self.arguments = getopt.gnu_getopt(args, self.optionString, self.optionList)
    except Exception:
        log.warn("Error parsing command line arguments: %s" % (sys.exc_info()[1]))
        sys.exit(1)

    log.debug('PySys arguments: tests=%s options=%s', self.arguments, optlist)

    # NOTE(review): EXPR1/EXPR2 are compiled but not used within this method —
    # presumably historical; confirm before removing
    EXPR1 = re.compile("^[\w\.]*=.*$")
    EXPR2 = re.compile("^[\w\.]*$")

    printLogs = None
    ci = False  # NOTE(review): assigned but never read in this method
    defaultAbortOnError = None

    logging.getLogger('pysys').setLevel(logging.INFO)

    # as a special case, set a non-DEBUG log level for the implementation of assertions
    # so that it doesn't get enabled with -vDEBUG only -vassertions=DEBUG
    # as it is incredibly verbose and slow and not often useful
    logging.getLogger('pysys.assertions').setLevel(logging.INFO)

    for option, value in optlist:
        if option in ("-h", "--help"):
            self.printUsage(printXOptions)

        elif option in ['--ci']:
            continue  # handled above

        elif option in ("-r", "--record"):
            self.record = True

        elif option in ("-p", "--purge"):
            self.purge = True

        elif option in ("-v", "--verbosity"):
            # Value is either a bare level ("DEBUG") or "loggername=LEVEL";
            # a "python:" prefix targets a non-pysys logger category
            verbosity = value
            if '=' in verbosity:
                loggername, verbosity = value.split('=')
                assert not loggername.startswith('pysys.'), 'The "pysys." prefix is assumed and should not be explicitly specified'
                if loggername.startswith('python:'):
                    loggername = loggername[len('python:'):]
                    assert not loggername.startswith('pysys'), 'Cannot use python: with pysys.*'  # would produce a duplicate log handler
                    # in the interests of performance and simplicity we normally only add the pysys.* category
                    logging.getLogger(loggername).addHandler(pysys.internal.initlogging.pysysLogHandler)
                else:
                    loggername = 'pysys.' + loggername
            else:
                loggername = None

            if verbosity.upper() == "DEBUG":
                verbosity = logging.DEBUG
            elif verbosity.upper() == "INFO":
                verbosity = logging.INFO
            elif verbosity.upper() == "WARN":
                verbosity = logging.WARN
            elif verbosity.upper() == "CRIT":
                verbosity = logging.CRITICAL
            else:
                log.warn('Invalid log level "%s"' % verbosity)
                sys.exit(1)

            if loggername is None:
                # when setting global log level to a higher level like WARN etc we want to affect stdout but
                # not necessarily downgrade the root level (would make run.log less useful and break
                # some PrintLogs behaviour)
                stdoutHandler.setLevel(verbosity)
                if verbosity == logging.DEBUG:
                    logging.getLogger('pysys').setLevel(logging.DEBUG)
            else:
                # for specific level setting we need the opposite - only change stdoutHandler if we're
                # turning up the logging (since otherwise it wouldn't be seen) but also change the specified level
                logging.getLogger(loggername).setLevel(verbosity)

        elif option in ("-a", "--type"):
            self.type = value
            if self.type not in ["auto", "manual"]:
                log.warn("Unsupported test type - valid types are auto and manual")
                sys.exit(1)

        elif option in ("-t", "--trace"):
            self.trace = value

        elif option in ("-i", "--include"):
            self.includes.append(value)

        elif option in ("-e", "--exclude"):
            self.excludes.append(value)

        elif option in ("-c", "--cycle"):
            try:
                self.cycle = int(value)
            except Exception:
                print("Error parsing command line arguments: A valid integer for the number of cycles must be supplied")
                sys.exit(1)

        elif option in ("-o", "--outdir"):
            value = os.path.normpath(value)
            # strip any \\?\ long-path prefix from absolute windows paths
            if os.path.isabs(value) and not value.startswith('\\\\?\\'):
                value = fromLongPathSafe(toLongPathSafe(value))
            self.outsubdir = value

        elif option in ("-m", "--mode", "--modeinclude"):
            self.modeinclude = self.modeinclude + [x.strip() for x in value.split(',')]

        elif option in ["--modeexclude"]:
            self.modeexclude = self.modeexclude + [x.strip() for x in value.split(',')]

        elif option in ["-n", "-j", "--threads"]:
            # 'auto'/0 means use PYSYS_DEFAULT_THREADS or CPU count; 'xN' multiplies CPU count
            N_CPUS = multiprocessing.cpu_count()
            if value.lower() == 'auto':
                value = '0'
            if value.lower().startswith('x'):
                self.threads = max(1, int(float(value[1:]) * N_CPUS))
            else:
                self.threads = int(value)
                if self.threads <= 0:
                    self.threads = int(os.getenv('PYSYS_DEFAULT_THREADS', N_CPUS))

        elif option in ("-b", "--abort"):
            defaultAbortOnError = str(value.lower() == 'true')

        elif option in ["-g", "--progress"]:
            self.progress = True

        elif option in ["--printLogs"]:
            printLogs = getattr(PrintLogs, value.upper(), None)
            if printLogs is None:
                print("Error parsing command line arguments: Unsupported --printLogs value '%s'" % value)
                sys.exit(1)

        elif option in ["-X"]:
            # -Xkey=value, or bare -Xkey which is equivalent to -Xkey=true
            if '=' in value:
                key, value = value.split('=', 1)
            else:
                key, value = value, 'true'

            # best not to risk unintended consequences with matching of other types, but for boolean
            # it's worth it to resolve the inconsistent behaviour of -Xkey=true and -Xkey that existed until 1.6.0,
            # and because getting a bool where you expected a string is a bit more likely to give an exception
            # and be noticed that getting a string where you expected a boolean (e.g. the danger of if "false":)
            if value.lower() == 'true':
                value = True
            elif value.lower() == 'false':
                value = False

            self.userOptions[key] = value

        elif option in ("-y", "--validateOnly"):
            self.userOptions['validateOnly'] = True

        elif option in ("-G", "--grep"):
            self.grep = value

        else:
            print("Unknown option: %s" % option)
            sys.exit(1)

    # log this once we've got the log levels setup
    log.debug('PySys is installed at: %s; python from %s', os.path.dirname(pysys.__file__), sys.executable)

    # retained for compatibility, but PYSYS_DEFAULT_ARGS is a better way to achieve the same thing
    if os.getenv('PYSYS_PROGRESS', '').lower() == 'true':
        self.progress = True

    # special hidden dict of extra values to pass to the runner, since we can't change
    # the public API now
    self.userOptions['__extraRunnerOptions'] = {
        'progressWritersEnabled': self.progress,
        'printLogs': printLogs,
        'printLogsDefault': printLogsDefault,  # to use if not provided by a CI writer or cmdline
    }

    # load project AFTER we've parsed the arguments, which opens the possibility of using cmd line config in
    # project properties if needed
    Project.findAndLoadProject(outdir=self.outsubdir)

    if defaultAbortOnError is not None:
        setattr(Project.getInstance(), 'defaultAbortOnError', defaultAbortOnError)
    if '--ci' in args and not Project.getInstance().getProperty('supportMultipleModesPerRun', True):
        raise UserError('Cannot use --ci option with a legacy supportMultipleModesPerRun=false project')

    descriptors = createDescriptors(self.arguments, self.type, self.includes, self.excludes, self.trace, self.workingDir,
        modeincludes=self.modeinclude, modeexcludes=self.modeexclude, expandmodes=True)
    descriptors.sort(key=lambda d: [d.executionOrderHint, d._defaultSortKey])
    # No exception handler above, as any createDescriptors failure is really a fatal problem that should cause us to
    # terminate with a non-zero exit code; we don't want to run no tests without realizing it and return success

    if self.grep:
        regex = re.compile(self.grep, flags=re.IGNORECASE)
        descriptors = [d for d in descriptors if (regex.search(d.id) or regex.search(d.title))]

    runnermode = self.modeinclude[0] if len(self.modeinclude) == 1 else None  # used when supportMultipleModesPerRun=False

    return self.record, self.purge, self.cycle, runnermode, self.threads, self.outsubdir, descriptors, self.userOptions