Example #1
        def logForOutcome(decider):
            for cycle in self.results:
                cyclestr = ''
                if len(self.results) > 1:
                    cyclestr = '[CYCLE %d] ' % (cycle + 1)
                for outcome in OUTCOMES:
                    if not decider(outcome): continue

                    # sort similar outcomes together to make the results easier to read; by reason then testDir
                    self.results[cycle][outcome].sort(
                        key=lambda test: [test[1], test[3]])

                    for (id, reason, testTitle, testDir,
                         outputdir) in self.results[cycle][outcome]:
                        log("  %s%s: %s ",
                            cyclestr,
                            outcome,
                            id,
                            extra=ColorLogFormatter.tag(str(outcome).lower()))
                        if showTestTitle and testTitle:
                            log("      (title: %s)",
                                testTitle,
                                extra=ColorLogFormatter.tag(LOG_DEBUG))
                        if showOutcomeReason and reason:
                            log("      %s",
                                reason,
                                extra=ColorLogFormatter.tag(LOG_TEST_OUTCOMES))

                        try:
                            outputdir = os.path.normpath(
                                os.path.relpath(
                                    fromLongPathSafe(outputdir))) + os.sep
                            testDir = os.path.normpath(
                                os.path.relpath(
                                    fromLongPathSafe(testDir))) + os.sep
                        except Exception as ex:  # relpath can fail if on different Windows drives
                            logging.getLogger('pysys.writer').debug(
                                'Failed to generate relative paths for "%s" and "%s": %s',
                                outputdir, testDir, ex)

                        if showTestDir and not (showOutputDir and
                                                outputdir.startswith(testDir)):
                            # don't confuse things by showing the testDir unless its information is not present in the outputDir (due to --outdir)
                            log("      %s", testDir)
                        if showOutputDir:
                            log("      %s", outputdir)
Example #2
    def cleanup(self, **kwargs):
        if not pathexists(self.destDir):
            log.debug(
                'No matching output files were found for collection directory: %s',
                os.path.normpath(self.destDir))
            return

        log.info('Collected %s test output files to directory: %s',
                 '{:}'.format(self.collectedFileCount),
                 os.path.normpath(fromLongPathSafe(self.destDir)))
        self.archiveAndPublish()
Example #3
    def cleanup(self, **kwargs):
        pythonCoverageDir = self.destDir
        assert os.path.isabs(pythonCoverageDir)
        pythonCoverageDir = os.path.normpath(
            fromLongPathSafe(pythonCoverageDir))
        if not pathexists(pythonCoverageDir):
            log.info('No Python coverage files were generated.')
            return

        log.info('Preparing Python coverage report in: %s', pythonCoverageDir)

        self.runner.startPython(
            ['-m', 'coverage', 'combine'],
            abortOnError=True,
            workingDir=pythonCoverageDir,
            stdouterr=pythonCoverageDir + '/python-coverage-combine',
            disableCoverage=True,
            onError=lambda process:
                'Failed to combine Python code coverage data: %s'
                % self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True)
                or self.runner.logFileContents(process.stderr, maxLines=0))

        # produces coverage.xml in a standard format that is useful to code coverage tools
        self.runner.startPython(
            ['-m', 'coverage', 'xml'],
            abortOnError=False,
            workingDir=pythonCoverageDir,
            stdouterr=pythonCoverageDir + '/python-coverage-xml',
            disableCoverage=True,
            onError=lambda process:
                self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True)
                or self.runner.logFileContents(process.stderr, maxLines=0))

        self.runner.startPython(
            [
                '-m', 'coverage', 'html', '-d',
                toLongPathSafe(pythonCoverageDir + '/htmlcov')
            ] + self.getCoverageArgsList(),
            abortOnError=False,
            workingDir=pythonCoverageDir,
            stdouterr=pythonCoverageDir + '/python-coverage-html',
            disableCoverage=True,
            onError=lambda process:
                self.runner.getExprFromFile(process.stdout, '.+', returnNoneIfMissing=True)
                or self.runner.logFileContents(process.stderr, maxLines=0))

        # to avoid confusion, remove any zero byte out/err files from the above
        for p in os.listdir(pythonCoverageDir):
            p = os.path.join(pythonCoverageDir, p)
            if p.endswith(('.out', '.err')) and os.path.getsize(p) == 0:
                os.remove(p)

        self.archiveAndPublish()
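
For orientation, the same three coverage.py command-line steps can be driven with plain subprocess calls; this is only a sketch with a hypothetical helper name and directory argument, not how PySys itself invokes them.

import subprocess, sys

def build_coverage_report(coverage_dir):
    # Combine the raw coverage data files, then emit coverage.xml and an
    # HTML report (htmlcov/), mirroring the three steps above.
    for step in (['combine'], ['xml'], ['html', '-d', 'htmlcov']):
        subprocess.run([sys.executable, '-m', 'coverage'] + step,
                       cwd=coverage_dir, check=(step == ['combine']))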
Example #4
    def visitTestOutputFile(self, testObj, path, **kwargs):
        # strip off test root dir prefix for the regex comparison
        cmppath = fromLongPathSafe(path)
        if cmppath.startswith(self.runner.project.testRootDir):
            cmppath = cmppath[len(self.runner.project.testRootDir) + 1:]
        cmppath = cmppath.replace('\\', '/')

        if not self.fileIncludesRegex.search(cmppath):
            #log.debug('skipping file due to fileIncludesRegex: %s', cmppath)
            return False

        fileExcludesRegex = self.fileExcludesRegex
        if fileExcludesRegex is not None and fileExcludesRegex.search(cmppath):
            #log.debug('skipping file due to fileExcludesRegex: %s', cmppath)
            return False
        self.collectPath(testObj, path, **kwargs)
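
The filtering above amounts to two regular expressions applied to a forward-slash-normalized path relative to the test root; a minimal standalone version (parameter names are illustrative) might look like this:

import re

def matches_collection_filters(relpath, include_regex, exclude_regex=None):
    # Normalize to forward slashes so the same patterns work on Windows and
    # POSIX, then require an include match and reject any exclude match.
    relpath = relpath.replace('\\', '/')
    if not re.search(include_regex, relpath):
        return False
    if exclude_regex is not None and re.search(exclude_regex, relpath):
        return False
    return True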
Example #5
    def createTestResultDict(self, testObj, **kwargs):
        """
		Creates the dict that will be output for each test result.
		
		@returns: The dict, or ``None`` if this result should not be included.
		"""
        testDir = fromLongPathSafe(testObj.descriptor.testDir)
        if testDir.startswith(self.runner.project.testRootDir):
            testDir = testDir[len(self.runner.project.testRootDir) + 1:]

        data = {
            'testId': testObj.descriptor.id,  # includes mode suffix
            'outcome': str(testObj.getOutcome()),
            'outcomeReason': testObj.getOutcomeReason(),
            'startTime': time.strftime(
                '%Y-%m-%d %H:%M:%S',
                time.localtime(kwargs.get('testStart', time.time()))),
            'durationSecs': kwargs.get("testTime", -1),
            'testDir': testDir.replace('\\', '/'),
            'testFile': testObj.descriptor._getTestFile().replace('\\', '/'),
        }
        if self.cycles > 1:
            data['cycle'] = kwargs["cycle"] + 1
        if testObj.descriptor.output != 'Output':
            data['outputDir'] = testObj.descriptor.output.replace('\\', '/')

        if self.includeTitle: data['title'] = testObj.descriptor.title

        return data
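
Each dict returned by createTestResultDict is ultimately written into a machine-readable results file; the snippet below uses made-up sample values purely to show that the structure serializes cleanly as JSON, and is not taken from the PySys writer itself.

import json, time

sample_result = {
    'testId': 'MyTest_001~MyMode',  # hypothetical id with mode suffix
    'outcome': 'PASSED',
    'outcomeReason': '',
    'startTime': time.strftime('%Y-%m-%d %H:%M:%S'),
    'durationSecs': 1.2,
    'testDir': 'demo/MyTest_001',
    'testFile': 'run.py',
    'title': 'Demonstration test',
}
with open('pysys-results.json', 'w', encoding='utf-8') as f:
    json.dump({'results': [sample_result]}, f, indent=2)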
Example #6
    def cleanup(self, **kwargs):
        java = pysysjava.javaplugin.JavaPlugin()
        java.setup(self.runner)

        coverageDestDir = self.destDir
        assert os.path.isabs(coverageDestDir)  # the base class is responsible for absolutizing this config property
        coverageDestDir = os.path.normpath(fromLongPathSafe(coverageDestDir))
        if not pathexists(coverageDestDir):
            log.info('No Java coverage files were generated.')
            return

        log.info('Preparing Java coverage report in: %s', coverageDestDir)
        cliJar = safeGlob(self.jacocoDir + '/*jacoco*cli*.jar',
                          expected='==1',
                          name='JaCoCo CLI jar (from the jacocoDir)')

        coveragefiles = [
            f for f in os.listdir(coverageDestDir)
            if f.endswith('.javacoverage')
        ]
        java.startJava(
            cliJar, ['merge'] + coveragefiles +
            ['--destfile', 'jacoco-merged-java-coverage.exec'],
            abortOnError=True,
            workingDir=coverageDestDir,
            stdouterr=coverageDestDir + '/java-coverage-merge',
            disableCoverage=True,
            onError=lambda process:
                'Failed to merge Java code coverage data: %s'
                % self.runner.getExprFromFile(process.stderr, '.+', returnAll=True)[-1]
                or self.runner.logFileContents(process.stderr, maxLines=0))
        for f in coveragefiles:
            os.remove(toLongPathSafe(coverageDestDir + os.sep + f))

        classpath = java.toClasspathList(self.classpath)
        if not classpath:
            log.info(
                'No Java report will be generated as no classpath was specified'
            )
        else:
            log.debug(
                'Application classpath for the coverage report is: \n%s',
                '\n'.join("     cp #%-2d    : %s%s" % (
                    i + 1, pathelement,
                    '' if os.path.exists(pathelement) else ' (does not exist!)')
                    for i, pathelement in enumerate(classpath)))

            sourceDirs = java.toClasspathList(self.sourceDirs)  # not really a classpath, but for consistency, parse it the same way

            args = []
            for x in classpath:
                args.extend(['--classfiles', x])
            for x in sourceDirs:
                args.extend(['--sourcefiles', x])

            if sourceDirs:
                (log.warn if any(not os.path.exists(p) for p in sourceDirs) else log.debug)(
                    'Java source directories for the coverage report are: \n%s',
                    '\n'.join("    dir #%-2d    : %s%s" % (
                        i + 1, pathelement,
                        '' if os.path.exists(pathelement) else ' (does not exist!)')
                        for i, pathelement in enumerate(sourceDirs)))
            else:
                log.info(
                    'No source directories were provided so the coverage HTML report will not include line-by-line highlighted source files'
                )

            java.startJava(
                cliJar,
                [
                    'report', 'jacoco-merged-java-coverage.exec', '--xml',
                    'java-coverage.xml', '--html', '.'
                ] + java._splitShellArgs(self.reportArgs) + args,
                abortOnError=True,
                workingDir=coverageDestDir,
                stdouterr=coverageDestDir + '/java-coverage-report',
                disableCoverage=True,
                onError=lambda process:
                    'Failed to create Java code coverage report: %s'
                    % self.runner.getExprFromFile(process.stderr, '.+', returnAll=True)[-1]
                    or self.runner.logFileContents(process.stderr, maxLines=0))

        # to avoid confusion, remove any zero byte out/err files from the above
        for p in os.listdir(coverageDestDir):
            p = os.path.join(coverageDestDir, p)
            if p.endswith(('.out', '.err')) and os.path.getsize(p) == 0:
                os.remove(p)

        try:
            self.archiveAndPublish()
        except PermissionError:  # pragma: no cover - can occur transiently on Windows due to file system locking
            time.sleep(5.0)
            self.archiveAndPublish()
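
The two startJava calls correspond to the standard JaCoCo CLI "merge" and "report" commands; the sketch below shows the equivalent plain subprocess invocations, with placeholder jar and directory paths rather than anything taken from the plugin.

import glob, subprocess

def jacoco_report(coverage_dir, cli_jar, classes_dir, sources_dir):
    # Merge the per-test .exec files, then generate XML and HTML reports
    # into coverage_dir, mirroring the JaCoCo CLI invocations above.
    execfiles = glob.glob(coverage_dir + '/*.javacoverage')
    subprocess.run(['java', '-jar', cli_jar, 'merge'] + execfiles +
                   ['--destfile', 'merged.exec'], cwd=coverage_dir, check=True)
    subprocess.run(['java', '-jar', cli_jar, 'report', 'merged.exec',
                    '--classfiles', classes_dir, '--sourcefiles', sources_dir,
                    '--xml', 'java-coverage.xml', '--html', '.'],
                   cwd=coverage_dir, check=True)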
Example #7
    def loadDescriptors(self, dir, **kwargs):
        """Find all descriptors located under the specified directory, and 
		return them as a list.
		
		Subclasses may change the returned descriptors and/or add additional 
		instances of their own to the list after calling the super implementation::
		
		  descriptors = super(CustomDescriptorLoader, self).loadDescriptors(dir, **kwargs)
		  ...
		  return descriptors
		
		:param dir: The parent directory to search for runnable tests. 
		
		:return: List of L{pysys.xml.descriptor.TestDescriptor} objects 
			which could be selected for execution. 
			
			If a test can be run in multiple modes there must be a single descriptor 
			for it in the list returned from this method. Each multi-mode 
			descriptor is later expanded out into separate mode-specific 
			descriptors (at the same time as descriptor filtering based on 
			command line arguments, and addition of project-level 
			execution-order), before the final list is sorted and passed to 
			L{pysys.baserunner.BaseRunner}. 
			
			The order of the returned list is random, so the caller is responsible 
			for sorting this list to ensure deterministic behaviour. 
		
		:rtype: list
		:raises UserError: Raised if no testcases can be found.
		
		"""
        assert not kwargs, 'reserved for future use: %s' % kwargs.keys()
        assert self.project, 'project must be specified'
        assert dir, 'dir must be specified'
        assert os.path.isabs(dir), 'dir must be an absolute path: %s' % dir

        project = self.project

        descriptors = []
        ignoreSet = set(OSWALK_IGNORES)

        descriptorSet = set([
            s.strip() for s in project.getProperty(
                'pysysTestDescriptorFileNames',
                default=','.join(DEFAULT_DESCRIPTOR)).split(',')
        ])

        assert project.projectFile is not None
        log = logging.getLogger('pysys.launcher')

        # although it's highly unlikely, if any test paths did slip outside the Windows 256 char limit,
        # it would be very dangerous to skip them (which is what os.walk does unless passed a \\?\ path),
        # so must use long-path-safe - but need to re-encode from unicode string back to bytestring in Python 2
        i18n_reencode = locale.getpreferredencoding() if PY2 and isinstance(dir, str) else None

        dir = toLongPathSafe(os.path.normpath(dir))
        assert os.path.exists(dir), dir  # sanity check
        if project.projectFile:
            projectroot = toLongPathSafe(
                os.path.normpath(os.path.dirname(project.projectFile)))

        DIR_CONFIG_DESCRIPTOR = 'pysysdirconfig.xml'
        if not project.projectFile or not dir.startswith(projectroot):
            dirconfigs = None
            log.debug(
                'Project file does not exist under "%s" so processing of %s files is disabled',
                dir, DIR_CONFIG_DESCRIPTOR)
        else:
            # find directory config descriptors between the project root and the testcase
            # dirs. We deliberately use project dir not current working dir since
            # we don't want descriptors to be loaded differently depending on where the
            # tests are run from (i.e. should be independent of cwd).
            dirconfigs = {}

            # load any descriptors between the project dir up to (but not including) the dir we'll be walking
            searchdirsuffix = dir[len(projectroot) + 1:].split(
                os.sep) if len(dir) > len(projectroot) else []
            currentconfig = None
            for i in range(len(searchdirsuffix)):  # up to but not including dir
                if i == 0:
                    currentdir = projectroot
                else:
                    currentdir = projectroot + os.sep + os.sep.join(
                        searchdirsuffix[:i])

                if pathexists(currentdir + os.sep + DIR_CONFIG_DESCRIPTOR):
                    currentconfig = self._parseTestDescriptor(
                        currentdir + os.sep + DIR_CONFIG_DESCRIPTOR,
                        parentDirDefaults=currentconfig,
                        isDirConfig=True)
                    log.debug(
                        'Loaded directory configuration descriptor from %s: \n%s',
                        currentdir, currentconfig)
            # this is the top-level directory that will be checked below
            dirconfigs[os.path.dirname(dir)] = currentconfig

        for root, dirs, files in os.walk(toLongPathSafe(dir)):
            ignorematch = next(
                (f for f in files
                 if (f == '.pysysignore' or f == 'pysysignore')), None)
            if ignorematch:
                log.debug('Skipping directory %s due to ignore file %s', root,
                          ignorematch)
                del dirs[:]
                continue

            parentconfig = None
            if dirconfigs is not None:
                parentconfig = dirconfigs[os.path.dirname(root)]
                if next((f for f in files if (f == DIR_CONFIG_DESCRIPTOR)),
                        None):
                    parentconfig = self._parseTestDescriptor(
                        root + os.sep + DIR_CONFIG_DESCRIPTOR,
                        parentDirDefaults=parentconfig,
                        isDirConfig=True)
                    log.debug(
                        'Loaded directory configuration descriptor from %s: \n%s',
                        root, parentconfig)

            # allow subclasses to modify descriptors list and/or avoid processing
            # subdirectories
            if self._handleSubDirectory(root,
                                        dirs,
                                        files,
                                        descriptors,
                                        parentDirDefaults=parentconfig):
                del dirs[:]
                continue

            intersection = descriptorSet & set(files)
            if intersection:
                descriptorfile = fromLongPathSafe(
                    os.path.join(root, intersection.pop()))
                # PY2 gets messed up if we start passing unicode rather than byte str objects here,
                # as it proliferates to all strings in each test
                if i18n_reencode is not None:
                    descriptorfile = descriptorfile.encode(i18n_reencode)

                try:
                    parsed = self._parseTestDescriptor(
                        descriptorfile, parentDirDefaults=parentconfig)
                    if parsed:
                        descriptors.append(parsed)
                except UserError:
                    raise  # no stack trace needed, will already include descriptorfile name
                except Exception as e:
                    log.info('Failed to read descriptor: ', exc_info=True)
                    raise Exception(
                        "Error reading descriptor file '%s': %s - %s" %
                        (descriptorfile, e.__class__.__name__, e))

                # if this is a test dir, it never makes sense to look at sub directories
                del dirs[:]
                continue

            for ignore in (ignoreSet & set(dirs)):
                dirs.remove(ignore)

            if dirconfigs is not None and len(dirs) > 0:
                # stash it for when we navigate down to subdirectories
                # only add to dict if we're continuing to process children
                dirconfigs[root] = parentconfig

        return descriptors
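
The walk above relies on pruning os.walk in place: deleting dirs[:] prevents any descent into the subdirectories of an ignored or already-matched directory. A stripped-down illustration of just that pattern (the descriptor and ignore file names are simplified defaults, not the loader's full logic):

import os

def find_descriptors(rootdir, descriptor_names=('pysystest.xml',)):
    found = []
    for root, dirs, files in os.walk(rootdir):
        if '.pysysignore' in files or 'pysysignore' in files:
            del dirs[:]  # prune: do not descend into an ignored directory
            continue
        match = next((f for f in files if f in descriptor_names), None)
        if match:
            found.append(os.path.join(root, match))
            del dirs[:]  # a test directory never contains nested tests
    return found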
Example #8
    def parseArgs(self, args, printXOptions=None):
        # add any default args first; shlex.split does a great job of providing consistent parsing from str->list,
        # but need to avoid mangling \'s on windows; since this env var will be different for each OS no need for consistent win+unix behaviour
        if os.getenv('PYSYS_DEFAULT_ARGS', ''):
            log.info('Using PYSYS_DEFAULT_ARGS = %s' %
                     os.environ['PYSYS_DEFAULT_ARGS'])
            args = shlex.split(os.environ['PYSYS_DEFAULT_ARGS'].replace(
                os.sep, os.sep * 2 if os.sep == '\\' else os.sep)) + args

        printLogsDefault = PrintLogs.ALL
        if '--ci' in args:
            # to ensure identical behaviour, set these as if on the command line
            # (printLogs we don't set here since we use the printLogsDefault mechanism to allow it to be overridden
            # by CI writers and/or the command line; note that setting --mode=ALL would be incorrect if
            # supportMultipleModesPerRun=false, but that's a legacy option so we raise an exception later if this happens)
            args = [
                '--purge', '--record', '-j0', '--type=auto', '--mode=ALL',
                '-XcodeCoverage'
            ] + args
            printLogsDefault = PrintLogs.FAILURES

        try:
            optlist, self.arguments = getopt.gnu_getopt(
                args, self.optionString, self.optionList)
        except Exception:
            log.warn("Error parsing command line arguments: %s" %
                     (sys.exc_info()[1]))
            sys.exit(1)

        log.debug('PySys arguments: tests=%s options=%s', self.arguments,
                  optlist)

        EXPR1 = re.compile(r"^[\w\.]*=.*$")
        EXPR2 = re.compile(r"^[\w\.]*$")

        printLogs = None
        ci = False
        defaultAbortOnError = None

        logging.getLogger('pysys').setLevel(logging.INFO)

        # as a special case, set a non-DEBUG log level for the implementation of assertions
        # so that it is not enabled by a plain -vDEBUG, only by an explicit -vassertions=DEBUG,
        # as it is incredibly verbose and slow and not often useful
        logging.getLogger('pysys.assertions').setLevel(logging.INFO)

        for option, value in optlist:
            if option in ("-h", "--help"):
                self.printUsage(printXOptions)

            elif option in ['--ci']:
                continue  # handled above

            elif option in ("-r", "--record"):
                self.record = True

            elif option in ("-p", "--purge"):
                self.purge = True

            elif option in ("-v", "--verbosity"):
                verbosity = value
                if '=' in verbosity:
                    loggername, verbosity = value.split('=')
                    assert not loggername.startswith('pysys.'), \
                        'The "pysys." prefix is assumed and should not be explicitly specified'
                    if loggername.startswith('python:'):
                        loggername = loggername[len('python:'):]
                        assert not loggername.startswith('pysys'), \
                            'Cannot use python: with pysys.*'  # would produce a duplicate log handler
                        # in the interests of performance and simplicity we normally only add the pysys.* category
                        logging.getLogger(loggername).addHandler(
                            pysys.internal.initlogging.pysysLogHandler)
                    else:
                        loggername = 'pysys.' + loggername
                else:
                    loggername = None

                if verbosity.upper() == "DEBUG":
                    verbosity = logging.DEBUG
                elif verbosity.upper() == "INFO":
                    verbosity = logging.INFO
                elif verbosity.upper() == "WARN":
                    verbosity = logging.WARN
                elif verbosity.upper() == "CRIT":
                    verbosity = logging.CRITICAL
                else:
                    log.warn('Invalid log level "%s"' % verbosity)
                    sys.exit(1)

                if loggername is None:
                    # when setting global log level to a higher level like WARN etc we want to affect stdout but
                    # not necessarily downgrade the root level (would make run.log less useful and break
                    # some PrintLogs behaviour)
                    stdoutHandler.setLevel(verbosity)
                    if verbosity == logging.DEBUG:
                        logging.getLogger('pysys').setLevel(logging.DEBUG)
                else:
                    # for specific level setting we need the opposite - only change stdoutHandler if we're
                    # turning up the logging (since otherwise it wouldn't be seen) but also change the specified level
                    logging.getLogger(loggername).setLevel(verbosity)

            elif option in ("-a", "--type"):
                self.type = value
                if self.type not in ["auto", "manual"]:
                    log.warn(
                        "Unsupported test type - valid types are auto and manual"
                    )
                    sys.exit(1)

            elif option in ("-t", "--trace"):
                self.trace = value

            elif option in ("-i", "--include"):
                self.includes.append(value)

            elif option in ("-e", "--exclude"):
                self.excludes.append(value)

            elif option in ("-c", "--cycle"):
                try:
                    self.cycle = int(value)
                except Exception:
                    print(
                        "Error parsing command line arguments: A valid integer for the number of cycles must be supplied"
                    )
                    sys.exit(1)

            elif option in ("-o", "--outdir"):
                value = os.path.normpath(value)
                if os.path.isabs(value) and not value.startswith('\\\\?\\'):
                    value = fromLongPathSafe(toLongPathSafe(value))
                self.outsubdir = value

            elif option in ("-m", "--mode", "--modeinclude"):
                self.modeinclude = self.modeinclude + [
                    x.strip() for x in value.split(',')
                ]

            elif option in ["--modeexclude"]:
                self.modeexclude = self.modeexclude + [
                    x.strip() for x in value.split(',')
                ]

            elif option in ["-n", "-j", "--threads"]:
                N_CPUS = multiprocessing.cpu_count()
                if value.lower() == 'auto': value = '0'
                if value.lower().startswith('x'):
                    self.threads = max(1, int(float(value[1:]) * N_CPUS))
                else:
                    self.threads = int(value)
                    if self.threads <= 0:
                        self.threads = int(
                            os.getenv('PYSYS_DEFAULT_THREADS', N_CPUS))

            elif option in ("-b", "--abort"):
                defaultAbortOnError = str(value.lower() == 'true')

            elif option in ["-g", "--progress"]:
                self.progress = True

            elif option in ["--printLogs"]:
                printLogs = getattr(PrintLogs, value.upper(), None)
                if printLogs is None:
                    print(
                        "Error parsing command line arguments: Unsupported --printLogs value '%s'"
                        % value)
                    sys.exit(1)

            elif option in ["-X"]:
                if '=' in value:
                    key, value = value.split('=', 1)
                else:
                    key, value = value, 'true'

                # best not to risk unintended consequences with matching of other types, but for boolean
                # it's worth it to resolve the inconsistent behaviour of -Xkey=true and -Xkey that existed until 1.6.0,
                # and because getting a bool where you expected a string is a bit more likely to give an exception
                # and be noticed than getting a string where you expected a boolean (e.g. the danger of if "false":)
                if value.lower() == 'true':
                    value = True
                elif value.lower() == 'false':
                    value = False

                self.userOptions[key] = value

            elif option in ("-y", "--validateOnly"):
                self.userOptions['validateOnly'] = True

            elif option in ("-G", "--grep"):
                self.grep = value

            else:
                print("Unknown option: %s" % option)
                sys.exit(1)

        # log this once we've got the log levels setup
        log.debug('PySys is installed at: %s; python from %s',
                  os.path.dirname(pysys.__file__), sys.executable)

        # retained for compatibility, but PYSYS_DEFAULT_ARGS is a better way to achieve the same thing
        if os.getenv('PYSYS_PROGRESS', '').lower() == 'true':
            self.progress = True

        # special hidden dict of extra values to pass to the runner, since we can't change
        # the public API now
        self.userOptions['__extraRunnerOptions'] = {
            'progressWritersEnabled': self.progress,
            'printLogs': printLogs,
            'printLogsDefault':
            printLogsDefault,  # to use if not provided by a CI writer or cmdline
        }

        # load project AFTER we've parsed the arguments, which opens the possibility of using cmd line config in
        # project properties if needed
        Project.findAndLoadProject(outdir=self.outsubdir)

        if defaultAbortOnError is not None:
            setattr(Project.getInstance(), 'defaultAbortOnError',
                    defaultAbortOnError)
        if '--ci' in args and not Project.getInstance().getProperty(
                'supportMultipleModesPerRun', True):
            raise UserError(
                'Cannot use --ci option with a legacy supportMultipleModesPerRun=false project'
            )

        descriptors = createDescriptors(self.arguments,
                                        self.type,
                                        self.includes,
                                        self.excludes,
                                        self.trace,
                                        self.workingDir,
                                        modeincludes=self.modeinclude,
                                        modeexcludes=self.modeexclude,
                                        expandmodes=True)
        descriptors.sort(
            key=lambda d: [d.executionOrderHint, d._defaultSortKey])

        # No exception handler above, as any createDescriptors failure is really a fatal problem that should cause us to
        # terminate with a non-zero exit code; we don't want to silently run zero tests and return success

        if self.grep:
            regex = re.compile(self.grep, flags=re.IGNORECASE)
            descriptors = [
                d for d in descriptors
                if (regex.search(d.id) or regex.search(d.title))
            ]

        runnermode = self.modeinclude[0] if len(self.modeinclude) == 1 else None  # used when supportMultipleModesPerRun=False
        return self.record, self.purge, self.cycle, runnermode, self.threads, self.outsubdir, descriptors, self.userOptions
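
The PYSYS_DEFAULT_ARGS handling doubles every path separator on Windows before calling shlex.split, because shlex treats a backslash as an escape character; the doubled separators collapse back to single ones in the parsed arguments. A small Windows-oriented illustration with a made-up argument string:

import shlex

# On Windows os.sep is '\', so doubling it first means shlex.split returns the
# path with its original single backslashes instead of swallowing them.
default_args = r'-j2 --outdir C:\pysys\output'
doubled = default_args.replace('\\', '\\\\')
print(shlex.split(doubled))  # ['-j2', '--outdir', 'C:\\pysys\\output']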
Example #9
    def _archiveTestOutputDir(self, id, outputDir, **kwargs):
        """
		Creates an archive for the specified test, unless doing so would violate the configured limits 
		(e.g. maxArchives). 
		
		:param str id: The testId (plus a cycle suffix if it's a multi-cycle run). 
		:param str outputDir: The path of the test output dir. 
		"""
        if self.archivesCreated == 0: mkdir(self.destDir)

        if self.archivesCreated == self.maxArchives:
            self.skippedTests.append(outputDir)
            log.debug(
                'Skipping archiving for %s as maxArchives limit is reached',
                id)
            return
        if self.__totalBytesRemaining < 500:
            self.skippedTests.append(outputDir)
            log.debug(
                'Skipping archiving for %s as maxTotalMB limit is reached', id)
            return
        self.archivesCreated += 1

        try:
            outputDir = toLongPathSafe(outputDir)
            skippedFiles = []

            # this is performance-critical so worth caching these
            fileExcludesRegex = self.fileExcludesRegex
            fileIncludesRegex = self.fileIncludesRegex
            isPurgableFile = self.runner.isPurgableFile

            bytesRemaining = min(int(self.maxArchiveSizeMB * 1024 * 1024),
                                 self.__totalBytesRemaining)
            triedTmpZipFile = False

            zippath, myzip = self._newArchive(id)
            filesInZip = 0
            with myzip:
                rootlen = len(outputDir) + 1

                for base, dirs, files in os.walk(outputDir):
                    # Just the files, don't bother with the directories for now

                    files.sort(key=lambda fn: [fn != 'run.log', fn])  # be deterministic, and put run.log first

                    for f in files:
                        fn = os.path.join(base, f)
                        if fileExcludesRegex is not None and fileExcludesRegex.search(
                                fn.replace('\\', '/')):
                            skippedFiles.append(fn)
                            continue
                        if fileIncludesRegex is not None and not fileIncludesRegex.search(
                                fn.replace('\\', '/')):
                            skippedFiles.append(fn)
                            continue

                        fileSize = os.path.getsize(fn)
                        if fileSize == 0:
                            # Since (if not waiting until end) this gets called before testComplete has had a chance to clean things up, skip the
                            # files that it would have deleted. Don't bother listing these in skippedFiles since user
                            # won't be expecting them anyway
                            continue

                        if bytesRemaining < 500:
                            skippedFiles.append(fn)
                            continue

                        try:
                            if fileSize > bytesRemaining:
                                if triedTmpZipFile:  # to save effort, don't keep trying once we're close - from now on only attempt small files
                                    skippedFiles.append(fn)
                                    continue
                                triedTmpZipFile = True

                                # Only way to know if it'll fit is to try compressing it
                                log.debug(
                                    'File size of %s might push the archive above the limit; creating a temp zip to check',
                                    fn)
                                tmpname, tmpzip = self._newArchive(id + '.tmp')
                                try:
                                    with tmpzip:
                                        tmpzip.write(fn, 'tmp')
                                        compressedSize = tmpzip.getinfo(
                                            'tmp').compress_size
                                        if compressedSize > bytesRemaining:
                                            log.debug(
                                                'Skipping file as compressed size of %s bytes exceeds remaining limit of %s bytes: %s',
                                                compressedSize, bytesRemaining,
                                                fn)
                                            skippedFiles.append(fn)
                                            continue
                                finally:
                                    os.remove(tmpname)

                            # Here's where we actually add it to the real archive
                            memberName = fn[rootlen:].replace('\\', '/')
                            myzip.write(fn, memberName)
                        except Exception as ex:  # might happen due to file locking or similar
                            log.warning(
                                'Failed to add output file "%s" to archive: %s',
                                fn, ex)
                            skippedFiles.append(fn)
                            continue
                        filesInZip += 1
                        bytesRemaining -= myzip.getinfo(
                            memberName).compress_size

                if skippedFiles and fileIncludesRegex is None:  # keep the archive clean if there's an explicit include
                    skippedFilesStr = os.linesep.join(
                        [fromLongPathSafe(f) for f in skippedFiles])
                    skippedFilesStr = skippedFilesStr.encode('utf-8')
                    myzip.writestr('__pysys_skipped_archive_files.txt',
                                   skippedFilesStr)

            if filesInZip == 0:
                # don't leave empty zips around
                log.debug('No files added to zip so deleting: %s', zippath)
                self.archivesCreated -= 1
                os.remove(zippath)
                return

            self.__totalBytesRemaining -= os.path.getsize(zippath)
            self.runner.publishArtifact(zippath, 'TestOutputArchive')

        except Exception:
            self.skippedTests.append(outputDir)
            raise
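
The archiving above stays within its size budget by checking ZipInfo.compress_size after each member is written; a simplified standalone version of that pattern (the budget, threshold and manifest name are illustrative):

import os, zipfile

def zip_with_budget(srcdir, zippath, max_bytes=10 * 1024 * 1024):
    remaining, skipped = max_bytes, []
    with zipfile.ZipFile(zippath, 'w', zipfile.ZIP_DEFLATED) as z:
        for base, dirs, files in os.walk(srcdir):
            for f in sorted(files, key=lambda fn: [fn != 'run.log', fn]):
                fn = os.path.join(base, f)
                if os.path.getsize(fn) == 0:
                    continue  # zero-byte files add no value to the archive
                if remaining < 500:
                    skipped.append(fn)
                    continue
                member = os.path.relpath(fn, srcdir).replace('\\', '/')
                z.write(fn, member)
                # subtract the compressed (not on-disk) size from the budget
                remaining -= z.getinfo(member).compress_size
        if skipped:
            z.writestr('skipped_files.txt', '\n'.join(skipped))
    return zippath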
Example #10
		def pub(path, category):
			path = fromLongPathSafe(path).replace('\\','/')
			for a in self.__artifactWriters:
				a.publishArtifact(path, category)