def execute(self):
    """Deploy the correlator build and config into the test output dir, start a correlator,
    inject the connectivity plugins and demo monitor, then wait for the expected messages.

    NOTE(review): relies on project helpers (fileutils, filecopy, CorrelatorHelper) and the
    globals TEST_SUBJECT_DIR/PROJECT/socketProcessMutex defined elsewhere in this project.
    """
    # copy the build(Release folder) and config yaml to output
    fileutils.mkdir(self.output + '/Release')
    # mutex presumably serializes access to a shared resource across tests — confirm with its definition
    with socketProcessMutex:
        self.copytree(TEST_SUBJECT_DIR + '/Release', self.output + '/Release')
    filecopy.filecopy(self.input + '/sample.yaml', self.output + '/sample.yaml')

    # create the correlator helper and start the correlator and an
    # engine receive listening on the Echo Channel
    correlator = CorrelatorHelper(self, name='mycorrelator')
    correlator.start(logfile='mycorrelator.log',
                     inputLog='mycorrelator.input.log',
                     config=[self.output + '/sample.yaml'])
    correlator.injectEPL(
        ['ConnectivityPluginsControl.mon', 'ConnectivityPlugins.mon'],
        filedir=PROJECT.APAMA_HOME + '/monitors')
    correlator.receive(filename='receive.evt', channels=['EchoChannel'])

    # inject the simple monitor into the correlator
    correlator.injectEPL(filenames=[self.input + '/DemoApp.mon'])

    #self.wait(3)

    # wait for receipt msg towards transport
    # we could use correlator.flush() here instead
    self.waitForSignal('mycorrelator.log', expr="Towards Host:", condition="==8")
def recordResult(self, formatted, testobj):
    """Record results to the performance summary file.

    @param formatted: the formatted string to write
    @param testobj: object reference to the calling test
    """
    # per-test copy in the test output directory, for convenience/triaging
    localPath = testobj.output + '/performance_results.csv'
    if not os.path.exists(localPath):
        with open(localPath, 'w') as f:
            f.write(self.getRunHeader())
    with open(localPath, 'a') as f:
        f.write(formatted)

    # the shared, run-wide summary file; serialize access since tests may run concurrently
    summaryPath = self.getRunSummaryFile(testobj)
    mkdir(os.path.dirname(summaryPath))
    with self._lock:
        isNewFile = not os.path.exists(summaryPath)
        with open(summaryPath, 'a') as f:
            if isNewFile:
                testobj.log.info(
                    'Creating performance summary log file at: %s', summaryPath)
                f.write(self.getRunHeader())
            f.write(formatted)
def reportResult(self, testobj, value, resultKey, unit, toleranceStdDevs=None, resultDetails=None):
    """Append one performance result to the JSON run summary file, writing the
    runDetails header the first time the file is created."""
    summaryPath = self.getRunSummaryFile(testobj)
    mkdir(os.path.dirname(summaryPath))

    # build the record up-front; key order matters for the emitted JSON
    record = {
        'resultKey': resultKey,
        'value': value,
        'unit': str(unit),
        'biggerIsBetter': unit.biggerIsBetter,
        'samples': 1,
        'stdDev': 0,
        'toleranceStdDevs': toleranceStdDevs,
        'testId': testobj.descriptor.id,
        'resultDetails': resultDetails or {}
    }

    with self._lock:
        # hold the lock across the exists-check and append so concurrent tests
        # don't both write the header
        safePath = toLongPathSafe(summaryPath)
        isNewFile = not os.path.exists(safePath)
        with io.open(safePath, 'a', encoding='utf-8') as f:
            if isNewFile:
                testobj.log.info(
                    'Creating performance summary log file at: %s',
                    os.path.normpath(summaryPath))
                f.write('{"runDetails": ')
                json.dump(self.getRunDetails(testobj), f)
                f.write(', "results":[\n')
            else:
                f.write(',\n')
            json.dump(record, f)
        self.__summaryFilesWritten.add(summaryPath)
def recordResult(self, formatted, testobj):
    """Record results to the performance summary file.

    :param formatted: the formatted string to write
    :param testobj: object reference to the calling test
    """
    # utf-8 on python 3; rely on openfile's PY2 default otherwise
    encoding = None if PY2 else 'utf-8'

    # per-test copy in the test output directory, for convenience/triaging
    localPath = testobj.output + '/performance_results.csv'
    if not os.path.exists(localPath):
        with openfile(localPath, 'w', encoding=encoding) as f:
            f.write(self.getRunHeader())
    with openfile(localPath, 'a', encoding=encoding) as f:
        f.write(formatted)

    # the shared, run-wide summary file; serialize access since tests may run concurrently
    summaryPath = self.getRunSummaryFile(testobj)
    mkdir(os.path.dirname(summaryPath))
    with self._lock:
        isNewFile = not os.path.exists(toLongPathSafe(summaryPath))
        with openfile(summaryPath, 'a', encoding=encoding) as f:
            if isNewFile:
                testobj.log.info(
                    'Creating performance summary log file at: %s',
                    os.path.normpath(summaryPath))
                f.write(self.getRunHeader())
            f.write(formatted)
        self.__summaryFilesWritten.add(summaryPath)
def archiveAndPublish(self):
    """
    Generate an archive of the destDir (if configured) and publish artifacts (if configured).

    Called by default as part of `cleanup()`.
    """
    if self.destArchive:
        mkdir(os.path.dirname(toLongPathSafe(self.destArchive)))
        with zipfile.ZipFile(toLongPathSafe(self.destArchive), 'w',
                             zipfile.ZIP_DEFLATED, allowZip64=True) as archive:
            rootlen = len(self.destDir)
            for base, dirs, files in os.walk(self.destDir):
                for f in files:
                    # don't try to add the archive file to itself
                    if os.path.normpath(os.path.join(base, f)) == os.path.normpath(self.destArchive):
                        continue
                    fn = os.path.join(base, f)
                    # Fix: strip any leading separator left after removing the destDir
                    # prefix (rootlen doesn't account for the path separator), so the
                    # zip contains relative member names rather than names starting
                    # with '/' — matches the behaviour of the newer archiveAndPublish.
                    destname = fn[rootlen:].replace('\\', '/').lstrip('/')
                    archive.write(fn, destname)
    if self.publishArtifactDirCategory:
        self.runner.publishArtifact(self.destDir,
                                    self.publishArtifactDirCategory)
    if self.publishArtifactArchiveCategory and self.destArchive:
        self.runner.publishArtifact(self.destArchive,
                                    self.publishArtifactArchiveCategory)
def setup(self, **kwargs):
    """Validate configuration and open the JSON results file, writing the runDetails header.

    NB: this method is also called by ConsoleFailureAnnotationsWriter.
    """
    runner = kwargs['runner']
    self.runner = runner

    # expand '*' to every outcome, else parse the comma-separated display names
    if self.includeNonFailureOutcomes == '*':
        self.includeNonFailureOutcomes = [str(o) for o in OUTCOMES]
    else:
        self.includeNonFailureOutcomes = [
            o.strip().upper()
            for o in self.includeNonFailureOutcomes.split(',') if o.strip()]
    validNames = {str(outcome) for outcome in OUTCOMES}
    for o in self.includeNonFailureOutcomes:
        if o not in validNames:
            raise UserError(
                'Unknown outcome display name "%s" in includeNonFailureOutcomes' % o)

    self.logfile = os.path.normpath(
        os.path.join(self.outputDir or runner.output + '/..', self.logfile))
    mkdir(os.path.dirname(self.logfile))
    self.resultsWritten = 0
    self.cycles = runner.cycles

    # self.fp may be pre-set by a subclass writing to something other than a .json file
    if self.fp is None:
        self.fp = io.open(self.logfile, "w", encoding='utf-8')
    self.fp.write('{"runDetails": ')
    json.dump(runner.runDetails, self.fp)
    self.fp.write(', "results":[\n')
    self.fp.flush()
def setup(self, **kwargs):
    """Create (after clearing out) the directory the test summary files will be written to."""
    runner = kwargs['runner']
    if self.outputDir:
        self.outputDir = os.path.join(runner.output + '/..', self.outputDir)
    else:
        self.outputDir = os.path.join(runner.project.root, 'target', 'pysys-reports')
    # start from a clean directory so stale reports from a previous run can't survive
    deletedir(self.outputDir)
    mkdir(self.outputDir)
    self.cycles = kwargs.pop('cycles', 0)
def createProjectConfig(targetdir, templatepath=None):
    """Create a new project configuration file in the specified targetdir."""
    templatepath = templatepath or getProjectConfigTemplates()['default']
    mkdir(targetdir)
    pythonVersion = '%s.%s.%s' % sys.version_info[0:3]
    pysysVersion = '.'.join(__version__.split('.')[0:3])
    # using ascii ensures we don't unintentionally add weird characters to the default (utf-8) file
    with openfile(templatepath, encoding='ascii') as src:
        with openfile(os.path.abspath(targetdir + '/' + DEFAULT_PROJECTFILE[0]),
                      'w', encoding='ascii') as target:
            for line in src:
                line = line.replace('@PYTHON_VERSION@', pythonVersion)
                target.write(line.replace('@PYSYS_VERSION@', pysysVersion))
def collectPath(self, testObj, path, **kwargs):
    """Copy the given file into the collection destination, expanding the configured
    output pattern (@TESTID@/@FILENAME@/@FILENAME_EXT@/@UNIQUE@ placeholders)."""
    name, ext = os.path.splitext(os.path.basename(path))
    expanded = self.outputPattern.replace('@TESTID@', str(testObj))
    expanded = expanded.replace('@FILENAME@', name).replace('.@FILENAME_EXT@', ext)
    collectdest = toLongPathSafe(os.path.join(self.destDir, expanded))

    # pick the first @UNIQUE@ index that doesn't collide with an existing file
    i = 1
    while pathexists(collectdest.replace('@UNIQUE@', '%d' % (i))):
        i += 1
    collectdest = collectdest.replace('@UNIQUE@', '%d' % (i))

    mkdir(os.path.dirname(collectdest))
    shutil.copyfile(toLongPathSafe(path.replace('/', os.sep)), collectdest)
    self.collectedFileCount += 1
def cleanup(self, **kwargs):
    """At the end of the run: archive any queued output dirs (if deferred to end of run),
    record which tests were skipped due to limits, and publish the archive dir."""
    if self.archiveAtEndOfRun:
        # sort by hash of testId so make order deterministic
        for _, id, outputDir in sorted(self.queuedInstructions):
            self._archiveTestOutputDir(id, outputDir)

    if self.skippedTests:
        # if we hit a limit, at least record the names of the tests we missed
        mkdir(self.destDir)
        skippedPath = self.destDir + os.sep + 'skipped_artifacts.txt'
        with io.open(skippedPath, 'w', encoding='utf-8') as f:
            f.write('\n'.join(os.path.normpath(t) for t in self.skippedTests))

    emit = log.info if self.archivesCreated else log.debug
    emit('%s created %d test output archive artifacts in: %s',
         self.__class__.__name__, self.archivesCreated, self.destDir)
    if self.archivesCreated:
        self.runner.publishArtifact(self.destDir, 'TestOutputArchiveDir')
def archiveAndPublish(self):
    """
    Generate an archive of the destDir (if configured) and publish artifacts (if configured).

    Called by default as part of `cleanup()`.
    """
    if self.destArchive:
        mkdir(os.path.dirname(toLongPathSafe(self.destArchive)))
        with zipfile.ZipFile(toLongPathSafe(self.destArchive), 'w',
                             zipfile.ZIP_DEFLATED, allowZip64=True) as archive:
            rootlen = len(self.destDir)
            for base, dirs, files in os.walk(self.destDir):
                for f in files:
                    # never add the archive file to itself
                    if os.path.normpath(os.path.join(base, f)) == os.path.normpath(self.destArchive):
                        continue
                    fn = os.path.join(base, f)
                    # member name is the path relative to destDir, normalized to forward slashes
                    destname = fn[rootlen:].replace('\\', '/').lstrip('/')
                    try:
                        try:
                            archive.write(fn, destname)
                        except PermissionError:  # pragma: no cover - can happen on windows due to file system locking issues
                            time.sleep(5.0)
                            archive.write(fn, destname)
                    except Exception as ex:  # pragma: no cover
                        # Deal with failures (even after retry) - don't abort the whole archive
                        # (e.g. a locked .err file in coverage output dir doesn't matter)
                        log.warning(
                            'Could not write file to archive %s: "%s" - %s: %s',
                            os.path.basename(self.destArchive), fn,
                            ex.__class__.__name__, ex)
                        # leave a placeholder so the archive shows which file was lost and why
                        archive.writestr(
                            destname + '.pysyserror.txt',
                            '!!! PySys could not write this file to the archive - %s: %s'
                            % (ex.__class__.__name__, ex))
    if self.publishArtifactDirCategory:
        self.runner.publishArtifact(self.destDir,
                                    self.publishArtifactDirCategory)
    if self.publishArtifactArchiveCategory and self.destArchive:
        self.runner.publishArtifact(self.destArchive,
                                    self.publishArtifactArchiveCategory)
def setup(self, *args, **kwargs):
    """Set up the writer, and if configured, start collecting Python code coverage
    for the PySys process itself."""
    super(PythonCoverageWriter, self).setup(*args, **kwargs)
    import coverage

    if not self.includeCoverageFromPySysProcess:
        return
    covArgs = self.getCoverageArgsList()
    assert len(covArgs) == 1 and covArgs[0].startswith(
        '--rcfile='
    ), 'includeCoverageFromPySysProcess can only be used if pythonCoverageArgs is set to "--rcfile=XXXX"'
    mkdir(self.destDir)
    cov = coverage.Coverage(
        config_file=covArgs[0][covArgs[0].find('=') + 1:],
        data_file=self.destDir + '/.coverage.pysys_parent')
    log.debug('Enabling Python coverage for this process: %s', cov)
    # These lines avoid unhelpful warnings, and also match what coverage.process_startup() does
    cov._warn_preimported_source = False
    cov._warn_unimported_source = False
    cov._warn_no_data = False
    cov.start()
    self.__selfCoverage = cov
def recordResult(self, formatted, testobj):
    """Record results to the performance summary file.

    :meta private:

    :param formatted: the formatted string to write
    :param testobj: object reference to the calling test
    """
    encoding = 'utf-8'

    def runHeader():
        # prefer the modern getRunHeader(testobj) signature, falling back for old subclasses
        try:
            return self.getRunHeader(testobj)
        except Exception:  # pragma: no cover - for pre-2.0 signature
            return self.getRunHeader()

    # per-test copy in the test output directory, for convenience/triaging
    localPath = testobj.output + '/performance_results.csv'
    if not os.path.exists(localPath):
        with io.open(toLongPathSafe(localPath), 'w', encoding=encoding) as f:
            f.write(runHeader())
    with io.open(toLongPathSafe(localPath), 'a', encoding=encoding) as f:
        f.write(formatted)

    # the shared, run-wide summary file; serialize access since tests may run concurrently
    summaryPath = self.getRunSummaryFile(testobj)
    mkdir(os.path.dirname(summaryPath))
    with self._lock:
        isNewFile = not os.path.exists(toLongPathSafe(summaryPath))
        with io.open(toLongPathSafe(summaryPath), 'a', encoding=encoding) as f:
            if isNewFile:
                testobj.log.info(
                    'Creating performance summary log file at: %s',
                    os.path.normpath(summaryPath))
                f.write(runHeader())
            f.write(formatted)
        self.__summaryFilesWritten.add(summaryPath)
def makeTest(self):
    """ Uses the previously parsed arguments to create a new test (or related asset) on disk in ``self.dest``.

    Can be overridden if additional post-processing steps are required for some templates.

    :return: The path of the newly created test directory.
    """
    # resolve which template to use: an explicitly named one, else the first (default)
    templates = self.getTemplates()
    if self.template:
        tmp = [t for t in templates if t['name'] == self.template]
        if len(tmp) != 1:
            raise UserError(
                'Cannot find a template named "%s"; available templates for this project and directory are: %s'
                % (self.template, ', '.join(t['name'] for t in templates)))
        tmp = tmp[0]
    else:
        tmp = templates[0]  # pick the default
    log.debug('Using template: \n%s', json.dumps(tmp, indent=' '))
    dest = self.dest
    print("Creating %s using template %s ..." % (dest, tmp['name']))
    assert tmp['isTest']  # not implemented for other asset types yet
    if os.path.exists(dest):
        raise UserError('Cannot create %s as it already exists' % dest)
    mkdir(dest)

    if not tmp['replace']:
        # use defaults unless user explicitly defines one or more, to save user having to keep redefining the standard ones
        tmp['replace'] = [
            ['@@DATE@@', '@{DATE}'],
            ['@@USERNAME@@', '@{USERNAME}'],
            ['@@DIR_NAME@@', '@{DIR_NAME}'],
            ['@@DEFAULT_DESCRIPTOR@@', '@{DEFAULT_DESCRIPTOR}'],
            [
                '@@DEFAULT_DESCRIPTOR_MINIMAL@@',
                '@{DEFAULT_DESCRIPTOR_MINIMAL}'
            ],
            ['@@LINE_LENGTH_GUIDE@@', '@{LINE_LENGTH_GUIDE}'],
        ]

    # read the descriptor header of the default pysystest.py template (everything before
    # the first 'import'), for substitution into @{DEFAULT_DESCRIPTOR}
    with open(
            self.project.pysysTemplatesDir + '/default-test/pysystest.py',
            'rb') as f:
        DEFAULT_DESCRIPTOR = f.read()
    DEFAULT_DESCRIPTOR = DEFAULT_DESCRIPTOR[:DEFAULT_DESCRIPTOR.find(
        b'import')].rstrip().decode('ascii')
    DEFAULT_DESCRIPTOR = DEFAULT_DESCRIPTOR.replace(
        '@@DATE@@', '@{DATE}')
    DEFAULT_DESCRIPTOR = DEFAULT_DESCRIPTOR.replace(
        '@@USERNAME@@', '@{USERNAME}')
    DEFAULT_DESCRIPTOR = DEFAULT_DESCRIPTOR.replace(
        '@@LINE_LENGTH_GUIDE@@', '@{LINE_LENGTH_GUIDE}')
    # minimal variant keeps only non-__pysys_ lines (the skipped_reason line is retained)
    DEFAULT_DESCRIPTOR_MINIMAL = '\n'.join([
        l for l in DEFAULT_DESCRIPTOR.split('\n')
        if ((l.startswith('#__pysys_skipped_reason__')
             or not l.startswith('#__pysys_')))
    ])

    # compile each [regex, replacement] pair; replacements may themselves contain
    # the @{...} placeholders expanded here
    replace = [
        (
            re.compile(r1.encode('ascii')),
            r2  # in addition to ${...} project properties, add some that are especially useful here
            .replace('@{DEFAULT_DESCRIPTOR}',
                     DEFAULT_DESCRIPTOR.replace('\\', '\\\\')).replace(
                         '@{DEFAULT_DESCRIPTOR_MINIMAL}',
                         DEFAULT_DESCRIPTOR_MINIMAL.replace('\\', '\\\\')
                     ).replace('@{DATE}', self.project.startDate).replace(
                         '@{USERNAME}', self.project.username).replace(
                             '@{DIR_NAME}', os.path.basename(dest)).replace(
                                 '@{LINE_LENGTH_GUIDE}',
                                 self.project.getProperty(
                                     "pysystestTemplateLineLengthGuide",
                                     80 * "=")).
            encode(
                'utf-8'
            )  # non-ascii chars are unlikely, but a reasonable default is to use utf-8 to match typical XML
        ) for (r1, r2) in tmp['replace']
    ]
    log.debug('Using replacements: %s', replace)

    # copy each template item into the new test dir, applying the replacements;
    # the template's testOutputDir is deliberately not copied
    for c in tmp['copy']:
        target = dest + os.sep + os.path.basename(c)
        if os.path.basename(c) == tmp['testOutputDir']:
            log.debug(" Not copying dir %s" % target)
            continue
        if os.path.exists(target):
            raise Exception('Cannot copy to %s as it already exists' % target)
        self.copy(c, target, replace)
        print(" Copied %s%s" %
              (target, os.sep + '*' if os.path.isdir(target) else ''))

    # create any extra (relative) directories the template asks for
    for d in tmp['mkdir']:
        if os.path.isabs(d):
            log.debug('Skipping creation of absolute directory: %s', d)
        else:
            mkdir(dest + os.sep + d)
    return dest
def __call__(self, *args, **kwargs):
    """Over-ridden call builtin to allow the class instance to be called directly.

    Invoked by thread pool when using multiple worker threads.

    Runs one test end-to-end: set up log handlers and the output dir, import and
    instantiate the test class, run setup/execute/validate (unless skipped/blocked),
    clean up, log the outcome, and return self.
    """
    exc_info = []
    self.testStart = time.time()

    try:
        # stdout - set this up right at the very beginning to ensure we can see the log output in case any later step fails
        self.testFileHandlerStdout = ThreadedStreamHandler(StringIO())
        self.testFileHandlerStdout.setFormatter(PROJECT.formatters.stdout)
        self.testFileHandlerStdout.setLevel(stdoutHandler.level)
        log.addHandler(self.testFileHandlerStdout)

        # set the output subdirectory and purge contents
        if os.path.isabs(self.runner.outsubdir):
            self.outsubdir = os.path.join(self.runner.outsubdir,
                                          self.descriptor.id)
        else:
            self.outsubdir = os.path.join(self.descriptor.output,
                                          self.runner.outsubdir)
        mkdir(self.outsubdir)
        # only purge on the first cycle, and never in validate-only mode
        if self.cycle == 0 and not self.runner.validateOnly:
            self.purgeDirectory(self.outsubdir)
        # multi-cycle runs get a per-cycle subdirectory
        if self.runner.cycle > 1:
            self.outsubdir = os.path.join(self.outsubdir,
                                          'cycle%d' % (self.cycle + 1))
            mkdir(self.outsubdir)

        # run.log handler
        self.testFileHandlerRunLog = ThreadedFileHandler(
            os.path.join(self.outsubdir, 'run.log'))
        self.testFileHandlerRunLog.setFormatter(PROJECT.formatters.runlog)
        self.testFileHandlerRunLog.setLevel(logging.INFO)
        if stdoutHandler.level == logging.DEBUG:
            self.testFileHandlerRunLog.setLevel(logging.DEBUG)
        log.addHandler(self.testFileHandlerRunLog)

        # log the test banner (id, wrapped title, cycle)
        log.info(62 * "=")
        title = textwrap.wrap(
            self.descriptor.title.replace('\n', '').strip(), 56)
        log.info("Id : %s",
                 self.descriptor.id,
                 extra=BaseLogFormatter.tag(LOG_TEST_DETAILS, 0))
        if len(title) > 0:
            log.info("Title: %s",
                     str(title[0]),
                     extra=BaseLogFormatter.tag(LOG_TEST_DETAILS, 0))
        for l in title[1:]:
            log.info(" %s",
                     str(l),
                     extra=BaseLogFormatter.tag(LOG_TEST_DETAILS, 0))
        if self.runner.cycle > 1:
            log.info("Cycle: %s",
                     str(self.cycle + 1),
                     extra=BaseLogFormatter.tag(LOG_TEST_DETAILS, 0))
        log.info(62 * "=")
    except KeyboardInterrupt:
        self.kbrdInt = True
    except Exception:
        # setup failures are collected and reported as BLOCKED below, not raised
        exc_info.append(sys.exc_info())

    # import the test class
    with global_lock:
        try:
            module = import_module(
                os.path.basename(self.descriptor.module),
                [os.path.dirname(self.descriptor.module)], True)
            self.testObj = getattr(module, self.descriptor.classname)(
                self.descriptor, self.outsubdir, self.runner)
        except KeyboardInterrupt:
            self.kbrdInt = True
        except Exception:
            exc_info.append(sys.exc_info())
            # fall back to a plain BaseTest so the outcome can still be recorded
            self.testObj = BaseTest(self.descriptor, self.outsubdir,
                                    self.runner)

    # notify writers that the test is starting; writer errors must not break the run
    for writer in self.runner.writers:
        try:
            if hasattr(writer, 'processTestStarting'):
                writer.processTestStarting(testObj=self.testObj,
                                           cycle=self.cycle)
        except Exception:
            log.warn("caught %s calling processTestStarting on %s: %s",
                     sys.exc_info()[0],
                     writer.__class__.__name__,
                     sys.exc_info()[1],
                     exc_info=1)

    # execute the test if we can
    try:
        if self.descriptor.state != 'runnable':
            self.testObj.addOutcome(SKIPPED,
                                    'Not runnable',
                                    abortOnError=False)
        elif self.runner.mode and self.runner.mode not in self.descriptor.modes:
            self.testObj.addOutcome(SKIPPED,
                                    "Unable to run test in %s mode" %
                                    self.runner.mode,
                                    abortOnError=False)
        elif len(exc_info) > 0:
            self.testObj.addOutcome(BLOCKED,
                                    'Failed to set up test: %s' %
                                    exc_info[0][1],
                                    abortOnError=False)
            for info in exc_info:
                log.warn("caught %s while setting up test %s: %s",
                         info[0],
                         self.descriptor.id,
                         info[1],
                         exc_info=info)
        elif self.kbrdInt:
            log.warn("test interrupt from keyboard")
            self.testObj.addOutcome(BLOCKED,
                                    'Test interrupt from keyboard',
                                    abortOnError=False)
        else:
            try:
                # in validate-only mode, setup/execute are skipped but validate still runs
                if not self.runner.validateOnly:
                    self.testObj.setup()
                    self.testObj.execute()
                self.testObj.validate()
            except AbortExecution as e:
                # replace any partial outcomes with the abort outcome
                del self.testObj.outcome[:]
                self.testObj.addOutcome(e.outcome,
                                        e.value,
                                        abortOnError=False,
                                        callRecord=e.callRecord)
                log.warn('Aborted test due to abortOnError set to true')
            if self.detectCore(self.outsubdir):
                self.testObj.addOutcome(
                    DUMPEDCORE,
                    'Core detected in output subdirectory',
                    abortOnError=False)
    except KeyboardInterrupt:
        self.kbrdInt = True
        self.testObj.addOutcome(BLOCKED,
                                'Test interrupt from keyboard',
                                abortOnError=False)
    except Exception:
        log.warn("caught %s while running test: %s",
                 sys.exc_info()[0],
                 sys.exc_info()[1],
                 exc_info=1)
        self.testObj.addOutcome(BLOCKED,
                                '%s (%s)' %
                                (sys.exc_info()[1], sys.exc_info()[0]),
                                abortOnError=False)

    # call the cleanup method to tear down the test
    try:
        self.testObj.cleanup()
    except KeyboardInterrupt:
        self.kbrdInt = True
        self.testObj.addOutcome(BLOCKED,
                                'Test interrupt from keyboard',
                                abortOnError=False)

    # print summary and close file handles
    try:
        self.testTime = math.floor(100 *
                                   (time.time() - self.testStart)) / 100.0
        log.info("")
        log.info("Test duration: %s", ('%.2f secs' % self.testTime),
                 extra=BaseLogFormatter.tag(LOG_DEBUG, 0))
        log.info("Test final outcome: %s",
                 LOOKUP[self.testObj.getOutcome()],
                 extra=BaseLogFormatter.tag(
                     LOOKUP[self.testObj.getOutcome()].lower(), 0))
        if self.testObj.getOutcomeReason(
        ) and self.testObj.getOutcome() != PASSED:
            log.info("Test failure reason: %s",
                     self.testObj.getOutcomeReason(),
                     extra=BaseLogFormatter.tag(LOG_TEST_OUTCOMES, 0))
        log.info("")
        self.testFileHandlerRunLog.close()
        log.removeHandler(self.testFileHandlerRunLog)
        log.removeHandler(self.testFileHandlerStdout)
    except Exception:
        # best-effort teardown of logging; never let it mask the test outcome
        pass

    # return a reference to self
    return self
def _archiveTestOutputDir(self, id, outputDir, **kwargs):
    """ Creates an archive for the specified test, unless doing so would violate the configured limits
    (e.g. maxArchives).

    :param str id: The testId (plus a cycle suffix if it's a multi-cycle run).
    :param str outputDir: The path of the test output dir.
    """
    # create destDir lazily, only once we know at least one archive will be attempted
    if self.archivesCreated == 0: mkdir(self.destDir)

    if self.archivesCreated == self.maxArchives:
        self.skippedTests.append(outputDir)
        log.debug(
            'Skipping archiving for %s as maxArchives limit is reached',
            id)
        return
    if self.__totalBytesRemaining < 500:
        self.skippedTests.append(outputDir)
        log.debug(
            'Skipping archiving for %s as maxTotalMB limit is reached',
            id)
        return
    self.archivesCreated += 1

    try:
        outputDir = toLongPathSafe(outputDir)
        skippedFiles = []

        # this is performance-critical so worth caching these
        fileExcludesRegex = self.fileExcludesRegex
        fileIncludesRegex = self.fileIncludesRegex
        isPurgableFile = self.runner.isPurgableFile  # NOTE(review): cached but not referenced below — confirm intended

        # per-archive budget is the smaller of the per-archive and whole-run limits
        bytesRemaining = min(int(self.maxArchiveSizeMB * 1024 * 1024),
                             self.__totalBytesRemaining)
        triedTmpZipFile = False

        zippath, myzip = self._newArchive(id)
        filesInZip = 0
        with myzip:
            rootlen = len(outputDir) + 1  # +1 so member names don't start with a separator

            for base, dirs, files in os.walk(outputDir):
                # Just the files, don't bother with the directories for now
                files.sort(key=lambda fn: [fn != 'run.log', fn]
                           )  # be deterministic, and put run.log first

                for f in files:
                    fn = os.path.join(base, f)
                    if fileExcludesRegex is not None and fileExcludesRegex.search(
                            fn.replace('\\', '/')):
                        skippedFiles.append(fn)
                        continue
                    if fileIncludesRegex is not None and not fileIncludesRegex.search(
                            fn.replace('\\', '/')):
                        skippedFiles.append(fn)
                        continue

                    fileSize = os.path.getsize(fn)
                    if fileSize == 0:
                        # Since (if not waiting until end) this gets called before testComplete has had a chance to clean things up, skip the
                        # files that it would have deleted. Don't bother listing these in skippedFiles since user
                        # won't be expecting them anyway
                        continue

                    if bytesRemaining < 500:
                        skippedFiles.append(fn)
                        continue

                    try:
                        if fileSize > bytesRemaining:
                            if triedTmpZipFile:  # to save effort, don't keep trying once we're close - from now on only attempt small files
                                skippedFiles.append(fn)
                                continue
                            triedTmpZipFile = True
                            # Only way to know if it'll fit is to try compressing it
                            log.debug(
                                'File size of %s might push the archive above the limit; creating a temp zip to check',
                                fn)
                            tmpname, tmpzip = self._newArchive(id + '.tmp')
                            try:
                                with tmpzip:
                                    tmpzip.write(fn, 'tmp')
                                    compressedSize = tmpzip.getinfo(
                                        'tmp').compress_size
                                    if compressedSize > bytesRemaining:
                                        log.debug(
                                            'Skipping file as compressed size of %s bytes exceeds remaining limit of %s bytes: %s',
                                            compressedSize, bytesRemaining,
                                            fn)
                                        skippedFiles.append(fn)
                                        continue
                            finally:
                                os.remove(tmpname)

                        # Here's where we actually add it to the real archive
                        memberName = fn[rootlen:].replace('\\', '/')
                        myzip.write(fn, memberName)
                    except Exception as ex:  # might happen due to file locking or similar
                        log.warning(
                            'Failed to add output file "%s" to archive: %s',
                            fn, ex)
                        skippedFiles.append(fn)
                        continue
                    filesInZip += 1
                    bytesRemaining -= myzip.getinfo(
                        memberName).compress_size

            if skippedFiles and fileIncludesRegex is None:  # keep the archive clean if there's an explicit include
                skippedFilesStr = os.linesep.join(
                    [fromLongPathSafe(f) for f in skippedFiles])
                skippedFilesStr = skippedFilesStr.encode('utf-8')
                myzip.writestr('__pysys_skipped_archive_files.txt',
                               skippedFilesStr)

        if filesInZip == 0:
            # don't leave empty zips around
            log.debug('No files added to zip so deleting: %s', zippath)
            self.archivesCreated -= 1
            os.remove(zippath)
            return

        # charge the whole-run budget with the actual on-disk size, then publish
        self.__totalBytesRemaining -= os.path.getsize(zippath)
        self.runner.publishArtifact(zippath, 'TestOutputArchive')
    except Exception:
        # count a failed archive as skipped, but still propagate the error
        self.skippedTests.append(outputDir)
        raise