def baseline(destination, srcdirs, extract=None, remove=None, branch_coverage=False):
    """Create an lcov coverage baseline tracefile in `destination`.

    Args:
        destination: path of the baseline tracefile; its directory is created.
        srcdirs: non-empty list of source directories, passed to lcov via `-d`.
        extract: optional list of patterns to keep (forwarded to `filter`).
        remove: optional list of patterns to drop (forwarded to `filter`).
        branch_coverage: enable branch coverage (forwarded to `makeopts`).

    Raises:
        LcovError: when any lcov invocation fails.
    """
    # Never use mutable default arguments: a shared `[]` default persists
    # across calls. Normalize the `None` sentinel to a fresh list instead.
    extract = [] if extract is None else extract
    remove = [] if remove is None else remove
    assert len(srcdirs) != 0, "Need atleast one srcdir to collect coverage from"
    opts = makeopts(destination, branch_coverage)
    # set src
    for s in srcdirs:
        opts += ['-d', s]
    # create the baseline
    try:
        # make sure the target dir exists
        io.create_dir(destination)
        # create the baseline; `-i` captures initial (zero) coverage
        check_output(['lcov', '-c', '-i'] + opts)
        # apply file filtering
        filter(destination, extract=extract, remove=remove)
    except CalledProcessError as e:
        logger.debug("Lcov reported: %s" % e.output)
        raise LcovError("Setting the lcov baseline failed")
def collect(destination, srcdirs, baseline=None, extract=None, remove=None, branch_coverage=False):
    """Collect coverage data into `destination` using lcov.

    Args:
        destination: path of the output tracefile; its directory is created.
        srcdirs: non-empty list of source directories, passed to lcov via `-d`.
        baseline: optional baseline tracefile to combine with (`lcov -a`).
        extract: optional list of patterns to keep (forwarded to `filter`).
        remove: optional list of patterns to drop (forwarded to `filter`).
        branch_coverage: enable branch coverage (forwarded to `makeopts`).

    Raises:
        LcovError: when any lcov invocation fails.
    """
    # Never use mutable default arguments: a shared `[]` default persists
    # across calls. Normalize the `None` sentinel to a fresh list instead.
    extract = [] if extract is None else extract
    remove = [] if remove is None else remove
    assert len(srcdirs) != 0, "Need atleast one srcdir to collect coverage from"
    opts = makeopts(destination, branch_coverage)
    # set src
    for s in srcdirs:
        opts += ['-d', s]
    try:
        # make sure the target dir exists
        io.create_dir(destination)
        # collect the coverage
        logger.debug(['lcov', '-c'] + opts)
        check_output(['lcov', '-c'] + opts)
        if baseline is not None:
            # combine the data with the baseline
            check_output(['lcov', '-a', baseline, '-a', destination] +
                         makeopts(destination, branch_coverage))
        # finally filter the collected data
        filter(destination, extract=extract, remove=remove)
    except CalledProcessError as e:
        logger.debug("Lcov reported: %s" % e.output)
        raise LcovError("Collecting coverage using lcov failed")
def clean(self):
    """Wipe the temp directory and recreate it for subsequent use."""
    logger.debug("Cleaning `%s`" % config.TEMP_DIR)
    # Only attempt removal when the directory actually exists.
    if os.path.isdir(config.TEMP_DIR):
        shutil.rmtree(config.TEMP_DIR)
    # Recreate the temp tree so later writes have a place to land.
    io.create_dir(os.path.join(config.TEMP_DIR, "bla"))
def store(self):
    """
    Store the completed test results to the results directory.

    raises:
        IOError: Unable to write results to disk.
    """
    # Remove any stale results for this test before writing fresh ones.
    self.clean()
    # Ensure the storage directory for the result file exists.
    target = self.path()
    io.create_dir(target)
    try:
        # Serialize the result as pretty-printed JSON (utf8 by default).
        with open(target, 'w') as out:
            json.dump(self.result.serialize(), out, sort_keys=True, indent=4)
    except IOError as err:
        # A partial file is worse than none: wipe it before re-raising.
        self.clean()
        raise IOError("Could not write test results to disk: %s" % str(err))
    logger.debug("Stored results in `%s`" % target)
def collect(destination, srcdirs, baseline=None, extract=None, remove=None, branch_coverage=False):
    """Collect coverage data into `destination` using lcov.

    Args:
        destination: path of the output tracefile; its directory is created.
        srcdirs: non-empty list of source directories, passed to lcov via `-d`.
        baseline: optional baseline tracefile to combine with (`lcov -a`).
        extract: optional list of patterns to keep (forwarded to `filter`).
        remove: optional list of patterns to drop (forwarded to `filter`).
        branch_coverage: enable branch coverage (forwarded to `makeopts`).

    Raises:
        LcovError: when any lcov invocation fails.
    """
    # Never use mutable default arguments: a shared `[]` default persists
    # across calls. Normalize the `None` sentinel to a fresh list instead.
    extract = [] if extract is None else extract
    remove = [] if remove is None else remove
    assert len(srcdirs) != 0, "Need atleast one srcdir to collect coverage from"
    opts = makeopts(destination, branch_coverage)
    # set src
    for s in srcdirs:
        opts += ['-d', s]
    try:
        # make sure the target dir exists
        io.create_dir(destination)
        # collect the coverage
        logger.debug(['lcov', '-c'] + opts)
        check_output(['lcov', '-c'] + opts)
        if baseline is not None:
            # combine the data with the baseline
            check_output(['lcov', '-a', baseline, '-a', destination] +
                         makeopts(destination, branch_coverage))
        # finally filter the collected data
        filter(destination, extract=extract, remove=remove)
    except CalledProcessError as e:
        logger.debug("Lcov reported: %s" % e.output)
        raise LcovError("Collecting coverage using lcov failed")
def store(self):
    """
    Store the completed test results to the results directory.

    raises:
        IOError: Unable to write results to disk.
    """
    # Drop any previously stored results for this test first.
    self.clean()
    # Make sure the destination directory is in place.
    result_path = self.path()
    io.create_dir(result_path)
    try:
        # Dump the serialized result as indented, key-sorted JSON.
        with open(result_path, 'w') as handle:
            json.dump(self.result.serialize(), handle, sort_keys=True, indent=4)
    except IOError as err:
        # Never leave a half-written result behind; clean up, then re-raise.
        self.clean()
        raise IOError("Could not write test results to disk: %s" % str(err))
    logger.debug("Stored results in `%s`" % result_path)
def log(self, logdata):
    """Write `logdata` to the test's output log file for collectors to use."""
    # Resolve where this test's log lives.
    log_file = self.test.log_path()
    # Make sure the containing directory exists before writing.
    io.create_dir(log_file)
    with open(log_file, 'w') as out:
        out.write(logdata)
def collect(self, test):
    """Copy the test's log file into its results directory; best-effort."""
    # Destination inside the test's results directory.
    dest = os.path.join(test.env()['RESULTS_DIR'], LogCollector.FILENAME)
    io.create_dir(dest)
    try:
        shutil.copy(test.log_path(), dest)
    except IOError as err:
        # Best-effort: report the failure but do not propagate it.
        logger.error("Failed to copy the log file to the test results")
        logger.debug("OS reported: %s" % str(err))
    return None
def collect(self, test):
    """Copy the test's log into the results directory; failures are logged only."""
    # Build the target path under the test's RESULTS_DIR.
    target = os.path.join(test.env()['RESULTS_DIR'], LogCollector.FILENAME)
    io.create_dir(target)
    try:
        shutil.copy(test.log_path(), target)
    except IOError as err:
        # Swallow the error deliberately: log collection is best-effort.
        logger.error("Failed to copy the log file to the test results")
        logger.debug("OS reported: %s" % str(err))
    return None
def _store_result(self, result):
    """Persist `result` and move its files out of the temp tree.

    Stores the result via JsonStorageProvider, relocates every file found
    under the temporary result directory to the matching path relative to
    the current working directory, then deletes the temp directory.
    """
    JsonStorageProvider(result).store()
    temp_result_dir = os.path.join(config.TEMP_DIR, result.storage_dir())
    # Walk the temp tree and move each file to its final location.
    for root, _dirs, filenames in os.walk(temp_result_dir):
        for name in filenames:
            src = os.path.join(root, name)
            # Final location mirrors the layout relative to TEMP_DIR.
            dest = os.path.relpath(src, config.TEMP_DIR)
            logger.debug("Moving result file `%s`", dest)
            io.create_dir(dest)
            shutil.move(src, dest)
    # After moving all the result files, the temp directory is obsolete.
    logger.debug("Removing temporary result directory: `%s`." % temp_result_dir)
    shutil.rmtree(temp_result_dir, ignore_errors=True)
def collect(self, test):
    """Collect rulestat counters for `test`.

    Reads the rulestat file at `self.statpath`, stores a copy in the
    test's results directory, and returns the parsed counters.

    Returns:
        dict mapping rule name to integer count; empty dict when no
        rulestat data exists for this test.
    """
    try:
        with open(self.statpath) as fh:
            out = fh.read()
    except IOError as e:
        # `logger.warn` is a deprecated alias of `warning`; also surface
        # the underlying OS error instead of silently dropping it.
        logger.warning("No rulestat data found for test `%s`" % test.name)
        logger.debug("OS reported: %s" % str(e))
        return {}
    # save the results in the test results
    path = os.path.join(test.env()['RESULTS_DIR'],
                        RulestatCollector.RULESTAT_RESULT_FILE)
    io.create_dir(path)
    with open(path, 'w') as fh:
        fh.write(out)
    # parse the key value pairs into a dict
    parsed = kv_colon.parse(out)
    # interpret the values as integers
    return {key: int(value) for key, value in parsed.items()}
def collect(self, test):
    """Collect rulestat counters for `test`.

    Reads the rulestat file at `self.statpath`, stores a copy in the
    test's results directory, and returns the parsed counters.

    Returns:
        dict mapping rule name to integer count; empty dict when no
        rulestat data exists for this test.
    """
    try:
        with open(self.statpath) as fh:
            out = fh.read()
    except IOError as e:
        # `logger.warn` is a deprecated alias of `warning`; also surface
        # the underlying OS error instead of silently dropping it.
        logger.warning("No rulestat data found for test `%s`" % test.name)
        logger.debug("OS reported: %s" % str(e))
        return {}
    # save the results in the test results
    path = os.path.join(test.env()['RESULTS_DIR'],
                        RulestatCollector.RULESTAT_RESULT_FILE)
    io.create_dir(path)
    with open(path, 'w') as fh:
        fh.write(out)
    # parse the key value pairs into a dict
    parsed = kv_colon.parse(out)
    # interpret the values as integers
    return {key: int(value) for key, value in parsed.items()}
def baseline(destination, srcdirs, extract=None, remove=None, branch_coverage=False):
    """Create an lcov coverage baseline tracefile in `destination`.

    Args:
        destination: path of the baseline tracefile; its directory is created.
        srcdirs: non-empty list of source directories, passed to lcov via `-d`.
        extract: optional list of patterns to keep (forwarded to `filter`).
        remove: optional list of patterns to drop (forwarded to `filter`).
        branch_coverage: enable branch coverage (forwarded to `makeopts`).

    Raises:
        LcovError: when any lcov invocation fails.
    """
    # Never use mutable default arguments: a shared `[]` default persists
    # across calls. Normalize the `None` sentinel to a fresh list instead.
    extract = [] if extract is None else extract
    remove = [] if remove is None else remove
    assert len(srcdirs) != 0, "Need atleast one srcdir to collect coverage from"
    opts = makeopts(destination, branch_coverage)
    # set src
    for s in srcdirs:
        opts += ['-d', s]
    # create the baseline
    try:
        # make sure the target dir exists
        io.create_dir(destination)
        # create the baseline; `-i` captures initial (zero) coverage
        check_output(['lcov', '-c', '-i'] + opts)
        # apply file filtering
        filter(destination, extract=extract, remove=remove)
    except CalledProcessError as e:
        logger.debug("Lcov reported: %s" % e.output)
        raise LcovError("Setting the lcov baseline failed")