def textSummary(args, filesAndTests, gCovResults, prefix=""):
    '''Print a per-file coverage table, colorized by score.

    Files covered only indirectly are highlighted in magenta instead of
    being scored. Prints nothing when there is no coverage data.
    '''
    summary = GCov.computeSummary(filesAndTests, gCovResults)
    if not args.includeTestSources:
        summary = GCovSummary.removeTestSourceFiles(summary)
    if not summary:
        return
    if args.explain:
        # Dump the raw summary structure for debugging.
        pprint.PrettyPrinter(indent=2, width=150).pprint(summary)

    widest = max(len(name) for name in summary)
    headerFormat = "%s%-" + str(widest) + "s %6s"
    print(headerFormat % (prefix, "File Path", "Score"))
    rowFormat = "%s%-" + str(widest) + "s %6.2f"
    for testedFile in sorted(summary.keys()):
        coverage = summary[testedFile]["coverage"]
        row = rowFormat % (prefix, testedFile, coverage)
        if summary[testedFile]["direct"] == "indirect":
            ANSITerm.printColorized("magenta", row)
        else:
            # NOTE(review): eval() of a command-line string is unsafe on
            # untrusted input; consider ast.literal_eval instead.
            LongBow.scorePrinter(eval(args.distribution), coverage, row)
    return
def textFunctionResult(file, function, maxFileNameLength, maxFunctionNameLength):
    '''Print one function's complexity row and return its cyclomatic complexity.'''
    complexityScore = computeComplexityScore(function.cyclomatic_complexity)
    rowFormat = ("%-" + str(maxFileNameLength) + "s %-"
                 + str(maxFunctionNameLength) + "s %6d %2d %6.2f")
    row = rowFormat % (file.filename, function.name, function.start_line,
                       function.cyclomatic_complexity, complexityScore)
    LongBow.scorePrinter([90, 80], complexityScore, row)
    return function.cyclomatic_complexity
def gradeAndPrint(targets, testDirs=None, problemsOnly=False, prefix=""):
    """Print a gcov coverage score table for the given targets.

    Returns False when no gcov results exist (user must run 'make check'),
    True otherwise. With problemsOnly, only files under 100% are listed.
    """
    if testDirs is None:
        # Avoid a shared mutable default argument.
        testDirs = []
    filesAndTests = getFilesAndTests(targets, testDirs)
    newGCovResults = [GCov.getCoverage(pair[1]) for pair in filesAndTests]
    summarys = GCov.computeSummary(filesAndTests, newGCovResults)
    if len(summarys) < 1:
        print("%sNo GCov Results - Please be sure to run 'make check' first" % prefix)
        return False
    summarys = GCovSummary.removeTestSourceFiles(summarys)
    paths = list(summarys.keys())
    if problemsOnly:
        paths = [p for p in paths if summarys[p]["coverage"] < 100]
    if not paths:
        # BUG FIX: previously max() raised ValueError on an empty sequence
        # when problemsOnly filtered every file out; nothing to report.
        return True
    distribution = [99, 90]
    maximumFileLength = max(len(os.path.relpath(p)) for p in paths)
    headerFormat = "%s%-" + str(maximumFileLength) + "s %6s"
    print(headerFormat % (prefix, "File Path", "Score"))
    rowFormat = "%s%-" + str(maximumFileLength) + "s %6.2f"
    for path in sorted(paths):
        row = rowFormat % (prefix, os.path.relpath(path), summarys[path]["coverage"])
        LongBow.scorePrinter(distribution, summarys[path]["coverage"], row)
    return True
def textFileVocabulary(file, maxFileNameLength, printFormat=""):
    """Print one file-level vocabulary row (average token count and score)."""
    # BUG FIX: the vocabulary score was computed from file.average_CCN (the
    # cyclomatic-complexity average). The row prints file.average_token, and
    # the parallel complexity printer scores the same metric it prints, so
    # derive the score from the vocabulary metric.
    score = computeVocabularyScore(file.average_token)
    if printFormat == "":
        printFormat = "%-" + str(maxFileNameLength) + "s %6.2f %6.2f"
    string = printFormat % (file.filename, file.average_token, score)
    LongBow.scorePrinter([90, 80], score, string)
    return
def textScore(distribution, report, maxFileNameLength, prefix=""):
    '''Print a single style-report row, colorized by its score.'''
    row = "%s%-*s %6d %6d %6.2f" % (prefix,
                                    maxFileNameLength,
                                    report["fileName"],
                                    report["totalLines"],
                                    report["nonCompliantLines"],
                                    report["score"])
    LongBow.scorePrinter(distribution, report["score"], row)
    return
def csvFunctionResult(file, function):
    """Emit one CSV vocabulary row for a function; return its token count."""
    # BUG FIX: the score was computed from file.token_count even though the
    # row reports (and this function returns) function.token_count; the
    # complexity counterpart scores the same per-function metric it prints.
    score = computeVocabularyScore(function.token_count)
    string = "vocabulary,%s,%s,%d,%d,%.2f" % (file.filename, function.name,
                                              function.start_line,
                                              function.token_count, score)
    LongBow.scorePrinter([90, 80], score, string)
    return function.token_count
def csvFunctionResult(file, function):
    '''Emit one CSV complexity row for a function; return its complexity.'''
    complexity = function.cyclomatic_complexity
    complexityScore = computeComplexityScore(complexity)
    row = "complexity,%s,%s,%d,%d,%.2f" % (file.filename, function.name,
                                           function.start_line, complexity,
                                           complexityScore)
    LongBow.scorePrinter([90, 80], complexityScore, row)
    return complexity
def csvSummary(distribution, documentation):
    """Emit one CSV documentation-score row per documented entry."""
    formatString = "documentation,%s,%d,%d,%.2f%%"
    for entry in documentation:
        badLines = len(documentation[entry])
        totalLines = LongBow.countLines(entry)
        # BUG FIX: a zero line count previously raised ZeroDivisionError;
        # treat an empty file as fully compliant.
        if totalLines == 0:
            score = 100.0
        else:
            score = float(totalLines - badLines) / float(totalLines) * 100.0
        LongBow.scorePrinter(distribution, score,
                             formatString % (entry, totalLines, badLines, score))
    return
def gradeAndPrint(targets, problemsOnly=False, printPrefix=""):
    # Grade each target module's conformance and print one colorized row per
    # module, scored by the module's minimum conformance score. Modules that
    # cannot be graded print a red explanation line instead. With
    # problemsOnly, rows scoring a perfect 100.0 are suppressed.
    if len(targets) < 1:
        print "No Files To Grade"
        return
    distribution = [99, 90]
    # Column width: at least as wide as the header and every target name.
    maxFileNameLength = max(max(map(lambda target: len(target), targets)), len("File Name"))
    moduleConformanceSet = ModuleSetConformanceContainer()
    headers = getConformanceHeaders()
    # pformat: prefix + left-justified file-name column; nformat appends one
    # 15-wide right-justified column per conformance header.
    pformat = '{prefix}{:<{maxFileNameLength}}'
    nformat = pformat
    for header in headers:
        nformat = nformat + '{:>15}'
    print nformat.format('File Name', *headers, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
    for target in targets:
        module = Module(target)
        if module.isTestSourceName():
            # Test sources are not graded.
            continue
        fileNamePrefix = module.getModuleName()
        path = module.getModulePath()
        try:
            moduleConformance = computeModuleConformance(path, fileNamePrefix)
            if not moduleConformance.processModule():
                # Module produced no usable conformance data; skip silently.
                pass
            else:
                moduleConformanceSet.addConformanceContainer(moduleConformance)
                scores = moduleConformance.getScores()
                # Track the worst per-header score; it drives the row color.
                minScore = 100.0
                for key in scores:
                    score = scores[key]
                    if score < minScore:
                        minScore = score
                    # Replace the numeric score with its display string.
                    scores[key] = '%3.1f'%score
                if problemsOnly and minScore == 100.0:
                    continue
                # Fill a value per header, 'N/A' where no score exists.
                printVals=[]
                for hval in headers:
                    score = 'N/A'
                    if hval in scores:
                        score = scores[hval]
                    printVals.append(score)
                line = nformat.format(target, *printVals, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
                LongBow.scorePrinter(distribution, minScore, line)
        except NoObjectFileException as e:
            # NOTE(review): this format string has no {msg} placeholder, so
            # the msg=e keyword is ignored by str.format (unused kwargs are
            # allowed) -- the exception text is never shown here.
            eformat = pformat + "Could Not Grade: No .o file found for file"
            line = eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
            print LongBow.buildRed(line)
            pass
        except Exception as e:
            eformat = pformat + "Could Not Grade: {msg}"
            line = eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
            print LongBow.buildRed(line)
            pass
    moduleConformanceSet.analyzeConformance()
def csvSummary(distribution, documentation):
    """Emit one CSV documentation-score row per documented entry."""
    formatString = "documentation,%s,%d,%d,%.2f%%"
    for entry in documentation:
        badLines = len(documentation[entry])
        totalLines = LongBow.countLines(entry)
        # BUG FIX: guard against empty files, which previously raised
        # ZeroDivisionError; an empty file counts as fully compliant.
        if totalLines == 0:
            score = 100.0
        else:
            score = float(totalLines - badLines) / float(totalLines) * 100.0
        LongBow.scorePrinter(
            distribution, score,
            formatString % (entry, totalLines, badLines, score))
    return
def textTotal(distribution, complianceList):
    """Print the overall compliance percentage across all targets."""
    totalLines = sum(element.getTotalLines() for element in complianceList)
    totalNonCompliantLines = sum(element.getNonCompliantLines()
                                 for element in complianceList)
    # BUG FIX: an empty list previously crashed reduce() (no initializer)
    # and zero total lines raised ZeroDivisionError; report 100% instead.
    if totalLines == 0:
        value = 100.0
    else:
        value = 100.0 - (100.0 * float(totalNonCompliantLines) / float(totalLines))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return
def csvAverage(args, filesAndTests, gcovResults):
    '''Print the average coverage score as a single colorized value.'''
    summary = GCov.computeSummary(filesAndTests, gcovResults)
    if not args.includeTestSources:
        summary = GCovSummary.removeTestSourceFiles(summary)
    averageScore = GCovSummary.averageCoverage(summary)
    # NOTE(review): eval() of a command-line string is unsafe on untrusted
    # input; consider ast.literal_eval for parsing the distribution.
    LongBow.scorePrinter(eval(args.distribution), averageScore, "%.2f" % (averageScore))
    return
def csvSummary(args, filesAndTests, gCovResults):
    '''Emit a "file,coverage" CSV row for every covered file.'''
    summary = GCov.computeSummary(filesAndTests, gCovResults)
    if not args.includeTestSources:
        summary = GCovSummary.removeTestSourceFiles(summary)
    if len(summary) > 0:
        for testedFile in sorted(summary.keys()):
            coverage = summary[testedFile]["coverage"]
            # NOTE(review): eval() of a command-line string is unsafe on
            # untrusted input; consider ast.literal_eval.
            LongBow.scorePrinter(eval(args.distribution), coverage,
                                 "%s,%.2f" % (testedFile, coverage))
    return
def textualSummary(distribution, documentation):
    """Print one padded documentation-score row per documented entry."""
    maxWidth = max(map(len, documentation)) if documentation else 0
    formatString = "%-" + str(maxWidth) + "s %8d %8d %.2f%%"
    for entry in documentation:
        badLines = len(documentation[entry])
        totalLines = LongBow.countLines(entry)
        # BUG FIX: empty files previously raised ZeroDivisionError; count
        # them as fully documented.
        if totalLines == 0:
            score = 100.0
        else:
            score = float(totalLines - badLines) / float(totalLines) * 100.0
        LongBow.scorePrinter(distribution, score,
                             formatString % (entry, totalLines, badLines, score))
    return
def textualAverage(distribution, documentation, format):
    """Print the average documentation score across all entries.

    An empty documentation set averages to 100.0 (nothing to penalize).
    """
    total = 0.0  # renamed from 'sum' to stop shadowing the builtin
    for entry in documentation:
        badLines = len(documentation[entry])
        totalLines = LongBow.countLines(entry)
        # BUG FIX: empty files previously raised ZeroDivisionError; count
        # them as fully documented.
        if totalLines == 0:
            score = 100.0
        else:
            score = float(totalLines - badLines) / float(totalLines) * 100.0
        total += score
    if len(documentation) == 0:
        averageScore = 100.0
    else:
        averageScore = total / float(len(documentation))
    LongBow.scorePrinter(distribution, averageScore, format % averageScore)
def textualSummary(distribution, documentation):
    """Print one padded documentation-score row per documented entry."""
    maxWidth = 0
    for entry in documentation:
        if len(entry) > maxWidth:
            maxWidth = len(entry)
    formatString = "%-" + str(maxWidth) + "s %8d %8d %.2f%%"
    for entry in documentation:
        badLines = len(documentation[entry])
        totalLines = LongBow.countLines(entry)
        # BUG FIX: a zero line count previously raised ZeroDivisionError;
        # an empty file counts as fully documented.
        if totalLines == 0:
            score = 100.0
        else:
            score = float(totalLines - badLines) / float(totalLines) * 100.0
        LongBow.scorePrinter(
            distribution, score,
            formatString % (entry, totalLines, badLines, score))
    return
def gradeAndPrint(targets, testDirs=None, problemsOnly=False, prefix=""):
    """Print a gcov coverage score table for the given targets.

    Returns False when no gcov results exist (user must run 'make check'),
    True otherwise. With problemsOnly, only files under 100% are listed.
    """
    if testDirs is None:
        # Avoid a shared mutable default argument.
        testDirs = []
    filesAndTests = getFilesAndTests(targets, testDirs)
    newGCovResults = [GCov.getCoverage(pair[1]) for pair in filesAndTests]
    summarys = GCov.computeSummary(filesAndTests, newGCovResults)
    if len(summarys) < 1:
        print("%sNo GCov Results - Please be sure to run 'make check' first" % prefix)
        return False
    summarys = GCovSummary.removeTestSourceFiles(summarys)
    paths = list(summarys.keys())
    if problemsOnly:
        paths = [p for p in paths if summarys[p]["coverage"] < 100]
    if not paths:
        # BUG FIX: previously max() raised ValueError on an empty sequence
        # when problemsOnly filtered every file out; nothing to report.
        return True
    distribution = [99, 90]
    maximumFileLength = max(len(os.path.relpath(p)) for p in paths)
    headerFormat = "%s%-" + str(maximumFileLength) + "s %6s"
    print(headerFormat % (prefix, "File Path", "Score"))
    rowFormat = "%s%-" + str(maximumFileLength) + "s %6.2f"
    for path in sorted(paths):
        row = rowFormat % (prefix, os.path.relpath(path), summarys[path]["coverage"])
        LongBow.scorePrinter(distribution, summarys[path]["coverage"], row)
    return True
def csvFileComplexity(file):
    '''Emit one CSV complexity row summarizing a whole file.'''
    averageComplexity = file.average_CCN
    complexityScore = computeComplexityScore(averageComplexity)
    row = "complexity,%s,,,%.2f,%.2f" % (file.filename, averageComplexity,
                                         complexityScore)
    LongBow.scorePrinter([90, 80], complexityScore, row)
    return
def csvFunctionResult(file, function):
    '''Emit a per-function CSV complexity row; return the raw complexity.'''
    ccn = function.cyclomatic_complexity
    ccnScore = computeComplexityScore(ccn)
    fields = (file.filename, function.name, function.start_line, ccn, ccnScore)
    LongBow.scorePrinter([90, 80], ccnScore, "complexity,%s,%s,%d,%d,%.2f" % fields)
    return ccn
def textAverage(distribution, complianceList):
    """Print the mean compliance score over all targets."""
    # BUG FIX: an empty list previously crashed in reduce() (no initializer)
    # and in the division; report 100.0, matching the documentation-average
    # convention used elsewhere in these tools.
    if len(complianceList) == 0:
        value = 100.0
    else:
        totalScore = sum(target.getScore() for target in complianceList)
        value = float(totalScore) / float(len(complianceList))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return
def textFileComplexity(file, maxFileNameLength):
    '''Print a file-level complexity row padded to maxFileNameLength.'''
    complexityScore = computeComplexityScore(file.average_CCN)
    rowFormat = "%-" + str(maxFileNameLength) + "s %6.2f %6.2f"
    row = rowFormat % (file.filename, file.average_CCN, complexityScore)
    LongBow.scorePrinter([90, 80], complexityScore, row)
    return
def textTotal(distribution, complianceList):
    """Print the overall compliance percentage across all targets."""
    totalLines = sum(element.getTotalLines() for element in complianceList)
    totalNonCompliantLines = sum(element.getNonCompliantLines()
                                 for element in complianceList)
    # BUG FIX: an empty list previously crashed reduce() (no initializer)
    # and zero total lines raised ZeroDivisionError; report 100% instead.
    if totalLines == 0:
        value = 100.0
    else:
        value = 100.0 - (100.0 * float(totalNonCompliantLines) / float(totalLines))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return
def csvFileVocabulary(file):
    """Emit one CSV vocabulary row summarizing a whole file."""
    # BUG FIX: the score was computed from file.token_count while the row
    # reports file.average_token; the complexity counterpart scores the
    # same per-file average it prints, so score the printed metric.
    score = computeVocabularyScore(file.average_token)
    string = "vocabulary,%s,,,%.2f,%.2f" % (file.filename, file.average_token, score)
    LongBow.scorePrinter([90, 80], score, string)
    return
def csvScore(distribution, report):
    '''Emit one CSV style row for a report dictionary.'''
    row = "style,%s,%d,%d,%.2f" % (report["fileName"],
                                   report["totalLines"],
                                   report["nonCompliantLines"],
                                   report["score"])
    LongBow.scorePrinter(distribution, report["score"], row)
    return
def textAverage(distribution, complianceList):
    """Print the mean compliance score over all targets."""
    # BUG FIX: an empty list previously crashed in reduce() (no initializer)
    # and in the division; report 100.0 for "nothing to penalize".
    if len(complianceList) == 0:
        value = 100.0
    else:
        scores = [target.getScore() for target in complianceList]
        value = float(sum(scores)) / float(len(complianceList))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return
def gradeAndPrint(targets, objectDirs, problemsOnly=False, printPrefix=""):
    # Grade each target module's conformance (resolving object files from
    # objectDirs) and print one colorized row per module, scored by the
    # module's minimum conformance score. Modules that cannot be graded
    # print a red explanation line. With problemsOnly, perfect-100 rows
    # are suppressed.
    if len(targets) < 1:
        print "No Files To Grade"
        return
    distribution = [99, 90]
    # Column width: at least as wide as the header and every target name.
    maxFileNameLength = max(max(map(lambda target: len(target), targets)), len("File Name"))
    moduleConformanceSet = ModuleSetConformanceContainer()
    headers = getConformanceHeaders()
    # pformat: prefix + left-justified file-name column; nformat appends one
    # 15-wide right-justified column per conformance header.
    pformat = '{prefix}{:<{maxFileNameLength}}'
    nformat = pformat
    for header in headers:
        nformat = nformat + '{:>15}'
    print nformat.format('File Name', *headers, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
    for target in targets:
        module = Module(target, objectDirs)
        if module.isTestSourceName():
            # Test sources are not graded.
            continue
        # NOTE(review): fileNamePrefix and path are computed but unused in
        # this variant -- computeModuleConformance takes the module itself.
        fileNamePrefix = module.getModuleName()
        path = module.getModulePath()
        try:
            moduleConformance = computeModuleConformance(module)
            if not moduleConformance.processModule():
                # Module produced no usable conformance data; skip silently.
                pass
            else:
                moduleConformanceSet.addConformanceContainer(moduleConformance)
                scores = moduleConformance.getScores()
                # Track the worst per-header score; it drives the row color.
                minScore = 100.0
                for key in scores:
                    score = scores[key]
                    if score < minScore:
                        minScore = score
                    # Replace the numeric score with its display string.
                    scores[key] = '%3.1f' % score
                if problemsOnly and minScore == 100.0:
                    continue
                # Fill a value per header, 'N/A' where no score exists.
                printVals = []
                for hval in headers:
                    score = 'N/A'
                    if hval in scores:
                        score = scores[hval]
                    printVals.append(score)
                line = nformat.format(target, *printVals, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
                LongBow.scorePrinter(distribution, minScore, line)
        except NoObjectFileException as e:
            # NOTE(review): this format string has no {msg} placeholder, so
            # the msg=e keyword is ignored by str.format (unused kwargs are
            # allowed) -- the exception text is never shown here.
            eformat = pformat + "Could Not Grade: No .o file found for file"
            line = eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
            print LongBow.buildRed(line)
            pass
        except Exception as e:
            eformat = pformat + "Could Not Grade: {msg}"
            line = eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
            print LongBow.buildRed(line)
            pass
    moduleConformanceSet.analyzeConformance()