def textFunctionResult(file, function, maxFileNameLength, maxFunctionNameLength):
    """Print one aligned, colorized complexity row for a single function.

    Returns the function's cyclomatic complexity so callers can aggregate.
    """
    complexity = function.cyclomatic_complexity
    rating = computeComplexityScore(complexity)
    rowFormat = ("%-" + str(maxFileNameLength) + "s %-" +
                 str(maxFunctionNameLength) + "s %6d %2d %6.2f")
    row = rowFormat % (file.filename, function.name, function.start_line,
                       complexity, rating)
    LongBow.scorePrinter([90, 80], rating, row)
    return complexity
Example #2
0
def gradeAndPrint(targets,
                  exemplarCommand,
                  exemplarConfig,
                  problemsOnly=False,
                  prefix=""):
    complianceList = []
    problemList = []
    for target in targets:
        try:
            complianceList.append(
                SyntaxCompliance(target, exemplarCommand,
                                 exemplarConfig).check())
        except:
            problemList.append(target)
            pass
    complianceList = sorted(complianceList, key=lambda k: k.getFileName())
    if problemsOnly:
        complianceList = filter(lambda entry: entry.getScore() < 100,
                                complianceList)
    distribution = [99, 90]
    textSummary(distribution, complianceList, prefix)

    for target in problemList:
        print LongBow.buildRed("%s%s could not be evaluated" %
                               (prefix, target))
Example #3
0
def textSummary(args, filesAndTests, gCovResults, prefix=""):
    """Print a per-file coverage table (path, score), colorized by score.

    args: parsed options; reads includeTestSources, explain, and
          distribution (a string that is eval'ed into a score distribution).
    filesAndTests, gCovResults: inputs for GCov.computeSummary.
    prefix: text prepended to every printed line.
    """

    summary = GCov.computeSummary(filesAndTests, gCovResults)

    # Test sources are excluded from the report unless explicitly requested.
    if not args.includeTestSources:
        summary = GCovSummary.removeTestSourceFiles(summary)

    if len(summary) == 0:
        return

    # --explain dumps the raw summary structure for debugging.
    if args.explain:
        pp = pprint.PrettyPrinter(indent=2, width=150)
        pp.pprint(summary)

    # Width of the longest file name, used to align the table columns.
    maximumFileLength = max(map(lambda entry: len(entry), summary))

    format = "%s%-" + str(maximumFileLength) + "s %6s"
    print format % (prefix, "File Path", "Score")

    format = "%s%-" + str(maximumFileLength) + "s %6.2f"
    for testedFile in sorted(summary.keys()):
        string = format % (prefix, testedFile, summary[testedFile]["coverage"])
        # Indirectly-covered files are highlighted instead of scored.
        if summary[testedFile]["direct"] == "indirect":
            ANSITerm.printColorized("magenta", string)
        else:
            # NOTE(review): eval of a CLI-supplied string -- acceptable in a
            # trusted developer tool, unsafe on untrusted input.
            LongBow.scorePrinter(eval(args.distribution),
                                 summary[testedFile]["coverage"], string)

    return
def textFunctionResult(file, function, maxFileNameLength, maxFunctionNameLength):
    """Print an aligned complexity row for one function; return its CCN."""
    ccn = function.cyclomatic_complexity
    score = computeComplexityScore(ccn)
    template = "%%-%ds %%-%ds %%6d %%2d %%6.2f" % (maxFileNameLength,
                                                   maxFunctionNameLength)
    LongBow.scorePrinter([90, 80], score,
                         template % (file.filename, function.name,
                                     function.start_line, ccn, score))
    return ccn
Example #5
0
def gradeAndPrint(targets, testDirs=[], problemsOnly=False, prefix=""):
    """Print a colorized gcov coverage table for the target files.

    Returns True on success, False when no gcov results exist.
    NOTE(review): testDirs=[] is a mutable default; harmless only if
    getFilesAndTests never mutates it -- confirm.
    """
    filesAndTests = getFilesAndTests(targets, testDirs)
    newGCovResults = map(
        lambda fileAndTestFile: GCov.getCoverage(fileAndTestFile[1]),
        filesAndTests)

    summarys = GCov.computeSummary(filesAndTests, newGCovResults)
    if len(summarys) < 1:
        print "%sNo GCov Results - Please be sure to run 'make check' first" % prefix
        return False
    summarys = GCovSummary.removeTestSourceFiles(summarys)

    paths = summarys.keys()
    if problemsOnly:
        # Keep only files with less than full coverage.
        paths = filter(lambda key: summarys[key]["coverage"] < 100, paths)

    distribution = [99, 90]
    # Width of the longest relative path, used for column alignment.
    maximumFileLength = max(
        map(lambda entry: len(os.path.relpath(entry)), paths))
    format = "%s%-" + str(maximumFileLength) + "s %6s"
    print format % (prefix, "File Path", "Score")
    format = "%s%-" + str(maximumFileLength) + "s %6.2f"
    for path in sorted(paths):
        string = format % (prefix, os.path.relpath(path),
                           summarys[path]["coverage"])
        LongBow.scorePrinter(distribution, summarys[path]["coverage"], string)

    return True
Example #6
0
def textFileVocabulary(file, maxFileNameLength, printFormat=""):
    """Print a colorized vocabulary row for one file.

    NOTE(review): the score is derived from file.average_CCN while the
    printed metric is file.average_token -- confirm this is intentional.
    """
    rating = computeVocabularyScore(file.average_CCN)
    if printFormat == "":
        printFormat = "%-" + str(maxFileNameLength) + "s %6.2f %6.2f"
    line = printFormat % (file.filename, file.average_token, rating)
    LongBow.scorePrinter([90, 80], rating, line)
    return
Example #7
0
def textFileVocabulary(file, maxFileNameLength, printFormat=""):
    """Print one file's vocabulary line, colorized by its score.

    NOTE(review): score comes from file.average_CCN but the printed value
    is file.average_token -- verify this mismatch is intended.
    """
    rating = computeVocabularyScore(file.average_CCN)
    if printFormat == "":
        printFormat = "%%-%ds %%6.2f %%6.2f" % maxFileNameLength
    LongBow.scorePrinter([90, 80], rating,
                         printFormat % (file.filename, file.average_token,
                                        rating))
    return
Example #8
0
def textSummary(args, filesAndTests, gCovResults, prefix=""):
    """Print a per-file coverage table, colorized by score.

    args: parsed options; reads includeTestSources, explain, and
          distribution (a string eval'ed into a score distribution).
    filesAndTests, gCovResults: inputs for GCov.computeSummary.
    prefix: text prepended to every printed line.
    """

    summary = GCov.computeSummary(filesAndTests, gCovResults)

    # Test sources are dropped unless explicitly requested.
    if not args.includeTestSources:
        summary = GCovSummary.removeTestSourceFiles(summary)

    if len(summary) == 0:
        return

    # --explain dumps the raw summary structure for debugging.
    if args.explain:
        pp = pprint.PrettyPrinter(indent=2, width=150)
        pp.pprint(summary)

    # Widest file name, used to align the columns.
    maximumFileLength = max(map(lambda entry: len(entry), summary))

    format = "%s%-" + str(maximumFileLength) + "s %6s"
    print format % (prefix, "File Path", "Score")

    format = "%s%-" + str(maximumFileLength) + "s %6.2f"
    for testedFile in sorted(summary.keys()):
        string = format % (prefix, testedFile, summary[testedFile]["coverage"])
        # Indirectly-covered files are highlighted rather than scored.
        if summary[testedFile]["direct"] == "indirect":
            ANSITerm.printColorized("magenta", string)
        else:
            # NOTE(review): eval of a CLI-supplied string -- trusted-tool only.
            LongBow.scorePrinter(eval(args.distribution), summary[testedFile]["coverage"], string)

    return
Example #9
0
def csvSummary(distribution, documentation):
    """Emit one colorized CSV documentation row per file.

    documentation maps a file name to the list of its undocumented lines.
    """
    rowFormat = "documentation,%s,%d,%d,%.2f%%"
    for fileName in documentation:
        undocumented = len(documentation[fileName])
        lineCount = LongBow.countLines(fileName)
        rating = float(lineCount - undocumented) / float(lineCount) * 100.0
        LongBow.scorePrinter(distribution, rating,
                             rowFormat % (fileName, lineCount, undocumented,
                                          rating))
    return
Example #10
0
def csvFunctionResult(file, function):
    """Emit a colorized CSV vocabulary row for a single function.

    Returns the function's token count so callers can aggregate.

    Fixed: the score was computed from file.token_count even though this
    row reports (and returns) the per-function count; the score now uses
    function.token_count, matching the printed value.
    """
    score = computeVocabularyScore(function.token_count)
    string = "vocabulary,%s,%s,%d,%d,%.2f" % (file.filename, function.name,
                                              function.start_line,
                                              function.token_count, score)

    LongBow.scorePrinter([90, 80], score, string)
    return function.token_count
Example #11
0
def textScore(distribution, report, maxFileNameLength, prefix=""):
    """Print one colorized style-score row (file, totals, score)."""
    row = "%s%-*s %6d %6d %6.2f" % (prefix, maxFileNameLength,
                                    report["fileName"],
                                    report["totalLines"],
                                    report["nonCompliantLines"],
                                    report["score"])
    LongBow.scorePrinter(distribution, report["score"], row)
    return
Example #12
0
def csvFunctionResult(file, function):
    """Emit a colorized CSV complexity row for one function; return its CCN."""
    complexity = function.cyclomatic_complexity
    rating = computeComplexityScore(complexity)
    row = "complexity,%s,%s,%d,%d,%.2f" % (file.filename, function.name,
                                           function.start_line, complexity,
                                           rating)
    LongBow.scorePrinter([90, 80], rating, row)
    return complexity
Example #13
0
def textTotal(distribution, complianceList):
    """Print the aggregate compliance percentage across all entries."""
    totalLines = sum(entry.getTotalLines() for entry in complianceList)
    badLines = sum(entry.getNonCompliantLines() for entry in complianceList)
    value = 100.0 - (100.0 * float(badLines) / float(totalLines))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return
Example #14
0
def csvAverage(args, filesAndTests, gcovResults):
    """Print the average coverage across the (optionally filtered) summary."""
    summary = GCov.computeSummary(filesAndTests, gcovResults)

    # Drop test sources from the average unless the caller kept them.
    if not args.includeTestSources:
        summary = GCovSummary.removeTestSourceFiles(summary)

    averaged = GCovSummary.averageCoverage(summary)

    # args.distribution is a string expression such as "[99, 90]".
    LongBow.scorePrinter(eval(args.distribution), averaged, "%.2f" % (averaged))
    return
Example #15
0
def csvSummary(distribution, documentation):
    """Write one colorized CSV documentation row per file."""
    for entry, badList in documentation.items():
        bad = len(badList)
        total = LongBow.countLines(entry)
        score = float(total - bad) / float(total) * 100.0
        LongBow.scorePrinter(
            distribution, score,
            "documentation,%s,%d,%d,%.2f%%" % (entry, total, bad, score))
    return
Example #16
0
def textScore(distribution, report, maxFileNameLength, prefix=""):
    """Print a single colorized style-score line for one report entry."""
    fileName = report["fileName"]
    total = report["totalLines"]
    nonCompliant = report["nonCompliantLines"]
    score = report["score"]
    line = "%s%-*s %6d %6d %6.2f" % (prefix, maxFileNameLength, fileName,
                                     total, nonCompliant, score)
    LongBow.scorePrinter(distribution, score, line)
    return
Example #17
0
def csvAverage(args, filesAndTests, gcovResults):
    """Print one colorized average-coverage value for the summary."""
    summary = GCov.computeSummary(filesAndTests, gcovResults)
    if not args.includeTestSources:
        # Test sources are excluded from the average by default.
        summary = GCovSummary.removeTestSourceFiles(summary)
    score = GCovSummary.averageCoverage(summary)
    # args.distribution is an eval'ed string such as "[99, 90]".
    LongBow.scorePrinter(eval(args.distribution), score, "%.2f" % (score))
    return
Example #18
0
def csvSummary(args, filesAndTests, gCovResults):
    """Emit "file,coverage" CSV rows, colorized by coverage score."""
    summary = GCov.computeSummary(filesAndTests, gCovResults)

    if not args.includeTestSources:
        summary = GCovSummary.removeTestSourceFiles(summary)

    if len(summary) > 0:
        for testedFile in sorted(summary):
            coverage = summary[testedFile]["coverage"]
            # args.distribution is a string expression such as "[99, 90]".
            LongBow.scorePrinter(eval(args.distribution), coverage,
                                 "%s,%.2f" % (testedFile, coverage))

    return
Example #19
0
def csvSummary(args, filesAndTests, gCovResults):
    """Print per-file coverage as colorized CSV lines."""
    summary = GCov.computeSummary(filesAndTests, gCovResults)
    if not args.includeTestSources:
        summary = GCovSummary.removeTestSourceFiles(summary)
    if len(summary) > 0:
        for name in sorted(summary.keys()):
            value = summary[name]["coverage"]
            row = "%s,%.2f" % (name, value)
            LongBow.scorePrinter(eval(args.distribution), value, row)
    return
Example #20
0
def textualSummary(distribution, documentation):
    """Print an aligned, colorized documentation-score table."""
    # Column width of the longest file name.
    maxWidth = 0
    for name in documentation:
        maxWidth = max(maxWidth, len(name))

    rowFormat = "%-" + str(maxWidth) + "s %8d %8d   %.2f%%"
    for name in documentation:
        bad = len(documentation[name])
        total = LongBow.countLines(name)
        score = float(total - bad) / float(total) * 100.0
        LongBow.scorePrinter(distribution, score,
                             rowFormat % (name, total, bad, score))
    return
Example #21
0
def textualAverage(distribution, documentation, format):
    """Print the mean documentation score across all files (100 if none)."""
    accumulated = 0.0

    for name in documentation:
        bad = len(documentation[name])
        total = LongBow.countLines(name)
        accumulated += float(total - bad) / float(total) * 100.0

    # An empty set of files counts as fully documented.
    if not documentation:
        averageScore = 100.0
    else:
        averageScore = accumulated / float(len(documentation))

    LongBow.scorePrinter(distribution, averageScore, format % averageScore)
Example #22
0
def textualAverage(distribution, documentation, format):
    """Print the average documentation score (100.0 for an empty set)."""
    count = len(documentation)
    if count == 0:
        average = 100.0
    else:
        total = 0.0
        for name in documentation:
            bad = len(documentation[name])
            lines = LongBow.countLines(name)
            total += float(lines - bad) / float(lines) * 100.0
        average = total / float(count)

    LongBow.scorePrinter(distribution, average, format % average)
Example #23
0
def textualSummary(distribution, documentation):
    """Print a documentation-score table aligned to the widest file name."""
    width = 0
    for name in documentation:
        width = max(width, len(name))

    template = "%-" + str(width) + "s %8d %8d   %.2f%%"
    for name, badList in documentation.items():
        bad = len(badList)
        total = LongBow.countLines(name)
        score = float(total - bad) / float(total) * 100.0
        LongBow.scorePrinter(distribution, score,
                             template % (name, total, bad, score))
    return
Example #24
0
def exclude(args, complianceList):
    """Drop entries whose letter grade appears in args.exclude (CSV list)."""
    excludedGrades = [token.strip() for token in args.exclude.split(",")]
    # args.distribution is a string expression eval'ed into a distribution.
    keep = lambda entry: (LongBow.score(eval(args.distribution),
                                        entry.getScore())
                          not in excludedGrades)
    return filter(keep, complianceList)
Example #25
0
def gradeAndPrint(targets, exemplarCommand, exemplarConfig, problemsOnly=False, prefix=""):
    complianceList = []
    problemList = []
    for target in targets:
        try:
            complianceList.append(SyntaxCompliance(target, exemplarCommand, exemplarConfig).check())
        except:
            problemList.append(target)
            pass
    complianceList = sorted(complianceList, key=lambda k: k.getFileName())
    if problemsOnly:
        complianceList = filter(lambda entry: entry.getScore() < 100, complianceList)
    distribution=[99,90]
    textSummary(distribution, complianceList, prefix)

    for target in problemList:
        print LongBow.buildRed("%s%s could not be evaluated" % (prefix, target))
Example #26
0
def tuplesListToPrettyText(points, distribution=None):
    '''
    Convert a list of tuples -- data points -- to a list of colorized strings based
    on the provided distribution.

    Each tuple's fields are space-joined; the last field is the score used
    to colorize the line.  `distribution` defaults to [99, 90]; the None
    sentinel replaces the mutable-list default argument, which is shared
    across calls in Python.
    '''
    if distribution is None:
        distribution = [99, 90]
    lines = []
    for point in points:
        line = " ".join(str(field) for field in point)
        lines.append(LongBow.scoreBuilder(distribution, point[-1], line))
    return lines
Example #27
0
def tuplesListToPrettyText(points, distribution=None):
    '''
    Convert a list of tuples -- data points -- to a list of colorized strings based
    on the provided distribution.

    The tuple's fields are joined with single spaces and the final field is
    the score that selects the color.  `distribution` defaults to [99, 90];
    using a None sentinel avoids the shared mutable-list default argument.
    '''
    distribution = [99, 90] if distribution is None else distribution
    return [
        LongBow.scoreBuilder(distribution, point[-1],
                             " ".join(str(item) for item in point))
        for point in points
    ]
Example #28
0
def gradeAndPrint(targets, problemsOnly=False, printPrefix=""):
    """Grade every target module's conformance and print an aligned table.

    Each row has one score (or N/A) column per conformance header and is
    colorized by the module's minimum score.  Modules that cannot be
    graded are printed in red; test sources are skipped entirely.
    """
    if len(targets) < 1:
        print "No Files To Grade"
        return

    distribution = [99, 90]
    # Column width: widest of all target names and the header label.
    maxFileNameLength = max(max(map(lambda target: len(target), targets)), len("File Name"))

    moduleConformanceSet = ModuleSetConformanceContainer()
    headers = getConformanceHeaders()
    pformat = '{prefix}{:<{maxFileNameLength}}'
    nformat = pformat
    # One right-aligned 15-wide column per conformance header.
    for header in headers:
        nformat = nformat + '{:>15}'
    print nformat.format('File Name', *headers, prefix=printPrefix, maxFileNameLength=maxFileNameLength)


    for target in targets:
        module = Module(target)
        if module.isTestSourceName():
            continue
        fileNamePrefix = module.getModuleName()
        path = module.getModulePath()
        try:
            moduleConformance = computeModuleConformance(path, fileNamePrefix)
            if not moduleConformance.processModule():
                pass
            else:
                moduleConformanceSet.addConformanceContainer(moduleConformance)
                scores = moduleConformance.getScores()
                # The minimum score decides the whole row's color.
                minScore = 100.0
                for key in scores:
                    score = scores[key]
                    if score < minScore:
                        minScore = score
                    scores[key] = '%3.1f'%score
                if problemsOnly and minScore == 100.0:
                    continue
                # Build the row in header order, N/A for missing scores.
                printVals=[]
                for hval in headers:
                    score = 'N/A'
                    if hval in scores:
                        score = scores[hval]
                    printVals.append(score)
                line = nformat.format(target, *printVals, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
                LongBow.scorePrinter(distribution, minScore, line)
        except NoObjectFileException as e:
            # Missing .o file: report in red and continue with the next target.
            eformat = pformat + "Could Not Grade: No .o file found for file"
            line =  eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
            print LongBow.buildRed(line)
            pass
        except Exception as e:
            # Any other failure: report the message in red and continue.
            eformat = pformat + "Could Not Grade: {msg}"
            line =  eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
            print LongBow.buildRed(line)
            pass
    moduleConformanceSet.analyzeConformance()
Example #29
0
def gradeAndPrint(targets, testDirs=[], problemsOnly=False, prefix=""):
    """Print a colorized coverage table for the target files.

    Returns True on success, False when no gcov results are found.
    NOTE(review): testDirs=[] is a mutable default; harmless only if
    getFilesAndTests never mutates it -- confirm.
    """
    filesAndTests = getFilesAndTests(targets, testDirs)
    newGCovResults = map(lambda fileAndTestFile: GCov.getCoverage(fileAndTestFile[1]), filesAndTests)

    summarys = GCov.computeSummary(filesAndTests, newGCovResults)
    if len(summarys) < 1:
        print "%sNo GCov Results - Please be sure to run 'make check' first" % prefix
        return False
    summarys = GCovSummary.removeTestSourceFiles(summarys)

    paths = summarys.keys()
    if problemsOnly:
        # Keep only files with less than full coverage.
        paths = filter(lambda key: summarys[key]["coverage"] < 100, paths)

    distribution=[99,90]
    # Width of the longest relative path, for column alignment.
    maximumFileLength = max(map(lambda entry: len(os.path.relpath(entry)), paths))
    format = "%s%-" + str(maximumFileLength) + "s %6s"
    print format % (prefix, "File Path", "Score")
    format = "%s%-" + str(maximumFileLength) + "s %6.2f"
    for path in sorted(paths):
        string = format % (prefix, os.path.relpath(path), summarys[path]["coverage"])
        LongBow.scorePrinter(distribution, summarys[path]["coverage"], string)

    return True
def csvFileComplexity(file):
    """Emit a colorized CSV row with the file's average complexity score."""
    rating = computeComplexityScore(file.average_CCN)
    row = "complexity,%s,,,%.2f,%.2f" % (file.filename, file.average_CCN,
                                         rating)
    LongBow.scorePrinter([90, 80], rating, row)
    return
def csvFunctionResult(file, function):
    """Write one colorized CSV complexity row; return the function's CCN."""
    ccn = function.cyclomatic_complexity
    rating = computeComplexityScore(ccn)
    LongBow.scorePrinter(
        [90, 80], rating,
        "complexity,%s,%s,%d,%d,%.2f" % (file.filename, function.name,
                                         function.start_line, ccn, rating))
    return ccn
Example #32
0
def csvFileComplexity(file):
    """Write a colorized CSV row for a file's average cyclomatic complexity."""
    averageCCN = file.average_CCN
    rating = computeComplexityScore(averageCCN)
    LongBow.scorePrinter([90, 80], rating,
                         "complexity,%s,,,%.2f,%.2f" % (file.filename,
                                                        averageCCN, rating))
    return
Example #33
0
def textAverage(distribution, complianceList):
    """Print the mean compliance score across the list, colorized."""
    total = sum(target.getScore() for target in complianceList)
    value = float(total) / float(len(complianceList))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return
Example #34
0
def textFileComplexity(file, maxFileNameLength):
    """Print one file's average complexity and score, colorized."""
    rating = computeComplexityScore(file.average_CCN)
    rowFormat = "%-" + str(maxFileNameLength) + "s %6.2f %6.2f"
    LongBow.scorePrinter([90, 80], rating,
                         rowFormat % (file.filename, file.average_CCN, rating))
    return
Example #35
0
def textTotal(distribution, complianceList):
    """Print the overall compliance percentage across every entry."""
    lineCounts = [entry.getTotalLines() for entry in complianceList]
    badCounts = [entry.getNonCompliantLines() for entry in complianceList]
    value = 100.0 - (100.0 * float(sum(badCounts)) / float(sum(lineCounts)))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return
Example #36
0
def csvFileVocabulary(file):
    """Emit a colorized CSV vocabulary row for a whole file.

    NOTE(review): the score comes from file.token_count while the printed
    column is file.average_token -- confirm the mismatch is intentional.
    """
    rating = computeVocabularyScore(file.token_count)
    row = "vocabulary,%s,,,%.2f,%.2f" % (file.filename, file.average_token,
                                         rating)
    LongBow.scorePrinter([90, 80], rating, row)
    return
Example #37
0
def csvFunctionResult(file, function):
    """Write a colorized CSV vocabulary row for one function.

    Returns the function's token count so callers can aggregate.

    Fixed: the score was computed from file.token_count even though the
    row reports (and returns) the per-function count; it now scores
    function.token_count, consistent with the printed value.
    """
    score = computeVocabularyScore(function.token_count)
    string = "vocabulary,%s,%s,%d,%d,%.2f" % (file.filename, function.name, function.start_line, function.token_count, score)

    LongBow.scorePrinter([90, 80], score, string)
    return function.token_count
Example #38
0
def exclude(args, complianceList):
    """Filter out entries whose letter grade is listed in args.exclude."""
    excludedGrades = [name.strip() for name in args.exclude.split(",")]

    def isKept(entry):
        # args.distribution is a string expression, e.g. "[99, 90]".
        grade = LongBow.score(eval(args.distribution), entry.getScore())
        return grade not in excludedGrades

    return filter(isKept, complianceList)
Example #39
0
def csvFileVocabulary(file):
    """Write one colorized CSV vocabulary row for a file.

    NOTE(review): the score uses file.token_count but the row prints
    file.average_token -- verify the mismatch is deliberate.
    """
    rating = computeVocabularyScore(file.token_count)
    LongBow.scorePrinter(
        [90, 80], rating,
        "vocabulary,%s,,,%.2f,%.2f" % (file.filename, file.average_token,
                                       rating))
    return
Example #40
0
def main():
    """Entry point for longbow-size-report: count lines of C sources.

    File names come from the command line or, with '-', from stdin.
    Prints a per-file summary and/or a grand total, as text or CSV.
    """
    desc = '''
Report on number of lines of one or more C source or header files.

Input is either from a list of files supplied as command line parameters,
or as a list of newline separated file names read from standard input.
Output is a plain text (default) or a CSV file reporting
the file name and the total number of lines in the file.

Usage:

% longbow-size-report *.[ch]

Report the number of lines in .c and .h files specified as command line parameters.

% longbow-size-report -
Read the lists of files from standard input, one file per line.

$ longbow-size-report parc_JSON.c
parc_JSON.c    239
$
$
$ echo parc_JSON.c | longbow-size-report -o csv -
parc_JSON.c,239
$
'''

    parser = argparse.ArgumentParser(prog='longbow-size-report', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)
    parser.add_argument('-', '--stdin', default=False, action="store_true", required=False, help="read the list of files from standard input.")
    parser.add_argument('-s', '--summary', default=False, action="store_true", required=False, help="display the number of lines for each file")
    parser.add_argument('-t', '--total', default=False, action="store_true", required=False, help="display the total number of lines for all files")
    parser.add_argument('-o', '--output', default="text", action="store", required=False, type=str, help="the output format: \"text\" or \"csv\"")

    parser.add_argument("files", help="Files to check", nargs="*")

    args = parser.parse_args()

    # Default to the per-file summary when neither mode was requested.
    # (Was `args.summary == False and args.total == False`.)
    if not args.summary and not args.total:
        args.summary = True

    targets = []

    if args.stdin:
        # One file name per stdin line; blank lines are ignored.
        for line in sys.stdin:
            t = line.strip()
            if len(t) > 0:
                targets.append(t)
    else:
        targets = args.files

    if len(targets) == 0:
        parser.print_usage()
        sys.exit(1)

    # [fileName, lineCount] pairs plus the grand total.
    files = map(lambda fileName: [ fileName, LongBow.countLines(fileName)], targets)
    total = sum(map(lambda element: element[1], files))

    if args.summary:
        if args.output == "text":
            textSummary(files)
        else:
            csvSummary(files)

    if args.total:
        if args.output == "text":
            textTotal(files)
        else:
            csvTotal(files)
Example #41
0
def csvScore(distribution, report):
    """Emit one colorized CSV style row: file, totals, score."""
    score = report["score"]
    row = "style,%s,%d,%d,%.2f" % (report["fileName"], report["totalLines"],
                                   report["nonCompliantLines"], score)
    LongBow.scorePrinter(distribution, score, row)
    return
Example #42
0
def textAverage(distribution, complianceList):
    """Print the mean compliance score, colorized by the distribution."""
    scoreValues = [entry.getScore() for entry in complianceList]
    mean = float(sum(scoreValues)) / float(len(complianceList))
    LongBow.scorePrinter(distribution, mean, "%.2f" % (mean))
    return
def textFileComplexity(file, maxFileNameLength):
    """Print a colorized line with a file's average CCN and its score."""
    rating = computeComplexityScore(file.average_CCN)
    line = ("%-" + str(maxFileNameLength) + "s %6.2f %6.2f") % (
        file.filename, file.average_CCN, rating)
    LongBow.scorePrinter([90, 80], rating, line)
    return
Example #44
0
def main():
    """Entry point for longbow-size-report: count lines of C sources.

    File names come from the command line or, with '-', from stdin.
    Prints a per-file summary and/or a grand total, as text or CSV.
    """
    desc = '''
Report on number of lines of one or more C source or header files.

Input is either from a list of files supplied as command line parameters,
or as a list of newline separated file names read from standard input.
Output is a plain text (default) or a CSV file reporting
the file name and the total number of lines in the file.

Usage:

% longbow-size-report *.[ch]

Report the number of lines in .c and .h files specified as command line parameters.

% longbow-size-report -
Read the lists of files from standard input, one file per line.

$ longbow-size-report parc_JSON.c
parc_JSON.c    239
$
$
$ echo parc_JSON.c | longbow-size-report -o csv -
parc_JSON.c,239
$
'''

    parser = argparse.ArgumentParser(
        prog='longbow-size-report',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=desc)
    parser.add_argument('-',
                        '--stdin',
                        default=False,
                        action="store_true",
                        required=False,
                        help="read the list of files from standard input.")
    parser.add_argument('-s',
                        '--summary',
                        default=False,
                        action="store_true",
                        required=False,
                        help="display the number of lines for each file")
    parser.add_argument('-t',
                        '--total',
                        default=False,
                        action="store_true",
                        required=False,
                        help="display the total number of lines for all files")
    parser.add_argument('-o',
                        '--output',
                        default="text",
                        action="store",
                        required=False,
                        type=str,
                        help="the output format: \"text\" or \"csv\"")

    parser.add_argument("files", help="Files to check", nargs="*")

    args = parser.parse_args()

    # Default to the per-file summary when neither mode was requested.
    # (Was `args.summary == False and args.total == False`.)
    if not args.summary and not args.total:
        args.summary = True

    targets = []

    if args.stdin:
        # One file name per stdin line; blank lines are ignored.
        for line in sys.stdin:
            t = line.strip()
            if len(t) > 0:
                targets.append(t)
    else:
        targets = args.files

    if len(targets) == 0:
        parser.print_usage()
        sys.exit(1)

    # [fileName, lineCount] pairs plus the grand total.
    files = map(
        lambda fileName: [fileName, LongBow.countLines(fileName)], targets)
    total = sum(map(lambda element: element[1], files))

    if args.summary:
        if args.output == "text":
            textSummary(files)
        else:
            csvSummary(files)

    if args.total:
        if args.output == "text":
            textTotal(files)
        else:
            csvTotal(files)
Example #45
0
def gradeAndPrint(targets, objectDirs, problemsOnly=False, printPrefix=""):
    """Grade each target module's conformance and print an aligned table.

    Each row has one score (or N/A) column per conformance header and is
    colorized by the module's minimum score.  Modules that cannot be
    graded are printed in red; test sources are skipped entirely.
    """
    if len(targets) < 1:
        print "No Files To Grade"
        return

    distribution = [99, 90]
    # Column width: widest of all target names and the header label.
    maxFileNameLength = max(max(map(lambda target: len(target), targets)),
                            len("File Name"))

    moduleConformanceSet = ModuleSetConformanceContainer()
    headers = getConformanceHeaders()
    pformat = '{prefix}{:<{maxFileNameLength}}'
    nformat = pformat
    # One right-aligned 15-wide column per conformance header.
    for header in headers:
        nformat = nformat + '{:>15}'
    print nformat.format('File Name',
                         *headers,
                         prefix=printPrefix,
                         maxFileNameLength=maxFileNameLength)

    for target in targets:
        module = Module(target, objectDirs)
        if module.isTestSourceName():
            continue
        fileNamePrefix = module.getModuleName()
        path = module.getModulePath()
        try:
            moduleConformance = computeModuleConformance(module)
            if not moduleConformance.processModule():
                pass
            else:
                moduleConformanceSet.addConformanceContainer(moduleConformance)
                scores = moduleConformance.getScores()
                # The minimum score decides the whole row's color.
                minScore = 100.0
                for key in scores:
                    score = scores[key]
                    if score < minScore:
                        minScore = score
                    scores[key] = '%3.1f' % score
                if problemsOnly and minScore == 100.0:
                    continue
                # Build the row in header order, N/A for missing scores.
                printVals = []
                for hval in headers:
                    score = 'N/A'
                    if hval in scores:
                        score = scores[hval]
                    printVals.append(score)
                line = nformat.format(target,
                                      *printVals,
                                      prefix=printPrefix,
                                      maxFileNameLength=maxFileNameLength)
                LongBow.scorePrinter(distribution, minScore, line)
        except NoObjectFileException as e:
            # Missing .o file: report in red and continue with the next target.
            eformat = pformat + "Could Not Grade: No .o file found for file"
            line = eformat.format(target,
                                  prefix=printPrefix,
                                  maxFileNameLength=maxFileNameLength,
                                  msg=e)
            print LongBow.buildRed(line)
            pass
        except Exception as e:
            # Any other failure: report the message in red and continue.
            eformat = pformat + "Could Not Grade: {msg}"
            line = eformat.format(target,
                                  prefix=printPrefix,
                                  maxFileNameLength=maxFileNameLength,
                                  msg=e)
            print LongBow.buildRed(line)
            pass
    moduleConformanceSet.analyzeConformance()
Example #46
0
def csvScore(distribution, report):
    """Write one colorized CSV style row for a report dict."""
    line = "style,%s,%d,%d,%.2f" % (report["fileName"],
                                    report["totalLines"],
                                    report["nonCompliantLines"],
                                    report["score"])
    LongBow.scorePrinter(distribution, report["score"], line)
    return