Example #1
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)

    insert_understand_in_path(arguments["--dllDir"])

    print ("\r\n====== srcdiffplot @ https://github.com/sglebs/srccheck ==========")
    print(arguments)
    try:
        import understand
    except Exception:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64 or equivalent")
        sys.exit(-1)
    try:
        db_before = understand.open(arguments["--before"])
    except understand.UnderstandError as exc:
        print ("Error opening input file: %s" % exc)
        sys.exit(-2)
    try:
        db_after = understand.open(arguments["--after"])
    except understand.UnderstandError as exc:
        print ("Error opening input file: %s" % exc)
        sys.exit(-2)

    print("Processing %s and %s" % (db_before.name(), db_after.name()))

    for plot_lambda in [plot_diff_file_metrics, plot_diff_class_metrics, plot_diff_routine_metrics]:
        plot_lambda(db_before, db_after, arguments)


    prj_metric_names = [metric.strip() for metric in arguments["--prjMetrics"].split(",")]
    prj_metric_names = [metric for metric in prj_metric_names if len(metric) > 0]
    all_metric_names, all_metric_values_before, all_metric_values_after, all_growth_rates = collect_metric_names_with_values_and_growth(
        db_after, db_before, prj_metric_names)
    output_dir = arguments["--outputDir"]
    file_name = os.path.split(db_before.name())[-1] + "-" + os.path.split(db_after.name())[-1] + "-diff-kiviat.png"
    absolute_file_name = "%s%s%s" % (output_dir, os.sep, file_name)
    if len (all_metric_names) > 0:
        saved_file_name = save_kiviat_with_values_and_thresholds(all_metric_names, all_metric_values_after, all_metric_values_before, absolute_file_name, "Prj Metrics", thresholdslabel="before", valueslabel="after")
        if saved_file_name is not None:
            print("Saved %s" % saved_file_name)
    print_growth_rates(all_metric_names, all_growth_rates)
    rates_by_adjusted_metric_name = {"Prj %s growth rate" % metric_name: rate for metric_name, rate in zip(all_metric_names, all_growth_rates)}
    absolute_csv_path = "%s%s%s" % (output_dir, os.sep, arguments["--outputCSV"])
    csv_ok = save_csv(absolute_csv_path, rates_by_adjusted_metric_name)
    if csv_ok:
        print("+++ Growth ratio metrics saved to %s" % absolute_csv_path)
    else:
        print("\n*** Problems creating CSV file %s" % absolute_csv_path)
    post_metrics_to_sonar(arguments, rates_by_adjusted_metric_name)
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    db_before.close()
    db_after.close()
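
The helper insert_understand_in_path used above is defined elsewhere in the project; a minimal sketch, assuming it performs the same inline path setup that Example #3 below spells out:

import os
import sys

def insert_understand_in_path(dll_dir):
    # Prepend the Understand installation directory so its native DLLs and the
    # bundled Python bindings are found before anything else on the path.
    sys.path.insert(0, dll_dir)
    os.environ["PATH"] = dll_dir + os.pathsep + os.environ["PATH"]
    sys.path.insert(0, os.path.join(dll_dir, "Python"))  # current Understand releases
    sys.path.insert(0, os.path.join(dll_dir, "python"))  # older releases used lowercase
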
Example #2
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    print(
        "\r\n====== xmlkaloi @ https://github.com/sglebs/srccheck ==========")
    print(arguments)

    adaptive = arguments.get("--adaptive", False)
    print("\r\n====== XML KALOI Metrics (%s) ==========" %
          arguments.get("--maxMetrics", False))
    max_metrics = load_json(arguments.get("--maxMetrics", False))
    xpaths = load_json(arguments.get("--xpathForEachMetric", False))
    xml = load_xml(arguments.get("--in", ""))
    print(xpaths)
    print("\r\n====== XML Metrics that failed the filters  ===========")
    [total_violation_count, current_values,
     violators] = process_xml_metrics(max_metrics, xpaths, xml)
    print("%s  (Current values: %s)" % (violators, current_values))
    if adaptive:
        write_json(arguments.get("--maxMetrics", False), current_values)
    output_dir = arguments["--outputDir"]
    absolute_csv_path = "%s%s%s" % (output_dir, os.sep,
                                    arguments["--outputCSV"])
    csv_ok = save_csv(absolute_csv_path, current_values)
    if csv_ok:
        print("+++ Metrics saved to %s" % absolute_csv_path)
    else:
        print("\n*** Problems creating CSV file %s" % absolute_csv_path)
    post_metrics_to_sonar(arguments, current_values)
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    sys.exit(total_violation_count)
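
save_csv is shared by all of these entry points and is only known here through its boolean result; a hedged sketch (the two-row name/value layout is an assumption, not the project's confirmed format):

import csv

def save_csv(csv_path, values_by_metric_name):
    # Hypothetical sketch: one header row of metric names, one row of their values.
    # Returns True on success and False on I/O problems, matching how callers test csv_ok.
    try:
        with open(csv_path, "w", newline="") as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(values_by_metric_name.keys())
            writer.writerow(values_by_metric_name.values())
        return True
    except OSError:
        return False
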
Example #3
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    dllDir = arguments["--dllDir"]
    sys.path.insert(0, dllDir)  # add the dir with the DLLs - Qt etc
    os.environ["PATH"] = dllDir + os.pathsep + os.environ["PATH"]  # prepend
    sys.path.insert(0, os.path.join(dllDir, "Python"))  # also needed, for interop with Understand's Python bindings
    sys.path.insert(0, os.path.join(dllDir, "python"))  # also needed, for older versions of Understand (which used lowercase)
    # NOTE: setting PYTHONPATH here hangs, so it is intentionally left commented out:
    # os.environ["PYTHONPATH"] = os.path.join(dllDir, "python") + os.pathsep + os.environ.get("PYTHONPATH", "")  # prepend

    print ("\r\n====== srcdiffplot by Marcio Marchini: [email protected] ==========")
    print(arguments)
    try:
        import understand
    except Exception:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64/python or equivalent")
        sys.exit(-1)
    try:
        db_before = understand.open(arguments["--before"])
    except understand.UnderstandError as exc:
        print ("Error opening input file: %s" % exc)
        sys.exit(-2)
    try:
        db_after = understand.open(arguments["--after"])
    except understand.UnderstandError as exc:
        print ("Error opening input file: %s" % exc)
        sys.exit(-2)

    print("Processing %s and %s" % (db_before.name(), db_after.name()))

    for plot_lambda in [plot_diff_file_metrics, plot_diff_class_metrics, plot_diff_routine_metrics]:
        plot_lambda(db_before, db_after, arguments)


    prj_metric_names = [metric.strip() for metric in arguments["--prjMetrics"].split(",")]
    prj_metric_names = [metric for metric in prj_metric_names if len(metric) > 0]
    all_metric_names, all_metric_values_before, all_metric_values_after, all_growth_rates = collect_metric_names_with_values_and_growth(
        db_after, db_before, prj_metric_names)
    output_dir = arguments["--outputDir"]
    file_name = os.path.split(db_before.name())[-1] + "-" + os.path.split(db_after.name())[-1] + "-diff-kiviat.png"
    absolute_file_name = "%s%s%s" % (output_dir, os.sep, file_name)
    if len (all_metric_names) > 0:
        saved_file_name = save_kiviat_with_values_and_thresholds(all_metric_names, all_metric_values_after, all_metric_values_before, absolute_file_name, "Prj Metrics", thresholdslabel="before", valueslabel="after")
        if saved_file_name is not None:
            print("Saved %s" % saved_file_name)
    print_growth_rates(all_metric_names, all_growth_rates)
    rates_by_adjusted_metric_name = {"Prj %s growth rate" % metric_name: rate for metric_name, rate in zip(all_metric_names, all_growth_rates)}
    absolute_csv_path = "%s%s%s" % (output_dir, os.sep, arguments["--outputCSV"])
    csv_ok = save_csv(absolute_csv_path, rates_by_adjusted_metric_name)
    if csv_ok:
        print("+++ Growth ratio metrics saved to %s" % absolute_csv_path)
    else:
        print("\n*** Problems creating CSV file %s" % absolute_csv_path)
    post_metrics_to_sonar(arguments, rates_by_adjusted_metric_name)
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("--------------------------------------------------")
    db_before.close()
    db_after.close()
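
collect_metric_names_with_values_and_growth returns four parallel lists; a plausible sketch, assuming Understand's db.metric() bulk lookup and a simple after/before ratio as the growth rate (the project's real formula may differ):

def collect_metric_names_with_values_and_growth(db_after, db_before, prj_metric_names):
    # Hypothetical sketch: read each requested project metric from both snapshots and
    # compute growth as after/before; metrics missing or zero in the baseline are skipped.
    metrics_before = db_before.metric(prj_metric_names)
    metrics_after = db_after.metric(prj_metric_names)
    names, values_before, values_after, growth_rates = [], [], [], []
    for name in prj_metric_names:
        before = metrics_before.get(name)
        after = metrics_after.get(name)
        if not before or after is None:
            continue
        names.append(name)
        values_before.append(before)
        values_after.append(after)
        growth_rates.append(after / before)
    return names, values_before, values_after, growth_rates
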
Example #4
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)
    sys.path.append(arguments["--dllDir"])  # add the dir with the DLL to interop with understand
    print(
        "\r\n====== srccheck by Marcio Marchini: [email protected] =========="
    )
    print(arguments)
    try:
        import understand
    except Exception:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print(
            "Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64/python or equivalent"
        )
        sys.exit(-1)
    try:
        db = understand.open(arguments["--in"])
    except understand.UnderstandError as exc:
        print("Error opening input file: %s" % exc)
        sys.exit(-2)

    adaptive = arguments.get("--adaptive", False)
    print("\r\n====== Project Metrics (%s) (%s) ==========" %
          (db.name(), db.language()[0]))
    prj_metrics = project_metrics(db, arguments)
    print_prj_metrics(prj_metrics)
    print("")
    print("\r\n====== Project Metrics that failed the filters  ===========")
    [total_violation_count, prj_tracked_metrics,
     prj_max_metrics] = process_prj_metrics(arguments, prj_metrics)
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxPrjMetrics", False),
                                 prj_tracked_metrics)
    print("")
    print("\r\n====== File Metrics that failed the filters  ===========")
    [violation_count, file_tracked_metrics,
     file_max_metrics] = process_file_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxFileMetrics"),
                                 file_tracked_metrics)
    print("")
    print("\r\n====== Class Metrics that failed the filters  ==========")
    [violation_count, class_tracked_metrics,
     class_max_metrics] = process_class_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxClassMetrics"),
                                 class_tracked_metrics)
    print("")
    print("\r\n====== Routine Metrics that failed the filters ==========")
    [violation_count, routine_tracked_metrics,
     routine_max_metrics] = process_routine_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxRoutineMetrics"),
                                 routine_tracked_metrics)
    print("")
    print("\r\n====== Publishing selected metrics  ===========")
    tracked_metrics = {}
    append_dict_with_key_prefix(tracked_metrics, prj_tracked_metrics, "Prj")
    append_dict_with_key_prefix(tracked_metrics, file_tracked_metrics, "File")
    append_dict_with_key_prefix(tracked_metrics, class_tracked_metrics,
                                "Class")
    append_dict_with_key_prefix(tracked_metrics, routine_tracked_metrics,
                                "Routine")
    max_metrics = {}
    append_dict_with_key_prefix(max_metrics, prj_max_metrics, "Prj")
    append_dict_with_key_prefix(max_metrics, file_max_metrics, "File")
    append_dict_with_key_prefix(max_metrics, class_max_metrics, "Class")
    append_dict_with_key_prefix(max_metrics, routine_max_metrics, "Routine")
    output_dir = arguments["--outputDir"]
    file_prefix = "%s%s%s" % (output_dir, os.sep, os.path.split(db.name())[-1])
    file_name = save_kiviat_of_metrics(tracked_metrics, max_metrics, arguments,
                                       file_prefix)
    print("Kiviat saved to %s" % file_name)
    absolute_csv_path = "%s%s%s" % (output_dir, os.sep,
                                    arguments["--outputCSV"])
    csv_ok = save_csv(absolute_csv_path, tracked_metrics)
    if csv_ok:
        print("+++ Metrics saved to %s" % absolute_csv_path)
    else:
        print("\n*** Problems creating CSV file %s" % absolute_csv_path)

    post_metrics_to_sonar(arguments, tracked_metrics)
    print("")
    end_time = datetime.datetime.now()
    print("\r\n--------------------------------------------------")
    print("Started : %s" % str(start_time))
    print("Finished: %s" % str(end_time))
    print("Total: %s" % str(end_time - start_time))
    print("Violations: %i" % total_violation_count)
    print("--------------------------------------------------")
    db.close()
    sys.exit(total_violation_count)
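
append_dict_with_key_prefix only exists to namespace the per-scope metric dicts before they are merged; a minimal sketch, assuming the prefix is joined to the metric name with a space (e.g. "File CountLineCode"):

def append_dict_with_key_prefix(destination, source, prefix):
    # Hypothetical sketch: copy every metric into the combined dict under a scope prefix,
    # so the same metric name can appear for Prj, File, Class and Routine without clashing.
    for key, value in source.items():
        destination["%s %s" % (prefix, key)] = value
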
Example #5
def main():
    start_time = datetime.datetime.now()
    arguments = docopt(__doc__, version=VERSION)

    insert_understand_in_path(arguments["--dllDir"])

    print ("\r\n====== srccheck @ https://github.com/sglebs/srccheck ==========")
    print(arguments)
    try:
        import understand
    except Exception:
        print("Can't find the Understand DLL. Use --dllDir=...")
        print("Please set PYTHONPATH to point to Understand's C:/Program Files/SciTools/bin/pc-win64 or equivalent")
        sys.exit(-1)
    try:
        db = understand.open(arguments["--in"])
    except understand.UnderstandError as exc:
        print ("Error opening input file: %s" % exc)
        sys.exit(-2)

    adaptive = arguments.get("--adaptive", False)
    print ("\r\n====== Project Metrics (%s) (%s) ==========" % (db.name(), db.language()[0]))
    prj_metrics = project_metrics(db, arguments)
    print_prj_metrics(prj_metrics)
    print ("")
    print ("\r\n====== Project Metrics that failed the filters  ===========")
    [total_violation_count, prj_tracked_metrics, prj_max_metrics] = process_prj_metrics(arguments, prj_metrics)
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxPrjMetrics", False), prj_tracked_metrics)
    print ("")
    print ("\r\n====== File Metrics that failed the filters  ===========")
    [violation_count, file_tracked_metrics, file_max_metrics] = process_file_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxFileMetrics"), file_tracked_metrics)
    print ("")
    print ("\r\n====== Class Metrics that failed the filters  ==========")
    [violation_count, class_tracked_metrics, class_max_metrics] = process_class_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxClassMetrics"), class_tracked_metrics)
    print ("")
    print ("\r\n====== Routine Metrics that failed the filters ==========")
    [violation_count, routine_tracked_metrics, routine_max_metrics] = process_routine_metrics(db, arguments)
    total_violation_count = total_violation_count + violation_count
    if adaptive:
        write_metrics_thresholds(arguments.get("--maxRoutineMetrics"), routine_tracked_metrics)
    print ("")
    print ("\r\n====== Publishing selected metrics  ===========")
    tracked_metrics = {}
    append_dict_with_key_prefix (tracked_metrics, prj_tracked_metrics, "Prj")
    append_dict_with_key_prefix (tracked_metrics, file_tracked_metrics, "File")
    append_dict_with_key_prefix (tracked_metrics, class_tracked_metrics, "Class")
    append_dict_with_key_prefix (tracked_metrics, routine_tracked_metrics, "Routine")
    max_metrics = {}
    append_dict_with_key_prefix (max_metrics, prj_max_metrics, "Prj")
    append_dict_with_key_prefix (max_metrics, file_max_metrics, "File")
    append_dict_with_key_prefix (max_metrics, class_max_metrics, "Class")
    append_dict_with_key_prefix (max_metrics, routine_max_metrics, "Routine")
    output_dir = arguments["--outputDir"]
    file_prefix = "%s%s%s" % (output_dir, os.sep, os.path.split(db.name())[-1])
    file_name = save_kiviat_of_metrics(tracked_metrics, max_metrics, arguments, file_prefix)
    print("Kiviat saved to %s"% file_name)
    absolute_csv_path = "%s%s%s" % (output_dir, os.sep, arguments["--outputCSV"])
    csv_ok = save_csv(absolute_csv_path, tracked_metrics)
    if csv_ok:
        print("+++ Metrics saved to %s" % absolute_csv_path)
    else:
        print ("\n*** Problems creating CSV file %s" % absolute_csv_path)

    post_metrics_to_sonar(arguments, tracked_metrics)
    print ("")
    end_time = datetime.datetime.now()
    print ("\r\n--------------------------------------------------")
    print ("Started : %s" % str(start_time))
    print ("Finished: %s" % str(end_time))
    print ("Total: %s" % str(end_time-start_time))
    print ("Violations: %i" % total_violation_count)
    print ("--------------------------------------------------")
    db.close()
    sys.exit(total_violation_count)
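
The srccheck and xmlkaloi entry points above finish with sys.exit(total_violation_count), so a build gate only needs to look at the process return code; a sketch under the assumption that srccheck is installed as a console script and that --in names the Understand database:

import subprocess
import sys

# Hypothetical CI gate: run srccheck and fail the build when metric violations are reported.
result = subprocess.run(["srccheck", "--in=myproject.und", "--outputDir=build"])
if result.returncode != 0:
    print("Quality gate failed: %d metric violation(s) reported" % result.returncode)
    sys.exit(1)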