Example 1
                        default="revision",
                        help="Field to use as the x-axis. Default: 'revision'. Other possibilities: 'date'.")

    parser.add_argument('dbfile', metavar='DBFILE', type=str, nargs='+',
                        default=["./MantidSystemTests.db"],
                        help='Required: Path to the SQL database file(s).')

    args = parser.parse_args()
    
    # Import the manager definition
    import analysis
    import sqlresults
    
    if len(args.dbfile) > 1:
        # Several files - join them into one big .db
        dbfile = join_databases(args.dbfile)
    else:
        # Only one file - use it
        dbfile = args.dbfile[0]

    if not os.path.exists(dbfile):
        print "Error! Could not find", dbfile
        sys.exit(1)
    
    # This is where we look for the DB file
    sqlresults.set_database_filename(dbfile)
    
    # Make the report
    analysis.generate_html_report(args.path, 100, args.x_field)
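
# --- Sketch: join_databases() is called above but not defined in this
# excerpt. A minimal sketch of what it plausibly does, assuming each input
# file is an SQLite database sharing one results table; the table name
# 'TestRuns' and the merged-file location are assumptions, not taken from
# the original module.
import os
import sqlite3
import tempfile

def join_databases(filenames):
    """Merge several result .db files into one temporary SQLite file."""
    merged = os.path.join(tempfile.gettempdir(), "JoinedSystemTests.db")
    if os.path.exists(merged):
        os.remove(merged)
    out = sqlite3.connect(merged)
    # Recreate the schema and contents of the first file...
    first = sqlite3.connect(filenames[0])
    out.executescript("".join(first.iterdump()))
    first.close()
    # ...then append the rows from every remaining file (assumes row ids
    # do not collide across files).
    for fname in filenames[1:]:
        out.execute("ATTACH DATABASE ? AS src", (fname,))
        out.execute("INSERT INTO TestRuns SELECT * FROM src.TestRuns")
        out.commit()
        out.execute("DETACH DATABASE src")
    out.close()
    return merged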
Example 2
    # --------- Table with the summary of latest results --------
    html += """<h2>Overall Results Summary</h2>"""
    html += get_html_summary_table(test_names)

    html += DEFAULT_HTML_FOOTER

    f = open(os.path.join(basedir, "report.htm"), "w")
    html = html.replace("\n", os.linesep)  # Fix line endings for windows
    f.write(html)
    f.close()

    # -------- Overview of plots ------------
    f = open(os.path.join(basedir, "overview_plot.htm"), "w")
    overview_html = overview_html.replace(
        "\n", os.linesep)  # Fix line endings for windows
    f.write(overview_html)
    f.close()

    print "Report complete!"


# ============================================================================================
if __name__ == "__main__":
    sqlresults.set_database_filename("MyFakeData.db")
    # Make up some test data
    if 0:
        if os.path.exists("MyFakeData.db"): os.remove("MyFakeData.db")
        sqlresults.generate_fake_data(300)

    generate_html_report("../Report", 50)
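
# --- Sketch: the two write blocks above are equivalent to this small
# helper; a context manager guarantees the file is closed even if write()
# raises. (A refactoring sketch, not part of the original module.)
def write_html(basedir, filename, html):
    html = html.replace("\n", os.linesep)  # fix line endings for Windows
    with open(os.path.join(basedir, filename), "w") as f:
        f.write(html)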
Example 3
                        default="revision",
                        help="Field to use as the x-axis. Default: 'revision'. Other possibilities: 'date'.")

    parser.add_argument('dbfile', metavar='DBFILE', type=str, nargs='+',
                        default=["./MantidSystemTests.db"],
                        help='Required: Path to the SQL database file(s).')

    args = parser.parse_args()

    # Import the manager definition
    import analysis
    import sqlresults

    if len(args.dbfile) > 1:
        # Several files - join them into one big .db
        dbfile = join_databases(args.dbfile)
    else:
        # Only one file - use it
        dbfile = args.dbfile[0]

    if not os.path.exists(dbfile):
        print "Error! Could not find", dbfile
        sys.exit(1)

    # This is where we look for the DB file
    sqlresults.set_database_filename(dbfile)

    # Make the report
    analysis.generate_html_report(args.path, 100, args.x_field)
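
# --- Sketch: the excerpt starts mid-call, so for context here is how the
# parser is likely assembled. The description text and the '--path' option
# (implied by args.path below) are assumptions, not the original code.
import argparse

parser = argparse.ArgumentParser(description="Generate an HTML report from "
                                             "system-test result databases.")
parser.add_argument('--path', dest='path', default='./Report',
                    help="Directory to write the report into.")
parser.add_argument('--x_field', dest='x_field',
                    default="revision",
                    help="Field to use as the x-axis. Default: 'revision'. Other possibilities: 'date'.")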
Example 4
    f.write(html)
    f.close()

    # -------- Overview of plots ------------
    f = open(os.path.join(basedir, "overview_plot.htm"), "w")
    overview_html = overview_html.replace("\n", os.linesep) # Fix line endings for windows
    f.write(overview_html)
    f.close()

    print "Report complete!"




#============================================================================================
if __name__ == "__main__":
    sqlresults.set_database_filename("MyFakeData.db")
    # Make up some test data
    if 0:
        if os.path.exists("MyFakeData.db"): os.remove("MyFakeData.db")
        sqlresults.generate_fake_data(300)

    generate_html_report("../Report", 50)

#    plot_runtime(name='MyFakeTest', x_field='revision')
#    plot_runtime(name='MyFakeTest', x_field='date')
#    plot_success_count()
#    show()
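
# --- Sketch: plot_runtime() appears only in the commented-out calls above.
# A minimal matplotlib version of what it plausibly does, reusing the
# module's get_runtime_data() accessor seen in the other excerpts (an
# assumption, not the original implementation):
import matplotlib.pyplot as plt

def plot_runtime(name, x_field='revision'):
    x, t = get_runtime_data(name, x_field=x_field)
    plt.plot(x, t, 'o-')
    plt.xlabel(x_field)
    plt.ylabel('runtime (s)')
    plt.title(name)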

Example 5
def run(args):
    """ Execute the program """
    print
    print "=============== Checking For Performance Loss ====================="
    dbfile = args.db[0]

    if not os.path.exists(dbfile):
        print "Database file %s not found." % dbfile
        sys.exit(1)

    # Set the database to the one given
    sqlresults.set_database_filename(dbfile)

    # Convert the arguments; raises ValueError if they are malformed.
    avg = int(args.avg)
    tol = float(args.tol)
    rev = sqlresults.get_latest_revison()

    print "Comparing the average of the %d revisions before rev. %d. Tolerance of %g %%." % (avg, rev, tol)
    if args.verbose: print

    # For limiting the results
    limit = 50 * avg

    names = sqlresults.get_all_test_names("revision = %d" % rev)
    if len(names) == 0:
        print "Error! No tests found at revision number %d.\n" % rev
        sys.exit(1)

    bad_results = ""
    good_results = ""
    num_same = 0
    num_good = 0
    num_bad = 0
    num_notenoughstats = 0
    # The timing resolution is different across platforms and the faster tests
    # can cause more false positives on the lower-resolution clocks. We'll
    # up the tolerance for those taking less time than 10ms.
    timer_resolution_hi = 0.01
    timer_resolution_lo = 0.0011
    
    regression_names = []
    speedup_names = []

    for name in names:
        (r, t) = analysis.get_runtime_data(name, x_field='revision')
        r = np.array(r)
        t = np.array(t)

        # this is the timing of the current revision
        current_time = t[r == rev]
        tolerance = tol
        if current_time < timer_resolution_hi:
            # Increase the tolerance to avoid false positives
            if current_time < timer_resolution_lo:
                # Very fast tests are twitchy
                tolerance = 100.0
            else:
                tolerance = 70.0

        # Drop any times at or after the current rev
        t = t[r < rev]

        # Keep the latest "avg" entries
        t = t[len(t)-avg:]

        if len(t) == avg:
            # This is the average
            baseline_time = np.mean(t)

            # % speed change
            pct = ((baseline_time / current_time) - 1) * 100

            # Did we regress (slow down too much)?
            if pct < -tolerance:
                regression_names.append(name)
            elif pct > tolerance:
                speedup_names.append(name)
    
    regLinks = ["http://builds.mantidproject.org/job/master_performancetests2/Master_branch_performance_tests/{}.htm".format(name) for name in regression_names]
    speedLinks = ["http://builds.mantidproject.org/job/master_performancetests2/Master_branch_performance_tests/{}.htm".format(name) for name in speedup_names]
    email = secureemail.SendEmailSecure(args.sender, args.pwd, args.recipient, regLinks, speedLinks)
    email.send()
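
# --- Worked check of the speed-change formula used above: a test whose
# baseline mean was 1.0 s and which now takes 2.0 s gives
# ((1.0 / 2.0) - 1) * 100 = -50.0, i.e. a 50 % slow-down, so with any
# tolerance below 50 it lands in regression_names.
baseline_time, current_time = 1.0, 2.0
pct = ((baseline_time / current_time) - 1) * 100
assert pct == -50.0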
Example 6
        '--commit',
        dest='commitid',
        default="",
        help='Commit ID of the current build (a 40-character SHA string).')

    parser.add_argument('xmlpath',
                        metavar='XMLPATH',
                        type=str,
                        nargs='+',
                        default="",
                        help='Required: Path to the Xunit XML files.')

    args = parser.parse_args()

    # Set up the SQL database, but only if it does not exist
    sqlresults.set_database_filename(args.db)
    if not os.path.exists(args.db):
        sqlresults.setup_database()
    # Set up the reporter
    sql_reporter = sqlresults.SQLResultReporter()

    variables = args.variables
    # Add a new revision and get the "revision" number
    revision = sqlresults.add_revision()
    # Save the commitid
    commitid = args.commitid

    # If a directory has been provided, look there for all of the XML files
    if os.path.isdir(args.xmlpath[0]):
        xmldir = args.xmlpath[0]
        if not os.path.isabs(xmldir):
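
# --- Sketch: downstream of this excerpt the XML files are handed to
# sql_reporter. Reading one Xunit file with the standard library looks
# roughly like this; the 'testcase' tag and its attributes follow the
# common Xunit schema and are assumptions about what the reporter expects.
import xml.etree.ElementTree as ET

def read_xunit(path):
    root = ET.parse(path).getroot()
    for case in root.iter('testcase'):
        yield case.get('classname'), case.get('name'), float(case.get('time', 0.0))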
Example 7
    parser.add_argument('--variables', dest='variables', 
                        default="",
                        help='Optional string of comma-separated "VAR1NAME=VALUE,VAR2NAME=VALUE2" giving some parameters used, e.g. while building.')
    
    parser.add_argument('--commit', dest='commitid', 
                        default="",
                        help='Commit ID of the current build (a 40-character SHA string).')
    
    parser.add_argument('xmlpath', metavar='XMLPATH', type=str, nargs='+',
                        default="", 
                        help='Required: Path to the Xunit XML files.')
    
    args = parser.parse_args()
        
    # Set up the SQL database, but only if it does not exist
    sqlresults.set_database_filename(args.db)
    if not os.path.exists(args.db):
        sqlresults.setup_database()
    # Set up the reporter    
    sql_reporter = sqlresults.SQLResultReporter()
    
    variables = args.variables 
    # Add a new revision and get the "revision" number
    revision = sqlresults.add_revision()
    # Save the commitid
    commitid = args.commitid

    # If a directory has been provided, look there for all of the XML files
    if os.path.isdir(args.xmlpath[0]):
        xmldir = args.xmlpath[0]
        if not os.path.isabs(xmldir):
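
# --- Sketch: the --variables help text above implies comma-separated
# KEY=VALUE pairs; a small parser for them (an assumed helper, not shown
# in the excerpt):
def parse_variables(s):
    """'VAR1NAME=VALUE,VAR2NAME=VALUE2' -> {'VAR1NAME': 'VALUE', ...}"""
    return dict(pair.split('=', 1) for pair in s.split(',') if pair)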
Example 8
def run(args):
    """ Execute the program """
    print()
    print(
        "=============== Checking For Performance Loss =====================")
    dbfile = args.db[0]

    if not os.path.exists(dbfile):
        print("Database file %s not found." % dbfile)
        sys.exit(1)

    # Set the database to the one given
    sqlresults.set_database_filename(dbfile)

    # Convert the arguments; raises ValueError if they are malformed.
    avg = int(args.avg)
    tol = float(args.tol)
    rev = sqlresults.get_latest_revison()

    print(
        "Comparing the average of the %d revisions before rev. %d. Tolerance of %g %%."
        % (avg, rev, tol))
    if args.verbose: print()

    # For limiting the results
    limit = 50 * avg

    names = sqlresults.get_all_test_names("revision = %d" % rev)
    if len(names) == 0:
        print("Error! No tests found at revision number %d.\n" % rev)
        sys.exit(1)

    bad_results = ""
    good_results = ""
    num_same = 0
    num_good = 0
    num_bad = 0
    num_notenoughstats = 0
    # The timing resolution is different across platforms and the faster tests
    # can cause more false positives on the lower-resolution clocks. We'll
    # up the tolerance for those taking less time than 10ms.
    timer_resolution_hi = 0.01
    timer_resolution_lo = 0.0011

    regression_names = []
    speedup_names = []

    for name in names:
        (r, t) = analysis.get_runtime_data(name, x_field='revision')
        r = np.array(r)
        t = np.array(t)

        # this is the timing of the current revision
        current_time = t[r == rev]
        tolerance = tol
        if current_time < timer_resolution_hi:
            # Increase the tolerance to avoid false positives
            if current_time < timer_resolution_lo:
                # Very fast tests are twitchy
                tolerance = 100.0
            else:
                tolerance = 70.0

        # Drop any times at or after the current rev
        t = t[r < rev]

        # Keep the latest "avg" entries
        t = t[len(t) - avg:]

        if len(t) == avg:
            # This is the average
            baseline_time = np.mean(t)

            # % speed change
            pct = ((baseline_time / current_time) - 1) * 100

            # Did we regress (slow down too much)?
            if pct < -tolerance:
                regression_names.append(name)
            elif pct > tolerance:
                speedup_names.append(name)

    regLinks = [
        "http://builds.mantidproject.org/job/master_performancetests2/Master_branch_performance_tests/{}.htm"
        .format(name) for name in regression_names
    ]
    speedLinks = [
        "http://builds.mantidproject.org/job/master_performancetests2/Master_branch_performance_tests/{}.htm"
        .format(name) for name in speedup_names
    ]
    email = secureemail.SendEmailSecure(args.sender, args.pwd, args.recipient,
                                        regLinks, speedLinks)
    email.send()
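
# --- Sketch: the tolerance ladder above, restated as a helper for clarity
# (a refactoring sketch, not part of the original module):
def effective_tolerance(current_time, tol, hi=0.01, lo=0.0011):
    """Widen the tolerance for timings near the clock resolution."""
    if current_time < lo:
        return 100.0  # very fast tests are twitchy
    if current_time < hi:
        return 70.0
    return tol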
Example 9
def run(args):
    """ Execute the program """
    print
    print "=============== Checking For Performance Loss ====================="
    dbfile = args.db[0]

    if not os.path.exists(dbfile):
        print "Database file %s not found." % dbfile
        sys.exit(1)

    # Set the database to the one given
    sqlresults.set_database_filename(dbfile)

    # Convert the arguments; raises ValueError if they are malformed.
    avg = int(args.avg)
    tol = float(args.tol)
    rev = sqlresults.get_latest_revison()

    print "Comparing the average of the %d revisions before rev. %d. Tolerance of %g %%." % (
        avg, rev, tol)
    if args.verbose: print

    # For limiting the results
    limit = 50 * avg

    names = sqlresults.get_all_test_names("revision = %d" % rev)
    if len(names) == 0:
        print "Error! No tests found at revision number %d.\n" % rev
        sys.exit(1)

    bad_results = ""
    good_results = ""
    num_same = 0
    num_good = 0
    num_bad = 0
    num_notenoughstats = 0
    # The timing resolution is different across platforms and the faster tests
    # can cause more false positives on the lower-resolution clocks. We'll
    # up the tolerance for those taking less time than 10ms.
    timer_resolution_hi = 0.01
    timer_resolution_lo = 0.0011

    for name in names:
        (r, t) = analysis.get_runtime_data(name, x_field='revision')
        r = np.array(r)
        t = np.array(t)

        # this is the timing of the current revision
        current_time = t[r == rev]
        tolerance = tol
        if current_time < timer_resolution_hi:
            # Increase the tolerance to avoid false positives
            if current_time < timer_resolution_lo:
                # Very fast tests are twitchy
                tolerance = 100.0
            else:
                tolerance = 70.0
            print "%s is fast, tolerance has been increased to %f" % (
                name, tolerance)

        # Drop any times at or after the current rev
        t = t[r < rev]

        # Keep the latest "avg" entries
        t = t[len(t) - avg:]

        if len(t) == avg:
            # This is the average
            baseline_time = np.mean(t)

            # % speed change
            pct = ((baseline_time / current_time) - 1) * 100

            timing_str = "was %8.3f s, now %8.3f s. Speed changed by %+8.1f %%." % (
                baseline_time, current_time, pct)
            if args.verbose:
                print "%s" % name
                print "   %s" % timing_str

            # Did we regress (slow down too much)?
            if pct < -tolerance:
                bad_results += "Warning! Slow down in performance test %s\n" % name
                bad_results += "    (%s)\n" % timing_str
                num_bad += 1

            # Hey you got better!
            elif pct > tolerance:
                good_results += "Congratulations! You sped up the performance of test %s\n" % name
                good_results += "    (%s)\n" % timing_str
                num_good += 1
            # No change
            else:
                num_same += 1

        else:
            # Not enough stats
            num_notenoughstats += 1
            if args.verbose:
                print "%s" % name
                print "   Insufficient statistics."

    np.random.seed()

    def waswere(num):
        if num > 1 or num == 0:
            return "were"
        else:
            return "was"

    print
    print "-------- Summary ---------"
    print
    print "Out of %d tests, %d %s the same speed, %d %s faster, and %d %s slower." % (
        len(names), num_same, waswere(num_same), num_good, waswere(num_good),
        num_bad, waswere(num_bad))
    if num_notenoughstats > 0:
        print "%d test(s) did not have a history of %d previous revisions and were not compared." % (
            num_notenoughstats, avg)
    print
    if num_good > 0:
        print good_results
    if num_bad > 0:
        print bad_results
        quips = [
            "Bad programmer! No cookie!", "Tsk. Tsk. Tsk.", "How could you!?",
            "Now get back in there and fix it!",
            "What did you do? WHAT DID YOU DO!?!"
        ]
        print quips[np.random.randint(len(quips))]
        print
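
# --- Sketch: a hypothetical local invocation of run(); the attribute names
# mirror exactly the fields accessed above (db, avg, tol, verbose) and the
# database path is a placeholder.
import argparse

if __name__ == "__main__":
    fake_args = argparse.Namespace(db=["./MantidSystemTests.db"], avg=5,
                                   tol=10.0, verbose=True)
    run(fake_args)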
Example 10
def run(args):
    """ Execute the program """
    print
    print "=============== Checking For Performance Loss ====================="
    dbfile = args.db[0]
    
    if not os.path.exists(dbfile):
        print "Database file %s not found." % dbfile
        sys.exit(1)

    # Set the database to the one given
    sqlresults.set_database_filename(dbfile)

    # Convert the arguments; raises ValueError if they are malformed.
    avg = int(args.avg)
    tol = float(args.tol)
    rev = int(args.revision[0])

    print "Comparing the average of the %d revisions before rev. %d. Tolerance of %g %%." % (avg, rev, tol)
    if args.verbose: print
    
    # For limiting the results
    limit = 50 * avg
    
    names = sqlresults.get_all_test_names("revision = %d" % rev)
    if len(names) == 0:
        print "Error! No tests found at revision number %d.\n" % rev
        sys.exit(1)
        
    bad_results = ""
    good_results = ""
    num_same = 0
    num_good = 0
    num_bad = 0
    num_notenoughstats = 0
    
    for name in names:
        (r, t) = analysis.get_runtime_data(name, x_field='revision')
        r = np.array(r)
        t = np.array(t)
        
        # this is the timing of the current revision
        current_time = t[r == rev]
        
        # Drop any times at or after the current rev
        t = t[r < rev]

        # Keep the latest "avg" entries
        t = t[len(t)-avg:]

        if len(t) == avg:
            # This is the average
            baseline_time = np.mean(t)
            
            # % speed change
            pct = ((baseline_time / current_time) - 1) * 100
                          
            timing_str = "was %8.3f s, now %8.3f s. Speed changed by %+8.1f %%." % (baseline_time, current_time, pct)
            if args.verbose:            
                print "%s" % name
                print "   %s" % timing_str
                
            # Did we regress (slow down too much)?
            if pct < -tol:
                bad_results += "Warning! Slow down in performance test %s\n" % name
                bad_results += "    (%s)\n" % timing_str
                num_bad += 1
            
            # Hey you got better!
            elif pct > tol:
                good_results += "Congratulations! You sped up the performance of test %s\n" % name
                good_results += "    (%s)\n" % timing_str
                num_good += 1
            # No change
            else:
                num_same += 1
                
        else:
            # Not enough stats
            num_notenoughstats += 1
            if args.verbose:            
                print "%s" % name
                print "   Insufficient statistics."
            
    np.random.seed()
    
    def waswere(num):
        if num > 1 or num == 0:
            return "were"
        else:
            return "was"
    
    print
    print "-------- Summary ---------"
    print
    print "Out of %d tests, %d %s the same speed, %d %s faster, and %d %s slower." % (len(names), num_same, waswere(num_same),  num_good, waswere(num_good),  num_bad, waswere(num_bad))
    if num_notenoughstats > 0:
        print "%d test(s) did not have a history of %d previous revisions and were not compared." % (num_notenoughstats, avg)
    print
    if num_good > 0:
        print good_results
    if num_bad > 0:
        print bad_results
        quips = ["Bad programmer! No cookie!", "Tsk. Tsk. Tsk.", "How could you!?", "Now get back in there and fix it!", "What did you do? WHAT DID YOU DO!?!"]
        print quips[np.random.randint(len(quips))]
        print
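
# --- Note on the masked indexing used above: t[r == rev] returns a
# length-1 NumPy array, not a scalar, so the later arithmetic broadcasts.
# A tiny standalone illustration:
import numpy as np

r = np.array([10, 11, 12])
t = np.array([1.00, 1.10, 1.25])
current_time = t[r == 12]                       # array([ 1.25])
pct = ((np.mean(t[r < 12]) / current_time) - 1) * 100
print pct                                       # [-16.], still an array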