Example #1
def generate_html_subproject_report(path,
                                    last_num,
                                    x_field='revision',
                                    starts_with=""):
    """ HTML report for a subproject set of tests.

    starts_with : the prefix of the test name

    Returns: (filename saved, HTML for a page with ALL figures in it)
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)

    # Detect if you can do figures
    dofigs = True
    try:
        plt.figure()
    except:
        dofigs = False

    # Start the HTML
    overview_html = DEFAULT_HTML_HEADER
    html = DEFAULT_HTML_HEADER
    html += """<h1>Mantid System Tests: %s</h1>""" % starts_with
    if not dofigs:
        html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""

    # ------ Find the test names of interest ----------------
    # Limit with only those tests that exist in the latest rev
    latest_rev = sqlresults.get_latest_revison()
    temp_names = list(
        sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    # Filter by their start
    test_names = []
    for name in temp_names:
        if name.startswith(starts_with):
            test_names.append(name)

    test_names.sort()

    # --------- Table with the summary of latest results --------
    html += """<h2>Latest Results Summary</h2>"""
    html += get_html_summary_table(test_names)

    # -------- Report for each test ------------------------
    for name in test_names:
        print "Plotting", name
        html += """<hr><h2>%s</h2>\n""" % name
        overview_html += """<hr><h2>%s</h2>\n""" % name

        # Default these so make_detailed_html_file() below does not raise a
        # NameError when plotting is unavailable
        divShort = ""
        divDetailed = ""
        if dofigs:
            # Only the latest X entries
            imgTagHtml = plot_runtime(False,
                                      True,
                                      path,
                                      name=name,
                                      x_field=x_field,
                                      last_num=last_num)
            divShort = plot_runtime(True,
                                    False,
                                    path,
                                    name=name,
                                    x_field=x_field,
                                    last_num=last_num)
            # Plot all svn times
            divDetailed = plot_runtime(True,
                                       False,
                                       path,
                                       name=name,
                                       x_field=x_field,
                                       last_num=None)

            html += divDetailed + "\n"
            overview_html += imgTagHtml + "\n"

        make_detailed_html_file(basedir, name, divShort, divDetailed, last_num)
        detailed_html = """<br><a href="%s.htm">Detailed test report for %s</a>
        <br><br>
        """ % (name, name)
        html += detailed_html
        overview_html += detailed_html

    html += DEFAULT_HTML_FOOTER
    overview_html += "</body></html>"

    filename = starts_with + ".htm"

    f = open(os.path.join(basedir, filename), "w")
    html = html.replace("\n", os.linesep)  # Fix line endings for windows
    f.write(html)
    f.close()

    return (filename, overview_html)
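
For orientation, a hypothetical driver for the function above (a sketch, not project code): the path, last_num, and starts_with values are illustrative, and it assumes the results database has already been selected.

# Hypothetical driver for generate_html_subproject_report (illustrative only)
if __name__ == "__main__":
    sqlresults.set_database_filename("MantidSystemTests.db")  # assumed DB file
    filename, overview = generate_html_subproject_report(
        path="Report",             # output folder, created if missing
        last_num=20,               # revisions shown in the short-range plot
        x_field="revision",        # or 'date'
        starts_with="Algorithms")  # hypothetical subproject prefix
    print "Saved subproject page:", filename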
Example #2
def generate_html_report(path, last_num, x_field='revision'):
    """Make a comprehensive HTML report of runtime history for all tests.
    Parameters
    ----------
        path :: base path to the report folder
        last_num :: in the shorter plot, how many SVN revs to show?
        x_field :: the field to use as the x-axis. 'revision' or 'date' make sense
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)

    # Make the CSS file to be used by all HTML
    make_css_file(path)

    # Figures are assumed to be available (this version has no capability probe)
    dofigs = True
    # --------- Start the HTML --------------
    html = DEFAULT_HTML_HEADER
    html += """<h1>Mantid System Tests Auto-Generated Report</h1>"""
    html += """<p><a href="overview_plot.htm">See an overview of performance plots for all tests by clicking here.</a></p> """
    if not dofigs:
        html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""

    html += """<h2>Run Environment</h2>
    %s
    """ % (make_environment_html(sqlresults.get_latest_result()))

    overview_html = ""

    # ------ Find the test names of interest ----------------
    # Limit with only those tests that exist in the latest rev
    latest_rev = sqlresults.get_latest_revison()
    test_names = list(
        sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    test_names.sort()

    # ------ Find a list of subproject names --------
    subprojects = set()
    for name in test_names:
        n = name.find(".")
        if n > 0:
            subprojects.add(name[:n])
    subprojects = list(subprojects)
    subprojects.sort()
    html += """<h2>Test Subprojects</h2>
    <big>
    <table cellpadding="10">    """

    for subproject in subprojects:
        (filename, this_overview) = generate_html_subproject_report(
            path, last_num, x_field, subproject)
        overview_html += this_overview
        html += """<tr> <td> <a href="%s">%s</a> </td> </tr>
        """ % (filename, subproject)
    html += """</table></big>"""

    # --------- Table with the summary of latest results --------
    html += """<h2>Overall Results Summary</h2>"""
    html += get_html_summary_table(test_names)

    html += DEFAULT_HTML_FOOTER

    f = open(os.path.join(basedir, "report.htm"), "w")
    html = html.replace("\n", os.linesep)  # Fix line endings for windows
    f.write(html)
    f.close()

    # -------- Overview of plots ------------
    f = open(os.path.join(basedir, "overview_plot.htm"), "w")
    overview_html = overview_html.replace(
        "\n", os.linesep)  # Fix line endings for windows
    f.write(overview_html)
    f.close()

    print "Report complete!"
Example #3
def generate_html_report(path, last_num, x_field='revision'):
    """Make a comprehensive HTML report of runtime history for all tests.
    Parameters
    ----------
        path :: base path to the report folder
        last_num :: in the shorter plot, how many SVN revs to show?
        x_field :: the field to use as the x-axis. 'revision' or 'date' make sense
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)

    # Make the CSS file to be used by all HTML
    make_css_file(path)

    # Detect if you can do figures
    dofigs = True
    try:
        figure()
    except:
        dofigs = False

    # --------- Start the HTML --------------
    html = default_html_header
    html += """<h1>Mantid System Tests Auto-Generated Report</h1>"""
    html += """<p><a href="overview_plot.htm">See an overview of performance plots for all tests by clicking here.</a></p> """
    if not dofigs:
        html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""

    html += """<h2>Run Environment</h2>
    %s
    """ % ( make_environment_html(sqlresults.get_latest_result()) )

    overview_html = ""

    # ------ Find the test names of interest ----------------
    # Limit with only those tests that exist in the latest rev
    latest_rev = sqlresults.get_latest_revison()
    test_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    test_names.sort()

    # ------ Find a list of subproject names --------
    subprojects = set()
    for name in test_names:
        n = name.find(".")
        if n > 0:
            subprojects.add( name[:n] )
    subprojects = list(subprojects)
    subprojects.sort()
    html += """<h2>Test Subprojects</h2>
    <big>
    <table cellpadding="10">    """

    for subproject in subprojects:
        (filename, this_overview) = generate_html_subproject_report(path, last_num, x_field, subproject)
        overview_html += this_overview
        html += """<tr> <td> <a href="%s">%s</a> </td> </tr>
        """ % (filename, subproject)
    html += """</table></big>"""

    # --------- Table with the summary of latest results --------
    html += """<h2>Overall Results Summary</h2>"""
    html += get_html_summary_table(test_names)

    # -------- Overall success history graphs ------------
    #if dofigs:
    #    # We report the overall success
    #    fig_path = "OverallSuccess.png"
    #    plot_success_count(type='',last_num=last_num, x_field=x_field)
    #    savefig(os.path.join(basedir, fig_path))
    #    close()
    #
    #    fig_path2 = "OverallSuccess.ALL.png"
    #    plot_success_count(type='',last_num=-1, x_field=x_field)
    #    savefig(os.path.join(basedir, fig_path2))
    #    close()
    #
    #    html += """<h2>Overall Success/Failure</h2>
    #    <img src="%s" />
    #    <img src="%s" />
    #    """ % (fig_path, fig_path2)

    html += default_html_footer

    f = open(os.path.join(basedir, "report.htm"), "w")
    html = html.replace("\n", os.linesep) # Fix line endings for windows
    f.write(html)
    f.close()

    # -------- Overview of plots ------------
    f = open(os.path.join(basedir, "overview_plot.htm"), "w")
    overview_html = overview_html.replace("\n", os.linesep) # Fix line endings for windows
    f.write(overview_html)
    f.close()

    print "Report complete!"
Example #4
def run(args):
    """ Execute the program """
    print
    print "=============== Checking For Performance Loss ====================="
    dbfile = args.db[0]

    if not os.path.exists(dbfile):
        print "Database file %s not found."
        sys.exit(1)

    # Set the database to the one given
    sqlresults.set_database_filename(dbfile)

    # Convert the arguments. Will throw if the user is stupid.
    avg = int(args.avg)
    tol = float(args.tol)
    rev = sqlresults.get_latest_revison()

    print "Comparing the average of the %d revisions before rev. %d. Tolerance of %g %%." % (avg, rev, tol)
    if args.verbose: print

    # For limiting the results
    limit = 50 * avg

    names = sqlresults.get_all_test_names("revision = %d" % rev)
    if len(names) == 0:
        print "Error! No tests found at revision number %d.\n" % rev
        sys.exit(1)

    bad_results = ""
    good_results = ""
    num_same = 0
    num_good = 0
    num_bad = 0
    num_notenoughstats = 0
    # The timing resolution is different across platforms and the faster tests
    # can cause more false positives on the lower-resolution clocks. We'll
    # up the tolerance for those taking less time than 10ms.
    timer_resolution_hi = 0.01
    timer_resolution_lo = 0.0011
    
    regression_names = []
    speedup_names = []

    for name in names:
        (r, t) = analysis.get_runtime_data(name, x_field='revision')
        r = np.array(r)
        t = np.array(t)

        # this is the timing of the current revision
        current_time = t[r == rev]
        tolerance = tol
        if current_time < timer_resolution_hi:
            # Increase the tolerance to avoid false positives
            if current_time < timer_resolution_lo:
                # Very fast tests are twitchy
                tolerance = 100.0
            else:
                tolerance = 70.0

        # Cut out any times after or = to the current rev
        t = t[r < rev]

        # Keep the latest "avg" #
        t = t[len(t)-avg:]

        if (len(t) == avg):
            # This is the average
            baseline_time = np.mean(t)

            # % speed change
            pct = ((baseline_time / current_time) - 1) * 100

            # Did we fail (slow down too much)
            if pct < -tolerance:
                regression_names.append(name)
            elif pct > tolerance:
                speedup_names.append(name)
    
    regLinks = ["http://builds.mantidproject.org/job/master_performancetests2/Master_branch_performance_tests/{}.htm".format(name) for name in regression_names]
    speedLinks = ["http://builds.mantidproject.org/job/master_performancetests2/Master_branch_performance_tests/{}.htm".format(name) for name in speedup_names]
    email = secureemail.SendEmailSecure(args.sender, args.pwd, args.recipient, regLinks, speedLinks)
    email.send()
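
The classification rule in the loop above compares the current runtime against the mean of the previous avg runtimes and converts the ratio to a percent change. A worked sketch of just that arithmetic, with made-up numbers:

# pct = ((baseline / current) - 1) * 100; negative means the test got slower.
# e.g. baseline 2.0 s, current 2.5 s -> ((2.0 / 2.5) - 1) * 100 = -20.0 %
def classify(baseline_time, current_time, tolerance):
    pct = ((baseline_time / current_time) - 1) * 100.0
    if pct < -tolerance:
        return "regression"  # slowed down beyond the tolerance
    elif pct > tolerance:
        return "speedup"     # sped up beyond the tolerance
    return "same"

assert classify(2.0, 2.5, 10.0) == "regression"
assert classify(2.0, 1.5, 10.0) == "speedup"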
Example #5
def generate_html_subproject_report(path,
                                    last_num,
                                    x_field='revision',
                                    starts_with=""):
    """ HTML report for a subproject set of tests.

    starts_with : the prefix of the test name

    Returns: (filename saved, HTML for a page with ALL figures in it)
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)

    # Detect if you can do figures
    dofigs = True
    try:
        figure()
        rcParams['axes.titlesize'] = 'small'
    except:
        dofigs = False

    # Start the HTML
    overview_html = ""

    # ------ Find the test names of interest ----------------
    # Limit with only those tests that exist in the latest rev
    latest_rev = sqlresults.get_latest_revison()
    temp_names = list(
        sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    # Filter by their start
    test_names = []
    for name in temp_names:
        if name.startswith(starts_with):
            test_names.append(name)

    test_names.sort()

    # -------- Report for each test ------------------------
    for name in test_names:
        print("Plotting", name)
        overview_html += """<hr><h2>%s</h2>\n""" % name

        # Path to the figures
        fig1 = "%s.runtime.v.revision.png" % name
        fig2 = "%s.runtime.v.revision.ALL.png" % name
        fig3 = "%s.memory.v.revision.png" % name
        fig4 = "%s.memory.v.revision.ALL.png" % name

        if dofigs:
            # Only the latest X entries
            plot_runtime(name=name, x_field=x_field, last_num=last_num)
            savefig(os.path.join(basedir, fig1))
            close()

            # Plot all svn times
            plot_runtime(name=name, x_field=x_field, last_num=-1)
            savefig(os.path.join(basedir, fig2))
            close()

            # Only the latest X entries
            plot_memory(name=name,
                        x_field=x_field,
                        y_field='memory_change',
                        last_num=last_num)
            savefig(os.path.join(basedir, fig3))
            close()

            # Plot all svn times
            plot_memory(name=name,
                        x_field=x_field,
                        y_field='memory_change',
                        last_num=-1)
            savefig(os.path.join(basedir, fig4))
            close()

            overview_html += """<img src="%s" alt="runtime vs revision number" />""" % (
                fig1)
            overview_html += """<img src="%s" alt="memory vs revision number" />\n""" % (
                fig3)

        make_detailed_html_file(basedir, name, fig1, fig2, fig3, fig4,
                                last_num)
        detailed_html = """<br><a href="%s.htm">Detailed test report for %s</a>
        <br><br>
        """ % (name, name)
        overview_html += detailed_html

    filename = starts_with + ".htm"

    return (filename, overview_html)
Example #6
def generate_html_subproject_report(path, last_num, x_field='revision', starts_with=""):
    """ HTML report for a subproject set of tests.

    starts_with : the prefix of the test name

    Returns: (filename saved, HTML for a page with ALL figures in it)
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)


    # Detect if you can do figures
    dofigs = True
    try:
        figure()
    except:
        dofigs = False

    # Start the HTML
    overview_html = ""
    html = default_html_header
    html += """<h1>Mantid System Tests: %s</h1>""" % starts_with
    if not dofigs:
        html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""


    # ------ Find the test names of interest ----------------
    # Limit with only those tests that exist in the latest rev
    latest_rev = sqlresults.get_latest_revison()
    temp_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    # Filter by their start
    test_names = []
    for name in temp_names:
        if name.startswith(starts_with):
            test_names.append(name)

    test_names.sort()

    # --------- Table with the summary of latest results --------
    html += """<h2>Latest Results Summary</h2>"""
    html += get_html_summary_table(test_names)


    # -------- Report for each test ------------------------
    for name in test_names:
        print "Plotting", name
        html += """<hr><h2>%s</h2>\n""" % name
        overview_html += """<hr><h2>%s</h2>\n""" % name

        # Path to the figures
        fig1 = "%s.runtime.v.revision.png" % name
        fig2 = "%s.runtime.v.revision.ALL.png" % name

        if dofigs:
            # Only the latest X entries
            plot_runtime(name=name,x_field=x_field,last_num=last_num)
            savefig(os.path.join(basedir, fig1))
            close()

            # Plot all svn times
            plot_runtime(name=name,x_field=x_field,last_num=-1)
            savefig(os.path.join(basedir, fig2))
            close()

            #html += """<img src="%s" alt="runtime vs revision number (latest %d entries)" />\n""" % (fig1, last_num)
            html += """<img src="%s" alt="runtime vs revision number" />\n""" % (fig2)
            overview_html +=  """<img src="%s" alt="runtime vs revision number" />\n""" % (fig1)

        make_detailed_html_file(basedir, name, fig1, fig2, last_num)
        detailed_html = """<br><a href="%s.htm">Detailed test report for %s</a>
        <br><br>
        """ % (name, name)
        html += detailed_html
        overview_html +=  detailed_html

    html += default_html_footer

    filename = starts_with + ".htm"

    f = open(os.path.join(basedir,filename), "w")
    html = html.replace("\n", os.linesep) # Fix line endings for windows
    f.write(html)
    f.close()

    return (filename, overview_html)
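
Examples #5 and #6 rely on the plot/savefig/close cycle: each figure is written straight to a PNG next to the HTML, then closed so open figures do not accumulate across the loop. A self-contained sketch of that pattern, assuming matplotlib (plot_runtime itself is project code):

import os
import matplotlib
matplotlib.use("Agg")  # render without a display, as on a build server
from matplotlib.pyplot import figure, plot, xlabel, ylabel, savefig, close

def save_runtime_png(basedir, fig_name, revisions, runtimes):
    """Write one runtime-vs-revision plot to <basedir>/<fig_name>."""
    figure()
    plot(revisions, runtimes, marker="o")
    xlabel("revision")
    ylabel("runtime (s)")
    savefig(os.path.join(basedir, fig_name))
    close()  # essential inside a loop over many tests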
Example #7
def run(args):
    """ Execute the program """
    print()
    print(
        "=============== Checking For Performance Loss =====================")
    dbfile = args.db[0]

    if not os.path.exists(dbfile):
        print("Database file %s not found.")
        sys.exit(1)

    # Set the database to the one given
    sqlresults.set_database_filename(dbfile)

    # Convert the arguments. Will throw if the user is stupid.
    avg = int(args.avg)
    tol = float(args.tol)
    rev = sqlresults.get_latest_revison()

    print(
        "Comparing the average of the %d revisions before rev. %d. Tolerance of %g %%."
        % (avg, rev, tol))
    if args.verbose: print()

    # For limiting the results
    limit = 50 * avg

    names = sqlresults.get_all_test_names("revision = %d" % rev)
    if len(names) == 0:
        print("Error! No tests found at revision number %d.\n" % rev)
        sys.exit(1)

    bad_results = ""
    good_results = ""
    num_same = 0
    num_good = 0
    num_bad = 0
    num_notenoughstats = 0
    # The timing resolution is different across platforms and the faster tests
    # can cause more false positives on the lower-resolution clocks. We'll
    # up the tolerance for those taking less time than 10ms.
    timer_resolution_hi = 0.01
    timer_resolution_lo = 0.0011

    regression_names = []
    speedup_names = []

    for name in names:
        (r, t) = analysis.get_runtime_data(name, x_field='revision')
        r = np.array(r)
        t = np.array(t)

        # this is the timing of the current revision
        current_time = t[r == rev]
        tolerance = tol
        if current_time < timer_resolution_hi:
            # Increase the tolerance to avoid false positives
            if current_time < timer_resolution_lo:
                # Very fast tests are twitchy
                tolerance = 100.0
            else:
                tolerance = 70.0

        # Cut out any times after or = to the current rev
        t = t[r < rev]

        # Keep the latest "avg" #
        t = t[len(t) - avg:]

        if (len(t) == avg):
            # This is the average
            baseline_time = np.mean(t)

            # % speed change
            pct = ((baseline_time / current_time) - 1) * 100

            # Did we fail (slow down too much)
            if pct < -tolerance:
                regression_names.append(name)
            elif pct > tolerance:
                speedup_names.append(name)

    regLinks = [
        "http://builds.mantidproject.org/job/master_performancetests2/Master_branch_performance_tests/{}.htm"
        .format(name) for name in regression_names
    ]
    speedLinks = [
        "http://builds.mantidproject.org/job/master_performancetests2/Master_branch_performance_tests/{}.htm"
        .format(name) for name in speedup_names
    ]
    email = secureemail.SendEmailSecure(args.sender, args.pwd, args.recipient,
                                        regLinks, speedLinks)
    email.send()
Example #8
def generate_html_subproject_report(path, last_num, x_field='revision', starts_with=""):
    """ HTML report for a subproject set of tests.

    starts_with : the prefix of the test name

    Returns: (filename saved, HTML for a page with ALL figures in it)
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)

    # Detect if you can do figures
    dofigs = True
    try:
        plt.figure()
    except:
        dofigs = False

    # Start the HTML
    overview_html = DEFAULT_HTML_HEADER
    html = DEFAULT_HTML_HEADER
    html += """<h1>Mantid System Tests: %s</h1>""" % starts_with
    if not dofigs:
        html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""

    # ------ Find the test names of interest ----------------
    # Limit with only those tests that exist in the latest rev
    latest_rev = sqlresults.get_latest_revison()
    temp_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    # Filter by their start
    test_names = []
    for name in temp_names:
        if name.startswith(starts_with):
            test_names.append(name)

    test_names.sort()

    # --------- Table with the summary of latest results --------
    html += """<h2>Latest Results Summary</h2>"""
    html += get_html_summary_table(test_names)

    # -------- Report for each test ------------------------
    for name in test_names:
        print "Plotting", name
        html += """<hr><h2>%s</h2>\n""" % name
        overview_html += """<hr><h2>%s</h2>\n""" % name

        # Default these so make_detailed_html_file() below does not raise a
        # NameError when plotting is unavailable
        divShort = ""
        divDetailed = ""
        if dofigs:
            # Only the latest X entries
            imgTagHtml = plot_runtime(False, True, path, name=name, x_field=x_field, last_num=last_num)
            divShort = plot_runtime(True, False, path, name=name, x_field=x_field, last_num=last_num)
            # Plot all svn times
            divDetailed = plot_runtime(True, False, path, name=name, x_field=x_field, last_num=None)

            html += divDetailed + "\n"
            overview_html += imgTagHtml + "\n"

        make_detailed_html_file(basedir, name, divShort, divDetailed, last_num)
        detailed_html = """<br><a href="%s.htm">Detailed test report for %s</a>
        <br><br>
        """ % (name, name)
        html += detailed_html
        overview_html += detailed_html

    html += DEFAULT_HTML_FOOTER
    overview_html += "</body></html>"

    filename = starts_with + ".htm"

    f = open(os.path.join(basedir, filename), "w")
    html = html.replace("\n", os.linesep)  # Fix line endings for windows
    f.write(html)
    f.close()

    return (filename, overview_html)
Example #9
def run(args):
    """ Execute the program """
    print
    print "=============== Checking For Performance Loss ====================="
    dbfile = args.db[0]

    if not os.path.exists(dbfile):
        print "Database file %s not found."
        sys.exit(1)

    # Set the database to the one given
    sqlresults.set_database_filename(dbfile)

    # Convert the arguments. Will throw if the user is stupid.
    avg = int(args.avg)
    tol = float(args.tol)
    rev = sqlresults.get_latest_revison()

    print "Comparing the average of the %d revisions before rev. %d. Tolerance of %g %%." % (
        avg, rev, tol)
    if args.verbose: print

    # For limiting the results
    limit = 50 * avg

    names = sqlresults.get_all_test_names("revision = %d" % rev)
    if len(names) == 0:
        print "Error! No tests found at revision number %d.\n" % rev
        sys.exit(1)

    bad_results = ""
    good_results = ""
    num_same = 0
    num_good = 0
    num_bad = 0
    num_notenoughstats = 0
    # The timing resolution is different across platforms and the faster tests
    # can cause more false positives on the lower-resolution clocks. We'll
    # up the tolerance for those taking less time than 10ms.
    timer_resolution_hi = 0.01
    timer_resolution_lo = 0.0011

    for name in names:
        (r, t) = analysis.get_runtime_data(name, x_field='revision')
        r = np.array(r)
        t = np.array(t)

        # this is the timing of the current revision
        current_time = t[r == rev]
        tolerance = tol
        if current_time < timer_resolution_hi:
            # Increase the tolerance to avoid false positives
            if current_time < timer_resolution_lo:
                # Very fast tests are twitchy
                tolerance = 100.0
            else:
                tolerance = 70.0
            print "%s is fast, tolerance has been increased to %f" % (
                name, tolerance)

        # Cut out any times after or = to the current rev
        t = t[r < rev]

        # Keep the latest "avg" #
        t = t[len(t) - avg:]

        if (len(t) == avg):
            # This is the average
            baseline_time = np.mean(t)

            # % speed change
            pct = ((baseline_time / current_time) - 1) * 100

            timing_str = "was %8.3f s, now %8.3f s. Speed changed by %+8.1f %%." % (
                baseline_time, current_time, pct)
            if args.verbose:
                print "%s" % name
                print "   %s" % timing_str

            # Did we fail (slow down too much)
            if pct < -tolerance:
                bad_results += "Warning! Slow down in performance test %s\n" % name
                bad_results += "    (%s)\n" % timing_str
                num_bad += 1

            # Hey you got better!
            elif pct > tolerance:
                good_results += "Congratulations! You sped up the performance of test %s\n" % name
                good_results += "    (%s)\n" % timing_str
                num_good += 1
            # No change
            else:
                num_same += 1

        else:
            # Not enough stats
            num_notenoughstats += 1
            if args.verbose:
                print "%s" % name
                print "   Insufficient statistics."

    np.random.seed()

    def waswere(num):
        if num > 1 or num == 0:
            return "were"
        else:
            return "was"

    print
    print "-------- Summary ---------"
    print
    print "Out of %d tests, %d %s the same speed, %d %s faster, and %d %s slower." % (
        len(names), num_same, waswere(num_same), num_good, waswere(num_good),
        num_bad, waswere(num_bad))
    if (num_notenoughstats > 0):
        print "%d test(s) did not have a history of %d previous revisions and were not compared." % (
            num_notenoughstats, avg)
    print
    if num_good > 0:
        print good_results
    if num_bad > 0:
        print bad_results
        quips = [
            "Bad programmer! No cookie!", "Tsk. Tsk. Tsk.", "How could you!?",
            "Now get back in there and fix it!",
            "What did you do? WHAT DID YOU DO!?!"
        ]
        print quips[np.random.randint(len(quips))]
        print
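
The baseline in Examples #9 and #10 is built by masking out the current revision and averaging the last avg earlier runtimes. A toy walk-through of that indexing with numpy, using made-up data:

import numpy as np

r = np.array([100, 101, 102, 103, 104])  # revisions
t = np.array([1.0, 1.1, 0.9, 1.0, 1.5])  # runtimes in seconds
rev, avg = 104, 3

current_time = t[r == rev]            # array([ 1.5])
prev = t[r < rev]                     # strictly earlier revisions only
prev = prev[len(prev) - avg:]         # keep the newest `avg` entries
baseline_time = np.mean(prev)         # (1.1 + 0.9 + 1.0) / 3 = 1.0
pct = ((baseline_time / current_time) - 1) * 100  # about -33 %: a slow-down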
Example #10
def run(args):
    """ Execute the program """
    print
    print "=============== Checking For Performance Loss ====================="
    dbfile = args.db[0]
    
    if not os.path.exists(dbfile):
        print "Database file %s not found."
        sys.exit(1)
    
    # Set the database to the one given
    sqlresults.set_database_filename(dbfile)
    
    # Convert the arguments. Will throw if the user is stupid.
    avg = int(args.avg)
    tol = float(args.tol)
    rev = int(args.revision[0])

    print "Comparing the average of the %d revisions before rev. %d. Tolerance of %g %%." % (avg, rev, tol)
    if args.verbose: print
    
    # For limiting the results
    limit = 50 * avg
    
    names = sqlresults.get_all_test_names("revision = %d" % rev)
    if len(names) == 0:
        print "Error! No tests found at revision number %d.\n" % rev
        sys.exit(1)
        
    bad_results = ""
    good_results = ""
    num_same = 0
    num_good = 0
    num_bad = 0
    num_notenoughstats = 0
    
    for name in names:
        (r, t) = analysis.get_runtime_data(name, x_field='revision')
        r = np.array(r)
        t = np.array(t)
        
        # this is the timing of the current revision
        current_time = t[r == rev]
        
        # Cut out any times after or = to the current rev
        t = t[r < rev]
        
        # Keep the latest "avg" #
        t = t[len(t)-avg:]
        
        if (len(t) == avg):
            # This is the average
            baseline_time = np.mean(t)
            
            # % speed change
            pct = ((baseline_time / current_time) - 1) * 100
                          
            timing_str = "was %8.3f s, now %8.3f s. Speed changed by %+8.1f %%." % (baseline_time, current_time, pct)
            if args.verbose:            
                print "%s" % name
                print "   %s" % timing_str
                
            # Did we fail (slow down too much)
            if pct < -tol:
                bad_results += "Warning! Slow down in performance test %s\n" % name
                bad_results += "    (%s)\n" % timing_str
                num_bad += 1
            
            # Hey you got better!
            elif pct > tol:
                good_results += "Congratulations! You sped up the performance of test %s\n" % name
                good_results += "    (%s)\n" % timing_str
                num_good += 1
            # No change
            else:
                num_same += 1
                
        else:
            # Not enough stats
            num_notenoughstats += 1
            if args.verbose:            
                print "%s" % name
                print "   Insufficient statistics."
            
    np.random.seed()
    
    def waswere(num):
        if num > 1 or num == 0:
            return "were"
        else:
            return "was"
    
    print
    print "-------- Summary ---------"
    print
    print "Out of %d tests, %d %s the same speed, %d %s faster, and %d %s slower." % (len(names), num_same, waswere(num_same),  num_good, waswere(num_good),  num_bad, waswere(num_bad))
    if (num_notenoughstats > 0):
        print "%d test(s) did not have a history of %d previous revisions and were not compared." % (num_notenoughstats, avg)
    print
    if num_good > 0:
        print good_results
    if num_bad > 0:
        print bad_results
        quips = ["Bad programmer! No cookie!", "Tsk. Tsk. Tsk.", "How could you!?", "Now get back in there and fix it!", "What did you do? WHAT DID YOU DO!?!"]
        print quips[np.random.randint(len(quips))]
        print 
Example #11
def generate_html_subproject_report(path, last_num, x_field='revision', starts_with=""):
    """ HTML report for a subproject set of tests.

    starts_with : the prefix of the test name

    Returns: (filename saved, HTML for a page with ALL figures in it)
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)


    # Detect if you can do figures
    dofigs = True
    try:
        figure()
        rcParams['axes.titlesize'] = 'small'
    except:
        dofigs = False

    # Start the HTML
    overview_html = ""

    # ------ Find the test names of interest ----------------
    # Limit with only those tests that exist in the latest rev
    latest_rev = sqlresults.get_latest_revison()
    temp_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    # Filter by their start
    test_names = []
    for name in temp_names:
        if name.startswith(starts_with):
            test_names.append(name)

    test_names.sort()

    # -------- Report for each test ------------------------
    for name in test_names:
        print("Plotting", name)
        overview_html += """<hr><h2>%s</h2>\n""" % name

        # Path to the figures
        fig1 = "%s.runtime.v.revision.png" % name
        fig2 = "%s.runtime.v.revision.ALL.png" % name
        fig3 = "%s.memory.v.revision.png" % name
        fig4 = "%s.memory.v.revision.ALL.png" % name

        if dofigs:
            # Only the latest X entries
            plot_runtime(name=name,x_field=x_field,last_num=last_num)
            savefig(os.path.join(basedir, fig1))
            close()

            # Plot all svn times
            plot_runtime(name=name,x_field=x_field,last_num=-1)
            savefig(os.path.join(basedir, fig2))
            close()

            # Only the latest X entries
            plot_memory(name=name,x_field=x_field,y_field='memory_change',last_num=last_num)
            savefig(os.path.join(basedir, fig3))
            close()

            # Plot all svn times
            plot_memory(name=name,x_field=x_field,y_field='memory_change',last_num=-1)
            savefig(os.path.join(basedir, fig4))
            close()

            overview_html +=  """<img src="%s" alt="runtime vs revision number" />""" % (fig1)
            overview_html +=  """<img src="%s" alt="memory vs revision number" />\n""" % (fig3)

        make_detailed_html_file(basedir, name, fig1, fig2, fig3, fig4, last_num)
        detailed_html = """<br><a href="%s.htm">Detailed test report for %s</a>
        <br><br>
        """ % (name, name)
        overview_html +=  detailed_html

    filename = starts_with + ".htm"

    return (filename, overview_html)