Example #1
def main(results_file):
    # Load the benchmark summary and remember the directory it came from.
    with open(results_file, 'r') as f:
        summary = json.load(f)
        summary['directory'] = os.path.dirname(results_file)

    date = datetime.datetime.fromtimestamp(
        summary['timestamp']).strftime('%Y-%m-%d-%H:%M:%S')

    token_create_rps = summary['token_creation']['requests_per_second']
    token_create_tps = summary['token_creation']['time_per_request']
    token_validate_rps = summary['token_validation']['requests_per_second']
    token_validate_tps = summary['token_validation']['time_per_request']

    index = e.HTML(
        e.HEAD(e.LINK(rel='stylesheet', type='text/css', href='theme.css'),
               e.TITLE('OpenStack Keystone Performance')),
        e.BODY(
            e.DIV(e.H1('OpenStack Keystone Performance'),
                  e.P('Published reports after each merged patch.',
                      CLASS('subtitle')),
                  id='header'),
            e.DIV(
                e.P('Last run date: ' + date),
                e.P(
                    'keystone SHA: ',
                    e.A(summary['sha'],
                        target='_blank',
                        href=KEYSTONE_LINK + summary['sha'])),
                e.P(
                    'os_keystone SHA: ',
                    e.A(summary['osa_sha'],
                        target='_blank',
                        href=OSA_LINK + summary['osa_sha'])),
                e.P(e.A('Performance Data', href=PERF_LINK, target='_blank')),
                e.DIV(CLASS('left'), e.H2('Create Token'),
                      e.P(e.STRONG(token_create_rps), ' requests per second'),
                      e.P(e.STRONG(token_create_tps), ' ms per request')),
                e.DIV(
                    CLASS('right'), e.H2('Validate Token'),
                    e.P(e.STRONG(token_validate_rps), ' requests per second'),
                    e.P(e.STRONG(token_validate_tps), ' ms per request')),
                id='content'),
            e.DIV(e.P(
                'Results provided by the ',
                e.A('OSIC Performance Bot', target='_blank', href=BOT_LINK)),
                  id='footer')))

    with open(os.path.join(summary['directory'], 'index.html'), 'w') as f:
        # tostring() returns bytes under Python 3; decode before writing
        # to the text-mode file handle.
        f.write(et.tostring(index).decode('utf-8'))
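
The function above depends on module-level imports and link constants (KEYSTONE_LINK, OSA_LINK, PERF_LINK, BOT_LINK) that are not shown. A minimal, self-contained sketch of the same lxml builder pattern, where the alias `e` and the CLASS helper are assumptions about the original module's imports:

# Self-contained sketch; `e` and CLASS are assumed to come from
# lxml.html.builder, as the attribute usage above suggests.
from lxml import etree as et
from lxml.html import builder as e
from lxml.html.builder import CLASS

page = e.HTML(
    e.HEAD(e.TITLE('OpenStack Keystone Performance')),
    e.BODY(e.DIV(e.H1('Create Token'), CLASS('left'), id='content')))

print(et.tostring(page, pretty_print=True).decode())
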
Example #2
    def insert_iref_index(self):
        # Write the heading
        self.write_heading('Index', autoAnchor='rfc.index')
        table = E.TABLE()
        # Sort iref items alphabetically and bucket them by first letter
        alpha_bucket = {}
        keys = list(self._iref_index.keys())
        keys.sort(key=str.upper)
        for key in keys:
            alpha_bucket.setdefault(key[0].upper(), []).append(key)
        for letter in sorted(alpha_bucket.keys()):
            # Add letter element
            table.append(E.TR(E.TD(E.STRONG(letter))))
            for item in sorted(alpha_bucket[letter]):
                # Add item element
                anchor = self._iref_index[item].anchor or ''
                if anchor:
                    anchor = '#' + anchor
                    td = E.TD(E.A(item, href=anchor))
                else:
                    td = E.TD(item)
                table.append(E.TR(E.TD(' '), td))
                subkeys = list(self._iref_index[item].subitems.keys())
                subkeys.sort(key=str.upper)
                for name in subkeys:
                    subitem = self._iref_index[item].subitems[name]
                    # Add subitem element
                    td = E.TD()
                    td.text = u'\u00a0\u00a0'  # Two non-breaking spaces for indentation
                    anchor = subitem.anchor or ''
                    anchor = '#' + anchor
                    td.append(E.A(name, href=anchor))
                    table.append(E.TR(E.TD(' '), td))

        self.buf.append(self._serialize(table))
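
This method assumes `self._iref_index` maps item names to objects carrying `anchor` and `subitems` attributes, inferred from the attribute access above. The first-letter bucketing can be exercised on its own; a quick sketch with invented keys:

# Standalone sketch of the first-letter bucketing used above
# (the input keys are invented for illustration).
alpha_bucket = {}
for key in sorted(['beta', 'Alpha', 'apple'], key=str.upper):
    alpha_bucket.setdefault(key[0].upper(), []).append(key)

print(alpha_bucket)  # {'A': ['Alpha', 'apple'], 'B': ['beta']}
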
Example #3
def pr_table(token, jira_url, delta):
    u"""
    Return an Element that renders all changes in `delta` as a table listing merged PRs.abs

    Arguments:
        token: The github token to access the github API with.
        jira_url: The base url of the JIRA instance to link JIRA tickets to.
        delta (VersionDelta): The AMIs to compare.
    """
    version = delta.new or delta.base
    # version.repo is expected to be a GitHub URL (github.com/<org>/<repo>).
    match = re.search(u"github.com/(?P<org>[^/]*)/(?P<repo>.*)", version.repo)
    api = GitHubAPI(match.group(u'org'), match.group(u'repo'), token)

    try:
        prs = api.get_pr_range(delta.base.sha, delta.new.sha)

        change_details = E.TABLE(
            E.CLASS(u"wrapped"),
            E.TBODY(
                E.TR(
                    E.TH(u"Merged By"),
                    E.TH(u"Author"),
                    E.TH(u"Title"),
                    E.TH(u"PR"),
                    E.TH(u"JIRA"),
                    E.TH(u"Release Notes?"),
                ), *[
                    E.TR(
                        E.TD(
                            E.A(
                                pull_request.merged_by.login,
                                href=pull_request.merged_by.html_url,
                            )),
                        E.TD(
                            E.A(
                                pull_request.user.login,
                                href=pull_request.user.html_url,
                            )),
                        E.TD(pull_request.title),
                        E.TD(
                            E.A(
                                str(pull_request.number),
                                href=pull_request.html_url,
                            )),
                        E.TD(
                            format_jira_references(jira_url,
                                                   pull_request.body)),
                        E.TD(u""),
                    ) for pull_request in sorted(
                        prs, key=lambda pr: pr.merged_by.login)
                ]))
    except Exception:  # pylint: disable=broad-except
        LOGGER.exception(u'Unable to get PRs for %r', delta)
        change_details = E.P(u"Unable to list changes")

    return SECTION(
        E.H3(u"Changes for {} (".format(delta.app),
             E.A(GITHUB_PREFIX.sub(u'', version.repo), href=version.repo),
             u")"),
        E.P(E.STRONG(u"Before: "),
            E.A(delta.base.sha, href=format_commit_url(delta.base))),
        E.P(E.STRONG(u"After: "),
            E.A(delta.new.sha, href=format_commit_url(delta.new))),
        change_details,
    )
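
The table above is built by splatting a sorted list comprehension into E.TBODY after the header row. A reduced sketch of that pattern, with invented row data:

# Reduced sketch of the header-plus-comprehension table pattern above
# (the row data is invented for illustration).
from lxml import etree
from lxml.html import builder as E

rows = [(u'alice', u'Fix the bug'), (u'bob', u'Write the docs')]
table = E.TABLE(
    E.TBODY(
        E.TR(E.TH(u'Author'), E.TH(u'Title')),
        *[E.TR(E.TD(author), E.TD(title)) for author, title in rows]))

print(etree.tostring(table, pretty_print=True).decode())
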
Example #4
    def _expand_ref(self, element):
        """ Return a list of HTML elements that represent the reference """
        if element.tag == 'xref':
            target = element.attrib.get('target', '')
            format = element.attrib.get('format', self.defaults['xref_format'])
            item = self._getItemByAnchor(target)
            if not self.indexmode:
                if not item:
                    xml2rfc.log.warn("Can't resolve xref target %s" % target)
                else:
                    item.used = True
            # Create xref from index lookup
            if not item:
                text = '[' + target + ']'
            elif format == 'none':
                text = ''
            elif format == 'counter':
                text = item.counter
            elif format == 'title':
                text = item.title.strip() if item.title else ''
            else:
                # Default
                text = item.autoName

            # following the V3 HTML -
            #  If you specify text, that is what you get.
            if element.text:
                text = element.text.rstrip()

            a = E.A(href='#' + target)
            a.attrib["class"] = "xref"
            a.text = text
            if element.tail:
                a.tail = element.tail

            return [a]

        elif element.tag == 'eref':
            target = element.attrib.get('target', '')
            if element.text:
                a = E.A(element.text, href=target)
                a.tail = element.tail
                return [a]
            else:
                sp1 = E.SPAN('<')
                a = E.A(target, href=target)
                sp2 = E.SPAN('>')
                sp2.tail = element.tail
                return [sp1, a, sp2]
        elif element.tag == 'cref':
            self.cref_counter += 1
            anchor = element.attrib.get('anchor', None)
            if anchor is None:
                anchor = 'CREF' + str(self.cref_counter)
            a = E.A('[' + anchor + ']', id=anchor)
            a.attrib['class'] = 'info'
            source = element.attrib.get('source', '')
            if source:
                source = source + ": "
            b = E.SPAN(source + (element.text or ''))
            b.attrib['class'] = 'info'
            a.append(b)
            self._indexCref(self.cref_counter, anchor)
            if element.tail:
                a.tail = element.tail
            return [a]
        elif element.tag == 'iref':
            return self._add_iref_to_index(element)
        elif element.tag == 'spanx':
            style = element.attrib.get('style', self.defaults['spanx_style'])
            text = ''
            if element.text:
                text = element.text
            elem = None
            if style == 'strong':
                elem = E.STRONG(text)
            elif style == 'verb':
                elem = E.SAMP(text)
            else:
                # Default to style=emph
                elem = E.EM(text)
            if element.tail:
                elem.tail = element.tail
            return [elem]
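
The xref branch boils down to wrapping the resolved text in an anchor that points at the target. A hedged sketch of just that step, using a hand-made element (the input XML is invented):

# Sketch of the xref rendering above on an invented element.
from lxml import etree
from lxml.html import builder as E

element = etree.fromstring('<xref target="RFC2119">MUST</xref>')
a = E.A(href='#' + element.attrib['target'])
a.attrib['class'] = 'xref'
a.text = element.text.rstrip()

print(etree.tostring(a).decode())
# roughly: <a href="#RFC2119" class="xref">MUST</a>
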
Example #5
def print_help_page():
    with open('rum_profile/help.html', 'w') as f:
        contents = E.DIV(
            E.P("""
This shows the amount of time spent on each step for one or more RUM
jobs. You should be able to use the output to gain some insight into
the performance of a single job, or to compare the running times for
two or more jobs in order to understand the effects of changes made to
the RUM code, to the input data, or to the system on which RUM was
run."""),
            E.P("""
The times for a single job are compiled by parsing the log files
after the job is finished. For each step, we consider the duration to
be from the time the script was actually invoked to the time the
script exited. This means that any latency caused by the cluster's
scheduling system will not be reflected in the times."""),
            E.P("""
"CPU time" for a step measures the total time for the step across all
chunks. If you have grouped multiple jobs together, the CPU time
reported here is the median value for all the jobs. This is intended
to show the total amount of computing time the job would take for a
typical invocation. The total CPU time for the job is the sum of all
these median values.
"""),
            E.P("""
"Wallclock time" for a step is the time of the chunk that took the
longest time to complete the step. We use the maximum value in order
to model the worst-case scenario, where one of the chunks takes much
longer than the other chunks, and becomes the limiting factor for the
running time of the whole job. If you have grouped jobs together,
wallclock time is the median value for all jobs. This is intended to
show the maximum duration of a typical invocation of the job."""),
            E.P("""
The times for all steps are highlighted in varying shades of yellow in
order to indicate each step's impact on the total running time,
relative to the other steps. The step with the longest time is pure
yellow, the step with the shortest time is pure white, and the colors
for the other steps are scaled linearly according to the running
time. This is intended to allow you to quickly identify hot spots, or
steps that took a very long time compared to other steps, and which
might benefit from optimization or parameter tuning.
"""),
            E.P("""
If you have run two or more job groups, we use the first group as a
baseline and compare the running time of all the other jobs to the
baseline. The "improvement" for a step shows the number of hours saved
relative to the baseline. The percent improvement is the improvement
divided by the total running time for the baseline job. This is
intended to show the impact that improving the running time of one
step has on the running time of the entire job. For example, suppose
the baseline job took 100 hours, 30 of which were spent on the "Run
BLAT" step, and that the "Run BLAT" step in the comparison job took
only 20 hours. The improvement is 30 - 20 = 10 hours, and the percent
improvement is (30 - 20) / 100 = 10%.  If a step in the new job is
slower, the improvement and percent improvement will be negative."""),
            E.P("""
The improvement for each step is colored green or red according to the
degree to which that step improved or degraded the performance
compared to the baseline."""),
            E.P(
                E.STRONG("Note:"), """
These numbers do not include the "preprocessing" phase at all. Prior
to RUM 2.0.3, it is difficult to determine from the log files exactly
when preprocessing begins and ends. RUM 2.0.3 and greater will
clearly include these times in the log file, so future versions of the
profiling program will be able to incorporate times for
preprocessing."""))

        help_page = template('help', contents)
        # tostring() returns bytes under Python 3; decode before writing
        # to the text-mode file handle.
        f.write(lxml.html.tostring(help_page).decode('utf-8'))
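
The `template` helper used above is not shown; a hypothetical stand-in that would satisfy the call might wrap the contents in a bare HTML shell:

# Hypothetical stand-in for the template() helper; the real project's
# implementation is not shown.
from lxml.html import builder as E

def template(title, contents):
    # Wrap the page contents in a minimal HTML document.
    return E.HTML(E.HEAD(E.TITLE(title)), E.BODY(contents))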