def _maybe_profile(event):
    """Setup profiling as requested.

    If profiling is enabled, start a profiler for this thread. If memory
    profiling is requested, save the VSS and RSS.

    If already profiling, this is a no-op.
    """
    try:
        if _profilers.profiling:
            # Already profiling - e.g. called from both start_request and
            # before_traverse, or by successive before_traverse on one
            # request.
            return
    except AttributeError:
        # The first call on a new thread cannot already be profiling.
        pass
    # If this assertion ever fails, we'll need to add code to stop the
    # profiler before discarding it, in case it is still running.
    assert _profilers.profiler is None
    actions = get_desired_profile_actions(event.request)
    if config.profiling.profile_all_requests:
        actions['callgrind'] = ''
    if config.profiling.memory_profile_log:
        actions['memory_profile_start'] = (memory(), resident())
    if actions:
        if 'sql' in actions:
            condition = actions['sql']
            if condition not in (True, False):
                condition = condition['condition']
            da.start_sql_logging(condition)
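        # Start a profiler when the request wants results shown inline
        # ('show') or asks for any dump format in available_profilers.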
        if 'show' in actions or available_profilers.intersection(actions):
            _profilers.profiler = Profiler()
            _profilers.profiler.start()
        _profilers.profiling = True
        _profilers.actions = actions


def end_request(event):
    """If profiling is turned on, save profile data for the request."""
    try:
        if not _profilers.profiling:
            return
        _profilers.profiling = False
    except AttributeError:
        # Some tests don't go through all the proper motions, like firing off
        # a start request event.  Just be quiet about it.
        return
    actions = _profilers.actions
    _profilers.actions = None
    request = event.request
    # Create a timestamp including milliseconds.
    now = datetime.fromtimestamp(da.get_request_start_time())
    timestamp = "%s.%d" % (
        now.strftime('%Y-%m-%d_%H:%M:%S'), int(now.microsecond / 1000.0))
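    # e.g. '2011-07-01_10:44:29.007'.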
    pageid = request._orig_env.get('launchpad.pageid', 'Unknown')
    oopsid = getattr(request, 'oopsid', None)
    content_type = request.response.getHeader('content-type')
    if content_type is None:
        content_type_params = {}
        is_html = False
    else:
        _major, _minor, content_type_params = parse(content_type)
        is_html = _major == 'text' and _minor == 'html'
    template_context = {
        'actions': dict((key, True) for key in actions),
        'always_log': config.profiling.profile_all_requests}
    dump_path = config.profiling.profile_dir
    if _profilers.profiler is not None:
        prof_stats = _profilers.profiler.stop()
        # Free some memory (at least for the BzrProfiler).
        _profilers.profiler = None
        if oopsid is None:
            # Log an OOPS to capture the SQL queries and other useful
            # information together with the profiling information.
            info = (ProfilingOops, None, None)
            error_utility = getUtility(IErrorReportingUtility)
            oops_report = error_utility.raising(info, request)
            oopsid = oops_report['id']
        else:
            oops_report = request.oops
        filename = '%s-%s-%s-%s' % (
            timestamp, pageid, oopsid,
            threading.current_thread().name)
        if 'callgrind' in actions:
            # The stats class looks at the filename to decide whether to
            # use callgrind syntax.
            callgrind_path = os.path.join(
                dump_path, 'callgrind.out.' + filename)
            prof_stats.save(callgrind_path, callgrind=True)
            template_context['callgrind_path'] = os.path.abspath(
                callgrind_path)
        if 'pstats' in actions:
            pstats_path = os.path.join(
                dump_path, filename + '.prof')
            prof_stats.save(pstats_path)
            template_context['pstats_path'] = os.path.abspath(
                pstats_path)
        if is_html and 'show' in actions:
            # Generate the rfc822 OOPS result (it might be nice to have
            # an HTML serializer...).
            template_context['oops'] = ''.join(
                oops_datedir_repo.serializer_rfc822.to_chunks(oops_report))
            # Generate profile summaries.
            prof_stats.strip_dirs()
            for name in ('time', 'cumulative', 'calls'):
                prof_stats.sort(name)
                f = StringIO.StringIO()
                prof_stats.pprint(file=f)
                template_context[name] = f.getvalue()
        template_context['profile_count'] = prof_stats.count
        template_context['multiple_profiles'] = prof_stats.count > 1
        # Try to free some more memory.
        del prof_stats
    # Dump memory profiling info.
    if 'memory_profile_start' in actions:
        log = open(config.profiling.memory_profile_log, 'a')
        vss_start, rss_start = actions.pop('memory_profile_start')
        vss_end, rss_end = memory(), resident()
        if oopsid is None:
            oopsid = '-'
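        # Each line records: timestamp, pageid, oopsid, request duration,
        # and the starting and ending VSS and RSS values.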
        log.write('%s %s %s %f %d %d %d %d\n' % (
            timestamp, pageid, oopsid, da.get_request_duration(),
            vss_start, rss_start, vss_end, rss_end))
        log.close()
    trace = None
    if 'sql' in actions:
        trace = da.stop_sql_logging() or ()
        # The trace is a list of dicts, each with the keys "sql" and "stack".
        # "sql" is a tuple of start time, stop time, database name (with a
        # "SQL-" prefix), sql statement and possibly a backtrace (backtraces
        # are included when the timeline storm trace is installed).  "stack" is
        # None or a tuple of filename, line number, function name, text, module
        # name, optional supplement dict, and optional info string.  The
        # supplement dict has keys 'source_url', 'line', 'column',
        # 'expression', 'warnings' (an iterable), and 'extra', any of which may
        # be None.
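        # For illustration, a step might look roughly like this after the
        # loop below unpacks it (values are hypothetical):
        #     {'id': 1,
        #      'sql': {'start': 0.0, 'stop': 0.3, 'name': 'SQL-main',
        #              'statement': 'SELECT ...', 'backtrace': None},
        #      'stack': [...], 'app_stack': [...], 'db_stack': [...]}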
        top_sql = []
        top_python = []
        triggers = {}
        ix = 1  # We display these, so start at 1, not 0.
        last_stop_time = 0
        # Local alias to avoid repeated attribute lookups in the loop.
        _heappushpop = heapq.heappushpop
        for step in trace:
            # Set up an identifier for each trace step.
            step['id'] = ix
            step['sql'] = dict(zip(
                ('start', 'stop', 'name', 'statement', 'backtrace'),
                step['sql']))
            # NB: step['sql']['backtrace'] is a string backtrace, not a live
            # stack: less flexible than what the profiler gathers, but cheaper
            # to keep alive as it holds no references.  For now, it's not
            # usable here.
            if step['stack'] is not None:
                # Divide up the stack into the more unique (app) and less
                # unique (db) bits.
                app_stack = step['app_stack'] = []
                db_stack = step['db_stack'] = []
                storm_found = False
                for f in step['stack']:
                    f_data = dict(zip(
                        ('filename', 'lineno', 'name', 'line', 'module',
                         'supplement', 'info'), f))
                    storm_found = storm_found or (
                        f_data['module'] and
                        f_data['module'].startswith('storm.'))
                    if storm_found:
                        db_stack.append(f_data)
                    else:
                        app_stack.append(f_data)
                # Begin to gather what app code is triggering the most SQL
                # calls.
                trigger_key = tuple(
                    (f['filename'], f['lineno']) for f in app_stack)
                if trigger_key not in triggers:
                    triggers[trigger_key] = []
                triggers[trigger_key].append(ix)
            # Keep the n best (n=10) SQL and Python times.
            step['python_time'] = step['sql']['start'] - last_stop_time
            step['sql_time'] = step['sql']['stop'] - step['sql']['start']
            python_data = (step['python_time'], ix, step)
            sql_data = (step['sql_time'], ix, step)
            if ix <= 10:
                top_sql.append(sql_data)
                top_python.append(python_data)
            else:
                if ix == 11:
                    heapq.heapify(top_sql)
                    heapq.heapify(top_python)
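                # heappushpop() pushes the new item and pops the smallest,
                # so each heap retains the ten largest times seen so far.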
                _heappushpop(top_sql, sql_data)
                _heappushpop(top_python, python_data)
            # Reset for next loop.
            last_stop_time = step['sql']['stop']
            ix += 1
        # Finish setting up top sql and python times.
        top_sql.sort(reverse=True)
        top_python.sort(reverse=True)
        top_sql_ids = []
        for rank, (key, ix, step) in enumerate(top_sql):
            step['sql_rank'] = rank + 1
            step['sql_class'] = (
                'sql_danger' if key > 500 else
                'sql_warning' if key > 100 else None)
            top_sql_ids.append(dict(
                value=key,
                ix=ix,
                rank=step['sql_rank'],
                cls=step['sql_class']))
        top_python_ids = []
        for rank, (key, ix, step) in enumerate(top_python):
            step['python_rank'] = rank + 1
            step['python_class'] = (
                'python_danger' if key > 500 else
                'python_warning' if key > 100 else None)
            top_python_ids.append(dict(
                value=key,
                ix=ix,
                rank=step['python_rank'],
                cls=step['python_class']))
        # Identify the repeated Python calls that generated SQL.
        triggers = sorted(
            triggers.items(), key=lambda x: len(x[1]), reverse=True)
        top_triggers = []
        for (key, ixs) in triggers:
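            # triggers is sorted by descending count, so stop at the first
            # stack that triggered only a single query.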
            if len(ixs) == 1:
                break
            info = trace[ixs[0] - 1]['app_stack'][-1].copy()
            info['indexes'] = ixs
            info['count'] = len(ixs)
            top_triggers.append(info)
        template_context.update(dict(
            sqltrace=trace,
            top_sql=top_sql_ids,
            top_python=top_python_ids,
            top_triggers=top_triggers,
            sql_count=len(trace)))
        if actions['sql'] is True:
            template_context['sql_traceback_all'] = True
        elif actions['sql'] is False:
            template_context['sql_traceback_none'] = True
        else:
            conditions = actions['sql'].copy()
            del conditions['condition']
            template_context['sql_traceback_conditions'] = conditions
    template_context['dump_path'] = os.path.abspath(dump_path)
    if actions and is_html:
        # Hack the new HTML in at the end of the page.
        encoding = content_type_params.get('charset', 'utf-8')
        try:
            added_html = template(**template_context).encode(encoding)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            error = ''.join(format_exception(*sys.exc_info(), as_html=True))
            added_html = (
                '<div class="profiling_info">' + error + '</div>')
        existing_html = request.response.consumeBody()
        e_start, e_close_body, e_end = existing_html.rpartition(
            '</body>')
        new_html = ''.join(
            (e_start, added_html, e_close_body, e_end))
        request.response.setResult(new_html)