Example #1
0
def nflxprofile_generate_flame_graph(file_path,
                                     range_start,
                                     range_end,
                                     package_name=False,
                                     flavor='standard'):
    """Generate a flame graph from a serialized nflxprofile file.

    Args:
        file_path: Path handed to get_file() to open the profile.
        range_start: Optional offset relative to the profile start time,
            or None for no lower bound.
        range_end: Optional offset relative to the profile start time,
            or None for no upper bound.
        package_name: Forwarded to get_flame_graph().
        flavor: Key into FLAVORS selecting a stack processor; unknown
            flavors fall back to StackProcessor.

    Returns:
        The flame graph structure produced by get_flame_graph().
    """
    # Open the file outside the try block: if get_file() itself raises,
    # the finally clause must not reference an unbound `f` (which would
    # raise NameError and mask the original error).
    f = get_file(file_path)
    try:
        profile = nflxprofile_pb2.Profile()
        profile.ParseFromString(f.read())
    except TypeError:
        abort(500, 'Failed to parse profile.')
    finally:
        f.close()

    stack_processor = FLAVORS.get(flavor, StackProcessor)

    # Convert relative offsets to absolute timestamps anchored at the
    # (floored) profile start time.
    start_time = profile.start_time
    if range_start is not None:
        range_start = math.floor(start_time) + range_start
    if range_end is not None:
        range_end = math.floor(start_time) + range_end

    return get_flame_graph(profile, {},
                           range_start=range_start,
                           range_end=range_end,
                           package_name=package_name,
                           stack_processor=stack_processor)
Example #2
0
    def test_nodejs1(self):
        """Flame graph for the nodejs1 fixture matches the stored JSON."""
        profile = nflxprofile_pb2.Profile()
        with open("test/fixtures/nodejs1.nflxprofile", "rb") as profile_file:
            profile.ParseFromString(profile_file.read())

        with open("test/fixtures/nodejs1.json", "r") as expected_file:
            expected = json.load(expected_file)

        flame_graph = get_flame_graph(profile, None,
                                      stack_processor=NodeJsStackProcessor)
        self.assertDictEqual(flame_graph, expected)
Example #3
0
def parse_nodes(nodes):
    """Populate nflxprofile node entries from a list of V8 node dicts.

    Entry 0 is a placeholder ('fakenode'); every input node is keyed by
    its own id. Returns the populated nodes map.
    """
    profile = nflxprofile_pb2.Profile()
    # Placeholder entry at id 0.
    profile.nodes[0].function_name = 'fakenode'
    profile.nodes[0].hit_count = 0

    for node in nodes:
        # Accessing the map entry creates it; the returned message is a
        # live reference, so mutations below stick.
        entry = profile.nodes[node['id']]
        entry.function_name = node['callFrame']['functionName']
        entry.hit_count = node.get('hitCount', 0)
        entry.libtype = ''
        children = node.get('children')
        if children:
            entry.children.extend(children)

    return profile.nodes
Example #4
0
def nflxprofile_generate_differential_flame_graph(file_path, range_start, range_end):
    """Generate a differential flame graph from a serialized nflxprofile file.

    Args:
        file_path: Path handed to get_file() to open the profile.
        range_start: Optional offset relative to the profile start time,
            or None for no lower bound.
        range_end: Optional offset relative to the profile start time,
            or None for no upper bound.

    Returns:
        The result of generate_flame_graph() over the single profile.
    """
    # Open the file outside the try block: if get_file() itself raises,
    # the finally clause must not reference an unbound `f` (which would
    # raise NameError and mask the original error).
    f = get_file(file_path)
    try:
        profile = nflxprofile_pb2.Profile()
        profile.ParseFromString(f.read())
    except TypeError:
        abort(500, 'Failed to parse profile.')
    finally:
        f.close()

    # Convert relative offsets to absolute timestamps anchored at the
    # (floored) profile start time.
    start_time = profile.start_time
    if range_start is not None:
        range_start = math.floor(start_time) + range_start
    if range_end is not None:
        range_end = math.floor(start_time) + range_end

    return generate_flame_graph([profile], [0], [None], range_start, range_end)
Example #5
0
def nflxprofile_readoffsets(file_path):
    """Read per-sample timestamps from a serialized nflxprofile file.

    Args:
        file_path: Path handed to get_file() to open the profile.

    Returns:
        A namedtuple ``(start, end, offsets)`` where ``offsets`` is the
        absolute timestamp of each sample (start_time plus the running
        sum of time_deltas).
    """
    # Open the file outside the try block: if get_file() itself raises,
    # the finally clause must not reference an unbound `f` (which would
    # raise NameError and mask the original error).
    f = get_file(file_path)
    try:
        profile = nflxprofile_pb2.Profile()
        profile.ParseFromString(f.read())
    except TypeError:
        abort(500, 'Failed to parse profile.')
    finally:
        f.close()

    offsets = []
    current_time = profile.start_time
    for delta in profile.time_deltas:
        current_time += delta
        offsets.append(current_time)

    Offsets = collections.namedtuple('offsets', ['start', 'end', 'offsets'])
    return Offsets(profile.start_time, profile.end_time, offsets)
Example #6
0
def parse(profile):
    """Convert a V8 CPU profile into an nflxprofile Profile message.

    Args:
        profile: Raw input accepted by get_cpuprofiles().

    Returns:
        An nflxprofile_pb2.Profile with nodes, samples and time deltas
        filled in; idle samples are skipped.
    """
    # TODO (mmarchini): support multiple profiles
    v8_profile = get_cpuprofiles(profile)[0]

    result = nflxprofile_pb2.Profile()
    # Synthetic root node (id 0) whose only child is the assumed real
    # root (id 1).
    result.nodes[0].function_name = 'root'
    result.nodes[0].hit_count = 0
    result.nodes[0].children.append(1)
    result.params['has_node_stack'] = 'true'

    result.start_time = v8_profile['startTime']
    # NOTE: the original assigned end_time twice; once is enough.
    result.end_time = v8_profile['endTime']

    stacks = _generate_regular_stacks(v8_profile['nodes'], 1)

    idle_ids = get_idle_ids(v8_profile['nodes'])

    # Set instead of list: O(1) membership per sample.
    seen_node_ids = set()

    result.time_deltas.append(0)
    for index, node_id in enumerate(v8_profile['samples']):
        if node_id in idle_ids:
            continue

        if node_id not in seen_node_ids:
            result.nodes[node_id].function_name = '(root)'
            result.nodes[node_id].hit_count = 0
            result.nodes[node_id].stack.extend(stacks[node_id])
            seen_node_ids.add(node_id)

        result.samples.append(node_id)
        result.time_deltas.append(v8_profile['timeDeltas'][index])
        result.nodes[node_id].hit_count += 1

    return result
Example #7
0
def parse(data, **extra_options):
    """Merge multiple V8 CPU profiles into one nflxprofile Profile.

    Node ids from each input profile are shifted by a per-profile base id
    so they never collide in the merged node map. Samples from all
    profiles are merged and sorted by absolute timestamp, and all
    timestamps are converted from microseconds to seconds at the end.

    Args:
        data: Raw input accepted by get_cpuprofiles().
        **extra_options: Forwarded to get_comm() and get_pid().

    Returns:
        An nflxprofile_pb2.Profile with per-pid samples.
    """
    v8_profiles = get_cpuprofiles(data)

    profile = nflxprofile_pb2.Profile()
    profile.nodes[0].function_name = 'root'
    profile.nodes[0].hit_count = 0
    profile.params['has_node_stack'] = 'true'
    profile.params['has_node_pid'] = 'true'
    profile.params['has_samples_pid'] = 'true'

    root_ids = []
    base_ids = []

    # First pass: overall time span plus a non-overlapping base id per
    # input profile (each base is one past the previous profile's
    # highest node id).
    profile.start_time = profile.end_time = 0
    next_base_id = 0
    for v8_profile in v8_profiles:
        if profile.start_time == 0:
            profile.start_time = v8_profile['startTime']
        if profile.end_time == 0:
            profile.end_time = v8_profile['endTime']
        profile.start_time = min(profile.start_time, v8_profile['startTime'])
        profile.end_time = max(profile.end_time, v8_profile['endTime'])
        base_ids.append(next_base_id)
        highest_id = max((node['id'] for node in v8_profile['nodes']), default=0)
        next_base_id += highest_id + 1

    samples = []

    for v8_profile_idx, v8_profile in enumerate(v8_profiles):
        comm = get_comm(v8_profile, v8_profile_idx, **extra_options)
        pid = get_pid(v8_profile, v8_profile_idx, **extra_options)
        base_id = base_ids[v8_profile_idx]

        # TODO(mmarchini): detect root instead of assuming it is 1
        root_ids.append(base_id + 1)
        stacks = _generate_regular_stacks(v8_profile['nodes'], 1)

        idle_ids = get_idle_ids(v8_profile['nodes'])

        # Set instead of list: O(1) membership per sample.
        seen_node_ids = set()

        last_timestamp = v8_profile['startTime']
        for index, node_id in enumerate(v8_profile['samples']):
            if node_id in idle_ids:
                continue

            # `stack` stays None for node ids already initialized, so the
            # node-population block below runs only once per node.
            stack = None
            if node_id not in seen_node_ids:
                stack = stacks[node_id]
                seen_node_ids.add(node_id)

            # Shift into the merged id space.
            node_id = base_id + node_id

            if stack:
                profile.nodes[node_id].function_name = comm
                profile.nodes[node_id].pid = pid
                profile.nodes[node_id].hit_count = 0
                profile.nodes[node_id].stack.extend(stack)

            last_timestamp += v8_profile['timeDeltas'][index]

            samples.append((last_timestamp, node_id, pid))
            profile.nodes[node_id].hit_count += 1

    # Merge chronologically, then convert timestamps to seconds. Capture
    # last_timestamp in microseconds BEFORE converting start_time, since
    # the delta computation below works in microseconds.
    samples = sorted(samples, key=lambda e: e[0])
    last_timestamp = profile.start_time
    profile.start_time = profile.start_time / 1000000.
    profile.end_time = profile.end_time / 1000000.
    for timestamp, node_id, pid in samples:
        profile.samples.append(node_id)
        profile.samples_pid.append(pid)
        profile.time_deltas.append((timestamp - last_timestamp) / 1000000.)
        last_timestamp = timestamp

    return profile