Example #1
def parse_ftrace(filename, callback):
    fid = ftrace_open(filename)
    progress = ProgressDialog(title="ftrace",
                              message="loading %s..." %
                              (os.path.basename(filename)),
                              max=100,
                              show_time=True,
                              can_cancel=True)
    progress.open()
    try:
        fid.seek(0, 2)
    except ValueError:
        # gzip does not support seeking to the end,
        # so we uncompress everything instead. :-/
        # Parsing is already far slower than uncompressing.
        while fid.read(1024):
            pass
    totsize = fid.tell()
    fid.seek(0, 0)
    last_percent = 0
    # the base regular expressions
    event_re = re.compile(
        r'\s*(.+)-([0-9]+)\s+\[([0-9]+)\][^:]*\s+([0-9.]+): ([^:]*): (.*)')
    function_re = re.compile(
        r'\s*(.+)-([0-9]+)\s+\[([0-9]+)\][^:]*\s+([0-9.]+): (.*) <-(.*)')
    last_timestamp = 0
    linenumber = 0
    for line in fid:
        ### print("DEBUG LINE: ",linenumber, line)
        percent = int(fid.tell() * 100. / totsize)
        if percent != last_percent:
            last_percent = percent
            (cont, skip) = progress.update(percent)
            if not cont or skip:
                break
        linenumber += 1
        line = line.rstrip().decode('utf-8')
        res = event_re.match(line)
        if res:
            groups = res.groups()
            event_name = groups[4]
            event = {
                'linenumber': linenumber,
                'common_comm': groups[0],
                'common_pid': int(groups[1]),
                'common_cpu': int(groups[2]),
                'timestamp': int(float(groups[3]) * 1000000),
                'event': event_name,
                'event_arg': groups[5]
            }
            last_timestamp = event['timestamp']
            to_match = event['event_arg']
            try:
                for name, regex, func in events_re[event_name]:
                    res = regex.search(to_match)
                    if res:
                        func(event, res.groups())
            except KeyError:
                pass
            callback(Event(event))
            continue

        # function-trace lines are matched with the second pattern
        res = function_re.match(line)
        if res:
            event = {
                'linenumber': linenumber,
                'common_comm': res.group(1),
                'common_pid': int(res.group(2)),
                'common_cpu': int(res.group(3)),
                'timestamp': int(float(res.group(4)) * 1000000),
                'event': 'function',
                'callee': res.group(5),
                'caller': res.group(6)
            }
            callback(Event(event))
            continue
    fid.close()
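
A minimal, self-contained sketch (not part of the excerpt above) showing what the event regular expression captures from a single ftrace line; the sample line, its field values, and the unpacked variable names are illustrative assumptions only:

import re

# Hypothetical ftrace line, used only to illustrate the captured groups.
sample = "            bash-1234  [002]  5240.123456: sched_wakeup: comm=foo pid=42 prio=120"

event_re = re.compile(
    r'\s*(.+)-([0-9]+)\s+\[([0-9]+)\][^:]*\s+([0-9.]+): ([^:]*): (.*)')

m = event_re.match(sample)
if m:
    comm, pid, cpu, ts, name, arg = m.groups()
    # comm='bash', pid='1234', cpu='002', ts='5240.123456',
    # name='sched_wakeup', arg='comm=foo pid=42 prio=120'
    print(comm, pid, cpu, ts, name, arg)
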
Example #2
    def finish_parsing(self):
        # put the generated data into fixed-size numpy arrays
        c_states = []
        for i, tc in enumerate(self.tmp_c_states):
            t = tcIdleState(name='cpu%d' % i)
            # close any still-open idle interval at the last start timestamp
            while len(tc['start_ts']) > len(tc['end_ts']):
                tc['end_ts'].append(tc['start_ts'][-1])
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            c_states.append(t)
        self.c_states = c_states
        p_states = []
        for i, tc in enumerate(self.tmp_p_states):
            t = tcFrequencyState(name='cpu%d' % i)
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            p_states.append(t)
        self.wake_events = numpy.array(self.wake_events,
                                       dtype=[('waker', tuple), ('wakee', tuple),
                                              ('time', 'uint64')])
        self.p_states = p_states
        processes = []
        last_ts = 0
        for pid,comm in self.tmp_process:
            tc = self.tmp_process[pid,comm]
            if len(tc['end_ts'])>0 and last_ts < tc['end_ts'][-1]:
                last_ts = tc['end_ts'][-1]
        if len(self.tmp_process) > 0:
            progress = ProgressDialog(title="precomputing data",
                                      message="precomputing overview data...",
                                      max=len(self.tmp_process),
                                      show_time=False, can_cancel=False)
            progress.open()
        i = 0
        for pid,comm in self.tmp_process:
            tc = self.tmp_process[pid,comm]
            if tc['type'] in self.process_types:
                klass, order = self.process_types[tc['type']]
                t = klass(pid=pid,comm=tc['comm'],project=self)
            else:
                t = tcProcess(pid=pid,comm=comm,project=self)
            while len(tc['start_ts'])>len(tc['end_ts']):
                tc['end_ts'].append(last_ts)
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            t.cpus = numpy.array(tc['cpus'])
            t.comments = tc['comments'] #numpy.array(tc['comments'])
            t.process_type = tc["type"]
            # precompute 16 levels of overview cache
            t.get_overview_ts(1<<16)
            processes.append(t)
            progress.update(i)
            i += 1
        if len(self.tmp_process) > 0:
            progress.close()
            self.tmp_process = []
        def process_sort_key(p):
            # sort processes by type order, then pid, then comm;
            # processes with an unknown type sort last
            try:
                type_index = self.process_types[p.process_type][1]
            except KeyError:
                type_index = len(self.process_types) + 1
            return (type_index, p.pid, p.comm)

        processes.sort(key=process_sort_key)
        self.processes = processes
        self.tmp_c_states = []
        self.tmp_p_states = []
        self.tmp_process = {}
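
For context, a minimal standalone sketch (with hypothetical data, not taken from the project) of the padding step used above: intervals whose end timestamp was never recorded are padded with the last known timestamp so that both lists can become equal-length numpy arrays.

import numpy

# Hypothetical interval data: the third interval was never closed.
tc = {'start_ts': [10, 20, 30], 'end_ts': [15, 25]}
last_ts = 40

# Pad end_ts until it is as long as start_ts, as finish_parsing() does.
while len(tc['start_ts']) > len(tc['end_ts']):
    tc['end_ts'].append(last_ts)

start_ts = numpy.array(tc['start_ts'])
end_ts = numpy.array(tc['end_ts'])
print(end_ts - start_ts)  # per-interval durations: [ 5  5 10]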