Example 1
class tcProcess(tcGeneric):
    name = Property(String) # override TimeChart
    # start_ts=CArray # inherited from TimeChart
    # end_ts=CArray # inherited from TimeChart
    # values = CArray   # inherited from TimeChart
    pid = Long
    ppid = Long
    selection_time = Long(0)
    selection_pc = Float(0)
    comm = String
    cpus = CArray
    comments = []
    has_comments = Bool(True)
    show = Bool(True)
    process_type = String
    project = None
    @cached_property
    def _get_name(self):
        return "%s:%d (%s)"%(self.comm,self.pid, _pretty_time(self.total_time))

    def get_comment(self, i):
        if len(self.comments) > i:
            return "%s" % (self.comments[int(i)])
        elif len(self.cpus) > i:
            return "%d" % (self.cpus[int(i)])
        else:
            return ""
    @cached_property
    def _get_max_latency(self):
        #print("DEBUG -- skip max_latency --- ", __file__)
        from timechart import raise_to_debug
        raise_to_debug(1, __file__)

        if self.pid==0 and self.comm.startswith("irq"):
            return 1000
        else:
            return 10

    @cached_property
    def _get_max_latency_ts(self):
        #print("DEBUG -- skip max_latency_ts ", __file__)
        from timechart import raise_to_debug
        raise_to_debug(1, __file__)

        if self.max_latency > 0:
            indices = np.nonzero((self.end_ts - self.start_ts) > self.max_latency)[0]
            return np.array(sorted([self.start_ts[i] for i in indices]))
        return np.array([])

    @cached_property
    def _get_default_bg_color(self):
        #if self.max_latency >0 and max(self.end_ts - self.start_ts)>self.max_latency:
        if True:
            return (1,.1,.1,1)
        return colors.get_traits_color_by_name(self.process_type+"_bg")

    def _get_bg_color(self):
        if self.project is not None and self in self.project.selected:
            return colors.get_traits_color_by_name("selected_bg")
        return self.default_bg_color
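
# A minimal, self-contained sketch (not part of the original timechart code)
# showing the Property / _get_xxx / @cached_property pattern used above:
# 'name' is a Property computed by _get_name and cached until one of the
# traits it depends on changes.
from traits.api import HasTraits, Int, Property, Str, cached_property

class MiniProcess(HasTraits):
    comm = Str()
    pid = Int()
    name = Property(Str, depends_on=['comm', 'pid'])

    @cached_property
    def _get_name(self):
        return "%s:%d" % (self.comm, self.pid)

# MiniProcess(comm='bash', pid=4242).name  ->  'bash:4242'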
Example 2
class TestTraits(HasTraits):
    b = Bool(False)
    i = Int(7)
    l = Long(12345678901234567890)
    f = Float(math.pi)
    c = Complex(complex(1.01234, 2.3))
    n = Any
    s = Str('String')
    u = Unicode(u'Unicode')
    inst = Instance(A)
    tuple = Tuple
    list = List
    pure_list = List(list(range(5)))
    dict = Dict
    numeric = Array(value=numpy.ones((2, 2, 2), 'f'))
    ref = Array
    if TVTK_AVAILABLE:
        _tvtk = Instance(tvtk.Property, ())

    def __init__(self):
        # initialize the HasTraits machinery before assigning trait values
        super(TestTraits, self).__init__()
        self.inst = A()
        self.tuple = (1, 2, 'a', A())
        self.list = [1, 1.1, 'a', 1j, self.inst]
        self.dict = {'a': 1, 'b': 2, 'ref': self.inst}
        self.ref = self.numeric
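
# Hedged usage sketch (assumes the class A and the imports used above):
# tt = TestTraits()
# print(tt.i, tt.f)        # 7 3.141592653589793
# print(tt.numeric.shape)  # (2, 2, 2)
# print(sorted(tt.dict))   # ['a', 'b', 'ref']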
Example 3
class PickedData(HasTraits):
    """This class stores the picked data."""

    # Was there a valid picked point?
    valid = Trait(false_bool_trait,
                  desc='specifies the validity of the pick event')
    # Id of picked point (-1 implies none was picked)
    point_id = Long(-1, desc='the picked point ID')
    # Id of picked cell (-1 implies none was picked)
    cell_id = Long(-1, desc='the picked cell ID')
    # World pick -- this has no ID.
    world_pick = Trait(false_bool_trait,
                       desc='specifies if the pick is a world pick.')
    # Coordinate of picked point.
    coordinate = Array('d', (3,), labels=['x', 'y', 'z'], cols=3,
                       desc='the coordinate of the picked point')

    # The picked data -- usually a tvtk.PointData or tvtk.CellData of
    # the object picked.  The user can use this data and extract any
    # necessary values.
    data = Any
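
# Hedged usage sketch (a picker would normally fill these fields; the
# 'point_data' object is hypothetical, e.g. a tvtk.PointData instance):
# picked = PickedData()
# picked.point_id = 42
# picked.coordinate = [0.0, 1.0, 2.0]  # validated by the Array('d', (3,)) trait
# picked.data = point_data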
Example 4
class nidaq_import(time_data_import):
    """
    This class provides an interface for importing measurement data
    using NI-DAQmx.
    """

    #: Name of the NI task to use
    taskname = Str(desc="name of the NI task to use for the measurement")

    #: Sampling frequency, defaults to 48000.
    sample_freq = Float(48000.0, desc="sampling frequency")

    #: Number of time data samples, defaults to 48000.
    numsamples = Long(48000, desc="number of samples")

    #: Number of channels; is set automatically.
    numchannels = Long(0, desc="number of channels in the task")

    #: Number of devices; is set automatically.
    numdevices = Long(0, desc="number of devices in the task")

    #: Name of channels; is set automatically.
    namechannels = List(desc="names of channels in the task")

    #: Name of devices; is set automatically.
    namedevices = List(desc="names of devices in the task")

    #: Name of available and valid tasks.
    tasknames = List

    traits_view = View([
        Item('taskname{Task name}', editor=EnumEditor(name='tasknames')),
        ['sample_freq', 'numsamples', '-'],
        [
            [
                'numdevices~{count}',
                Item('namedevices~{names}', height=3), '-[Devices]'
            ],
            [
                'numchannels~{count}',
                Item('namechannels~{names}', height=3), '-[Channels]'
            ],
        ], '|[Task]'
    ],
                       title='NI-DAQmx data acquisition',
                       buttons=OKCancelButtons)

    def __init__(self, **traits):
        time_data_import.__init__(self, **traits)
        taskHandle = TaskHandle(0)
        buf_size = 1024
        #        buf = ctypes.create_string_buffer('\000' * buf_size)
        buf = ctypes.create_string_buffer(b'\000' * buf_size)
        DAQmxGetSysTasks(ctypes.byref(buf), buf_size)
        #        tasknamelist = buf.value.split(', ')
        tasknamelist = buf.value.split(b', ')

        self.tasknames = []
        for taskname in tasknamelist:
            # is the task valid? try to load it
            try:
                DAQmxLoadTask(taskname, ctypes.byref(taskHandle))
            except RuntimeError:
                continue
            self.tasknames.append(taskname)
            DAQmxClearTask(taskHandle)

    def _taskname_changed(self):
        taskHandle = TaskHandle(0)
        buf_size = 1024 * 4
        #        buf = ctypes.create_string_buffer('\000' * buf_size)
        buf = ctypes.create_string_buffer(b'\000' * buf_size)

        num = uInt32()
        fnum = float64()
        lnum = uInt64()
        try:
            DAQmxLoadTask(str.encode(self.taskname), ctypes.byref(taskHandle))
        except RuntimeError:
            return
        DAQmxGetTaskNumChans(taskHandle, ctypes.byref(num))
        self.numchannels = num.value
        # commented for compatibility with older NIDAQmx
        #~ DAQmxGetTaskNumDevices(taskHandle,ctypes.byref(num))
        #~ self.numdevices = num.value
        DAQmxGetTaskChannels(taskHandle, ctypes.byref(buf), buf_size)
        self.namechannels = buf.value.split(b', ')
        DAQmxGetTaskDevices(taskHandle, ctypes.byref(buf), buf_size)
        self.namedevices = buf.value.split(b', ')
        self.numdevices = len(self.namedevices)
        DAQmxGetSampClkRate(taskHandle, ctypes.byref(fnum))
        self.sample_freq = fnum.value
        DAQmxGetSampQuantSampMode(taskHandle, ctypes.byref(num))
        if num.value == DAQmx_Val_FiniteSamps:
            DAQmxGetSampQuantSampPerChan(taskHandle, ctypes.byref(lnum))
            self.numsamples = lnum.value
        DAQmxClearTask(taskHandle)

    def _sample_freq_changed(self, dispatch='ui'):
        taskHandle = TaskHandle(0)
        fnum = float64()
        try:
            DAQmxLoadTask(str.encode(self.taskname), ctypes.byref(taskHandle))
        except RuntimeError:
            return
        try:
            DAQmxSetSampClkRate(taskHandle, float64(self.sample_freq))
        except RuntimeError:
            pass
        DAQmxGetSampClkRate(taskHandle, ctypes.byref(fnum))
        self.sample_freq = fnum.value
        DAQmxClearTask(taskHandle)
        print(self.sample_freq)

    def get_data(self, td):
        """
        Main work is done here: loads data from the buffer into the
        :class:`~acoular.sources.TimeSamples` object `td` and also saves
        a '*.h5' file.
        """
        taskHandle = TaskHandle(0)
        read = uInt32()
        fnum = float64()
        lnum = uInt64()
        try:
            DAQmxLoadTask(str.encode(self.taskname), ctypes.byref(taskHandle))
            if self.numchannels < 1:
                raise RuntimeError
            DAQmxSetSampClkRate(taskHandle, float64(self.sample_freq))
        except RuntimeError:
            # no valid task
            time_data_import.get_data(self, td)
            return
        #import data
        name = td.name
        if name == '':
            name = datetime.now().isoformat('_').replace(':', '-').replace(
                '.', '_')
            name = path.join(td_dir, name + '.h5')
        f5h = tables.open_file(name, mode='w')
        ac = f5h.create_earray(f5h.root, 'time_data',
                               tables.atom.Float32Atom(),
                               (0, self.numchannels))
        ac.set_attr('sample_freq', self.sample_freq)
        DAQmxSetSampQuantSampPerChan(taskHandle, uInt64(100000))
        DAQmxGetSampQuantSampPerChan(taskHandle, ctypes.byref(lnum))
        max_num_samples = lnum.value
        print("Puffergroesse: %i" % max_num_samples)
        data = numpy.empty((max_num_samples, self.numchannels),
                           dtype=numpy.float64)
        DAQmxStartTask(taskHandle)
        count = 0
        numsamples = self.numsamples
        while count < numsamples:
            #~ DAQmxReadAnalogF64(taskHandle,-1,float64(10.0),
            #~ DAQmx_Val_GroupByScanNumber,data.ctypes.data,
            #~ data.size,ctypes.byref(read),None)
            DAQmxReadAnalogF64(taskHandle, 1024, float64(10.0),
                               DAQmx_Val_GroupByScanNumber, data.ctypes.data,
                               data.size, ctypes.byref(read), None)
            ac.append(
                numpy.array(data[:min(read.value, numsamples - count)],
                            dtype=numpy.float32))
            count += read.value
            #~ if read.value>200:
            #~ print count, read.value
        DAQmxStopTask(taskHandle)
        DAQmxClearTask(taskHandle)
        f5h.close()
        td.name = name
        td.load_data()

    def get_single(self):
        """
        Gets one block of data
        """
        taskHandle = TaskHandle(0)
        read = uInt32()
        fnum = float64()
        lnum = uInt64()
        try:
            DAQmxLoadTask(str.encode(self.taskname), ctypes.byref(taskHandle))
            if self.numchannels < 1:
                raise RuntimeError
        except RuntimeError:
            # no valid task, nothing to acquire (no td object available here)
            return None
        #import data
        ac = numpy.empty((self.numsamples, self.numchannels), numpy.float32)
        DAQmxGetSampQuantSampPerChan(taskHandle, ctypes.byref(lnum))
        max_num_samples = lnum.value
        data = numpy.empty((max_num_samples, self.numchannels),
                           dtype=numpy.float64)
        DAQmxStartTask(taskHandle)
        count = 0
        numsamples = self.numsamples
        while count < numsamples:
            DAQmxReadAnalogF64(taskHandle, -1, float64(10.0),
                               DAQmx_Val_GroupByScanNumber, data.ctypes.data,
                               data.size, ctypes.byref(read), None)
            anz = min(read.value, numsamples - count)
            ac[count:count + anz] = numpy.array(data[:anz],
                                                dtype=numpy.float32)
            count += read.value
        DAQmxStopTask(taskHandle)
        DAQmxClearTask(taskHandle)
        return ac
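
# Hedged usage sketch (requires NI-DAQmx hardware and a preconfigured task;
# 'MyTask' and 'ts' are hypothetical names, not part of the code above):
# importer = nidaq_import()
# print(importer.tasknames)      # valid tasks found via DAQmxGetSysTasks
# importer.taskname = 'MyTask'   # triggers _taskname_changed, fills channel info
# importer.get_data(ts)          # ts: an acoular TimeSamples object to fill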
Example 5
class LongTrait(HasTraits):
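    # LONG_TYPE is assumed here to be the platform's wide integer type
    # (plain int on Python 3, long on Python 2), defined by the
    # surrounding test module.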
    value = Long(LONG_TYPE(99))
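
# Hedged demo of the Long trait (a sketch, assuming LONG_TYPE as noted above):
# t = LongTrait()
# t.value = 10**20        # Long accepts arbitrarily large Python integers
# print(t.value)          # 100000000000000000000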
Example 6
class SoundDeviceSamplesGenerator(SamplesGenerator):
    """
    Controller for sound card hardware using the sounddevice library.

    Uses the device with index :attr:`device` to read samples
    from input stream, generates output stream via the generator
    :meth:`result`.
    """

    #: input device index, refers to sounddevice list
    device = Int(0, desc="input device index")

    #: Number of input channels, maximum depends on device
    numchannels = Long(
        1, desc="number of analog input channels that collects data")

    #: Number of samples to collect; defaults to -1.
    # If set to -1, the device collects until the user stops streaming by
    # setting the trait collectsamples to False
    numsamples = Long(-1, desc="number of samples to collect")

    #: Indicates if samples are collected, helper trait to break result loop
    collectsamples = Bool(True, desc="Indicates if samples are collected")

    #: Sampling frequency of the signal, changes with sinusdevices
    sample_freq = Property(desc="sampling frequency")

    #: Indicates that the sounddevice buffer has overflown
    overflow = Bool(False, desc="Indicates if sounddevice buffer overflow")

    #: Indicates that the stream is collecting samples
    running = Bool(False,
                   desc="Indicates that the stream is collecting samples")

    #: The sounddevice InputStream object for inspection
    stream = Any

    # internal identifier
    digest = Property(depends_on=['device', 'numchannels', 'numsamples'])

    @cached_property
    def _get_digest(self):
        return digest(self)

    # checks that numchannels are not more than device can provide
    @observe('device,numchannels')
    def _get_numchannels(self, change):
        self.numchannels = min(
            self.numchannels,
            sd.query_devices(self.device)['max_input_channels'])

    def _get_sample_freq(self):
        return sd.query_devices(self.device)['default_samplerate']

    def device_properties(self):
        """
        Returns
        -------
        Dictionary of device properties according to sounddevice
        """
        return sd.query_devices(self.device)

    def result(self, num):
        """
        Python generator that yields the output block-wise. Use a block size
        of at least one ring-cache block.
        
        Parameters
        ----------
        num : integer
            This parameter defines the size of the blocks to be yielded
            (i.e. the number of samples per block).
        
        Returns
        -------
        Samples in blocks of shape (num, :attr:`numchannels`). 
            The last block may be shorter than num.
        """
        print(self.device_properties(), self.sample_freq)
        self.stream = stream_obj = sd.InputStream(device=self.device,
                                                  channels=self.numchannels,
                                                  clip_off=True,
                                                  samplerate=self.sample_freq)

        with stream_obj as stream:
            self.running = True
            if self.numsamples == -1:
                while self.collectsamples:  # yield data as long as collectsamples is True
                    data, self.overflow = stream.read(num)
                    yield data[:num]

            elif self.numsamples > 0:  # amount of samples to collect is specified by user
                samples_count = 0  # numsamples counter
                while samples_count < self.numsamples:
                    anz = min(num, self.numsamples - samples_count)
                    data, self.overflow = stream.read(num)
                    yield data[:anz]
                    samples_count += anz
        self.running = False
        return
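
# Hedged usage sketch (assumes the sounddevice library and a working input
# device with index 0; 'process_block' is a hypothetical consumer):
# gen = SoundDeviceSamplesGenerator(device=0, numchannels=1, numsamples=4800)
# for block in gen.result(256):    # blocks of shape (<=256, numchannels)
#     process_block(block)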
Example 7
class PCPlotData(ArrayPlotData):
    """Container for Principal Component scatterplot type data set.

    This container will be able to hold several sets of PC type data sets:
     * The actual matrix with PC1 to PCn
     * A list of PCDataSet objects that holds metadata for each PC matrix
    """

    # Metadata for each PC set
    plot_data = List(PCDataSet)
    group_names = List([''])
    plot_group = Unicode('')
    coloring_factor = Instance(Factor)
    # Number of PCs in the data sets;
    # the lowest number if we have several sets
    n_pc = Long()
    # The PC for X the axis
    x_no = Int()
    # The PC for the Y axis
    y_no = Int()

    def add_PC_set(self, pc_ds, expl_vars, factor=None):
        """Add a PC data set with metadata"""
        set_n = len(self.plot_data)

        if set_n == 0:
            self.n_pc = pc_ds.n_vars
        else:
            self.n_pc = min(self.n_pc, pc_ds.n_vars)

        values = pc_ds.values.transpose()
        for j, row in enumerate(values):
            dict_name = 's{}pc{}'.format(set_n + 1, (j + 1))
            self.arrays[dict_name] = row

        # if factor is not None:
        #     self.coloring_factor = factor

        if len(pc_ds.subs) > 0:
            # FIXME: replaced by update_color_level_data()
            for gn in pc_ds.get_subset_groups():
                self.group_names.append(gn)
                subsets = pc_ds.get_subsets(gn)
                for ss in subsets:
                    sarray = pc_ds.get_subset_rows(ss)
                    values = sarray.values.transpose()
                    for j, row in enumerate(values):
                        dict_name = 's{}pc{}g{}c{}'.format(
                            set_n + 1, (j + 1), gn, ss.id)
                        self.arrays[dict_name] = row

        labels = pc_ds.obj_n
        color = pc_ds.style.fg_color

        pcds = PCDataSet()
        if labels is not None:
            pcds.labels = labels
        if color is not None:
            pcds.color = color
        if expl_vars is not None:
            pcds.expl_vars = list(expl_vars.mat.xs('calibrated'))
        if pc_ds is not None:
            pcds.pc_ds = pc_ds
        self.plot_data.append(pcds)

        return set_n + 1

    def update_color_level_data(self, set_id):
        '''Handle level data added to the active coloring_factor.

        Runs through the levels and creates a new data source for each level.
        Also has to delete the old data sources, or check whether they already
        exist.

        Heuristics:
        Must indicate which data set to copy values from. Can check whether the
        size of the wanted axis index for the data set is larger than the
        largest index in the Factor.
        '''

        if self.coloring_factor is not None:
            self.coloring_factor.default_ds_axis = 'row'
            # Assumes rows with obj and col with PC
            pdata = self.plot_data[set_id - 1]
            pcds = pdata.pc_ds
            facname = self.coloring_factor.name

            for lvn in self.coloring_factor.levels:
                # get the subset row data for this level
                submx = self.coloring_factor.get_values(pcds, lvn)
                selT = submx.T
                # enumerate values to get the various PC vectors (one row for each PC)
                for i, pcvec in enumerate(selT, 1):
                    # Create key names for the vectors:
                    # 'ds{}fc{}lv{}pc{}' = ds number, factor name, level name, PC number
                    kn = "ds{0}:fc{1}:lv{2}:pc{3}".format(
                        set_id, facname, lvn, i)
                    # insert the key/vector pair into the plot data arrays,
                    # or update it if the data already exists
                    self.arrays[kn] = pcvec
            # Create a data source for the points not in any level
            submx = self.coloring_factor.get_rest_values(pcds)
            selT = submx.T
            for i, pcvec in enumerate(selT, 1):
                # Create key names for the vectors:
                # 'ds{}fc{}lv{}pc{}' = ds number, factor name, level name, PC number
                kn = "ds{0}:fc{1}:lv{2}:pc{3}".format(set_id, facname, 'not',
                                                      i)
                # insert the key/vector pair into the plot data arrays,
                # or update it if the data already exists
                self.arrays[kn] = pcvec
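
# Hedged sketch of the array-key naming scheme above ('pc_ds' and 'expl_vars'
# are hypothetical data-set objects from the surrounding application):
# pd = PCPlotData()
# set_id = pd.add_PC_set(pc_ds, expl_vars)  # stores arrays 's1pc1', 's1pc2', ...
# pd.update_color_level_data(set_id)        # adds 'ds1:fc<factor>:lv<level>:pc<n>'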
Example 8
class tcProject(HasTraits):
    c_states = List(tcGeneric)
    p_states = List(tcGeneric)
    processes = List(tcProcess)
    selected = List(tcProcess)
    filtered_processes = List(tcProcess)
    remove_filter = Button(image=ImageResource("clear.png"),width_padding=0,height_padding=0,style='toolbar')
    minimum_time_filter = Enum((0,1000,10000,50000,100000,500000,1000000,5000000,10000000,50000000))
    minimum_events_filter = Enum((0,2,4,8,10,20,40,100,1000,10000,100000,1000000))
    plot_redraw = Long()
    filter =  Str("")
    filter_invalid = Property(depends_on="filter")
    filename = Str("")
    power_event = CArray
    num_cpu = Property(Int,depends_on='c_states')
    num_process = Property(Int,depends_on='processes')
    traits_view = View(
        VGroup(
            HGroup(
                Item('filter',invalid="filter_invalid",width=1,
                     tooltip='filter the process list using a regular expression,\nallowing you to quickly find a process'),
                Item('remove_filter', show_label=False, style='custom',
                     tooltip='clear the filter')
                ),
            HGroup(
                Item('minimum_time_filter',width=1,label='dur',
                     tooltip='filter the process list with minimum duration process is scheduled'),
                Item('minimum_events_filter',width=1,label='num',
                     tooltip='filter the process list with minimum number of events process is generating'),
                )
            ),
        Item( 'filtered_processes',
              show_label  = False,
              height=40,
              editor      = process_table_editor
              )
        )
    first_ts = 0
    def _get_filter_invalid(self):
        try:
            re.compile(self.filter)
        except re.error:
            return True
        return False
    def _remove_filter_changed(self):
        self.filter=""
    def _filter_changed(self):
        try:
            r = re.compile(self.filter)
        except re.error:
            r = None
        filtered_processes = self.processes
        if self.minimum_events_filter:
            filtered_processes = [p for p in filtered_processes if self.minimum_events_filter < len(p.start_ts)]
        if self.minimum_time_filter:
            filtered_processes = [p for p in filtered_processes if self.minimum_time_filter < p.total_time]
        if r:
            filtered_processes = [p for p in filtered_processes if r.search(p.comm)]
        self.filtered_processes = filtered_processes
    _minimum_time_filter_changed = _filter_changed
    _minimum_events_filter_changed = _filter_changed
    def _processes_changed(self):
        self._filter_changed()
    def _on_show(self):
        for i in self.selected:
            i.show = True
        self.plot_redraw +=1
    def _on_hide(self):
        for i in self.selected:
            i.show = False
        self.plot_redraw +=1
    def _on_select_all(self):
        if self.selected == self.filtered_processes:
            self.selected = []
        else:
            self.selected = self.filtered_processes
        self.plot_redraw +=1

    def _on_invert(self):
        for i in self.filtered_processes:
            i.show = not i.show
        self.plot_redraw +=1

    @cached_property
    def _get_num_cpu(self):
        return len(self.c_states)
    def _get_num_process(self):
        return len(self.processes)
    def process_list_selected(self, selection):
        print(selection)
######### stats part ##########

    def process_stats(self,start,end):
        fact = 100./(end-start)
        for tc in self.processes:
            starts,ends,types = tc.get_partial_tables(start,end)
            inds = np.where(types==colors.get_color_id("running"))
            tot = sum(ends[inds]-starts[inds])
            tc.selection_time = int(tot)
            tc.selection_pc = tot*fact
    def get_selection_text(self, start, end):
        low_i = searchsorted(self.timestamps, start)
        high_i = searchsorted(self.timestamps, end)
        low_line = self.linenumbers[low_i]
        high_line = self.linenumbers[high_i]
        return self.get_partial_text(self.filename, low_line, high_line)
######### generic parsing part ##########


    def generic_find_process(self,pid,comm,ptype,same_pid_match_timestamp=0):
        if (pid,comm) in self.tmp_process:
            return self.tmp_process[(pid,comm)]
        # otherwise, look for a recent process with the same pid but a different
        # name; only for user_process, because other trace types set pid to 0
        if same_pid_match_timestamp != 0 and ptype == "user_process":
            for k, p in list(self.tmp_process.items()):
                if k[0] == pid and p['type'] == "user_process":
                    if len(p['start_ts'])>0 and p['start_ts'][-1] > same_pid_match_timestamp:
                        p['comm'] = comm
                        self.tmp_process[(pid,comm)] = p
                        del self.tmp_process[k]
                        return p
        tmp = {'type':ptype,'comm':comm,'pid':pid,'start_ts':[],'end_ts':[],'types':[],'cpus':[],'comments':[]}
        if not (pid==0 and ptype == "user_process"):
            self.tmp_process[(pid,comm)] = tmp
        return tmp

    def generic_process_start(self,process,event, build_p_stack=True):
        if process['type']=="user_process" and process['pid']==0:
            return # ignore swapper event
        if len(process['start_ts'])>len(process['end_ts']):
            process['end_ts'].append(event.timestamp)
        if self.first_ts == 0:
            self.first_ts = event.timestamp
        self.cur_process_by_pid[process['pid']] = process
        if build_p_stack :
            p_stack = self.cur_process[event.common_cpu]
            if p_stack:
                p = p_stack[-1]
                if len(p['start_ts'])>len(p['end_ts']):
                    p['end_ts'].append(event.timestamp)
                # mark old process to wait for cpu
                p['start_ts'].append(int(event.timestamp))
                p['types'].append(colors.get_color_id("waiting_for_cpu"))
                p['cpus'].append(event.common_cpu)
                p_stack.append(process)
            else:
                self.cur_process[event.common_cpu] = [process]
        # mark process to use cpu
        process['start_ts'].append(event.timestamp)
        process['types'].append(colors.get_color_id("running"))
        process['cpus'].append(event.common_cpu)


    def generic_process_end(self,process,event, build_p_stack=True):
        if process['type']=="user_process" and process['pid']==0:
            return # ignore swapper event
        if len(process['start_ts'])>len(process['end_ts']):
            process['end_ts'].append(event.timestamp)
        if build_p_stack :
            p_stack = self.cur_process[event.common_cpu]
            if p_stack:
                p = p_stack.pop()
                if p['pid'] != process['pid']:
                    print("warning: process premption stack following failure on CPU",event.common_cpu, p['comm'],p['pid'],process['comm'],process['pid'],["%s:%d"%(a['comm'],a['pid']) for a in p_stack],event.linenumber)
                    p_stack = []

                if p_stack:
                    p = p_stack[-1]
                    if len(p['start_ts'])>len(p['end_ts']):
                        p['end_ts'].append(event.timestamp)
                    # mark old process to run on cpu 
                    p['start_ts'].append(event.timestamp)
                    p['types'].append(colors.get_color_id("running"))
                    p['cpus'].append(event.common_cpu)
    def generic_process_single_event(self,process,event):
        if len(process['start_ts'])>len(process['end_ts']):
            process['end_ts'].append(event.timestamp)
        # mark process to use cpu
        process['start_ts'].append(event.timestamp)
        process['types'].append(colors.get_color_id("running"))
        process['cpus'].append(event.common_cpu)
        process['end_ts'].append(event.timestamp)

    def generic_add_wake(self,caller, callee, event):
        self.wake_events.append(((caller['comm'],caller['pid']),(callee['comm'],callee['pid']),event.timestamp))

    def do_function_default(self,event):
        process = self.generic_find_process(0,"kernel function:%s"%(event.callee),"function")
        self.generic_process_single_event(process,event)

    def do_event_default(self,event):
        event.name = event.event.split(":")[0]
        process = self.generic_find_process(0,"event:%s"%(event.name),"event")
        self.generic_process_single_event(process,event)
        process['comments'].append(event.event)


    def start_parsing(self, get_partial_text):
        # we build our data in plain Python containers, which are resizable;
        # once everything is parsed, we turn them into numpy arrays for fast access
        self.tmp_c_states = []
        self.tmp_p_states = []
        self.tmp_process = {}
        self.timestamps = []
        self.linenumbers = []
        self.cur_process_by_pid = {}
        self.wake_events = []
        self.cur_process = [None]*20
        self.last_irq={}
        self.last_spi=[]
        self.missed_power_end = 0
        self.get_partial_text = get_partial_text
        self.methods = {}
        import plugin
        colors.parse_colors(plugin.get_plugins_additional_colors())
        plugin.get_plugins_methods(self.methods)
        self.process_types = {
            "function":(tcProcess, plugin.MISC_TRACES_CLASS),
            "event":(tcProcess, plugin.MISC_TRACES_CLASS)}
        self.process_types.update(plugin.get_plugins_additional_process_types())
    def finish_parsing(self):
        # put the generated data into fixed-size numpy arrays
        c_states = []
        i=0
        for tc in self.tmp_c_states:
            t = tcIdleState(name='cpu%d'%(i))
            while len(tc['start_ts'])>len(tc['end_ts']):
                tc['end_ts'].append(tc['start_ts'][-1])
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            c_states.append(t)
            i+=1
        self.c_states=c_states
        i=0
        p_states = []
        for tc in self.tmp_p_states:
            t = tcFrequencyState(name='cpu%d'%(i))
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            i+=1
            p_states.append(t)
        self.wake_events = numpy.array(self.wake_events,dtype=[('waker',tuple),('wakee',tuple),('time','uint64')])
        self.p_states=p_states
        processes = []
        last_ts = 0
        for pid,comm in self.tmp_process:
            tc = self.tmp_process[pid,comm]
            if len(tc['end_ts'])>0 and last_ts < tc['end_ts'][-1]:
                last_ts = tc['end_ts'][-1]
        if len(self.tmp_process) >0:
            progress = ProgressDialog(title="precomputing data", message="precomputing overview data...", max=len(self.tmp_process), show_time=False, can_cancel=False)
            progress.open()
        i = 0
        for pid,comm in self.tmp_process:
            tc = self.tmp_process[pid,comm]
            if tc['type'] in self.process_types:
                klass, order = self.process_types[tc['type']]
                t = klass(pid=pid,comm=tc['comm'],project=self)
            else:
                t = tcProcess(pid=pid,comm=comm,project=self)
            while len(tc['start_ts'])>len(tc['end_ts']):
                tc['end_ts'].append(last_ts)
            t.start_ts = numpy.array(tc['start_ts'])
            t.end_ts = numpy.array(tc['end_ts'])
            t.types = numpy.array(tc['types'])
            t.cpus = numpy.array(tc['cpus'])
            t.comments = tc['comments'] #numpy.array(tc['comments'])
            t.process_type = tc["type"]
            # precompute 16 levels of overview cache
            t.get_overview_ts(1<<16)
            processes.append(t)
            progress.update(i)
            i += 1
        if len(self.tmp_process) > 0:
            progress.close()
            self.tmp_process = {}
        def process_sort_key(p):
            # sort processes by type, then pid, then comm
            try:
                type_index = self.process_types[p.process_type][1]
            except KeyError:
                type_index = len(self.process_types) + 1
            return (type_index, p.pid, p.comm)

        processes.sort(key=process_sort_key)
        self.processes = processes
        self.p_states=p_states
        self.tmp_c_states = []
        self.tmp_p_states = []
        self.tmp_process = {}

    def ensure_cpu_allocated(self,cpu):
        # ensure we have enough per_cpu p/c_states timecharts
        while len(self.tmp_c_states)<=cpu:
            self.tmp_c_states.append({'start_ts':[],'end_ts':[],'types':[]})
        while len(self.tmp_p_states)<=cpu:
            self.tmp_p_states.append({'start_ts':[],'end_ts':[],'types':[]})

    def run_callbacks(self, callback, event):
        if callback in self.methods:
            for m in self.methods[callback]:
                try:
                    m(self, event)
                except AttributeError:
                    #print("DEBUG -- skip this except run_callbacks()", __file__)
                    from timechart import raise_to_debug
                    raise_to_debug(2, __file__)
                    '''
                    if not hasattr(m,"num_exc"):
                        m.num_exc = 0
                    m.num_exc += 1
                    if m.num_exc <10:
                        print("bug in ", m, "still continue..")
                        traceback.print_exc()
                        print(event)
                    if m.num_exc == 10:
                        print(m, "is too buggy, disabling, please report bug!")
                        self.methods[callback].remove(m)
                        if len(self.methods[callback])==0:
                            del self.methods[callback]
                    '''
            return True
        return False

    def handle_trace_event(self,event):
        self.linenumbers.append(event.linenumber)
        self.timestamps.append(event.timestamp)
        if event.event=='function':
            callback = "do_function_"+event.callee
            self.run_callbacks("do_all_functions", event)
        else:
            callback = "do_event_"+event.event
            self.run_callbacks("do_all_events", event)

        if not self.run_callbacks(callback, event):
            if event.event=='function':
                self.do_function_default(event)
            else:
                self.do_event_default(event)
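
# Hedged driver sketch for the parsing flow above (the event source is
# hypothetical; events need .event, .timestamp, .linenumber and .common_cpu):
# proj = tcProject()
# proj.start_parsing(get_partial_text)  # reset temporary parsing state
# for event in trace_events:            # e.g. produced by an ftrace parser
#     proj.handle_trace_event(event)
# proj.finish_parsing()                 # convert to numpy arrays, build process list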