    def hook_schedLockClient(self, event, payload):
        taskId = event["id"]
        timeAcq = event["ts_acquire"]
        cpuId = ExecutionModel.getCurrentCPUId()
        stack = self.getEventStack(event)

        # Emit an event in the past. We know that no tracepoint can be emitted
        # between acquiring the lock and the completion of the operation
        # (at this point acting as a client). Hence, it is safe to override the
        # standard path and enforce another tracepoint here.
        pastPayload = [(ExtraeEventTypes.RUNTIME_SUBSYSTEMS,
                        self.Status.SchedulerLockEnter)]
        ParaverTrace.emitEvent(timeAcq, cpuId, pastPayload)

        # Emit the communication line (if present) before any current event,
        # as required by Paraver. This line shows the relationship between a
        # worker serving tasks and another worker being assigned work by it.
        if taskId != 0:
            timeRel = ExecutionModel.getCurrentTimestamp()
            (timeSend, cpuSendId) = self._servedTasks[taskId]
            ParaverTrace.emitCommunicationEvent(cpuSendId, timeSend, cpuId,
                                                timeRel)

        # Emit event at this point in time using the standard path
        payload.append((ExtraeEventTypes.RUNTIME_SUBSYSTEMS, stack[-1]))
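
The communication line above pairs with an entry recorded on the serving side. A minimal sketch of how self._servedTasks might be populated when a server assigns work (the hook name is an assumption; only the fields and helpers come from the original code):

    def hook_schedAssignTask(self, event, payload):
        # Hypothetical hook: when a serving worker hands a task to another
        # worker, remember the send timestamp and CPU so hook_schedLockClient
        # can later emit the matching Paraver communication line.
        taskId = event["id"]
        timeSend = ExecutionModel.getCurrentTimestamp()
        cpuSendId = ExecutionModel.getCurrentCPUId()
        self._servedTasks[taskId] = (timeSend, cpuSendId)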
Example #2
    def _process_event(self, ts, event):
        # Run every hook registered for this event's name; hooks append
        # (eventType, value) pairs to the shared payload.
        hookList = self.__hooks[event.name]
        for hook in hookList:
            hook(event, self.__payload)
        # Emit all collected entries as a single Paraver record at the event's
        # timestamp, on the virtual CPU derived from the event.
        if self.__payload:
            cpuId = RuntimeModel.getVirtualCPUId(event)
            ParaverTrace.emitEvent(ts, cpuId, self.__payload)
        self.__payload.clear()
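
A minimal sketch of the hook registration that _process_event relies on, assuming __hooks is a mapping from event name to a list of callables (the helper body is an assumption; installHooks itself is only referenced by the later examples):

    def installHooks(self, hooks):
        # Hypothetical registration helper: 'hooks' is assumed to be an
        # iterable of (eventName, callable) pairs, and each callable takes
        # (event, payload) as in the hooks shown in these examples.
        for name, func in hooks:
            self.__hooks.setdefault(name, []).append(func)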
Example #3
    def hook_flush(self, event, _):
        start = event["start"]
        end = event["end"]

        # In this view we are not emitting events through the "payload"
        # variable; instead, we emit them directly. That is because we do not
        # want to use the current event's timestamp as the extrae timestamp,
        # but rather the event's own fields as the timestamps. It is safe to do
        # so because, while flushing, we know that no events could have been
        # emitted between the last processed event and now, simply because
        # nanos6 was busy flushing the buffer :-)

        cpuId = RuntimeModel.getVirtualCPUId(event)
        ParaverTrace.emitEvent(start, cpuId, [(ExtraeEventTypes.CTF_FLUSH, 1)])
        ParaverTrace.emitEvent(end, cpuId, [(ExtraeEventTypes.CTF_FLUSH, 0)])
    def hook_schedLockServer(self, event, payload):
        timeAcq = event["ts_acquire"]
        cpuId = ExecutionModel.getCurrentCPUId()
        stack = self.getEventStack(event)

        # Emit an event in the past. We know that no tracepoint can be emitted
        # between the attempt to acquire the lock and the completion of the
        # operation (at this point acting as a server). Hence, it is safe to
        # override the standard path and enforce another tracepoint here.
        pastPayload = [(ExtraeEventTypes.RUNTIME_SUBSYSTEMS,
                        self.Status.SchedulerLockEnter)]
        ParaverTrace.emitEvent(timeAcq, cpuId, pastPayload)

        # Emit event at this point in time
        payload.append((ExtraeEventTypes.RUNTIME_SUBSYSTEMS,
                        self.Status.SchedulerLockServing))

        # Push the new status onto the event stack
        stack.append(self.Status.SchedulerLockServing)
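
A minimal sketch of the counterpart hook that would restore the previous status once the server stops serving (the hook name is an assumption; only the stack and payload handling mirror the original code):

    def hook_schedLockServerExit(self, event, payload):
        # Hypothetical exit hook: drop the SchedulerLockServing status pushed
        # by hook_schedLockServer and re-emit the status underneath it.
        stack = self.getEventStack(event)
        stack.pop()
        payload.append((ExtraeEventTypes.RUNTIME_SUBSYSTEMS, stack[-1]))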
Example #5
    def _process_first_message(self):
        msg = next(self._it)
        if type(msg) is bt2._EventMessageConst:
            clk = msg.default_clock_snapshot
            absoluteStartTime = clk.clock_class.offset.seconds
            ts = clk.value - 1  # see comments below
            assert (ts >= 0)
            ncpus = msg.event.stream.trace.environment["ncpus"]
            binaryName = msg.event.stream.trace.environment["binary_name"]
            pid = msg.event.stream.trace.environment["pid"]
            traceName = "trace_" + str(binaryName) + "_" + str(pid)

            # initialize paraver trace
            ParaverTrace.addTraceName(traceName)
            ParaverTrace.addAbsoluteStartTime(absoluteStartTime)
            ParaverTrace.addStartTime(ts)
            ParaverTrace.addNumberOfCPUs(ncpus)
            ParaverTrace.initalizeTraceFiles()

            # install event processing hooks
            RuntimeModel.initialize(ncpus)
            self.installHooks(RuntimeModel.hooks())
            for view in self.__paraverViews:
                self.installHooks(view.hooks())

            # redirect message processing
            self.__process_message = self._process_other_message

            # compute set of starting events
            for view in self.__paraverViews:
                view.start(self.__payload)
            # We emit a set of initial events one nanosecond before the first
            # event is encountered to avoid overlapping with the extrae events
            # derived from the first ctf event.
            ParaverTrace.emitEvent(ts, 0, self.__payload)
            self.__payload.clear()

        self._consume_message(msg)
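
For context, a minimal sketch of what the redirected _process_other_message might look like, assuming it extracts the timestamp from each event message and delegates to the _process_event method shown earlier (the body is an assumption; only the names appear in the original code):

    def _process_other_message(self):
        msg = next(self._it)
        if type(msg) is bt2._EventMessageConst:
            # Dispatch the event to the installed hooks, using the clock
            # snapshot value as the Paraver timestamp.
            ts = msg.default_clock_snapshot.value
            self._process_event(ts, msg.event)
        self._consume_message(msg)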
Example #6
    def _process_first_message(self):
        msg = next(self._it)
        if type(msg) is bt2._EventMessageConst:
            clk = msg.default_clock_snapshot
            absoluteStartTime = clk.clock_class.offset.seconds
            ts = clk.value - 1  # see comments below
            assert (ts >= 0)
            cpuList = str(msg.event.stream.trace.environment["cpu_list"])
            binaryName = msg.event.stream.trace.environment["binary_name"]
            pid = msg.event.stream.trace.environment["pid"]
            traceName = "trace_" + str(binaryName) + "_" + str(pid)

            # initialize paraver trace
            ParaverTrace.addTraceName(traceName)
            ParaverTrace.addAbsoluteStartTime(absoluteStartTime)
            ParaverTrace.addStartTime(ts)
            ParaverTrace.addCPUList(cpuList)
            ParaverTrace.addBinaryName(binaryName)
            ParaverTrace.initalizeTraceFiles()

            # Initialize both the Kernel and Runtime Models. The Kernel Model
            # must be initialized before the Paraver views are created.
            KernelModel.initialize()
            RuntimeModel.initialize()

            # Create Paraver Views
            self.__paraverViews = [
                pv.ParaverViewRuntimeCode(),
                pv.ParaverViewRuntimeBusyWaiting(),
                pv.ParaverViewRuntimeTasks(),
                pv.ParaverViewTaskLabel(),
                pv.ParaverViewTaskSource(),
                pv.ParaverViewTaskId(),
                pv.ParaverViewHardwareCounters(),
                pv.ParaverViewThreadId(),
                pv.ParaverViewRuntimeSubsystems(),
                pv.ParaverViewCTFFlush(),
                #pv.ParaverViewNumberOfReadyTasks(),
                pv.ParaverViewNumberOfCreatedTasks(),
                pv.ParaverViewNumberOfBlockedTasks(),
                pv.ParaverViewNumberOfRunningTasks(),
                pv.ParaverViewNumberOfCreatedThreads(),
                pv.ParaverViewNumberOfRunningThreads(),
                pv.ParaverViewNumberOfBlockedThreads(),
                pv.ParaverViewKernelThreadID(),
                pv.ParaverViewKernelPreemptions(),
                pv.ParaverViewKernelSyscalls(),
            ]

            # Install event processing hooks
            self.installHooks(KernelModel.preHooks())
            self.installHooks(RuntimeModel.preHooks())
            for view in self.__paraverViews:
                self.installHooks(view.hooks())
            self.installHooks(RuntimeModel.postHooks())
            self.installHooks(KernelModel.postHooks())

            # redirect message processing
            self.__process_message = self._process_other_message

            # compute set of starting events
            for view in self.__paraverViews:
                view.start(self.__payload)
            # We emit a set of initial events one nanosecond before the first
            # event is encountered to avoid overlapping with the extrae events
            # derived from the first ctf event.
            ParaverTrace.emitEvent(ts, 0, self.__payload)
            self.__payload.clear()

        self._consume_message(msg)
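
A minimal sketch of how these processing methods might be driven, assuming the converter exposes a run() entry point that walks the bt2 message iterator until it is exhausted (the method name and loop are assumptions):

    def run(self):
        # The first message initializes the Paraver trace and installs the
        # hooks; every subsequent message goes through the redirected handler.
        self._process_first_message()
        while True:
            try:
                self.__process_message()
            except StopIteration:
                break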