Example #1
def printKeys(trace_path) :

    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        # print(event.name, " : ",event.keys(),"\n")

        if re.search("net",event.name) :
            print("\n", event.name, " : ")
            if 'network_header' in event.keys() :
                print(event['network_header'])
            if 'magic' in event.keys() :
                print(event['magic'])
            if 'packet_size' in event.keys() :
                print(event['packet_size'])
            if 'skbaddr' in event.keys() :
                print(event['skbaddr'])
            if 'events_discarded' in event.keys() :
                print(event['events_discarded'])
            # print(event['content_size'])
            # print(event['v'])


    print("Fin de l'analyse")
Example #2
def printExecveNet(trace_path) :
    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    # compt1=0
    # compt2=0
    # compt3=0

    # for event in trace_collection.events:
    #     if (re.search("net_dev_queue",event.name) and compt1<10):
    #         compt1 +=1
    #         print(preprocessEventsklearn(event))

    #     elif (re.search("execve",event.name) and compt2<10):
    #         compt2 += 1
    #         print(preprocessEventsklearn(event))

    #     elif (re.search("execve",event.name) and compt3<10):
    #         compt3 += 1
    #         print(preprocessEventsklearn(event))

    for event in trace_collection.events:
        if (re.search("net_dev_queue",event.name) and 'network_header' in event.keys()):
            print("\n", event.name, " : ")
            for key in event.keys() :
                print("\t",key," : ",event[key])

        elif (re.search("execve",event.name)and 'filename' in event.keys()):
            print("\n", event.name, " : ")
            for key in event.keys() :
                print("\t",key," : ",event[key])
Example #3
    def convert(self, from_dir, to_dir):
        tc = babeltrace.TraceCollection()
        tc.add_trace(from_dir, "ctf")

        clock = CTFWriter.Clock("clk")
        writer = CTFWriter.Writer(to_dir)
        writer.add_clock(clock)
        stream_class = CTFWriter.StreamClass('trace')
        stream_class.clock = clock
        stream_class.packet_context_type.add_field(CTFWriter.IntegerFieldDeclaration(32), "cpu_id")

        for id, clslst in self.handlers.items():
            for cls in clslst:
                for ev in cls.get_generated_events():
                    stream_class.add_event_class(ev)

        stream = {}

        for e in tc.events:
            clock.time = e.timestamp
            id = e["cpu_id"]
            if id not in self.threads:
                self.threads[id] = ThreadState(self.handlers)
                stream[id] = writer.create_stream(stream_class)
                stream[id].packet_context.field("cpu_id").value = id
            evl = self.threads[id].consume(e)
            for ev in evl:
                stream[id].append_event(ev)

        for s in stream.values():
            s.flush()
Example #4
def getEventsSynthetic(trace_path):
    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        yield preprocessMoreEventsklearn(event)
Example #5
def GBTPredict(trace_path):
    
    modele = "./modeles/GBT.p"
    dictVec = "./modeles/dictVec.p"
    
    clf = joblib.load(modele)
    vec = joblib.load(dictVec)

    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    listeMachines = []
    dicTid = {}
    dictCPUid = {}

    tempsDebut = datetime.now().time()
    print("\tTemps debut : "+ str(tempsDebut))

    for event in trace_collection.events:
        try :
            eventpreprocessed = preprocessMoreEventsklearn(event, listeMachines,dicTid,dictCPUid)
            if eventpreprocessed != {}:
                try:
                    if clf.predict(vec.transform(eventpreprocessed).toarray()) != [0]:   # and eventpreprocessed["a_nomEvent"]  != "net_dev_queue"
                        # print("Alerte Intrusion sur le système :")
                        # print(eventpreprocessed)
                        # print(datetime.now().time())
                        # print("----------------------------------")
                        pass
                    # print("Après predict")
                except KeyError:
                    pass
        except TypeError:
            pass
Example #6
def getExecve(trace_path) :
    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        if (re.search("execve",event.name)):
            yield preprocessEventsklearn(event)
Example #7
def getNet(trace_path) :
    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        if (re.search("net_dev_queue",event.name)):
            return event
Example #8
def getEventsRegleAbsolue(trace_path):
    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        regles.addRegleAbsolues(event)
        yield preprocessEventsklearn(event)
Example #9
def generate_diagram(tracefile):
    # a trace collection holds one to many traces
    col = babeltrace.TraceCollection()

    # add the trace provided by the user
    # (LTTng traces always have the 'ctf' format)
    if col.add_trace(tracefile, 'ctf') is None:
        raise RuntimeError('Cannot add trace')

    base = None
    threads = set()
    switches = []

    for event in col.events:
        if base is None and event.name == 'atlas_job_submit':
            base = event.timestamp

        if base is None:
            continue

        if event.name == 'atlas_job_submit':
            threads.add(event['tid'])

        if event.name != 'sched_switch':
            continue

        cpu = event['cpu_id']
        time = event.timestamp - base

        switches.append({
            'time': time,
            'reason': 'D',
            'tid': event['prev_tid'],
            'cpu': cpu
        })
        switches.append({
            'time': time,
            'reason': 'S',
            'tid': event['next_tid'],
            'cpu': cpu
        })

    switches = [switch for switch in switches if switch['tid'] in threads]

    # header
    print_line("time", threads)
    last = dict((thread, float('nan')) for thread in threads)

    for cs in switches:
        time = cs['time']
        tid = cs['tid']
        last[tid] = cs['cpu']
        # print(time, cs)
        print_line(time, [last[tid] for tid in threads])

        if cs['reason'] == 'D':
            last[tid] = float('nan')
            print_line(time, [last[tid] for tid in threads])
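print_line() is not defined in the snippet; a minimal sketch of one way it could look, assuming it simply emits a tab-separated line and that the second argument may be either a scalar or an iterable of values:

def print_line(first, rest):
    # emit one tab-separated line; `rest` may be a scalar or an iterable of values
    if isinstance(rest, (list, set, tuple)):
        print(first, *rest, sep='\t')
    else:
        print(first, rest, sep='\t')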
Example #10
def top5proc():
    if len(sys.argv) != 2:
        msg = 'Usage: python {} TRACEPATH'.format(sys.argv[0])
        raise ValueError(msg)

    # a trace collection holds one to many traces
    col = babeltrace.TraceCollection()

    # add the trace provided by the user
    # (LTTng traces always have the 'ctf' format)
    if col.add_trace(sys.argv[1], 'ctf') is None:
        raise RuntimeError('Cannot add trace')

    # this counter dict will hold execution times:
    #
    #   task command name -> total execution time (ns)
    exec_times = Counter()

    # this holds the last `sched_switch` timestamp
    last_ts = None

    # iterate events
    for event in col.events:
        # keep only `sched_switch` events
        if event.name != 'sched_switch':
            continue

        # keep only events which happened on CPU 0
        if event['cpu_id'] != 0:
            continue

        # event timestamp
        cur_ts = event.timestamp

        if last_ts is None:
            # we start here
            last_ts = cur_ts

        # previous task command (short) name
        prev_comm = event['prev_comm']

        # initialize entry in our dict if not yet done
        if prev_comm not in exec_times:
            exec_times[prev_comm] = 0

        # compute previous command execution time
        diff = cur_ts - last_ts

        # update execution time of this command
        exec_times[prev_comm] += diff

        # update last timestamp
        last_ts = cur_ts

    # display top 5
    for name, ns in exec_times.most_common(5):
        s = ns / 1000000000
        print('{:20}{} s'.format(name, s))
Example #11
    def babeltrace(self):
        """ Gets the babeltrace trace object for the test

        The test needs to have been stopped to get the trace.
        """
        col = babeltrace.TraceCollection()
        col.add_traces_recursive(self.lttng.output_dir, 'ctf')

        return col
Example #12
def comptEvent(trace_path):
    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')
    nbEvent = 0

    print(trace_path)
    for event in trace_collection.events:
        nbEvent += 1
    print(nbEvent)
Example #13
def get_trace_ctf_events(trace_directory: str) -> Iterable[BabeltraceEvent]:
    """
    Get the events of a trace.

    :param trace_directory: the path to the main/top trace directory
    :return: events iterable
    """
    tc = babeltrace.TraceCollection()
    tc.add_traces_recursive(trace_directory, 'ctf')
    return tc.events
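A minimal usage sketch for the helper above, assuming it is importable and that '/path/to/trace' is a placeholder for a real CTF trace directory:

for event in get_trace_ctf_events('/path/to/trace'):
    # each babeltrace event exposes .name, .timestamp and dict-style field access
    print(event.name, event.timestamp)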
Example #14
def analyze_trace(path):
    col = babeltrace.TraceCollection()
    if col.add_trace(path, 'ctf') is None:
        raise RuntimeError('Cannot add trace')

    buffers = {}

    for event in col.events:
        if event.name.startswith("aethercast_"):
            buffer_timestamp = event["timestamp"]

            current_buffer = {}

            if buffer_timestamp in buffers.keys():
                current_buffer = buffers[buffer_timestamp]
            else:
                current_buffer['timestamp'] = buffer_timestamp

            current_buffer[event.name] = event.timestamp

            buffers[buffer_timestamp] = current_buffer

    rendering_times = []
    encoding_times = []
    packetizing_times = []
    sending_times = []

    for buffer_timestamp in buffers:
        buffer = buffers[buffer_timestamp]

        start = buffer["aethercast_encoder:received_input_buffer"]
        usec_per_sec = 1000000  # babeltrace timestamps are in ns, so dividing a difference by 1e6 yields ms

        renderer_end = buffer["aethercast_renderer:finished_frame"]
        rendering_times.append(((renderer_end - start) / usec_per_sec))

        encoder_end = buffer["aethercast_encoder:finished_frame"]
        encoding_times.append(((encoder_end - start) / usec_per_sec))

        packetizer_end = buffer["aethercast_packetizer:packetized_frame"]
        packetizing_times.append(((packetizer_end - start) / usec_per_sec))

        sender_end = buffer["aethercast_sender:sent_packet"]
        sending_times.append(((sender_end - start) / usec_per_sec))

    def dump_statistics(name, data):
        print("%s time max: %f ms min: %f ms mean: %f ms stdev: %f ms" %
              (name, max(data), min(data), statistics.mean(data),
               statistics.stdev(data)))

    dump_statistics("Rendering", rendering_times)
    dump_statistics("Encoding", encoding_times)
    dump_statistics("Packetizing", packetizing_times)
    dump_statistics("Sending", sending_times)
Example #15
def main():

    trace_path = sys.argv[1]

    print(trace_path)

    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    getSomeEventsCSV(trace_path)
Example #16
def load_trace(path):
    """Load the trace located in path.

    Args:
        path (string): Path to the LTTng trace folder.

    Returns:
        babeltrace.TraceCollection: a collection of one trace.
    """
    trace_collection = bt.TraceCollection()
    trace_collection.add_trace(path, 'ctf')
    return trace_collection
Example #17
def afficherDictEvents(trace_path):
    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        if (re.search("net_dev_",event.name)):
            print("\n", event.name, " : ")
            for key in event.keys() :
                    print("\t",key," : ",event[key])
            print("dictEvent output : ",preprocessEventsklearn(event),"\n")
            print("\n\n\n-----------------------------------------------------------------------------------------------------\n\n\n")
Example #18
def read(trace_path) :

    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        if (re.search("syscall_entry_access",event.name) or re.search("syscall_entry_newstat",event.name) or re.search("syscall_entry_execve",event.name)) :
            print("\n", event.name, " : ")
            for key in event.keys() :
                print("\t",key," : ",event[key])
    print("Fin de l'analyse")
Example #19
def generate_diagram(tracefile):
    # a trace collection holds one to many traces
    col = babeltrace.TraceCollection()

    # add the trace provided by the user
    # (LTTng traces always have the 'ctf' format)
    if col.add_trace(tracefile, 'ctf') is None:
        raise RuntimeError('Cannot add trace')

    base = None
    timeline = []
    active_cpus = set()

    print_line("time", "cpus")

    atlas_threads = set(
        [e['tid'] for e in col.events if e.name == 'atlas_job_submit'])

    for event in col.events:
        if base is None and event.name == 'atlas_job_submit':
            base = event.timestamp
            print_line(0, 0)
            continue

        if base is None:
            continue

        if event.name != 'sched_switch':
            continue

        time = event.timestamp
        cpu = event['cpu_id']

        if event['prev_tid'] in atlas_threads:
            # deselect
            if cpu in active_cpus:
                active_cpus.remove(cpu)

        if event['next_tid'] in atlas_threads:
            # select
            active_cpus.add(event['cpu_id'])

        if event['next_tid'] in atlas_threads or event[
                'prev_tid'] in atlas_threads:
            timeline.append({'time': time, 'num_cpus': len(active_cpus)})

    timeline = [{
        'time': e['time'] - base,
        'num_cpus': e['num_cpus']
    } for e in timeline if e['time'] > base]
    for e in timeline:
        print_line(e['time'], e['num_cpus'])
Example #20
def test_load_trace():
    traces = babeltrace.TraceCollection()
    ret = traces.add_trace("nosetests/data/traceback/ust/uid/1000/64-bit/",
                           "ctf")
    assert ret is not None
    ecount = 0
    for event in traces.events:
        ecount += 1
        frame = event['frames'][0]
        assert (type(frame.get('co_name')) == str)
        assert (type(frame.get('co_filename')) == str)
        assert (type(frame.get('lineno')) == int)
    assert (ecount == 1)
Example #21
def load_trace(paths):
    if isinstance(paths, str):
        paths = [paths]
    trace = babeltrace.TraceCollection()
    trace.size = 0
    for path in paths:
        if not os.path.isdir(path):
            raise IOError("Path is not a directory")
        for dir in find(path, "metadata"):
            ret = trace.add_trace(dir, "ctf")
            if ret is None:
                raise IOError("failed to load trace %s" % (repr(path)))
            trace.size += getFolderSize(dir)
    return trace
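The snippet relies on find() and getFolderSize() helpers that are not shown; one plausible sketch of both, assuming find() should yield every directory under a path that directly contains a file named `metadata`:

import os

def find(root, filename):
    # yield every directory under root that directly contains a file named `filename`
    for dirpath, dirnames, filenames in os.walk(root):
        if filename in filenames:
            yield dirpath

def getFolderSize(folder):
    # sum the sizes, in bytes, of the regular files directly inside `folder`
    return sum(os.path.getsize(os.path.join(folder, name))
               for name in os.listdir(folder)
               if os.path.isfile(os.path.join(folder, name)))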
Example #22
    def __init__(self, path, notifiers, stat_collector):
        self.trace_collection = babeltrace.TraceCollection()
        self.trace = self.trace_collection.add_traces_recursive(path, 'ctf')
        self.begin_ts = self.trace_collection.timestamp_begin
        self.end_ts = self.trace_collection.timestamp_end

        self.stat_collector = weakref.ref(stat_collector)

        state = State()
        self.analysers = [
            SchedAnalyser(notifiers, state),
            SyscallAnalyser(notifiers, state),
            IrqAnalyser(notifiers, state),
        ]
Example #23
def readNet(trace_path) :

    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        # print(event.name)

        if (re.search("net_dev_queue",event.name)):
                print("\n", event.name, " : ")
                for key in event.keys() :
                    print("\t",key," : ",event[key])
    print("Fin de l'analyse")
Example #24
def main():
    if len(sys.argv) != 3:
        print("usage: ./ctf_to_catapult.py trace_directory out.json")
        exit(0)
    dir = sys.argv[1]
    tr = bt.TraceCollection()
    tr.add_trace(dir, 'ctf')
    catapult_objects = list()
    for event in tr.events:
        if event['id'] <= 4:
            catapult_objects.append(event_to_catapult(event))
    catapult = {'displayTimeUnit': 'ns', 'traceEvents': catapult_objects}
    with open(sys.argv[2], "w") as out:
        out.write(json.dumps(catapult))
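event_to_catapult() is not shown in the snippet; a hedged sketch of one possible mapping to the Catapult/Chrome trace-event format (the 'vpid'/'vtid' payload fields are assumptions and may not exist in every trace):

def event_to_catapult(event):
    # map a babeltrace event to a Catapult "instant" event;
    # Catapult expects microseconds, while CTF timestamps are in nanoseconds
    return {
        'name': event.name,
        'ph': 'i',
        'ts': event.timestamp / 1000.0,
        'pid': event['vpid'] if 'vpid' in event else 0,
        'tid': event['vtid'] if 'vtid' in event else 0,
    }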
Example #25
def is_trace_directory(path: str) -> bool:
    """
    Check recursively if a path is a trace directory.

    :param path: the path to check
    :return: `True` if it is a trace directory, `False` otherwise
    """
    path = os.path.expanduser(path)
    if not os.path.isdir(path):
        return False
    tc = babeltrace.TraceCollection()
    # Could still return an empty dict even if it is not a trace directory (recursively)
    traces = tc.add_traces_recursive(path, 'ctf')
    return traces is not None and len(traces) > 0
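A minimal usage sketch, assuming the function above is importable; the candidate paths are placeholders:

for candidate in ('/path/to/trace', '/tmp'):
    print(candidate, '->', is_trace_directory(candidate))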
Example #26
def check_trace_expected_timestamps(trace_paths, expected_timestamps):
    traces = babeltrace.TraceCollection(intersect_mode=True)
    for trace_path in trace_paths:
        trace_handle = traces.add_trace(trace_path, 'ctf')
        if trace_handle is None:
            print('Failed to open trace at {}'.format(trace_path))
            return False
    for event in traces.events:
        expected_timestamp = expected_timestamps.pop(0)
        if event.timestamp != expected_timestamp:
            print('# Unexpected timestamp ({}), expected {}'.format(
                event.timestamp, expected_timestamp))
            return False
    return True
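A hypothetical invocation of the checker above; the trace paths and the timestamp list are placeholders. Note that intersect_mode=True limits iteration to the time range covered by all added traces:

ok = check_trace_expected_timestamps(
    ['/path/to/trace-a', '/path/to/trace-b'],
    [1000000000, 1000000100, 1000000200],
)
print('timestamps match' if ok else 'timestamps differ')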
Example #27
def main(args):
    parser = argparse.ArgumentParser(description='Analyzer tool')
    parser.add_argument('--path', )
    parser.add_argument('--output', )
    args = parser.parse_args(args)

    traces = babeltrace.TraceCollection()
    lttng_input = args.path
    if args.output:
        output_path = args.output
    else:
        output_path = "result.json"
    ret = traces.add_trace(lttng_input, "ctf")

    result = OrderedDict()
    for event in traces.events:
        if 'trace_id' not in event:
            continue
        trace_id = event['trace_id']
        span_id = event['span_id']
        parent_span_id = event['parent_span_id']
        #       init new op
        if trace_id not in result:
            result[trace_id] = OrderedDict()
            if parent_span_id not in result[trace_id]:
                result[trace_id][parent_span_id] = OrderedDict()
            result[trace_id][parent_span_id][span_id] = init_zipkin_data(event)
            zipkin_data = result[trace_id][parent_span_id][span_id]
            result[trace_id]["start_timestamp"] = event.timestamp
            result[trace_id]["latency"] = 0
            if 'event' in event:
                zipkin_data["events"][
                    event['event']] = event.timestamp - result[trace_id][
                        "start_timestamp"]
            elif 'key' in event and 'val' in event:
                zipkin_data[event['key']] = event['val']

        # add this tracepoint into a leveled op
        else:
            result[trace_id]["latency"] = event.timestamp - result[trace_id][
                "start_timestamp"]
            init_zipkin_data_by_parent_span_id(
                parent_span_id, span_id, result[trace_id], event,
                result[trace_id]["start_timestamp"])

    with open(output_path, 'w') as f:
        json.dump(result, f, indent=4)
Example #28
def getExecveNet(trace_path) :
    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    for event in trace_collection.events:
        if (re.search("net_dev_queue",event.name)):
            print(preprocessMoreEventsklearn(event))
            yield preprocessMoreEventsklearn(event)

        elif (re.search("execve",event.name)):
            print(preprocessMoreEventsklearn(event))
            yield preprocessMoreEventsklearn(event)

        elif (re.search("execve",event.name)):
            print(preprocessMoreEventsklearn(event))
            yield preprocessMoreEventsklearn(event)
Example #29
def get_pruned_data(file_path, capacity):

    my_deque = deque(maxlen=capacity)
    trace_collection = babeltrace.TraceCollection()

    if trace_collection.add_trace(file_path, 'ctf') is None:
        raise RuntimeError('Cannot add trace')

    for event in trace_collection.events:
        temp_dict = {}
        for key in event:
            temp_dict[key] = event[key]
        temp_dict['event_name'] = event.name
        if (len(my_deque) >= capacity):
            my_deque.popleft()
        my_deque.append(temp_dict)
    return my_deque
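A minimal usage sketch, assuming the function above is importable and '/path/to/trace' is a placeholder; the returned deque keeps only the most recent `capacity` events:

recent = get_pruned_data('/path/to/trace', 1000)
for entry in list(recent)[-5:]:
    # each entry is a dict of the event's payload fields plus its 'event_name'
    print(entry['event_name'])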
Example #30
def readAllEvents(trace_path) :

    trace_collection = babeltrace.TraceCollection()

    trace_handle = trace_collection.add_trace(trace_path, 'ctf')

    return trace_collection

    # for event in trace_collection.events:

    #     if re.search("writeback",event.name) :
    #             print("--")
    #     else:
    #         print(event.name)
    #         for key in event.keys() :
    #             print("\t",key," : ",event[key])