Example #1
0
def _merge_sysouts(exp_dir):
    """Merges per-sender mod.out and sys.out files into a single sys.out.

    The merged file is written to <exp_dir>/sys.out and has the format:

        Module Params
        =============
        Sender 0
        ---------
        /sys/...=...

        Sysctl Params
        =============
        Sender 0
        ---------
        /sys/...=...

    Args:
        exp_dir: The experiment's output directory.
    """
    def _append_section(merged_file, title, name, trailer):
        # Writes one titled section with a sub-section per sender
        # directory. Files sitting directly in exp_dir are skipped:
        # only the per-sender subdirectories contribute.
        merged_file.write('%s\n' % title)
        merged_file.write('=============\n')
        for d, f in sorted(all_files(exp_dir, name=name)):
            if d == exp_dir:
                continue
            # The sender id is the path of d relative to exp_dir.
            sender_id = d[len(exp_dir) + 1:]
            merged_file.write('Sender %s\n' % sender_id)
            merged_file.write('---------\n')
            # 'with' guarantees the source file is closed even if a
            # write fails (the original leaked handles on error).
            with open(os.path.join(d, f)) as src:
                merged_file.writelines(src.readlines())
            merged_file.write(trailer)

    with open(os.path.join(exp_dir, 'sys.out'), 'w') as merged_file:
        _append_section(merged_file, 'Module Params', 'mod.out', '\n')
        _append_section(merged_file, 'Sysctl Params', 'sys.out', '\n\n')
Example #2
0
def _merge_pcaps(exp_dir):
    """Merges all the pcaps in the experiment directory."""
    # Group pcap paths by file name across the per-sender directories,
    # skipping anything that sits directly in exp_dir.
    by_name = {}
    for directory, fname in all_files(exp_dir, regex=r'.*\.pcap$'):
        if directory == exp_dir:
            continue
        by_name.setdefault(fname, []).append(os.path.join(directory, fname))

    # Kick off one mergecap per distinct file name in the background,
    # then wait for all of them so every merged all.<name> pcap is
    # complete before returning.
    merges = [
        shell.bg('mergecap -F pcap -w %s %s' %
                 (os.path.join(exp_dir, 'all.' + fname), ' '.join(paths)))
        for fname, paths in by_name.items()
    ]
    for proc in merges:
        shell.wait(proc)
Example #3
0
def gen_xplots(data_dir):
    """Generates xplots for all the experiments in the data directory.

    For each experiment: runs tcptrace on every pcap, rewrites the
    per-flow time sequence graphs with readable titles, builds one
    combined xplot for all flows, and bundles everything into
    <exp_dir>/xplots.tbz2.

    Args:
        data_dir: The root data directory containing the experiments.
    """
    for _, _, _, _, exp_dir in cfgutil.exps(data_dir):
        xpl_paths = []
        conn_info = outparser.ConnInfo([
            os.path.join(d, f) for d, f in all_files(exp_dir, name='conn.info')
        ])
        rcv_ip = outparser.RecvInfo(os.path.join(exp_dir, 'R', 'recv.info')).ip
        ports = conn_info.ports()
        all_lines = []
        procs = []
        # Launch tcptrace in parallel for every pcap, then wait for all
        # of them so the .xpl outputs exist before post-processing below.
        for d, f in all_files(exp_dir, regex=r'.*\.pcap$'):
            if d == exp_dir:
                continue
            procs.append(
                shell.bg('tcptrace -CRSzxy --output_dir="%s" "%s"' %
                         (d, os.path.join(d, f))))
        for p in procs:
            shell.wait(p)

        # Visit each pcap directory exactly once. The original iterated
        # per-pcap, so a directory holding several pcaps had its .xpl
        # files processed (and appended to all_lines) multiple times.
        pcap_dirs = sorted(
            set(d for d, _ in all_files(exp_dir, regex=r'.*\.pcap$')))
        for d in pcap_dirs:
            for xd, xf in all_files(d, regex=r'.*\.xpl$'):
                # Only process time sequence graphs.
                if xf.find('_tsg') == -1:
                    continue

                # 'with' closes the tcptrace output file (the original
                # leaked the handle).
                with open(os.path.join(xd, xf)) as xplf:
                    lines = xplf.readlines()

                # The first 3 lines in the xplot are for the title.
                # The last line is the draw command. The rest (3:-1)
                # is data. We save the rest in all_lines in order to
                # create one xplot that contains the time sequence
                # graphs for all flows.
                all_lines += lines[3:-1]

                # Parse the ip and port from the xplot's title. Note that the
                # addresses may be either IPv4 or IPv6.
                parts = lines[2].split('_==>_')[0].split(':')
                ip_base = ':'.join(parts[:-1])
                port = int(parts[-1])
                # Resolve the textual address: try IPv4 first, fall back
                # to IPv6 on resolution failure.
                try:
                    ip = socket.getaddrinfo(ip_base, 0, socket.AF_INET,
                                            socket.SOCK_STREAM,
                                            socket.IPPROTO_TCP)[0][4][0]
                except socket.gaierror:
                    ip = socket.getaddrinfo(ip_base, 0, socket.AF_INET6,
                                            socket.SOCK_STREAM,
                                            socket.IPPROTO_TCP)[0][4][0]

                # If the ip and port are not from this experiment ignore this
                # file.
                if ip == rcv_ip or port not in ports:
                    continue

                # Rewrite the title of the xplot as:
                #   ==> CC -- IP:PORT
                addr, _, cc, _, _, _, _ = conn_info.conn_info(port)
                lines[2] = '==>%s -- %s:%s\n' % (cc, addr, port)

                # Save the per-flow xplot with the rewritten title.
                xpath = os.path.join(xd, 'out-%s.xpl' % port)
                xpl_paths.append(xpath)
                with open(xpath, 'w') as oxplf:
                    oxplf.writelines(lines)

        # Prepend the title to all_lines and append the draw command (ie, go).
        all_lines = (['dtime signed\n', 'title\n', '===> All flows\n'] +
                     all_lines + ['go'])
        axpath = os.path.join(exp_dir, 'out-all.xpl')
        xpl_paths.append(axpath)
        with open(axpath, 'w') as axplf:
            axplf.writelines(all_lines)

        # Bundle all generated xplots (relative paths) into one archive.
        shell.run('tar -C %s -cvjf %s %s' %
                  (exp_dir, os.path.join(exp_dir, 'xplots.tbz2'), ' '.join(
                      [os.path.relpath(p, exp_dir) for p in xpl_paths])))
Example #4
0
def gen_exp(exp, exp_dir, has_xplot=False, skip_pcap_scan=False):
    """Generates all the pages for the experiment.

    Args:
        exp: The experiment object.
        exp_dir: The experiment's output directory.
        has_xplot: Whether the xplot is generated for the experiment.
        skip_pcap_scan: Whether to skip pcap scan.

    Returns:
        The tuple of (metrics, test case errors).
    """
    visitors = [
        # Order is important here. Keep MetricPublishers at the head of the list
        # and non-publisher Visitors at the end, so that metrics are published
        # before the visitors are ended.
        KlogMetricsPublisher(),
        RetxRateMetricPublisher(),
        ConvergenceMetricPublisher(),
        RTTMetricPublisher(),
        SerialDelayMetricPublisher(),
        TputMetricPublisher(),
        AppLatencyMetricPublisher(),
        UtilMetricAndPageGenerator(),
        TimeSeqPageGenerator(),
        SummaryPageGenerator(has_xplot),
        KlogPageGenerator(),
        DashboardPageGenerator(),
        KlogCompressor(),
    ]

    _dump_js_files(exp_dir)
    _merge_pcaps(exp_dir)
    _merge_sysouts(exp_dir)

    rcv_ip = outparser.RecvInfo(os.path.join(exp_dir, 'R', 'recv.info')).ip
    conn_info = outparser.ConnInfo(
        [os.path.join(d, f) for d, f in all_files(exp_dir, name='conn.info')])

    pcaps = []
    for i in range(exp.nsenders()):
        snd_dir = os.path.join(exp_dir, str(i))
        snd_pcaps = [
            os.path.join(d, f)
            for d, f in all_files(snd_dir, regex=r'.*\.pcap$')
        ]
        # If the machine has eth1 or eth2 interfaces, we have a bonding/slave
        # config. Otherwise, we have one physical interface that are not
        # bonded. In the former case, we use the pcap of the slaves and for the
        # latter we use pcaps from the physical interface eth0.
        is_bonded = len([
            f for f in snd_pcaps
            if f.endswith('eth1.pcap') or f.endswith('eth2.pcap')
        ])
        if not is_bonded:
            pcaps += snd_pcaps
        else:
            pcaps += [f for f in snd_pcaps if not f.endswith('eth0.pcap')]
    pcap_parser = outparser.Pcap(pcaps)
    klogs = [
        os.path.join(d, f)
        for d, f in all_files(exp_dir, name='kern-debug.log')
    ]
    klog_parser = outparser.KernLog(klogs)

    for visitor in visitors:
        visitor.begin(exp, exp_dir, rcv_ip)

    for port in conn_info.ports():
        ip, tool, cc, start, dur, tput, params = conn_info.conn_info(port)
        for visitor in visitors:
            visitor.visit_conn(ip, port, tool, cc, params, start, dur, tput)

    start_times = {}
    if not skip_pcap_scan:
        exp_start = False
        exp_start_time = 0
        for time, packet in pcap_parser.packets():
            if IP not in packet and IPv6 not in packet:
                continue
            if IPv6 not in packet:
                ip = packet[IP]
            else:
                ip = packet[IPv6]

            if TCP in packet:
                l4_hdr = packet[TCP]
            elif UDP in packet:
                l4_hdr = packet[UDP]
            else:
                continue
            port = l4_hdr.dport if ip.src == rcv_ip else l4_hdr.sport

            # Whether this is SYN sent by sender or not
            sender_syn = ip.src != rcv_ip and TCP in packet \
                         and (l4_hdr.flags&0x2)

            # Process pkt only if experiment has started (from sender
            # perspective) i.e. SYN packet sent by atleast one sender
            if not exp_start:
                if not sender_syn:
                    continue
                exp_start_time = time
                exp_start = True

            # Adjust time relative to start of the experiment
            time -= exp_start_time

            # We need to store the port start time for adjusting the klog times.
            if port not in start_times and sender_syn:
                start_times[port] = time

            for visitor in visitors:
                visitor.visit_packet(time, packet)
    else:
        ss_logs = []
        for i in range(exp.nsenders()):
            ss_log = os.path.join(exp_dir, str(i))
            ss_log = os.path.join(ss_log, 'ss.log')
            if os.path.exists(ss_log):
                ss_logs.append(ss_log)
        sslog_parser = outparser.SsLog(ss_logs)
        for time, data in sslog_parser.entries():
            if 'port' in data:
                port = data['port']
                if port not in start_times:
                    start_times[port] = time
            for visitor in visitors:
                visitor.visit_ss_log(time, data)

    for time, line, match in klog_parser.lines():
        # Kernel log times are relative to the kernel log entries. We need
        # to add the start times based on pcap data in order to get a timestamp
        # that is relative to the beginning of the experiment. Thus, we use
        # "time + start_time" instead of time.
        port = int(match['port'])
        start_time = start_times[port] if port in start_times else 0
        for visitor in visitors:
            visitor.visit_klog(time + start_time, line, match)

    metrics = {}
    for visitor in visitors:
        for mt in metrics.values():
            visitor.visit_metric(mt)

        visitor.end()

        if isinstance(visitor, metric.MetricPublisher):
            for mt in visitor.publish_metrics():
                metrics[mt.name()] = mt

    _dump_metrics(exp_dir, metrics)
    _log_metrics(exp, metrics)

    case = TestCase()
    errs = []
    try:
        exp.check(exp, metrics, case)
    except Exception, e:
        errs.append(str(e))