Example #1
def launch_thread(collect_agent, url, config, qos_metrics, stop_compression,
                  proxy_add, proxy_port):
    binary_path = config['driver']['binary_path']
    binary_type = config['driver']['binary_type']
    try:
        my_driver = init_driver(binary_path, binary_type, stop_compression,
                                proxy_add, proxy_port)
    except Exception as ex:
        message = 'ERROR when initializing the web driver: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        exit(message)
    if my_driver is not None:
        timestamp = int(time.time() * 1000)
        statistics = compute_qos_metrics(my_driver, url, qos_metrics)
        my_driver.quit()
        statistics['compression_savings'] = 1 - (
            statistics['encoded_body_size'] / statistics['decoded_body_size'])
        statistics['overhead'] = statistics['transfer_size'] - statistics[
            'encoded_body_size']
        s = '# Report for web page ' + url + ' #'
        print('\n' + s)
        print_metrics(statistics, config)
        collect_agent.send_stat(timestamp, **statistics, suffix=url)

    else:
        message = 'Specified driver is not available. For now, only the Firefox driver is supported'
        collect_agent.send_log(syslog.LOG_ERR, message)
        exit(message)
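
The init_driver helper is not shown above. Here is a minimal sketch of what it might look like with Selenium, assuming Firefox-only support to match the error message; the 'FirefoxBinary' label and the Firefox preference names are assumptions, not taken from the job:

from selenium import webdriver

def init_driver(binary_path, binary_type, stop_compression, proxy_add, proxy_port):
    # Hypothetical: mirror the 'only Firefox driver is supported' message
    if binary_type != 'FirefoxBinary':
        return None
    options = webdriver.FirefoxOptions()
    options.binary_location = binary_path
    if stop_compression:
        # Ask for uncompressed bodies so compression_savings stays meaningful
        options.set_preference('network.http.accept-encoding', '')
    if proxy_add and proxy_port:
        options.set_preference('network.proxy.type', 1)  # manual proxy
        options.set_preference('network.proxy.http', proxy_add)
        options.set_preference('network.proxy.http_port', int(proxy_port))
    return webdriver.Firefox(options=options)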
Example #2
def send_stats(filename):
    print(filename)
    with open(filename) as statistics:
        try:
            # Parse the first line independently
            # so we can update os.environ
            statistic = json.loads(next(statistics))
        except StopIteration:
            return  # File was empty

        # Set up os.environ for register_collect to work properly
        metadata = statistic.pop('_metadata')
        timestamp = metadata['time']
        suffix = metadata.get('suffix')
        for name in ENVIRON_METADATA:
            # This way rstats will be aware and will not locally store the
            # stats again
            if name == 'job_name':
                metadata[name] = 'send_stats-' + str(metadata[name])

            os.environ[name.upper()] = str(metadata[name])

        # Recreate connection with rstats
        success = collect_agent.register_collect(CONF_FILE, new=True)
        if not success:
            message = 'Cannot communicate with rstats'
            collect_agent.send_log(syslog.LOG_ERR, message)
            raise ConnectionError(message)
        collect_agent.send_stat(timestamp, suffix=suffix, **statistic)
        for line in statistics:
            statistic = json.loads(line)
            metadata = statistic.pop('_metadata')
            timestamp = metadata['time']
            suffix = metadata.get('suffix')
            collect_agent.send_stat(timestamp, suffix=suffix, **statistic)
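
For reference, send_stats expects a JSON-lines file in which every record embeds its collection context under '_metadata'. A small runnable sketch of the format; the ENVIRON_METADATA names besides job_name are assumptions:

import json

ENVIRON_METADATA = ('job_name', 'job_instance_id', 'scenario_instance_id')  # hypothetical

line = '{"_metadata": {"time": 1609459200000, "suffix": null, "job_name": "fping", "job_instance_id": 42, "scenario_instance_id": 7}, "rtt": 10.3}'
statistic = json.loads(line)
metadata = statistic.pop('_metadata')  # leaves only the statistics themselves
assert metadata['time'] == 1609459200000
assert statistic == {'rtt': 10.3}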
Example #3
def compute_and_send_statistics(packets, to, metrics_interval, suffix,
                                stat_time):
    packets_in_window = [
        pkt for pkt in packets
        if to < float(pkt.sniff_timestamp) * 1000 < to + metrics_interval
    ]
    bit_rate, packet_rate = 0.0, 0
    statistics = {'bit_rate': bit_rate, 'packet_rate': packet_rate}
    # Check if it is the last sample for this flow
    if float(packets[-1].sniff_timestamp) * 1000 <= to + metrics_interval:
        flow_duration = 1000 * (float(packets[-1].sniff_timestamp) -
                                float(packets[0].sniff_timestamp))
        statistics.update({'flow_duration': int(flow_duration)})
    # Cumulative metrics
    cum_packets = [
        pkt for pkt in packets
        if float(pkt.sniff_timestamp) * 1000 < to + metrics_interval
    ]
    if cum_packets:
        statistics.update({
            'packets_count': len(cum_packets),
            'bytes_count': packets_length(cum_packets),
            'avg_packet_length': int(packets_length(cum_packets) / len(cum_packets)),
        })
    # Instantaneous metrics
    if packets_in_window:
        if metrics_interval > 0:
            total_packets_length = packets_length(packets_in_window)
            bit_rate = (total_packets_length * 8 /
                        1024) * 1000 / metrics_interval
            packet_rate = int(1000 * len(packets_in_window) / metrics_interval)
            statistics.update({'bit_rate': bit_rate})
            statistics.update({'packet_rate': packet_rate})
        delay = sum(
            float(packet.sniff_timestamp) - float(previous.sniff_timestamp)
            for previous, packet in pairwise(packets_in_window))
        if len(packets_in_window) > 1:
            avg_inter_packets_delay = delay / (len(packets_in_window) - 1)
            statistics['avg_inter_packets_delay'] = int(avg_inter_packets_delay * 1000)

    collect_agent.send_stat(stat_time, suffix=suffix, **statistics)
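
The pairwise and packets_length helpers used above are not shown. Plausible definitions, assuming pyshark packets whose length attribute is the captured frame size:

from itertools import tee

def pairwise(iterable):
    # itertools recipe: s -> (s0, s1), (s1, s2), ...
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)

def packets_length(packets):
    # Total length of the packets, in bytes
    return sum(int(packet.length) for packet in packets)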
Example #4
def main(destination_ip, count, interval, interface, packetsize, ttl, n_mean):
    cmd = ['fping', destination_ip, '-e', '-D']
    if count == 0:
        cmd += ['-l']
    else:
        cmd += ['-c', str(count)]
    cmd.extend(command_line_flag_for_argument(interval, '-p'))
    cmd.extend(command_line_flag_for_argument(interface, '-I'))
    cmd.extend(command_line_flag_for_argument(packetsize, '-b'))
    cmd.extend(command_line_flag_for_argument(ttl, '-t'))

    pattern = re.compile(
        r'\[(\d+\.\d+)\] {} : \[\d+\], \d+ bytes, (\d+\.?\d*) '.format(
            destination_ip))
    measurements = []

    # launch command
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    while True:
        # read output
        output = p.stdout.readline().decode().rstrip()
        if not output:
            # this should be the blank line before summary
            if p.poll() is not None:
                # Process ended gracefully
                break
            continue

        match = pattern.match(output)
        if match is None:
            message = 'Unrecognised fping output: {}'.format(output)
            collect_agent.send_log(syslog.LOG_WARNING, message)
            continue

        try:
            timestamp, rtt_data = map(float, match.groups())
        except ValueError as exception:
            message = 'ERROR on line \'{}\': {}'.format(output, exception)
            collect_agent.send_log(syslog.LOG_ERR, message)
        else:
            measurements.append(rtt_data)
            if len(measurements) == n_mean:
                collect_agent.send_stat(int(timestamp * 1000),
                                        rtt=mean(measurements))
                measurements.clear()
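
The command_line_flag_for_argument helper can be reconstructed from the cmd.extend(...) calls above. A sketch, assuming None means the flag is unset:

def command_line_flag_for_argument(argument, flag):
    # Yield nothing when the argument is unset so cmd.extend() is a no-op,
    # otherwise yield the flag followed by its value
    if argument is not None:
        yield flag
        yield str(argument)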
Example #5
def monitor(chain, mutex, previous):
    # Refresh the table (allowing to update the stats)
    table = iptc.Table(iptc.Table.FILTER)
    table.refresh()

    # Get the rule (Attention, the rule shall be in first position)
    rule = chain.rules[0]

    # Get the stats
    now = int(time.time() * 1000)
    timestamp = int(time.perf_counter() * 1000)
    bytes_count = rule.get_counters()[1]

    # Get previous stats and update them
    with mutex:
        previous_timestamp, previous_bytes_count = previous
        previous[:] = timestamp, bytes_count

    diff_timestamp = (timestamp - previous_timestamp) / 1000  # in seconds
    rate = (bytes_count - previous_bytes_count) * 8 / diff_timestamp

    # Send the stat to the Collector
    collect_agent.send_stat(now, rate=rate)
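
monitor is meant to be called periodically against the same mutable previous pair. A minimal wiring sketch, assuming the INPUT chain and a one-second period (note the first call reports a rate against the zero baseline):

import threading
import time
import iptc

table = iptc.Table(iptc.Table.FILTER)
chain = iptc.Chain(table, 'INPUT')
mutex = threading.Lock()
previous = [int(time.perf_counter() * 1000), 0]  # [timestamp, bytes_count]
while True:
    monitor(chain, mutex, previous)
    time.sleep(1)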
Example #6
def one_file(capture_file, src_ip, dst_ip, src_port, dst_port, proto,
             metrics_interval):
    """Analyze packets from pcap file located at capture_file and comptute statistics.
    Only consider packets matching the specified fields.
    """
    display_filter = build_display_filter(src_ip, dst_ip, src_port, dst_port,
                                          proto)
    To = now()
    try:
        with closing(
                pyshark.FileCapture(
                    capture_file, display_filter=display_filter)) as cap_file:
            flow_id_funct = lambda pkt: (
                pkt.ip.src, pkt[pkt.transport_layer].srcport,
                pkt.ip.dst, pkt[pkt.transport_layer].dstport,
                pkt.transport_layer)
            packets = [
                packet for packet in cap_file if 'IP' in str(packet.layers)
                and packet.transport_layer is not None
            ]
            key_funct = lambda pkt: pkt.sniff_timestamp
            if packets:
                grouped_packets = sort_and_group(packets, key=flow_id_funct)
                flow_id_to_flow = dict((flow_id, sorted(flow, key=key_funct))
                                       for flow_id, flow in grouped_packets)
                all_flows = list()
                to = float(packets[0].sniff_timestamp) * 1000
                samples_count = 1

                while to < float(packets[-1].sniff_timestamp) * 1000:
                    time = to + metrics_interval
                    ids_of_new_flows_at_time = [
                        flow_id for flow_id, flow in flow_id_to_flow.items()
                        if flow_id not in all_flows
                        and float(flow[0].sniff_timestamp) * 1000 < time
                    ]
                    all_flows.extend(ids_of_new_flows_at_time)

                    flows_count = 0
                    total_flows_count = 0
                    flow_number = 1
                    total_flow_duration = 0
                    for flow_id in all_flows:
                        flow = flow_id_to_flow[flow_id]
                        stat_time = To + samples_count * metrics_interval
                        # If the flow still exists in this window
                        if float(flow[-1].sniff_timestamp) * 1000 > to:
                            compute_and_send_statistics(
                                flow, to, metrics_interval,
                                suffix(flow_number), stat_time)
                            flows_count += 1
                        flow_number += 1
                        total_flow_duration += 1000 * (
                            float(flow[-1].sniff_timestamp) -
                            float(flow[0].sniff_timestamp))
                        total_flows_count += 1

                    statistics = {'flows_count': flows_count}
                    # Check if it is the last sample
                    if total_flows_count > 0 and float(
                            packets[-1].sniff_timestamp) * 1000 <= time:
                        statistics.update({
                            'avg_flow_duration': int(total_flow_duration / total_flows_count),
                            'total_packets': len(packets),
                            'total_bytes': packets_length(packets),
                        })
                    collect_agent.send_stat(stat_time, **statistics)
                    samples_count += 1
                    to = time

    except Exception as ex:
        message = 'ERROR when analyzing: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    sys.exit(0)  # Explicitly exit properly. This is required by pyshark module
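
The now, sort_and_group and suffix helpers are not shown. Plausible sketches; the 'Flow{}' suffix format is an assumption:

import time
from itertools import groupby

def now():
    # Milliseconds since the epoch, matching the timestamps sent above
    return int(time.time() * 1000)

def sort_and_group(iterable, key=None):
    # groupby only merges adjacent items, hence the sort first
    return groupby(sorted(iterable, key=key), key=key)

def suffix(flow_number):
    # Hypothetical naming scheme for per-flow statistics
    return 'Flow{}'.format(flow_number)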
Example #7
def gilbert_elliot(capture_file, second_capture_file, src_ip, dst_ip, src_port,
                   dst_port, proto):
    display_filter = build_display_filter(src_ip, dst_ip, src_port, dst_port,
                                          proto)

    try:
        with closing(
                pyshark.FileCapture(
                    capture_file,
                    display_filter=display_filter)) as cap_file_sent:
            cap_file_sent_iter = iter(cap_file_sent)
            goods = []
            bads = []
            with closing(
                    pyshark.FileCapture(
                        second_capture_file,
                        display_filter=display_filter)) as cap_file_received:
                total_good = 0
                total_bad = 0
                try:
                    current_packet_sent = get_next_packet(cap_file_sent_iter)
                    for packet in cap_file_received:
                        if not ('IP' in str(packet.layers)
                                and packet.transport_layer is not None):
                            continue
                        if packet.ip.id == current_packet_sent:
                            total_good += 1
                            if total_bad:
                                bads.append(total_bad)
                                total_bad = 0
                        while packet.ip.id != current_packet_sent:
                            if total_good:
                                goods.append(total_good)
                                total_good = 0
                            total_bad += 1
                            current_packet_sent = get_next_packet(
                                cap_file_sent_iter)
                        current_packet_sent = get_next_packet(
                            cap_file_sent_iter)
                except StopIteration:
                    pass
                if total_good:
                    goods.append(total_good)
                if total_bad:
                    bads.append(total_bad)

        statistics = {
            'gilbert_elliot_sent': sum(goods) + sum(bads),
            'gilbert_elliot_received': sum(goods)
        }
        if goods or bads:
            statistics['gilbert_elliot_lost_rate'] = sum(bads) / (sum(goods) +
                                                                  sum(bads))

        if goods:
            # average number of steps spent in the good state
            g = sum(goods) / len(goods)
            statistics['gilbert_elliot_p'] = 1 / g
        else:
            collect_agent.send_log(
                syslog.LOG_WARNING,
                "Cannot compute p parameter. Maybe the capture files are too short."
            )

        if bads:
            # average number of steps spent in the bad state
            b = sum(bads) / len(bads)
            statistics['gilbert_elliot_r'] = 1 / b
        else:
            collect_agent.send_log(
                syslog.LOG_WARNING,
                "Cannot compute r parameter. Maybe the capture files are too short."
            )

        collect_agent.send_stat(now(), **statistics)

    except Exception as ex:
        message = 'ERROR when analyzing: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    sys.exit(0)  # Explicitly exit properly. This is required by pyshark module
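
get_next_packet is not shown. A sketch consistent with the comparisons on packet.ip.id above; skipping non-IP packets mirrors the filter applied to the received capture:

def get_next_packet(cap_file_iter):
    # Advance to the next IP transport packet and return its IP id;
    # the StopIteration raised by next() ends the loop in gilbert_elliot
    while True:
        packet = next(cap_file_iter)
        if 'IP' in str(packet.layers) and packet.transport_layer is not None:
            return packet.ip.id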
Example #8
def main(target_address,
         log_address,
         dest_path,
         granularity,
         traffic_type='UDP',
         port=8999,
         signal_port=9000,
         packet_size=512,
         packet_rate=1000,
         bandwidth='0',
         duration=10,
         data_size='0',
         meter='owdm'):

    # Clean previous logs and set up the D-ITG LogServer
    if os.path.isfile('/tmp/ITGRecv.log'):
        os.remove('/tmp/ITGRecv.log')
    if os.path.isfile('/tmp/ITGSend.log'):
        os.remove('/tmp/ITGSend.log')

    proc_log = run_command('ITGLog', 'Popen')

    # Get the reference time for changing the stats generated by D-ITG
    time_ref = int(round(time.time() * 1000))

    # Set packet_rate depending on the bandwidth parameter
    if bandwidth != '0':
        if 'K' in bandwidth:
            packet_rate = 1000 * int(bandwidth.replace('K', ''))
        elif 'M' in bandwidth:
            packet_rate = 1000 * 1000 * int(bandwidth.replace('M', ''))
        elif 'G' in bandwidth:
            packet_rate = 1000 * 1000 * 1000 * int(bandwidth.replace('G', ''))
        else:
            packet_rate = int(bandwidth)
        packet_rate = packet_rate / (8 * packet_size)  # bits/s -> packets/s

    # Build and launch the D-ITGSend command
    cmd_send = [
        'ITGSend', '-a', target_address, '-L', log_address, '-X', log_address,
        '-T', traffic_type, '-c',
        str(packet_size), '-C',
        str(packet_rate), '-t',
        str(duration * 1000), '-m', meter, '-Sdp',
        str(signal_port), '-Ssp',
        str(signal_port), '-rp',
        str(port)
    ]

    # Set the number of KBytes to generate
    if data_size != '0':
        if 'M' in data_size:
            data_size_f = 1024 * float(data_size.replace('M', ''))
        elif 'G' in data_size:
            data_size_f = 1024 * 1024 * float(data_size.replace('G', ''))
        else:
            data_size_f = float(data_size.replace('K', ''))
        cmd_send.extend(['-k', str(data_size_f)])

    run_command(cmd_send)

    # Terminate the process of the D-ITG LogServer
    proc_log.terminate()

    # Clear potential old stats
    if os.path.isfile(os.path.join(dest_path, 'RCV')):
        os.remove(os.path.join(dest_path, 'RCV'))
    if os.path.isfile(os.path.join(dest_path, 'SND')):
        os.remove(os.path.join(dest_path, 'SND'))

    # Get the stats from the logs
    cmd_rcv_cb = [
        'ITGDec', '/tmp/ITGRecv.log', '-c',
        str(granularity),
        os.path.join(dest_path, 'RCV')
    ]
    run_command(cmd_rcv_cb)
    cmd_snd_cb = [
        'ITGDec', '/tmp/ITGSend.log', '-c',
        str(granularity),
        os.path.join(dest_path, 'SND')
    ]
    run_command(cmd_snd_cb)

    # Send the stats of the receiver to the collector
    path_RCV = os.path.join(dest_path, 'RCV')

    try:
        stats = open(path_RCV, "r")
    except Exception as ex:
        message = 'Error opening file {} : {}'.format(path_RCV, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    owd_r = []
    with stats:
        for line in stats:
            txt = line.strip()
            txt = txt.split(' ')

            # Get the timestamp (in ms)
            timestamp = txt[0].replace('.', '')
            timestamp = int(timestamp[:-3])
            timestamp = timestamp + time_ref

            # Get the bitrate (in bps)
            bitrate = txt[1]
            bitrate = float(bitrate) * 1024
            statistics = {'bitrate_receiver': bitrate}
            collect_agent.send_stat(timestamp, **statistics)

            # Get the delay (in ms)
            delay = txt[2]
            delay = float(delay) * 1000
            owd_r.append(delay)
            statistics = {'owd_receiver': delay}
            collect_agent.send_stat(timestamp, **statistics)

            # Get the jitter (in ms)
            jitter = txt[3]
            jitter = float(jitter) * 1000
            statistics = {'jitter_receiver': jitter}
            collect_agent.send_stat(timestamp, **statistics)

            # Get the packetloss
            pck_loss = txt[4]
            pck_loss = float(pck_loss)
            statistics = {'packetloss_receiver': pck_loss}
            collect_agent.send_stat(timestamp, **statistics)

            # Calculate packet_loss_rate
            pck_loss_per_sec = pck_loss * 1000 / granularity
            plr = (pck_loss_per_sec / packet_rate) * 100
            statistics = {'packetloss_rate_receiver': plr}
            collect_agent.send_stat(timestamp, **statistics)

    # Send the stats of the sender to the collector
    path_SND = os.path.join(dest_path, 'SND')

    try:
        stats = open(path_SND, "r")
    except Exception as ex:
        message = 'Error opening file {} : {}'.format(path_SND, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    owd_s = []
    timetab = []

    with stats:
        for line in stats:
            txt = line.strip()
            txt = txt.split(' ')

            # Get the timestamp (in ms)
            timestamp = txt[0].replace('.', '')
            timestamp = int(timestamp[:-3])
            timestamp = timestamp + time_ref

            # Get the bitrate (in bps)
            bitrate = txt[1]
            bitrate = float(bitrate) * 1024
            statistics = {'bitrate_sender': bitrate}
            collect_agent.send_stat(timestamp, **statistics)

            if meter.upper() == "RTTM":
                # Get the delay (in ms)
                delay = txt[2]
                delay = float(delay) * 1000
                owd_s.append(delay)
                timetab.append(timestamp)
                statistics = {'rtt_sender': delay}
                collect_agent.send_stat(timestamp, **statistics)

                # Get the jitter (in ms)
                jitter = txt[3]
                jitter = float(jitter) * 1000
                statistics = {'jitter_sender': jitter}
                collect_agent.send_stat(timestamp, **statistics)

                # Get the packetloss
                pck_loss = txt[4]
                pck_loss = float(pck_loss)
                statistics = {'packetloss_sender': pck_loss}
                collect_agent.send_stat(timestamp, **statistics)

                # Calculate packet_loss_rate
                pck_loss_per_sec = pck_loss * 1000 / granularity
                plr = (pck_loss_per_sec / packet_rate) * 100
                statistics = {'packetloss_rate_sender': plr}
                collect_agent.send_stat(timestamp, **statistics)

    if meter.upper() == 'RTTM':
        for time_tab, owdr, owds in zip(timetab, owd_r, owd_s):
            owd_return = owds - owdr
            statistics = {'owd_return': owd_return}
            collect_agent.send_stat(time_tab, **statistics)
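
run_command is used both to spawn the long-lived log server and to run commands to completion. A hedged sketch of such a wrapper; the error handling style mimics the rest of the job:

import subprocess
import sys

def run_command(cmd, mode='run'):
    # Hypothetical wrapper: 'Popen' starts a background process and
    # returns it, the default waits for the command to finish
    try:
        if mode == 'Popen':
            return subprocess.Popen(cmd)
        return subprocess.run(cmd, check=True)
    except Exception as ex:
        message = 'Error running {} : {}'.format(cmd, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)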
Example #9
def receiver(cmd):
    p = run_process(cmd)
    flow_map = defaultdict(AutoIncrementFlowNumber())

    first_line = {}
    total_sent_data = {}

    for flow_number in repeat(None):
        line = p.stdout.readline().decode()
        tokens = BRACKETS.sub('', line).split()
        if not tokens:
            if p.poll() is not None:
                break
            continue

        timestamp = int(time.time() * 1000)
        try:
            try:
                # check if it is a line with total download time
                if len(tokens) < 2:
                    continue
                flow = tokens[0]
                interval_begin, interval_end = map(float, tokens[1].split("-"))
                try:
                    flow_number = flow_map[int(flow)]
                except ValueError:
                    if flow.upper() != "SUM":
                        continue
                if interval_begin == 0 and flow in first_line:
                    statistics = {'download_time': interval_end}
                    collect_agent.send_stat(timestamp,
                                            suffix=flow_number,
                                            **statistics)
                    del first_line[flow]
                    continue

                # otherwise test if TCP or UDP traffic
                (flow, duration, _, transfer, transfer_units, bandwidth,
                 bandwidth_units, jitter, jitter_units, packets_stats,
                 datagrams) = tokens
                jitter = float(jitter)
                datagrams = float(datagrams[1:-2])
                lost, total = map(int, packets_stats.split('/'))
            except ValueError:
                udp = False
                flow, duration, _, transfer, transfer_units, bandwidth, bandwidth_units = tokens
            else:
                udp = True
            transfer = float(transfer)
            bandwidth = float(bandwidth)
            interval_begin, interval_end = map(float, duration.split('-'))
        except ValueError:
            # filter out non-stats lines
            continue

        try:
            flow_number = flow_map[int(flow)]
        except ValueError:
            if flow.upper() != "SUM":
                continue

        first_line[flow] = True
        if flow not in total_sent_data:
            total_sent_data[flow] = 0
        total_sent_data[flow] += transfer * multiplier(transfer_units, 'Bytes')

        statistics = {
            'sent_data': total_sent_data[flow],
            'throughput': bandwidth * multiplier(bandwidth_units, 'bits/sec'),
        }
        if udp:
            statistics['jitter'] = jitter * multiplier(jitter_units, 's')
            statistics['lost_pkts'] = lost
            statistics['sent_pkts'] = total
            statistics['plr'] = datagrams
        collect_agent.send_stat(timestamp, suffix=flow_number, **statistics)

    error_log = p.stderr.readline()
    if error_log:
        error_msg = 'Error when launching iperf3: {}'.format(error_log)
        collect_agent.send_log(syslog.LOG_ERR, error_msg)
        sys.exit(error_msg)
    p.wait()
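
AutoIncrementFlowNumber and multiplier are not shown. Plausible sketches; the 'Flow{n}' labels and the exact unit table are assumptions:

class AutoIncrementFlowNumber:
    # defaultdict factory: label flows in the order they first appear
    def __init__(self):
        self.count = 0

    def __call__(self):
        self.count += 1
        return 'Flow{}'.format(self.count)

def multiplier(unit, base):
    # Convert an iperf3 unit column into the requested base unit
    if unit == base:
        return 1
    if unit.startswith('G'):
        return 1024 ** 3
    if unit.startswith('M'):
        return 1024 ** 2
    if unit.startswith('K'):
        return 1024
    if unit.startswith('m'):
        return 0.001  # e.g. 'ms' against a base of 's'
    return 1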
Example #10
def main(path, port, interval, readonly):
    # Build stat names
    stats_list = [
        'cwnd_monitoring',
        'ssthresh_monitoring',
        'sndwnd_monitoring',
        'rtt_monitoring',
        'rcvwnd_monitoring',
    ]

    collect_agent.send_log(
        syslog.LOG_DEBUG, 'DEBUG: the following stats have been '
        'built --> {}'.format(stats_list))

    ## if in monitoring mode (listening on port(s))
    if not readonly:
        # Unload existing tcp_probe job and/or module (if exists)
        cmd = 'PID=`cat /var/run/tcpprobe_monitoring.pid`; kill -TERM $PID; rm '
        cmd += '/var/run/tcpprobe_monitoring.pid'
        try:
            os.system(cmd)
        except Exception as exe_error:
            collect_agent.send_log(
                syslog.LOG_DEBUG,
                'No previous tcp_probe job to kill before launching the job: %s'
                % exe_error)
            exit('No previous tcp_probe job to kill before launching the job')

        cmd = 'rmmod tcp_probe > /dev/null 2>&1'
        try:
            os.system(cmd)
        except Exception as exe_error:
            collect_agent.send_log(
                syslog.LOG_ERR,
                'Existing tcp_probe cannot be unloaded: %s' % exe_error)

        # Monitoring setup
        cmd = ('modprobe tcp_probe port={}'
               ' full=1 > /dev/null 2>&1'.format(port))

        # The reference time
        init_time = int(time.time() * 1000)
        with open('/tmp/tcpprobe_initTime.txt', 'w') as initTime_file:
            initTime_file.write(str(init_time))

        try:
            os.system(cmd)
        except Exception as exe_error:
            collect_agent.send_log(
                syslog.LOG_ERR,
                'tcp_probe cannot be executed: %s' % exe_error)
            exit('tcp_probe cannot be executed')

        cmd = 'chmod 444 /proc/net/tcpprobe'
        os.system(cmd)
        cmd = 'PID=`cat /proc/net/tcpprobe > ' + path + ' & echo $!`; echo $PID >'
        cmd += ' /var/run/tcpprobe_monitoring.pid'
        os.system(cmd)
    # if readonly, check that the file already exists
    elif not os.path.isfile(path):
        message = "file from argument 'path' does not exist, can not readonly a non existing file"
        collect_agent.send_log(syslog.LOG_ERR, message)
        exit(message)

    collect_agent.send_log(syslog.LOG_DEBUG, "Finished setting up probe")

    ## if monitoring or reading only one port
    ## when listening to all ports, do not send stats
    for i, row in enumerate(watch(path)):
        if i % interval == 0:
            data = row.split()
            if len(data) == 11 and port != 0:
                timestamp = data[0].strip('\x00')
                timestamp_sec, timestamp_nsec = timestamp.split('.', 1)
                with open('/tmp/tcpprobe_initTime.txt') as initTime_file:
                    init_time = int(initTime_file.read())
                timestamp_real = init_time + int(timestamp_sec) * 1000 + int(
                    timestamp_nsec[:3])
                try:
                    # if the port in the monitoring file is the port
                    # we want to monitor
                    if str(data[2].split(":")[1]) == str(port):
                        collect_agent.send_stat(
                            timestamp_real,
                            cwnd_monitoring=data[6],
                            ssthresh_monitoring=data[7],
                            sndwnd_monitoring=data[8],
                            rtt_monitoring=data[9],
                            rcvwnd_monitoring=data[10],
                        )
                except Exception as connection_err:
                    message = 'ERROR: {}'.format(connection_err)
                    collect_agent.send_log(syslog.LOG_ERR, message)
                    exit(message)
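
watch is not shown. A sketch of a tail-follow generator over the tcp_probe output file, assuming the job never stops reading on its own:

import time

def watch(path):
    # Follow the file, yielding each new line as tcp_probe writes it
    with open(path) as probe_file:
        while True:
            line = probe_file.readline()
            if line:
                yield line
            else:
                time.sleep(0.1)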