Example #1
def compute_qos_metrics(driver, url_to_fetch, qos_metrics):
    """
    Retrieve the given web page, then compute QoS metrics by executing their associated JavaScript snippets.
    Args:
        driver(WebDriver): an initialized Selenium WebDriver.
        url_to_fetch(str): the URL to retrieve before computing the metrics.
        qos_metrics(dict(str,str)): a dictionary mapping metric names to JavaScript snippets.
    Returns:
        results(dict(str,object)): a dictionary mapping metric names to their computed values.
    """
    results = dict()
    try:
        driver.get(url_to_fetch)
        for key, value in qos_metrics.items():
            results[key] = driver.execute_script(value)
        for request in driver.requests:
            if request.url in (url_to_fetch, url_to_fetch + '/'):
                results['status_code'] = request.response.status_code
                if results['status_code'] == 404:
                    message = 'Warning : Fetched url {} returned response code 404 (Not Found)'.format(
                        url_to_fetch)
                    collect_agent.send_log(syslog.LOG_WARNING, message)
                    print(message)
                break
    except WebDriverException as ErrorMessage:
        message = 'ERROR when getting url: {}'.format(ErrorMessage)
        print(message)
        collect_agent.send_log(syslog.LOG_ERR, message)
        driver.quit()
        exit(message)
    return results
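A minimal usage sketch (not part of the original source): the JavaScript snippets are assumed to rely on the browser's Navigation Timing API, and the driver to come from an init_driver helper such as the one used in Example #2.

qos_metrics = {
    'page_load_time': 'return performance.timing.loadEventEnd'
                      ' - performance.timing.navigationStart;',
}
results = compute_qos_metrics(driver, 'https://example.com', qos_metrics)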
Example #2
def launch_thread(collect_agent, url, config, qos_metrics, stop_compression,
                  proxy_add, proxy_port):
    binary_path = config['driver']['binary_path']
    binary_type = config['driver']['binary_type']
    try:
        my_driver = init_driver(binary_path, binary_type, stop_compression,
                                proxy_add, proxy_port)
    except Exception as ex:
        message = 'ERROR when initializing the web driver: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        exit(message)
    if my_driver is not None:
        timestamp = int(time.time() * 1000)
        statistics = compute_qos_metrics(my_driver, url, qos_metrics)
        my_driver.quit()
        statistics['compression_savings'] = 1 - (
            statistics['encoded_body_size'] / statistics['decoded_body_size'])
        statistics['overhead'] = statistics['transfer_size'] - statistics[
            'encoded_body_size']
        s = '# Report for web page ' + url + ' #'
        print('\n' + s)
        print_metrics(statistics, config)
        collect_agent.send_stat(timestamp, **statistics, suffix=url)

    else:
        message = 'The specified driver is not available. Currently, only the Firefox driver is supported'
        collect_agent.send_log(syslog.LOG_ERR, message)
        exit(message)
Example #3
def main(sampling_interval,
         chain_name,
         source_ip=None,
         destination_ip=None,
         protocol=None,
         in_interface=None,
         out_interface=None,
         dport=None,
         sport=None):
    table = iptc.Table(iptc.Table.FILTER)
    chains = [chain for chain in table.chains if chain.name == chain_name]
    try:
        chain, = chains
    except ValueError:
        message = 'ERROR: {} does not exist in FILTER table'.format(chain_name)
        collect_agent.send_log(syslog.LOG_ERR, message)
        exit(message)

    # Creation of the Rule
    rule = iptc.Rule(chain=chain)
    signal.signal(signal.SIGTERM, partial(signal_term_handler, chain, rule))

    # Add matches
    if source_ip is not None:
        rule.src = source_ip
    if destination_ip is not None:
        rule.dst = destination_ip
    if protocol is not None:
        rule.protocol = protocol
    if in_interface is not None:
        rule.in_interface = in_interface
    if out_interface is not None:
        rule.out_interface = out_interface
    if sport is not None:
        match = iptc.Match(rule, protocol)
        match.sport = sport
        rule.add_match(match)
    if dport is not None:
        match = iptc.Match(rule, protocol)
        match.dport = dport
        rule.add_match(match)

    # Add the Target
    rule.create_target('')
    chain.insert_rule(rule)

    collect_agent.send_log(syslog.LOG_DEBUG,
                           "Added iptables rule for monitoring")

    # Save the first stats for computing the rate
    mutex = threading.Lock()
    previous = [int(time.perf_counter() * 1000), rule.get_counters()[1]]
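    # (assumption: rule.get_counters() returns a (packets, bytes) tuple,
    # so previous holds [timestamp in milliseconds, byte count])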

    # Monitoring
    sched = BlockingScheduler()
    sched.add_job(monitor,
                  'interval',
                  seconds=sampling_interval,
                  args=(chain, mutex, previous))
    sched.start()
Example #4
def send_stats(filename):
    print(filename)
    with open(filename) as statistics:
        try:
            # Parse the first line independently
            # so we can update os.environ
            statistic = json.loads(next(statistics))
        except StopIteration:
            return  # File was empty

        # Set up os.environ for register_collect to work properly
        metadata = statistic.pop('_metadata')
        timestamp = metadata['time']
        suffix = metadata.get('suffix')
        for name in ENVIRON_METADATA:
            # This way rstats will be aware and will not locally store the
            # stats again
            if name == 'job_name':
                metadata[name] = 'send_stats-' + str(metadata[name])

            os.environ[name.upper()] = str(metadata[name])

        # Recreate connection with rstats
        success = collect_agent.register_collect(CONF_FILE, new=True)
        if not success:
            message = 'Cannot communicate with rstats'
            collect_agent.send_log(syslog.LOG_ERR, message)
            raise ConnectionError(message)
        collect_agent.send_stat(timestamp, suffix=suffix, **statistic)
        for line in statistics:
            statistic = json.loads(line)
            metadata = statistic.pop('_metadata')
            timestamp = metadata['time']
            suffix = metadata.get('suffix')
            collect_agent.send_stat(timestamp, suffix=suffix, **statistic)
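The statistics file read here is expected to hold one JSON object per line, each embedding its own '_metadata'; an illustrative line (field values are assumptions):

{"_metadata": {"time": 1617181920000, "suffix": "eth0", "job_name": "fping"}, "rtt": 10.4}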
Example #5
def check_timeout(start_time, timeout):
    elapsed = perf_counter() - start_time
    if elapsed >= timeout:
        message = ('Timeout: the agent is taking too long to synchronize '
                   'or the offset is too demanding. Try increasing the '
                   'timeout or the offset for this job.')
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
Example #6
def run_command(cmd):
    p = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    if p.returncode:
        message = "Error when executing command '{}': '{}'".format(
            ' '.join(cmd), p.stderr.decode())
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
    return p.returncode, p.stdout.decode()
Example #7
def run_process(cmd):
    try:
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    except Exception as ex:
        message = 'Error running {} : {}'.format(cmd, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    return p
Example #8
def run_command(cmd, mode='run'):
    try:
        if mode == 'Popen':
            p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        else:
            p = subprocess.run(cmd, stderr=subprocess.PIPE)
    except Exception as ex:
        message = 'Error running {} : {}'.format(cmd, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    return p
Example #9
def main(log_buffer_size, signal_port=9000):
    if log_buffer_size:
        cmd = ['ITGRecv', '-Sp', str(signal_port), '-q', str(log_buffer_size)]
    else:
        cmd = ['ITGRecv', '-Sp', str(signal_port)]

    try:
        subprocess.run(cmd, stderr=subprocess.PIPE)
    except Exception as ex:
        message = 'Error running {} : {}'.format(cmd, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
Example #10
def main(operation, iface, address_mask=None):
    command = ['ip', 'address', operation]
    if address_mask is not None:
        command.append(str(address_mask))
    command.extend(['dev', str(iface)])

    try:
        p = subprocess.run(command, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError as ex:
        message = 'ERROR: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
    if p.returncode:
        error = p.stderr.decode()
        if any(err in error for err in {'File exists', 'No such process'}):
            message = 'WARNING: {} exited with non-zero return value ({}): {}'.format(
                command, p.returncode, error)
            collect_agent.send_log(syslog.LOG_WARNING, message)
            sys.exit(0)
        else:
            message = 'ERROR: {} exited with non-zero return value ({})'.format(
                command, p.returncode)
            collect_agent.send_log(syslog.LOG_ERR, message)
            sys.exit(message)
    else:
        collect_agent.send_log(
            syslog.LOG_DEBUG,
            '{} address {} to iface {}'.format(operation, address_mask, iface))
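For illustration (argument values are hypothetical), the following call builds and runs the command "ip address add 192.168.1.5/24 dev eth0":

main('add', 'eth0', '192.168.1.5/24')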
Example #11
def run_command(command):
    try:
        p = subprocess.run(command,
                           stderr=subprocess.PIPE,
                           stdout=subprocess.PIPE)
    except subprocess.CalledProcessError as ex:
        message = 'ERROR: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    if p.returncode:
        error = p.stderr.decode()
        if 'No such process' in error:
            message = 'WARNING: {} exited with non-zero return value ({}): {}'.format(
                command, p.returncode, error)
            collect_agent.send_log(syslog.LOG_WARNING, message)
            sys.exit(0)
        else:
            message = 'ERROR: {} exited with non-zero return value ({}): {}'.format(
                command, p.returncode, error)
            collect_agent.send_log(syslog.LOG_ERR, message)
            sys.exit(message)
    else:
        collect_agent.send_log(syslog.LOG_DEBUG,
                               'Applied successfully : ' + ' '.join(command))

    return p.stdout.decode()
Example #12
def run_command(rule):
    """ Run a command, return error """
    list_rule = rule.split()
    cmd = ["iptables"] + list_rule
    try:
        p = subprocess.run(cmd, stderr=subprocess.PIPE)
    except Exception as ex:
        # subprocess.run without check=True never raises CalledProcessError,
        # and p would be unbound below if an exception slipped through
        message = "Error when executing command {}: {}".format(cmd, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
    if p.returncode:
        message = 'WARNING: {} exited with non-zero return value ({}): {}'.format(
            cmd, p.returncode, p.stderr.decode())
        collect_agent.send_log(syslog.LOG_WARNING, message)
        sys.exit(0)
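A usage sketch (the rule string is hypothetical): the helper splits the rule and prepends the iptables binary, so the call below executes "iptables -A INPUT -p tcp --dport 80 -j DROP":

run_command('-A INPUT -p tcp --dport 80 -j DROP')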
Example #13
def send_logs(filename, send_log):
    with open(os.path.join(LOGS_DIR, filename)) as log:
        for line in log:
            message = (
                    '<{line[pri]}>{line[timestamp]} '
                    '{line[hostname]} {line[programname]}'
                    '[{line[procid]}]: {line[msg]}'
                    .format(line=json.loads(line))
            )
            try:
                send_log(message.encode())
            except socket.error as error:
                collect_agent.send_log(
                        syslog.LOG_NOTICE,
                        'Error code: {}, Message {}'.format(*error))
                raise
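Each log line is expected to be a JSON object providing the fields used by the format string; an illustrative line (values are assumptions) such as

{"pri": "13", "timestamp": "2021-04-01T12:00:00+00:00", "hostname": "agent1", "programname": "fping", "procid": "1234", "msg": "started"}

is rendered as: <13>2021-04-01T12:00:00+00:00 agent1 fping[1234]: started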
Example #14
def main(synchro_offset, timeout, retries=None, sleep_time=None):
    start_time = perf_counter()
    while abs(get_ntp_offset(retries, sleep_time)) > synchro_offset:
        check_timeout(start_time, timeout)

        try:
            subprocess.run(['systemctl', 'stop', 'ntp.service'], check=True)
            subprocess.run(['ntpd', '-gq'], check=True)
        except subprocess.CalledProcessError as e:
            message = 'Error when interacting with the NTP daemon: {}'.format(e)
            collect_agent.send_log(syslog.LOG_ERR, message)
            sys.exit(message)
        else:
            check_timeout(start_time, timeout)
        finally:
            subprocess.run(['systemctl', 'start', 'ntp.service'])
Example #15
def stop(signalNumber, frame):
    """
    Stop apache2
    Args: 
    Returns:
       NoneType
    """
    cmd = ["systemctl", "stop", "apache2"]
    global STOP_JOB
    try:
        p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        STOP_JOB = True
    except Exception as ex:
        message = "Error when stopping apache2: {}".format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
Example #16
def client(metrics_interval,
           port,
           num_flows,
           server_ip,
           window_size,
           tos,
           time_duration,
           transmitted_size,
           protocol,
           reverse,
           bandwidth=None,
           cong_control=None,
           mss=None,
           udp_size=None):

    cmd = ['stdbuf', '-oL', 'iperf3', '-c', server_ip, '-f', 'k']
    cmd.extend(_command_build_helper('-i', metrics_interval))
    cmd.extend(_command_build_helper('-w', window_size))
    cmd.extend(_command_build_helper('-p', port))
    if reverse:
        cmd.append('-R')
    if protocol == "udp":
        cmd.append('-u')
        cmd.extend(_command_build_helper('-b', bandwidth))
        cmd.extend(_command_build_helper('--length', udp_size))
    else:
        cmd.extend(_command_build_helper('-C', cong_control))
        cmd.extend(_command_build_helper('-M', mss))

    cmd.extend(_command_build_helper('-t', time_duration))
    if time_duration is None:
        if transmitted_size is not None and _parse_to_bytes(
                transmitted_size) < 1024 * 1024:
            message = 'Error : the number of bytes to transmit is too low.'
            collect_agent.send_log(syslog.LOG_ERR, message)
            sys.exit(message)

        cmd.extend(_command_build_helper('-n', transmitted_size))

    cmd.extend(_command_build_helper('-P', num_flows))
    cmd.extend(_command_build_helper('-S', tos))

    if reverse:
        receiver(cmd)
    else:
        sender(cmd)
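For illustration (argument values are hypothetical, and _command_build_helper is assumed to emit a flag and its value only when the value is not None), a TCP run such as client(1, 5201, 2, '192.168.1.1', None, None, 30, None, 'tcp', False) builds roughly:

stdbuf -oL iperf3 -c 192.168.1.1 -f k -i 1 -p 5201 -t 30 -P 2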
Example #17
def main(destination_ip, count, interval, interface, packetsize, ttl, n_mean):
    cmd = ['fping', destination_ip, '-e', '-D']
    if count == 0:
        cmd += ['-l']
    else:
        cmd += ['-c', str(count)]
    cmd.extend(command_line_flag_for_argument(interval, '-p'))
    cmd.extend(command_line_flag_for_argument(interface, '-I'))
    cmd.extend(command_line_flag_for_argument(packetsize, '-b'))
    cmd.extend(command_line_flag_for_argument(ttl, '-t'))

    pattern = re.compile(
        r'\[(\d+\.\d+)\] {} : \[\d+\], \d+ bytes, (\d+\.?\d*) '.format(
            destination_ip))
    measurements = []

    # launch command
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    while True:
        # read output
        output = p.stdout.readline().decode().rstrip()
        if not output:
            # this should be the blank line before summary
            if p.poll() is not None:
                # Process ended gracefully
                break
            continue

        match = re.match(pattern, output)
        if match is None:
            message = 'Unrecognised fping output: {}'.format(output)
            collect_agent.send_log(syslog.LOG_WARNING, message)
            continue

        try:
            timestamp, rtt_data = map(float, match.groups())
        except ValueError as exception:
            message = 'ERROR on line \'{}\': {}'.format(output, exception)
            collect_agent.send_log(syslog.LOG_ERR, message)
        else:
            measurements.append(rtt_data)
            if len(measurements) == n_mean:
                collect_agent.send_stat(int(timestamp * 1000),
                                        rtt=mean(measurements))
                measurements.clear()
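A quick sanity check of the parsing pattern against a representative fping line (the exact output format depends on the fping version, so this line is an assumption):

import re
pattern = re.compile(r'\[(\d+\.\d+)\] {} : \[\d+\], \d+ bytes, (\d+\.?\d*) '.format('8.8.8.8'))
line = '[1617181920.123456] 8.8.8.8 : [0], 84 bytes, 10.4 ms (10.4 avg, 0% loss)'
print(pattern.match(line).groups())  # ('1617181920.123456', '10.4')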
Example #18
def restore_route(old_route, destination, operation, signal, frame):
    if operation == Operations.ADD.value:
        # Delete added route
        cmd = ['ip', 'route', 'del', str(destination)]
    elif operation == Operations.DELETE.value:
        # Add deleted route
        cmd = ['ip', 'route', 'add'] + old_route
    else:
        # Restore previous route
        if old_route:
            cmd = ['ip', 'route', operation] + old_route
        else:
            cmd = ['ip', 'route', 'del', str(destination)]

    run_command(cmd)
    message = 'Stopped job ip_route. Previous route has been restored.'
    collect_agent.send_log(syslog.LOG_DEBUG, message)
    sys.exit(message)
Example #19
def multiplier(unit, base):
    if unit == base:
        return 1
    if unit.startswith('GBytes'):
        return 1024 * 1024 * 1024
    if unit.startswith('MBytes'):
        return 1024 * 1024
    if unit.startswith('KBytes'):
        return 1024
    if unit.startswith('m'):
        return 0.001
    if unit.startswith('Gbits'):
        return 1000 * 1000 * 1000
    if unit.startswith('Mbits'):
        return 1000 * 1000
    if unit.startswith('Kbits'):
        return 1000
    collect_agent.send_log(syslog.LOG_ERR,
                           'Units of iperf metrics are not available/correct')
    return 1
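Usage sketch (the unit strings follow iperf's textual output; the exact base labels are assumptions):

multiplier('Mbits/sec', 'bits/sec')  # -> 1000 * 1000
multiplier('KBytes', 'Bytes')        # -> 1024
multiplier('bits/sec', 'bits/sec')   # -> 1 (already in the base unit)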
Example #20
def main(nb_runs, max_threads, stop_compression, proxy_address, proxy_port,
         urls):
    # Set signal handler
    signal_handler_partial = partial(kill_all, os.getpid())
    signal.signal(signal.SIGTERM, signal_handler_partial)
    signal.signal(signal.SIGINT, signal_handler_partial)
    # Init local variables
    qos_metrics_lists = dict()
    qos_metrics = dict()
    # Load config from config.yaml
    config_filepath = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                   'config.yml')
    with open(config_filepath) as config_file:
        config = yaml.safe_load(config_file)
    if not urls:
        urls = config['web_pages_to_fetch']
    for metric in config['qos_metrics']:
        qos_metrics[metric] = config['qos_metrics'][metric]['js']
    # Compute qos metrics for each url 'nb_runs' times
    thread_list = []
    try:
        for i in range(1, nb_runs + 1, 1):
            for url in urls:
                # condition of "max_threads + 1" because seleniumwire uses 1 additional inner thread to monitor requests
                while threading.active_count() > max_threads + 1:
                    time.sleep(1)
                t = threading.Thread(target=launch_thread,
                                     args=(collect_agent, url, config,
                                           qos_metrics, stop_compression,
                                           proxy_address, proxy_port))
                thread_list.append(t)
                t.start()
                time.sleep(2)
    except Exception as ex:
        message = 'An unexpected error occurred: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        exit(message)
    finally:
        for t in thread_list:
            t.join()
        kill_children(os.getpid())
Example #21
def main(src_ip, dst_ip, src_port, dst_port, proto, interface, capture_file,
         duration):
    """Capture packets on a live network interface. Only consider packets matching the specified fields."""
    capture_filter = build_capture_filter(src_ip, dst_ip, src_port, dst_port,
                                          proto)
    copy = False
    if capture_file == "":
        capture_file = "/tmp/tcpdump_capture.pcap"
        copy = True
    signal_handler_partial = partial(save_pcap, capture_file, copy,
                                     os.getpid())
    original_sigint_handler = signal.getsignal(signal.SIGINT)
    original_sigterm_handler = signal.getsignal(signal.SIGTERM)
    signal.signal(signal.SIGTERM, signal_handler_partial)
    signal.signal(signal.SIGINT, signal_handler_partial)
    try:
        parent = pathlib.Path(capture_file).parent
        pathlib.Path(parent).mkdir(parents=True, exist_ok=True)
        subprocess.run(["rm", capture_file])
        cmd = [
            'tcpdump', '-i', interface, capture_filter, '-w', capture_file,
            '-Z', 'root'
        ]
        if duration:
            cmd += ['-G', str(duration), '-W', str(1)]
        p = subprocess.run(cmd, stderr=subprocess.PIPE)
        if p.returncode != 0:
            message = 'ERROR when launching tcpdump: {}'.format(p.stderr.decode())
            collect_agent.send_log(syslog.LOG_ERR, message)
            sys.exit(message)

    except Exception as ex:
        message = 'ERROR when capturing: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
    collect_agent.store_files(int(time.time() * 1000),
                              pcap_file=capture_file,
                              copy=copy)
    signal.signal(signal.SIGTERM, original_sigterm_handler)
    signal.signal(signal.SIGINT, original_sigint_handler)
Example #22
def use_configuration(filepath):
    success = collect_agent.register_collect(filepath)
    if not success:
        message = 'ERROR connecting to collect-agent'
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
    collect_agent.send_log(syslog.LOG_DEBUG, 'Starting job ' + os.environ.get('JOB_NAME', '!'))
    try:
        yield
    except Exception:
        message = traceback.format_exc()
        collect_agent.send_log(syslog.LOG_CRIT, message)
        raise
    except SystemExit as e:
        if e.code != 0:
            collect_agent.send_log(syslog.LOG_CRIT, 'Abrupt program termination: ' + str(e.code))
        raise
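In the original module this generator is presumably wrapped with contextlib.contextmanager; a usage sketch (the configuration path is hypothetical):

with use_configuration('/opt/openbach/agent/jobs/my_job/my_job_rstats_filter.conf'):
    main()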
Example #23
def build_socket_sender():
    try:
        collector = get_collector_infos()
    except yaml.YAMLError:
        collect_agent.send_log(
                syslog.LOG_NOTICE,
                'Collector configuration file is malformed')
        raise
    except FileNotFoundError:
        collect_agent.send_log(
                syslog.LOG_NOTICE,
                'Collector configuration file not found')
        raise

    try:
        address = collector['address']
        port = collector['logs']['port']
        sock_type = {
            'tcp': socket.SOCK_STREAM,
            'udp': socket.SOCK_DGRAM,
        }[collector['logstash']['mode']]
    except KeyError:
        collect_agent.send_log(
                syslog.LOG_NOTICE,
                'Collector configuration file is malformed')
        raise

    logstash = (address, int(port))
    try:
        sock = socket.socket(socket.AF_INET, sock_type)
    except socket.error:
        collect_agent.send_log(syslog.LOG_NOTICE, 'Failed to create socket')
        raise

    if sock_type == socket.SOCK_STREAM:
        sock.connect(logstash)
        sender = sock.send
    else:
        def sender(message):
            sock.sendto(message, logstash)

    return sock, sender
Example #24
def start():
    """
    Start apache2, which will listen for HTTP/1.1 requests on port 8081 and HTTP/2 requests on port 8082
    Args:
    Returns:
        NoneType
    """

    cmd = ["systemctl", "is-active", "apache2"]
    try:
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        line = p.stdout.readline().decode()
        if "inactive" in line:
            cmd = ["systemctl", "start", "apache2"]
            p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        else:
            message = "An Apache instance is already running, stopping this instance"
            collect_agent.send_log(syslog.LOG_ERR, message)
            return

    except Exception as ex:
        message = "Error when starting apache2: {}".format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)
    
    # Set signal handler
    signal.signal(signal.SIGTERM, stop)
    signal.signal(signal.SIGINT, stop)
    # Wait for status to change to active
    cmd = ['systemctl', 'show', '-p', 'SubState', '-p', 'ActiveState', 'apache2']
    global STOP_JOB
    while True:
        time.sleep(0.5)
        status = subprocess.run(cmd, stdout=subprocess.PIPE)
        output = status.stdout.decode()
        if 'SubState=running' not in output:
            if not STOP_JOB:
                message = "Apache2 stopped abnormally : {}".format(output)
                collect_agent.send_log(syslog.LOG_ERR, message)
                sys.exit(message)
            else:
                return
Example #25
def one_file(capture_file, src_ip, dst_ip, src_port, dst_port, proto,
             metrics_interval):
    """Analyze packets from pcap file located at capture_file and comptute statistics.
    Only consider packets matching the specified fields.
    """
    display_filter = build_display_filter(src_ip, dst_ip, src_port, dst_port,
                                          proto)
    To = now()
    try:
        with closing(
                pyshark.FileCapture(
                    capture_file, display_filter=display_filter)) as cap_file:
            flow_id_funct = lambda pkt: (pkt.ip.src, pkt[
                pkt.transport_layer].srcport, pkt.ip.dst, pkt[
                    pkt.transport_layer].dstport, pkt.transport_layer)
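            # A flow is identified by its 5-tuple:
            # (source IP, source port, destination IP, destination port, transport protocol)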
            packets = [
                packet for packet in cap_file if 'IP' in str(packet.layers)
                and packet.transport_layer is not None
            ]
            key_funct = lambda pkt: pkt.sniff_timestamp
            if (packets):
                grouped_packets = sort_and_group(packets, key=flow_id_funct)
                flow_id_to_flow = dict((flow_id, sorted(flow, key=key_funct))
                                       for flow_id, flow in grouped_packets)
                all_flows = list()
                to = float(packets[0].sniff_timestamp) * 1000
                samples_count = 1

                while to < float(packets[-1].sniff_timestamp) * 1000:
                    time = to + metrics_interval
                    ids_of_new_flows_at_time = [
                        x[0] for x in list(
                            filter(
                                lambda item:
                                (item[0] not in all_flows and float(item[1][
                                    0].sniff_timestamp) * 1000 < time),
                                flow_id_to_flow.items()))
                    ]
                    all_flows.extend(ids_of_new_flows_at_time)

                    flows_count = 0
                    total_flows_count = 0
                    flow_number = 1
                    total_flow_duration = 0
                    for flow_id in all_flows:
                        flow = flow_id_to_flow[flow_id]
                        stat_time = To + samples_count * metrics_interval
                        # If the flow still exists
                        if (float(flow[-1].sniff_timestamp) * 1000 > to):
                            compute_and_send_statistics(
                                flow, to, metrics_interval,
                                suffix(flow_number), stat_time)
                            flow_duration = 1000 * (
                                float(flow[-1].sniff_timestamp) -
                                float(flow[0].sniff_timestamp))
                            flows_count += 1
                        flow_number += 1
                        total_flow_duration += 1000 * (
                            float(flow[-1].sniff_timestamp) -
                            float(flow[0].sniff_timestamp))
                        total_flows_count += 1

                    statistics = {'flows_count': flows_count}
                    # Check if it is the last sample
                    if total_flows_count > 0 and float(
                            packets[-1].sniff_timestamp) * 1000 <= time:
                        statistics.update({
                            'avg_flow_duration':
                            int(total_flow_duration / total_flows_count)
                        })
                        statistics.update({
                            'total_packets':
                            len(packets),
                            'total_bytes':
                            packets_length(packets)
                        })
                    collect_agent.send_stat(stat_time, **statistics)
                    samples_count += 1
                    to = time

    except Exception as ex:
        message = 'ERROR when analyzing: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    sys.exit(0)  # Explicitly exit properly. This is required by pyshark module
Example #26
def gilbert_elliot(capture_file, second_capture_file, src_ip, dst_ip, src_port,
                   dst_port, proto):
    display_filter = build_display_filter(src_ip, dst_ip, src_port, dst_port,
                                          proto)

    try:
        with closing(
                pyshark.FileCapture(
                    capture_file,
                    display_filter=display_filter)) as cap_file_sent:
            cap_file_sent_iter = iter(cap_file_sent)
            goods = []
            bads = []
            with closing(
                    pyshark.FileCapture(
                        second_capture_file,
                        display_filter=display_filter)) as cap_file_received:
                # Initialize the counters before the try block so they are
                # defined even if the sent capture is empty
                total_good = 0
                total_bad = 0
                try:
                    current_packet_sent = get_next_packet(cap_file_sent_iter)
                    for packet in cap_file_received:
                        if not ('IP' in str(packet.layers)
                                and packet.transport_layer is not None):
                            continue
                        if packet.ip.id == current_packet_sent:
                            total_good += 1
                            if total_bad:
                                bads.append(total_bad)
                                total_bad = 0
                        while packet.ip.id != current_packet_sent:
                            if total_good:
                                goods.append(total_good)
                                total_good = 0
                            total_bad += 1
                            current_packet_sent = get_next_packet(
                                cap_file_sent_iter)
                        current_packet_sent = get_next_packet(
                            cap_file_sent_iter)
                except StopIteration:
                    pass
                if total_good:
                    goods.append(total_good)
                if total_bad:
                    bads.append(total_bad)

        statistics = {
            'gilbert_elliot_sent': sum(goods) + sum(bads),
            'gilbert_elliot_received': sum(goods)
        }
        if goods or bads:
            statistics['gilbert_elliot_lost_rate'] = sum(bads) / (sum(goods) +
                                                                  sum(bads))

        if goods:
            g = sum(goods) / len(
                goods)  # average number of steps when we stay in good state
            statistics['gilbert_elliot_p'] = 1 / g
        else:
            collect_agent.send_log(
                syslog.LOG_WARNING,
                "Cannot compute p parameter. Maybe the capture files are too short."
            )

        if bads:
            b = sum(bads) / len(
                bads)  # average number of steps when we stay in bad state
            statistics['gilbert_elliot_r'] = 1 / b
        else:
            collect_agent.send_log(
                syslog.LOG_WARNING,
                "Cannot compute r parameter. Maybe the capture files are too short."
            )

        collect_agent.send_stat(now(), **statistics)

    except Exception as ex:
        message = 'ERROR when analyzing: {}'.format(ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    sys.exit(0)  # Explicitly exit properly. This is required by pyshark module
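Worked example (illustrative numbers): with goods = [3, 5] and bads = [2], the mean good-state run is g = 8/2 = 4 and the mean bad-state run is b = 2/1 = 2, giving the Gilbert-Elliot parameters p = 1/g = 0.25 (probability of leaving the good state) and r = 1/b = 0.5 (probability of leaving the bad state), and a loss rate of 2/10 = 0.2.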
Example #27
def main(rule):
    collect_agent.send_log(
        syslog.LOG_DEBUG,
        'Starting iptables job with rule: iptables {}'.format(rule))
    run_command(rule)
Example #28
def apply_conf(interfaces,
               mode,
               delay=None,
               jitter=None,
               delay_distribution=None,
               bandwidth=None,
               loss_model=None,
               loss_model_params=None,
               buffer_size=None):
    collect_agent.send_log(
        syslog.LOG_DEBUG,
        'Starting tc_configure_link job (apply {})'.format(mode))

    if mode == 'egress' or mode == 'all':
        for interface in interfaces.split(','):
            handle = ['root', 'handle', '1:']
            # Delete existing qdisc
            # Check if a root qdisc exists, if so remove it on interface
            cmd = ['tc', 'qdisc', 'show', 'dev', interface]
            _, output = run_command(cmd)
            output = re.findall('qdisc netem', output)
            if output:
                delete_qdisc(interface, 'root')
            # Add qlen
            run_command(
                ['ip', 'link', 'set', interface, 'qlen',
                 str(buffer_size)])
            # Add bandwidth if relevant
            if bandwidth:
                if not re.findall(r'^[0-9]+[KM]$', bandwidth):
                    collect_agent.send_log(
                        syslog.LOG_ERR,
                        "Invalid format for bandwidth: expecting "
                        "'{}', found '{}'".format('{VALUE}{M|K}', bandwidth))
                    sys.exit(1)
                add_qdisc_bandwidth(interface, bandwidth)
                handle = ['parent', '1:11', 'handle', '10:']
            # Add delay
            add_qdisc_delay(interface, delay, jitter, delay_distribution,
                            loss_model, loss_model_params, handle, buffer_size)
    if mode == 'ingress' or mode == 'all':
        # Ingress configuration
        clear_ingress(interfaces)
        run_command(['modprobe', '-r', 'ifb'])
        run_command([
            'modprobe', 'ifb',
            'numifbs={}'.format(str(len(interfaces.split(','))))
        ])
        for index, interface in enumerate(interfaces.split(',')):
            handle = ['root', 'handle', '1:']
            # Clear ingress configuration and add a new one
            add_qdisc_ingress(interface, IFB.format(str(index)), buffer_size)
            # Add bandwidth if relevant
            if bandwidth:
                if not re.findall(r'^[0-9]+[KM]$', bandwidth):
                    collect_agent.send_log(
                        syslog.LOG_ERR,
                        "Invalid format for bandwidth: expecting "
                        "'{}', found '{}'".format('{VALUE}{M|K}', bandwidth))
                    sys.exit(1)
                add_qdisc_bandwidth(IFB.format(str(index)), bandwidth)
                handle = ['parent', '1:11', 'handle', '10:']
            # Add delay
            add_qdisc_delay(IFB.format(str(index)), delay, jitter,
                            delay_distribution, loss_model, loss_model_params,
                            handle, buffer_size)
Example #29
def main(target_address,
         log_address,
         dest_path,
         granularity,
         traffic_type='UDP',
         port=8999,
         signal_port=9000,
         packet_size=512,
         packet_rate=1000,
         bandwidth='0',
         duration=10,
         data_size='0',
         meter='owdm'):

    # Clean previous logs and set up the D-ITG LogServer
    if os.path.isfile('/tmp/ITGRecv.log'):
        os.remove('/tmp/ITGRecv.log')
    if os.path.isfile('/tmp/ITGSend.log'):
        os.remove('/tmp/ITGSend.log')

    proc_log = run_command('ITGLog', 'Popen')

    # Get the reference time for changing the stats generated by D-ITG
    time_ref = int(round(time.time() * 1000))

    # Set packet_rate depending on the bandwidth parameter
    if bandwidth != '0':
        if 'K' in bandwidth:
            packet_rate = 1000 * int(bandwidth.replace('K', ''))
        elif 'M' in bandwidth:
            packet_rate = 1000 * 1000 * int(bandwidth.replace('M', ''))
        elif 'G' in bandwidth:
            packet_rate = 1000 * 1000 * 1000 * int(bandwidth.replace('G', ''))
        else:
            packet_rate = int(bandwidth)
        packet_rate = packet_rate / (8 * packet_size)
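        # Worked example (illustrative): bandwidth '10M' with packet_size 512
        # gives packet_rate = 10000000 / (8 * 512), i.e. about 2441 packets per second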

    # Build and launch the D-ITGSend command
    cmd_send = [
        'ITGSend', '-a', target_address, '-L', log_address, '-X', log_address,
        '-T', traffic_type, '-c',
        str(packet_size), '-C',
        str(packet_rate), '-t',
        str(duration * 1000), '-m', meter, '-Sdp',
        str(signal_port), '-Ssp',
        str(signal_port), '-rp',
        str(port)
    ]

    # Set the number of KBytes to generate
    if data_size != '0':
        if 'M' in data_size:
            data_size_f = 1024 * float(data_size.replace('M', ''))
        elif 'G' in data_size:
            data_size_f = 1024 * 1024 * float(data_size.replace('G', ''))
        else:
            data_size_f = float(data_size.replace('K', ''))
        cmd_send.extend(['-k', str(data_size_f)])

    run_command(cmd_send)

    # Terminate the process of the D-ITG LogServer
    proc_log.terminate()

    # Clear potential old stats
    if os.path.isfile(os.path.join(dest_path, 'RCV')):
        os.remove(os.path.join(dest_path, 'RCV'))
    if os.path.isfile(os.path.join(dest_path, 'SND')):
        os.remove(os.path.join(dest_path, 'SND'))

    # Get the stats from the logs
    cmd_rcv_cb = [
        'ITGDec', '/tmp/ITGRecv.log', '-c',
        str(granularity),
        os.path.join(dest_path, 'RCV')
    ]
    run_command(cmd_rcv_cb)
    cmd_snd_cb = [
        'ITGDec', '/tmp/ITGSend.log', '-c',
        str(granularity),
        os.path.join(dest_path, 'SND')
    ]
    run_command(cmd_snd_cb)

    # Send the stats of the receiver to the collector
    path_RCV = os.path.join(dest_path, 'RCV')

    try:
        stats = open(path_RCV, "r")
    except Exception as ex:
        message = 'Error opening file {} : {}'.format(path_RCV, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    owd_r = []
    with stats:
        for line in stats:
            txt = line.strip()
            txt = txt.split(' ')

            # Get the timestamp (in ms)
            timestamp = txt[0].replace('.', '')
            timestamp = int(timestamp[:-3])
            timestamp = timestamp + time_ref

            # Get the bitrate (in bps)
            bitrate = txt[1]
            bitrate = float(bitrate) * 1024
            statistics = {'bitrate_receiver': bitrate}
            collect_agent.send_stat(timestamp, **statistics)

            # Get the delay (in ms)
            delay = txt[2]
            delay = float(delay) * 1000
            owd_r.append(delay)
            statistics = {'owd_receiver': delay}
            collect_agent.send_stat(timestamp, **statistics)

            # Get the jitter (in ms)
            jitter = txt[3]
            jitter = float(jitter) * 1000
            statistics = {'jitter_receiver': jitter}
            collect_agent.send_stat(timestamp, **statistics)

            # Get the packetloss
            pck_loss = txt[4]
            pck_loss = float(pck_loss)
            statistics = {'packetloss_receiver': pck_loss}
            collect_agent.send_stat(timestamp, **statistics)

            # Calculate packet_loss_rate
            pck_loss_per_sec = pck_loss * 1000 / granularity
            plr = (pck_loss_per_sec / packet_rate) * 100
            statistics = {'packetloss_rate_receiver': plr}
            collect_agent.send_stat(timestamp, **statistics)

    # Send the stats of the sender to the collector
    path_SND = os.path.join(dest_path, 'SND')

    try:
        stats = open(path_SND, "r")
    except Exception as ex:
        message = 'Error opening file {} : {}'.format(path_SND, ex)
        collect_agent.send_log(syslog.LOG_ERR, message)
        sys.exit(message)

    owd_s = []
    timetab = []

    with stats:
        for line in stats:
            txt = line.strip()
            txt = txt.split(' ')

            # Get the timestamp (in ms)
            timestamp = txt[0].replace('.', '')
            timestamp = int(timestamp[:-3])
            timestamp = timestamp + time_ref

            # Get the bitrate (in bps)
            bitrate = txt[1]
            bitrate = float(bitrate) * 1024
            statistics = {'bitrate_sender': bitrate}
            collect_agent.send_stat(timestamp, **statistics)

            if meter.upper() == "RTTM":
                # Get the delay (in ms)
                delay = txt[2]
                delay = float(delay) * 1000
                owd_s.append(delay)
                timetab.append(timestamp)
                statistics = {'rtt_sender': delay}
                collect_agent.send_stat(timestamp, **statistics)

                # Get the jitter (in ms)
                jitter = txt[3]
                jitter = float(jitter) * 1000
                statistics = {'jitter_sender': jitter}
                collect_agent.send_stat(timestamp, **statistics)

                # Get the packetloss
                pck_loss = txt[4]
                pck_loss = float(pck_loss)
                statistics = {'packetloss_sender': pck_loss}
                collect_agent.send_stat(timestamp, **statistics)

                # Calculate packet_loss_rate
                pck_loss_per_sec = pck_loss * 1000 / granularity
                plr = (pck_loss_per_sec / packet_rate) * 100
                statistics = {'packetloss_rate_sender': plr}
                collect_agent.send_stat(timestamp, **statistics)

    if meter.upper() == 'RTTM':
        for time_tab, owdr, owds in zip(timetab, owd_r, owd_s):
            owd_return = owds - owdr
            statistics = {'owd_return': owd_return}
            collect_agent.send_stat(time_tab, **statistics)
Example #30
def main(job_instance_ids, statistics_names, aggregations_periods, bins_sizes,
         offset, maximum, stats_with_suffixes, axis_labels, figures_titles,
         legends_titles, use_legend, add_global, pickle):
    file_ext = 'pickle' if pickle else 'png'
    statistics = Statistics.from_default_collector()
    statistics.origin = 0
    with tempfile.TemporaryDirectory(
            prefix='openbach-temporal-binning-histogram-') as root:
        for job, fields, aggregations, bin_sizes, labels, titles, legend_titles in itertools.zip_longest(
                job_instance_ids,
                statistics_names,
                aggregations_periods,
                bins_sizes,
                axis_labels,
                figures_titles,
                legends_titles,
                fillvalue=[]):
            data_collection = statistics.fetch(
                job_instances=job,
                suffix=None if stats_with_suffixes else '',
                fields=fields)

            # Drop multi-index columns to easily concatenate dataframes from their statistic names
            df = pd.concat([
                plot.dataframe.set_axis(
                    plot.dataframe.columns.get_level_values('statistic'),
                    axis=1,
                    inplace=False) for plot in data_collection
            ])
            # Recreate a multi-indexed columns so the plot can function properly
            df.columns = pd.MultiIndex.from_tuples(
                [('', '', '', '', stat) for stat in df.columns],
                names=['job', 'scenario', 'agent', 'suffix', 'statistic'])
            plot = _Plot(df)

            if not fields:
                fields = list(df.columns.get_level_values('statistic'))

            metadata = itertools.zip_longest(fields, labels, bin_sizes,
                                             aggregations, legend_titles,
                                             titles)
            for field, label, bin_size, aggregation, legend, title in metadata:
                if field not in df.columns.get_level_values('statistic'):
                    message = 'job instances {} did not produce the statistic {}'.format(
                        job, field)
                    collect_agent.send_log(syslog.LOG_WARNING, message)
                    print(message)
                    continue

                if label is None:
                    collect_agent.send_log(
                        syslog.LOG_WARNING,
                        'no y-axis label provided for the {} statistic of job '
                        'instances {}: using the empty string instead'.format(
                            field, job))
                    label = ''

                if aggregation is None:
                    collect_agent.send_log(
                        syslog.LOG_WARNING,
                        'invalid aggregation value of {} for the {} '
                        'statistic of job instances {}: choose from {}, using '
                        '"hour" instead'.format(aggregation, field, job,
                                                TIME_OPTIONS))
                    aggregation = 'hour'

                if legend is None and use_legend:
                    collect_agent.send_log(
                        syslog.LOG_WARNING,
                        'no legend title provided for the {} statistic of job '
                        'instances {}: using the empty string instead'.format(
                            field, job))
                    legend = ''

                if bin_size is None:
                    collect_agent.send_log(
                        syslog.LOG_WARNING,
                        'no bin size provided for the {} statistic of job '
                        'instances {}: using the default value 100 instead'.
                        format(field, job))
                    bin_size = 100

                figure, axis = plt.subplots()
                axis = plot.plot_temporal_binning_histogram(
                    axis, label, field, None, bin_size, offset, maximum,
                    aggregation, add_global, use_legend, legend)
                if title is not None:
                    axis.set_title(title)
                filepath = os.path.join(
                    root,
                    'temporal_binning_histogram_{}.{}'.format(field, file_ext))
                save(figure, filepath, pickle, False)
                collect_agent.store_files(now(), figure=filepath)