示例#1
0
 def post_data(self, test, variables):
     """Publish the metrics of *test* to the configured HTTP endpoint.

     Works on a deep copy of self.data_format so the template is never
     mutated, resolves the variables in it, then POSTs the data either
     in PushGateway or in Xtesting format. Silently returns when the
     template has no URL, no data for this test, or an unknown format.
     """
     var = copy.deepcopy(self.data_format)
     self.parse_data_format_dict(var, variables)
     # Nothing to send when the template lacks an endpoint or test data.
     if ('URL' not in var) or (test not in var):
         return
     URL = ''.join(var['URL'].values())
     HEADERS = {
         'X-Requested-With': 'Python requests',
         'Content-type': 'application/rapid'
     }
     data_format = var.get('Format')
     if data_format == 'PushGateway':
         # PushGateway wants newline-terminated "key value" lines.
         data = "\n".join("{} {}".format(k, v)
                          for k, v in var[test].items()) + "\n"
         response = requests.post(url=URL, data=data, headers=HEADERS)
     elif data_format == 'Xtesting':
         data = var[test]
         response = requests.post(url=URL, json=data)
     else:
         return
     if (response.status_code != 202) and (response.status_code != 200):
         RapidLog.info('Cannot send metrics to {}'.format(URL))
         RapidLog.info(data)
示例#2
0
 def IsDeployed(self, stack_name):
     """Return True (and cache the stack) when *stack_name* already exists.

     Scans the stacks known to the heat client; on a match the stack
     object is stored in self.stack for later use.
     """
     match = next((stack for stack in self.heatclient.stacks.list()
                   if stack.stack_name == stack_name), None)
     if match is None:
         return False
     RapidLog.info('Stack already existing: {}'.format(stack_name))
     self.stack = match
     return True
示例#3
0
 def create_key(self):
     """Create a nova keypair and store its private key on disk.

     The key file is opened with mode 0o600 so only the owner can
     read or write the secret material.
     """
     keypair = self.nova_client.keypairs.create(name=self.key_name)
     # Create the file with restrictive permissions *before* writing,
     # so the private key is never world-readable.
     descriptor = os.open(self.private_key_filename,
                          os.O_WRONLY | os.O_CREAT, 0o600)
     with os.fdopen(descriptor, 'w') as key_file:
         key_file.write(keypair.private_key)
     RapidLog.info('Keypair {} created'.format(self.key_name))
示例#4
0
def main():
    """Main function.

    Builds the test parameters (defaults overridden by the CLI),
    initializes logging, runs the tests and logs the overall result.
    """
    test_params = RapidTestManager.get_defaults()
    # When no cli is used, the process_cli can be replaced by code modifying
    # test_params
    test_params = RapidCli.process_cli(test_params)
    log_file = 'RUN{}.{}.log'.format(
        test_params['environment_file'], test_params['test_file'])
    RapidLog.log_init(log_file, test_params['loglevel'],
                      test_params['screenloglevel'], test_params['version'])
    test_result = RapidTestManager.run_tests(test_params)
    RapidLog.info('Test result is : {}'.format(test_result))
示例#5
0
 def connect_socket(self):
     """Open a socket to a freshly launched PROX instance.

     Retries every 2 seconds until prox_sock() hands back a socket,
     logging each attempt; after 20 failed attempts the failure is
     logged via RapidLog.exception.

     Returns:
         The connected socket object.
     """
     attempts = 1
     RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
             attempt: %d" % (self._ip, attempts))
     sock = self.prox_sock()
     while sock is None:
         attempts += 1
         if attempts > 20:
             RapidLog.exception("Failed to connect to PROX on %s after %d \
                     attempts" % (self._ip, attempts))
         time.sleep(2)
         RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
                 attempt: %d" % (self._ip, attempts))
         sock = self.prox_sock()
     RapidLog.info("Connected to PROX on %s" % self._ip)
     return sock
示例#6
0
 def create_key(self):
     """Create (or re-register) the nova keypair for this deployment.

     When a private key file already exists locally, its matching .pub
     file is uploaded so nova re-creates the same keypair; otherwise
     nova generates a fresh keypair and the private key is written to
     disk with owner-only permissions.
     """
     public_key = None
     if os.path.exists(self.key_name):
         pub_file_name = "{}.pub".format(self.key_name)
         if not os.path.exists(pub_file_name):
             # NOTE(review): critical() is expected to abort here;
             # otherwise the open() below would fail on the missing file.
             RapidLog.critical('Keypair {}.pub does not exist'.format(
                 self.key_name))
         with open(pub_file_name, mode='rb') as pub_file:
             public_key = pub_file.read()
     keypair = self.nova_client.keypairs.create(name=self.key_name,
                                                public_key=public_key)
     # Write the private key only when it was not already present,
     # creating the file readable and writable by the owner alone.
     if not os.path.exists(self.key_name):
         descriptor = os.open(self.key_name, os.O_WRONLY | os.O_CREAT,
                              0o600)
         with os.fdopen(descriptor, 'w') as key_file:
             key_file.write(keypair.private_key)
     RapidLog.info('Keypair {} created'.format(self.key_name))
示例#7
0
 def run(self, **kwargs):
     """Xtesting entry point: run the rapid tests and record the outcome.

     Keyword arguments override the default test parameters. The score
     is stored as a percentage in self.result; any unexpected failure
     is printed and yields a result of 0. Start and stop timestamps are
     recorded in both cases.
     """
     try:
         test_params = RapidTestManager.get_defaults()
         # kwargs take precedence over the built-in defaults.
         test_params.update(kwargs)
         os.makedirs(self.res_dir, exist_ok=True)
         log_file = '{}/RUN{}.{}.log'.format(
             self.res_dir, test_params['environment_file'],
             test_params['test_file'])
         RapidLog.log_init(log_file, test_params['loglevel'],
                           test_params['screenloglevel'],
                           test_params['version'])
         self.start_time = time.time()
         self.result, self.details = RapidTestManager().run_tests(
             test_params)
         # Scale the [0..1] score to a percentage.
         self.result = 100 * self.result
         RapidLog.info('Test result is : {}'.format(self.result))
         self.stop_time = time.time()
     except Exception:  # pylint: disable=broad-except
         print("Unexpected error:", sys.exc_info()[0])
         self.result = 0
         self.stop_time = time.time()
示例#8
0
 def warm_up(self):
     """Send traffic at low rate so ARP resolution completes before tests.

     Running at low speed makes sure the ARP messages can get through:
     under overload a switch could drop them and the test would then not
     give proper results. The warm-up must not run too long either,
     since the ARP entries would expire in the switch; PROX re-sends an
     ARP request every second, so chances are very low they all fail.
     """
     imix = self.test['warmupimix']
     flow_size = self.test['warmupflowsize']
     warmup_speed = self.test['warmupspeed']
     warmup_time = self.test['warmuptime']
     RapidLog.info(("Warming up during {} seconds..., packet size = {},"
         " flows = {}, speed = {}").format(warmup_time, imix, flow_size,
             warmup_speed))
     # Configure the generator and the background machines identically.
     self.gen_machine.set_generator_speed(warmup_speed)
     self.set_background_speed(self.background_machines, warmup_speed)
     self.gen_machine.set_udp_packet_size(imix)
     self.set_background_size(self.background_machines, imix)
     _ = self.gen_machine.set_flows(flow_size)
     self.set_background_flows(self.background_machines, flow_size)
     # Run the warm-up traffic for the configured duration, then stop.
     self.gen_machine.start()
     self.start_background_traffic(self.background_machines)
     time.sleep(warmup_time)
     self.stop_background_traffic(self.background_machines)
     self.gen_machine.stop()
示例#9
0
 def run_tests(self, test_params):
     """Instantiate all machines, start PROX on them, and run every test.

     Parses the configuration, builds one RapidGeneratorMachine per
     section with 'gencores' (at most one monitored generator) and one
     RapidMachine otherwise (at most one monitored SUT), launches PROX
     on all machines in parallel, connects to each, then runs the
     configured tests in order and sums their results.

     Returns:
         Tuple (result, result_details); both are None when the run was
         interrupted by a connection error or keyboard interrupt.

     Raises:
         Exception: when more than one generator or SUT is monitored.
     """
     test_params = RapidConfigParser.parse_config(test_params)
     RapidLog.debug(test_params)
     monitor_gen = monitor_sut = False
     background_machines = []
     sut_machine = gen_machine = None
     configonly = test_params['configonly']
     for machine_params in test_params['machines']:
         # A 'gencores' key marks a traffic-generator machine.
         if 'gencores' in machine_params.keys():
             machine = RapidGeneratorMachine(
                 test_params['key'], test_params['user'],
                 test_params['password'], test_params['vim_type'],
                 test_params['rundir'], test_params['resultsdir'],
                 machine_params, configonly, test_params['ipv6'])
             if machine_params['monitor']:
                 if monitor_gen:
                     RapidLog.exception("Can only monitor 1 generator")
                     raise Exception("Can only monitor 1 generator")
                 else:
                     monitor_gen = True
                     gen_machine = machine
             else:
                 # Unmonitored generators only produce background load.
                 background_machines.append(machine)
         else:
             machine = RapidMachine(test_params['key'], test_params['user'],
                                    test_params['password'],
                                    test_params['vim_type'],
                                    test_params['rundir'],
                                    test_params['resultsdir'],
                                    machine_params, configonly)
             if machine_params['monitor']:
                 if monitor_sut:
                     RapidLog.exception("Can only monitor 1 sut")
                     raise Exception("Can only monitor 1 sut")
                 else:
                     monitor_sut = True
                     if machine_params['prox_socket']:
                         sut_machine = machine
         self.machines.append(machine)
     try:
         # Start PROX on all machines concurrently; one worker each.
         prox_executor = concurrent.futures.ThreadPoolExecutor(
             max_workers=len(self.machines))
         self.future_to_prox = {
             prox_executor.submit(machine.start_prox): machine
             for machine in self.machines
         }
         if configonly:
             # Configuration-only run: wait for the deployments, then quit.
             concurrent.futures.wait(self.future_to_prox,
                                     return_when=ALL_COMPLETED)
             sys.exit()
         # Open the control sockets to every PROX instance in parallel.
         socket_executor = concurrent.futures.ThreadPoolExecutor(
             max_workers=len(self.machines))
         future_to_connect_prox = {
             socket_executor.submit(machine.connect_prox): machine
             for machine in self.machines
         }
         concurrent.futures.wait(future_to_connect_prox,
                                 return_when=ALL_COMPLETED)
         result = 0
         for test_param in test_params['tests']:
             RapidLog.info(test_param['test'])
             # Map the configured test name onto the test class to run.
             if test_param['test'] in [
                     'flowsizetest', 'TST009test', 'fixed_rate',
                     'increment_till_fail'
             ]:
                 test = FlowSizeTest(
                     test_param, test_params['lat_percentile'],
                     test_params['runtime'], test_params['TestName'],
                     test_params['environment_file'], gen_machine,
                     sut_machine, background_machines,
                     test_params['sleep_time'])
             elif test_param['test'] in ['corestatstest']:
                 test = CoreStatsTest(test_param, test_params['runtime'],
                                      test_params['TestName'],
                                      test_params['environment_file'],
                                      self.machines)
             elif test_param['test'] in ['portstatstest']:
                 test = PortStatsTest(test_param, test_params['runtime'],
                                      test_params['TestName'],
                                      test_params['environment_file'],
                                      self.machines)
             elif test_param['test'] in ['impairtest']:
                 test = ImpairTest(
                     test_param, test_params['lat_percentile'],
                     test_params['runtime'], test_params['TestName'],
                     test_params['environment_file'], gen_machine,
                     sut_machine, background_machines)
             elif test_param['test'] in ['irqtest']:
                 test = IrqTest(test_param, test_params['runtime'],
                                test_params['TestName'],
                                test_params['environment_file'],
                                self.machines)
             elif test_param['test'] in ['warmuptest']:
                 test = WarmupTest(test_param, gen_machine)
             else:
                 # NOTE(review): on an invalid name, 'test' keeps its
                 # previous value (or is undefined for the first test)
                 # and test.run() below re-runs the stale test — confirm
                 # whether this fall-through is intended.
                 RapidLog.debug('Test name ({}) is not valid:'.format(
                     test_param['test']))
             single_test_result, result_details = test.run()
             result = result + single_test_result
         for machine in self.machines:
             machine.close_prox()
         concurrent.futures.wait(self.future_to_prox,
                                 return_when=ALL_COMPLETED)
     except (ConnectionError, KeyboardInterrupt) as e:
         # Best-effort teardown on interruption: drop the pending work
         # without waiting. Clearing the executors' private _threads and
         # the module-level _threads_queues prevents the atexit join from
         # blocking on threads that will never finish.
         result = result_details = None
         socket_executor.shutdown(wait=False)
         socket_executor._threads.clear()
         prox_executor.shutdown(wait=False)
         prox_executor._threads.clear()
         concurrent.futures.thread._threads_queues.clear()
         RapidLog.error("Test interrupted: {} {}".format(
             type(e).__name__, e))
     return (result, result_details)
示例#10
0
 def parse_config(test_params):
     """Expand *test_params* with everything parsed from the config files.

     Reads the test file, the environment file and the machine map file
     and returns the same dict, extended with the list of test
     definitions ('tests') and the per-machine parameters ('machines'),
     including resolved dataplane destination ports and gateway IPs.

     Args:
         test_params: dict holding at least 'test_file',
             'environment_file' and 'machine_map_file' paths.

     Returns:
         The enriched test_params dict.

     Raises:
         Exception: when more test machines are required than available.
     """
     testconfig = configparser.RawConfigParser()
     testconfig.read(test_params['test_file'])
     test_params['required_number_of_test_machines'] = int(
         testconfig.get('TestParameters', 'total_number_of_test_machines'))
     test_params['number_of_tests'] = int(
         testconfig.get('TestParameters', 'number_of_tests'))
     test_params['TestName'] = testconfig.get('TestParameters', 'name')
     # Latency percentile is configured as a percentage; default 99%.
     if testconfig.has_option('TestParameters', 'lat_percentile'):
         test_params['lat_percentile'] = old_div(
             float(testconfig.get('TestParameters', 'lat_percentile')),
             100.0)
     else:
         test_params['lat_percentile'] = 0.99
     RapidLog.info('Latency percentile at {:.0f}%'.format(
         test_params['lat_percentile'] * 100))
     if testconfig.has_option('TestParameters', 'ipv6'):
         test_params['ipv6'] = testconfig.getboolean(
             'TestParameters', 'ipv6')
     else:
         test_params['ipv6'] = False
     config = configparser.RawConfigParser()
     config.read(test_params['environment_file'])
     test_params['vim_type'] = config.get('Varia', 'vim')
     test_params['key'] = config.get('ssh', 'key')
     test_params['user'] = config.get('ssh', 'user')
     test_params['total_number_of_machines'] = int(
         config.get('rapid', 'total_number_of_machines'))
     tests = []
     test = {}
     for test_index in range(1, test_params['number_of_tests'] + 1):
         test.clear()
         section = 'test%d' % test_index
         options = testconfig.options(section)
         for option in options:
             # Cast each option to the type the test code expects.
             if option in ['imix', 'imixs', 'flows']:
                 test[option] = ast.literal_eval(
                     testconfig.get(section, option))
             elif option in [
                     'maxframespersecondallingress', 'stepsize', 'flowsize'
             ]:
                 test[option] = int(testconfig.get(section, option))
             elif option in [
                     'startspeed', 'step', 'drop_rate_threshold',
                     'lat_avg_threshold', 'lat_perc_threshold',
                     'lat_max_threshold', 'accuracy', 'maxr', 'maxz',
                     'pass_threshold', 'ramp_step'
             ]:
                 test[option] = float(testconfig.get(section, option))
             else:
                 test[option] = testconfig.get(section, option)
         tests.append(dict(test))
     # Fill in default pass criteria: zero drop, no latency limits.
     for test in tests:
         if test['test'] in ['flowsizetest', 'TST009test']:
             if 'drop_rate_threshold' not in test.keys():
                 test['drop_rate_threshold'] = 0
             latency_thresholds = [
                 'lat_avg_threshold', 'lat_perc_threshold',
                 'lat_max_threshold'
             ]
             for threshold in latency_thresholds:
                 if threshold not in test.keys():
                     test[threshold] = 'inf'
     test_params['tests'] = tests
     if test_params['required_number_of_test_machines'] > test_params[
             'total_number_of_machines']:
         # BUGFIX: the original message interpolated undefined local
         # names (required_number_of_test_machines /
         # total_number_of_machines) causing a NameError on this path;
         # read the counts from test_params instead.
         RapidLog.exception(
             "Not enough VMs for this test: %d needed and only %d available"
             % (test_params['required_number_of_test_machines'],
                test_params['total_number_of_machines']))
         raise Exception(
             "Not enough VMs for this test: %d needed and only %d available"
             % (test_params['required_number_of_test_machines'],
                test_params['total_number_of_machines']))
     machine_map = configparser.RawConfigParser()
     machine_map.read(test_params['machine_map_file'])
     machines = []
     machine = {}
     for test_machine in range(
             1, test_params['required_number_of_test_machines'] + 1):
         machine.clear()
         section = 'TestM%d' % test_machine
         options = testconfig.options(section)
         for option in options:
             if option in ['prox_socket', 'prox_launch_exit', 'monitor']:
                 machine[option] = testconfig.getboolean(section, option)
             elif option in ['cores', 'gencores', 'latcores']:
                 machine[option] = ast.literal_eval(
                     testconfig.get(section, option))
             elif option in ['bucket_size_exp']:
                 machine[option] = int(testconfig.get(section, option))
             else:
                 machine[option] = testconfig.get(section, option)
         # BUGFIX: these defaults were redundantly re-applied inside the
         # option loop; apply them once per machine instead.
         for key in ['prox_socket', 'prox_launch_exit']:
             if key not in machine.keys():
                 machine[key] = True
         if 'monitor' not in machine.keys():
             machine['monitor'] = True
         # Map the logical test machine onto a concrete environment VM.
         index = int(
             machine_map.get('TestM%d' % test_machine, 'machine_index'))
         section = 'M%d' % index
         options = config.options(section)
         for option in options:
             machine[option] = config.get(section, option)
         machines.append(dict(machine))
     # Resolve destination dataplane ports and gateway IPs per machine.
     for machine in machines:
         dp_ports = []
         if 'dest_vm' in machine.keys():
             index = 1
             while True:
                 dp_ip_key = 'dp_ip{}'.format(index)
                 dp_mac_key = 'dp_mac{}'.format(index)
                 if dp_ip_key in machines[int(machine['dest_vm'])-1].keys() and \
                         dp_mac_key in machines[int(machine['dest_vm'])-1].keys():
                     dp_port = {
                         'ip':
                         machines[int(machine['dest_vm']) - 1][dp_ip_key],
                         'mac':
                         machines[int(machine['dest_vm']) - 1][dp_mac_key]
                     }
                     dp_ports.append(dict(dp_port))
                     index += 1
                 else:
                     break
             # BUGFIX: assign after the loop (was mis-indented inside it)
             # so 'dest_ports' is also set, to an empty list, when no
             # dataplane port was found.
             machine['dest_ports'] = list(dp_ports)
         gw_ips = []
         if 'gw_vm' in machine.keys():
             index = 1
             while True:
                 gw_ip_key = 'dp_ip{}'.format(index)
                 if gw_ip_key in machines[int(machine['gw_vm']) - 1].keys():
                     gw_ip = machines[int(machine['gw_vm']) - 1][gw_ip_key]
                     gw_ips.append(gw_ip)
                     index += 1
                 else:
                     break
             # BUGFIX: same mis-indentation fix for the gateway IP list.
             machine['gw_ips'] = list(gw_ips)
     test_params['machines'] = machines
     return (test_params)
 def run(self):
     """Poll and print core statistics for all monitored PROX machines.

     Once per second (per machine) for self.runtime seconds, reads the
     core counters of every machine, prints the RX/TX/non-dataplane/drop
     deltas as an ASCII table row, and optionally pushes the same
     numbers to a PushGateway endpoint.

     Returns:
         True once the measurement duration has elapsed.
     """
 #    fieldnames = ['PROXID','Time','Received','Sent','NonDPReceived','NonDPSent','Delta','NonDPDelta','Dropped']
 #    writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
 #    writer.writeheader()
     RapidLog.info("+------------------------------------------------------------------------------------------------------------------+")
     RapidLog.info("| Measuring core statistics on 1 or more PROX instances                                                            |")
     RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
     RapidLog.info("| PROX ID   |    Time   |    RX      |     TX     | non DP RX  | non DP TX  |   TX - RX  | nonDP TX-RX|  DROP TOT  |")
     RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
     duration = self.runtime
     # Per-machine counter snapshots: old_* is the previous sample,
     # new_* the current one; deltas between the two are reported.
     tot_drop = []
     old_rx = []; old_non_dp_rx = []; old_tx = []; old_non_dp_tx = []; old_drop = []; old_tx_fail = []; old_tsc = []
     new_rx = []; new_non_dp_rx = []; new_tx = []; new_non_dp_tx = []; new_drop = []; new_tx_fail = []; new_tsc = []
     machines_to_go = len (self.machines)
     # Take the initial snapshot for every machine.
     for machine in self.machines:
         machine.reset_stats()
         tot_drop.append(0)
         old_rx.append(0); old_non_dp_rx.append(0); old_tx.append(0); old_non_dp_tx.append(0); old_drop.append(0); old_tx_fail.append(0); old_tsc.append(0)
         old_rx[-1], old_non_dp_rx[-1], old_tx[-1], old_non_dp_tx[-1], old_drop[-1], old_tx_fail[-1], old_tsc[-1], tsc_hz = machine.core_stats()
         new_rx.append(0); new_non_dp_rx.append(0); new_tx.append(0); new_non_dp_tx.append(0); new_drop.append(0); new_tx_fail.append(0); new_tsc.append(0)
     while (duration > 0):
         time.sleep(0.5)
         # Get statistics after some execution time
         for i, machine in enumerate(self.machines, start=0):
             new_rx[i], new_non_dp_rx[i], new_tx[i], new_non_dp_tx[i], new_drop[i], new_tx_fail[i], new_tsc[i], tsc_hz = machine.core_stats()
             drop = new_drop[i]-old_drop[i]
             rx = new_rx[i] - old_rx[i]
             tx = new_tx[i] - old_tx[i]
             non_dp_rx = new_non_dp_rx[i] - old_non_dp_rx[i]
             non_dp_tx = new_non_dp_tx[i] - old_non_dp_tx[i]
             tsc = new_tsc[i] - old_tsc[i]
             # No tsc progress means PROX has not refreshed this
             # machine's stats yet; skip it this round.
             if tsc == 0 :
                 continue
             machines_to_go -= 1
             old_drop[i] = new_drop[i]
             old_rx[i] = new_rx[i]
             old_tx[i] = new_tx[i]
             old_non_dp_rx[i] = new_non_dp_rx[i]
             old_non_dp_tx[i] = new_non_dp_tx[i]
             old_tsc[i] = new_tsc[i]
             tot_drop[i] = tot_drop[i] + tx - rx
             RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop[i]) +' |')
 #            writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NonDPReceived':non_dp_rx,'NonDPSent':non_dp_tx,'Delta':tx-rx,'NonDPDelta':non_dp_tx-non_dp_rx,'Dropped':tot_drop[i]})
             if self.pushgateway:
                 # NOTE(review): TestName is not defined in this scope —
                 # presumably a module-level constant; verify, otherwise
                 # this raises NameError when pushgateway is configured.
                 URL = self.pushgateway+ '/metrics/job/' + TestName + '/instance/' + self.environment_file + str(i)
                 DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNonDPReceived {}\nNonDPSent {}\nDelta {}\nNonDPDelta {}\nDropped {}\n'.format(i,duration,rx,tx,non_dp_rx,non_dp_tx,tx-rx,non_dp_tx-non_dp_rx,tot_drop[i])
                 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'text/xml'}
                 response = requests.post(url=URL, data=DATA,headers=HEADERS)
                 if (response.status_code != 202) and (response.status_code != 200):
                     RapidLog.info('Cannot send metrics to {}'.format(URL))
                     RapidLog.info(DATA)
             # One full round (all machines sampled) counts as 1 second.
             if machines_to_go == 0:
                 duration = duration - 1
                 machines_to_go = len (self.machines)
     RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
     return (True)
             
示例#12
0
    def run_iteration(self, requested_duration, flow_number, size, speed):
        BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
        LAT_PERCENTILE = self.test['lat_percentile']
        r = 0
        sleep_time = 2
        while (r < self.test['maxr']):
            time.sleep(sleep_time)
            # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
            t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats(
            )
            t1_dp_rx = t1_rx - t1_non_dp_rx
            t1_dp_tx = t1_tx - t1_non_dp_tx
            self.gen_machine.set_generator_speed(0)
            self.gen_machine.start_gen_cores()
            if self.background_machines:
                self.set_background_speed(self.background_machines, 0)
                self.start_background_traffic(self.background_machines)
            if 'ramp_step' in self.test.keys():
                ramp_speed = self.test['ramp_step']
            else:
                ramp_speed = speed
            while ramp_speed < speed:
                self.gen_machine.set_generator_speed(ramp_speed)
                if self.background_machines:
                    self.set_background_speed(self.background_machines,
                                              ramp_speed)
                time.sleep(2)
                ramp_speed = ramp_speed + self.test['ramp_step']
            self.gen_machine.set_generator_speed(speed)
            if self.background_machines:
                self.set_background_speed(self.background_machines, speed)
            time.sleep(
                2
            )  ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
            start_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats(
                )
                bg_gen_stat = {
                    "bg_dp_rx": bg_rx - bg_non_dp_rx,
                    "bg_dp_tx": bg_tx - bg_non_dp_tx,
                    "bg_tsc": bg_tsc
                }
                start_bg_gen_stats.append(dict(bg_gen_stat))
            if self.sut_machine != None:
                t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                )
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats(
            )
            tx = t2_tx - t1_tx
            dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx)
            dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
            tot_dp_drop = dp_tx - dp_rx
            if tx == 0:
                RapidLog.critical(
                    "TX = 0. Test interrupted since no packet has been sent.")
            if dp_tx == 0:
                RapidLog.critical(
                    "Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent."
                )
            # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
            # Measure latency statistics per second
            lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
            )
            lat_samples = sum(buckets)
            sample_count = 0
            for sample_percentile, bucket in enumerate(buckets, start=1):
                sample_count += bucket
                if sample_count > (lat_samples * LAT_PERCENTILE):
                    break
            percentile_max = (sample_percentile == len(buckets))
            sample_percentile = sample_percentile * float(
                2**BUCKET_SIZE_EXP) / (old_div(float(lat_hz), float(10**6)))
            if self.test['test'] == 'fixed_rate':
                RapidLog.info(
                    self.report_result(flow_number, size, speed, None, None,
                                       None, None, lat_avg, sample_percentile,
                                       percentile_max, lat_max, dp_tx, dp_rx,
                                       None, None))
            tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
            lat_avg = used_avg = 0
            buckets_total = buckets
            tot_lat_samples = sum(buckets)
            tot_lat_measurement_duration = float(0)
            tot_core_measurement_duration = float(0)
            tot_sut_core_measurement_duration = float(0)
            tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
            lat_avail = core_avail = sut_avail = False
            while (tot_core_measurement_duration - float(requested_duration) <=
                   0.1) or (tot_lat_measurement_duration -
                            float(requested_duration) <= 0.1):
                time.sleep(0.5)
                lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
                )
                # Get statistics after some execution time
                if t3_lat_tsc != t2_lat_tsc:
                    single_lat_measurement_duration = (
                        t3_lat_tsc - t2_lat_tsc
                    ) * 1.0 / lat_hz  # time difference between the 2 measurements, expressed in seconds.
                    # A second has passed in between to lat_stats requests. Hence we need to process the results
                    tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                    if lat_min > lat_min_sample:
                        lat_min = lat_min_sample
                    if lat_max < lat_max_sample:
                        lat_max = lat_max_sample
                    lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration  # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                    used_avg = used_avg + used_sample * single_lat_measurement_duration  # and give it more weigth.
                    lat_samples = sum(buckets)
                    tot_lat_samples += lat_samples
                    sample_count = 0
                    for sample_percentile, bucket in enumerate(buckets,
                                                               start=1):
                        sample_count += bucket
                        if sample_count > lat_samples * LAT_PERCENTILE:
                            break
                    percentile_max = (sample_percentile == len(buckets))
                    bucket_size = float(2**BUCKET_SIZE_EXP) / (old_div(
                        float(lat_hz), float(10**6)))
                    sample_percentile = sample_percentile * bucket_size
                    buckets_total = [
                        buckets_total[i] + buckets[i]
                        for i in range(len(buckets_total))
                    ]
                    t2_lat_tsc = t3_lat_tsc
                    lat_avail = True
                t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats(
                )
                if t3_tsc != t2_tsc:
                    single_core_measurement_duration = (
                        t3_tsc - t2_tsc
                    ) * 1.0 / tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                    tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
                    delta_rx = t3_rx - t2_rx
                    tot_rx += delta_rx
                    delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
                    tot_non_dp_rx += delta_non_dp_rx
                    delta_tx = t3_tx - t2_tx
                    tot_tx += delta_tx
                    delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
                    tot_non_dp_tx += delta_non_dp_tx
                    delta_dp_tx = delta_tx - delta_non_dp_tx
                    delta_dp_rx = delta_rx - delta_non_dp_rx
                    delta_dp_drop = delta_dp_tx - delta_dp_rx
                    tot_dp_drop += delta_dp_drop
                    delta_drop = t3_drop - t2_drop
                    tot_drop += delta_drop
                    t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
                    core_avail = True
                if self.sut_machine != None:
                    t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                    )
                    if t3_sut_tsc != t2_sut_tsc:
                        single_sut_core_measurement_duration = (
                            t3_sut_tsc - t2_sut_tsc
                        ) * 1.0 / tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                        tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                        tot_sut_rx += t3_sut_rx - t2_sut_rx
                        tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                        delta_sut_tx = t3_sut_tx - t2_sut_tx
                        tot_sut_tx += delta_sut_tx
                        delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                        tot_sut_non_dp_tx += delta_sut_non_dp_tx
                        t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                        sut_avail = True
                if self.test['test'] == 'fixed_rate':
                    if lat_avail == core_avail == True:
                        lat_avail = core_avail = False
                        pps_req_tx = (
                            delta_tx + delta_drop - delta_rx
                        ) / single_core_measurement_duration / 1000000
                        pps_tx = delta_tx / single_core_measurement_duration / 1000000
                        if self.sut_machine != None and sut_avail:
                            pps_sut_tx = delta_sut_tx / single_sut_core_measurement_duration / 1000000
                            sut_avail = False
                        else:
                            pps_sut_tx = None
                        pps_rx = delta_rx / single_core_measurement_duration / 1000000
                        RapidLog.info(
                            self.report_result(
                                flow_number, size, speed, pps_req_tx, pps_tx,
                                pps_sut_tx, pps_rx, lat_avg_sample,
                                sample_percentile, percentile_max,
                                lat_max_sample, delta_dp_tx, delta_dp_rx,
                                tot_dp_drop, single_core_measurement_duration))
                        variables = {
                            'Flows': flow_number,
                            'Size': size,
                            'RequestedSpeed': self.get_pps(speed, size),
                            'CoreGenerated': pps_req_tx,
                            'SentByNIC': pps_tx,
                            'FwdBySUT': pps_sut_tx,
                            'RevByCore': pps_rx,
                            'AvgLatency': lat_avg_sample,
                            'PCTLatency': sample_percentile,
                            'MaxLatency': lat_max_sample,
                            'PacketsSent': delta_dp_tx,
                            'PacketsReceived': delta_dp_rx,
                            'PacketsLost': tot_dp_drop,
                            'bucket_size': bucket_size,
                            'buckets': buckets
                        }

                        self.post_data('rapid_flowsizetest', variables)
            end_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats(
                )
                bg_gen_stat = {
                    "bg_dp_rx": bg_rx - bg_non_dp_rx,
                    "bg_dp_tx": bg_tx - bg_non_dp_tx,
                    "bg_tsc": bg_tsc,
                    "bg_hz": bg_hz
                }
                end_bg_gen_stats.append(dict(bg_gen_stat))
            i = 0
            bg_rates = []
            while i < len(end_bg_gen_stats):
                bg_rates.append(0.000001 *
                                (end_bg_gen_stats[i]['bg_dp_rx'] -
                                 start_bg_gen_stats[i]['bg_dp_rx']) /
                                ((end_bg_gen_stats[i]['bg_tsc'] -
                                  start_bg_gen_stats[i]['bg_tsc']) * 1.0 /
                                 end_bg_gen_stats[i]['bg_hz']))
                i += 1
            if len(bg_rates):
                avg_bg_rate = sum(bg_rates) / len(bg_rates)
                RapidLog.debug(
                    'Average Background traffic rate: {:>7.3f} Mpps'.format(
                        avg_bg_rate))
            else:
                avg_bg_rate = None
            #Stop generating
            self.gen_machine.stop_gen_cores()
            r += 1
            lat_avg = old_div(lat_avg, float(tot_lat_measurement_duration))
            used_avg = old_div(used_avg, float(tot_lat_measurement_duration))
            t4_tsc = t2_tsc
            while t4_tsc == t2_tsc:
                t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats(
                )
            if self.test['test'] == 'fixed_rate':
                t4_lat_tsc = t2_lat_tsc
                while t4_lat_tsc == t2_lat_tsc:
                    lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
                    )
                sample_count = 0
                lat_samples = sum(buckets)
                for percentile, bucket in enumerate(buckets, start=1):
                    sample_count += bucket
                    if sample_count > lat_samples * LAT_PERCENTILE:
                        break
                percentile_max = (percentile == len(buckets))
                percentile = percentile * bucket_size
                lat_max = lat_max_sample
                lat_avg = lat_avg_sample
                delta_rx = t4_rx - t2_rx
                delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
                delta_tx = t4_tx - t2_tx
                delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
                delta_dp_tx = delta_tx - delta_non_dp_tx
                delta_dp_rx = delta_rx - delta_non_dp_rx
                dp_tx = delta_dp_tx
                dp_rx = delta_dp_rx
                tot_dp_drop += delta_dp_tx - delta_dp_rx
                pps_req_tx = None
                pps_tx = None
                pps_sut_tx = None
                pps_rx = None
                drop_rate = 100.0 * (dp_tx - dp_rx) / dp_tx
                tot_core_measurement_duration = None
                break  ## Not really needed since the while loop will stop when evaluating the value of r
            else:
                sample_count = 0
                buckets = buckets_total
                for percentile, bucket in enumerate(buckets_total, start=1):
                    sample_count += bucket
                    if sample_count > tot_lat_samples * LAT_PERCENTILE:
                        break
                percentile_max = (percentile == len(buckets_total))
                percentile = percentile * bucket_size
                pps_req_tx = (
                    tot_tx + tot_drop - tot_rx
                ) / tot_core_measurement_duration / 1000000.0  # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
                pps_tx = tot_tx / tot_core_measurement_duration / 1000000.0  # tot_tx is all generated packets actually accepted by the interface
                pps_rx = tot_rx / tot_core_measurement_duration / 1000000.0  # tot_rx is all packets received by the nop task = all packets received in the gen VM
                if self.sut_machine != None and sut_avail:
                    pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
                else:
                    pps_sut_tx = None
                dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
                dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
                tot_dp_drop = dp_tx - dp_rx
                drop_rate = 100.0 * tot_dp_drop / dp_tx
                if ((drop_rate < self.test['drop_rate_threshold']) or
                    (tot_dp_drop == self.test['drop_rate_threshold'] == 0)
                        or (tot_dp_drop > self.test['maxz'])):
                    break
        return (pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, percentile,
                percentile_max, lat_max, dp_tx, dp_rx, tot_dp_drop,
                (t4_tx_fail - t1_tx_fail), drop_rate, lat_min, used_avg, r,
                tot_core_measurement_duration, avg_bg_rate, bucket_size,
                buckets)
# --- snippet boundary: 示例#13 (example 13) ---
    def run_iteration(self, requested_duration, flow_number, size, speed):
        """Run one measurement iteration at the requested generator speed.

        Starts the latency and generator cores (optionally ramping the speed
        up in 'ramp_step' increments), then samples latency and core
        statistics roughly every 0.5 s until both the core-stats and the
        latency-stats measurement windows cover ``requested_duration``
        seconds. The per-sample deltas are accumulated and folded into a
        single result dict. The whole sequence is retried up to
        ``self.test['maxr']`` times unless the drop-rate exit criteria are
        met (or the test type is 'fixed_rate', which always runs once).

        Args:
            requested_duration: minimum measurement time, in seconds.
            flow_number: number of flows; only used for reporting.
            size: packet size; used for reporting and requested-rate calculation.
            speed: requested generator speed (same unit as set_generator_speed).

        Returns:
            dict with the aggregated results of the iteration, e.g.
            'pps_req_tx', 'pps_tx', 'pps_rx', 'pps_sut_tx', 'lat_avg',
            'lat_perc', 'lat_perc_max', 'lat_min', 'lat_max', 'abs_tx',
            'abs_rx', 'abs_dropped', 'abs_tx_fail', 'drop_rate',
            'actual_duration', 'avg_bg_rate', 'bucket_size', 'buckets', 'r'.
        """
        BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
        sleep_time = self.test['sleep_time']
        LAT_PERCENTILE = self.test['lat_percentile']
        iteration_data = {}
        time_loop_data = {}
        iteration_data['r'] = 0

        # Retry loop: repeat the measurement until the drop-rate criteria at
        # the bottom of the loop are satisfied or 'maxr' retries are used up.
        while (iteration_data['r'] < self.test['maxr']):
            self.gen_machine.start_latency_cores()
            time.sleep(sleep_time)
            # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests might still be in flight
            # t1_*: baseline counters taken before traffic generation starts.
            # core_stats() returns (rx, non_dp_rx, tx, non_dp_tx, drop,
            # tx_fail, tsc, tsc_hz) as unpacked below.
            t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats(
            )
            # Dataplane-only baselines (total minus non-dataplane, e.g. ARP).
            t1_dp_rx = t1_rx - t1_non_dp_rx
            t1_dp_tx = t1_tx - t1_non_dp_tx
            self.gen_machine.set_generator_speed(0)
            self.gen_machine.start_gen_cores()
            self.set_background_speed(self.background_machines, 0)
            self.start_background_traffic(self.background_machines)
            # Ramp up in 'ramp_step' increments when configured; otherwise
            # jump straight to the requested speed.
            if 'ramp_step' in self.test.keys():
                ramp_speed = self.test['ramp_step']
            else:
                ramp_speed = speed
            while ramp_speed < speed:
                self.gen_machine.set_generator_speed(ramp_speed)
                self.set_background_speed(self.background_machines, ramp_speed)
                time.sleep(2)
                ramp_speed = ramp_speed + self.test['ramp_step']
            self.gen_machine.set_generator_speed(speed)
            self.set_background_speed(self.background_machines, speed)
            iteration_data['speed'] = speed
            time_loop_data['speed'] = speed
            time.sleep(
                2
            )  ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
            # Snapshot the background generators' dataplane counters so their
            # average rate over the iteration can be computed at the end.
            start_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats(
                )
                bg_gen_stat = {
                    "bg_dp_rx": bg_rx - bg_non_dp_rx,
                    "bg_dp_tx": bg_tx - bg_non_dp_tx,
                    "bg_tsc": bg_tsc
                }
                start_bg_gen_stats.append(dict(bg_gen_stat))
            if self.sut_machine != None:
                t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                )
            # t2_*: counters once full speed is reached; t2 - t1 covers the
            # ramp-up phase only and is used for the sanity checks below.
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats(
            )
            tx = t2_tx - t1_tx
            iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx)
            iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx -
                                                        t1_non_dp_rx)
            iteration_data['abs_dropped'] = iteration_data[
                'abs_tx'] - iteration_data['abs_rx']
            # Abort the whole test when nothing was sent during ramp-up.
            if tx == 0:
                RapidLog.critical(
                    "TX = 0. Test interrupted since no packet has been sent.")
            if iteration_data['abs_tx'] == 0:
                RapidLog.critical(
                    "Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent."
                )
            # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
            # Measure latency statistics per second
            iteration_data.update(self.gen_machine.lat_stats())
            t2_lat_tsc = iteration_data['lat_tsc']
            # Walk the latency histogram until LAT_PERCENTILE of the samples
            # are covered; the 1-based index of that bucket is the percentile.
            sample_count = 0
            for sample_percentile, bucket in enumerate(
                    iteration_data['buckets'], start=1):
                sample_count += bucket
                if sample_count > sum(
                        iteration_data['buckets']) * LAT_PERCENTILE:
                    break
            # True when the percentile fell in the very last bucket, i.e. the
            # reported value is only a lower bound.
            iteration_data['lat_perc_max'] = (sample_percentile == len(
                iteration_data['buckets']))
            # Bucket width converted from TSC cycles to time units
            # (presumably microseconds, given lat_hz / 10**6 — verify against
            # the PROX latency implementation).
            iteration_data['bucket_size'] = float(2**BUCKET_SIZE_EXP) / (
                old_div(float(iteration_data['lat_hz']), float(10**6)))
            time_loop_data['bucket_size'] = iteration_data['bucket_size']
            iteration_data[
                'lat_perc'] = sample_percentile * iteration_data['bucket_size']
            if self.test['test'] == 'fixed_rate':
                # For fixed_rate tests, log an initial (mostly empty) result
                # line; per-second results follow inside the time loop below.
                iteration_data['pps_req_tx'] = None
                iteration_data['pps_tx'] = None
                iteration_data['pps_sut_tx'] = None
                iteration_data['pps_rx'] = None
                iteration_data['lat_perc'] = None
                iteration_data['actual_duration'] = None
                iteration_prefix = {
                    'speed': '',
                    'lat_avg': '',
                    'lat_perc': '',
                    'lat_max': '',
                    'abs_drop_rate': '',
                    'drop_rate': ''
                }
                RapidLog.info(
                    self.report_result(flow_number, size, iteration_data,
                                       iteration_prefix))
            # Reset the accumulators for the measurement time loop.
            tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
            iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
            tot_lat_measurement_duration = float(0)
            iteration_data['actual_duration'] = float(0)
            tot_sut_core_measurement_duration = float(0)
            tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
            lat_avail = core_avail = sut_avail = False
            # Main measurement loop: keep sampling until BOTH the core-stats
            # window and the latency-stats window cover requested_duration.
            while (iteration_data['actual_duration'] -
                   float(requested_duration) <= 0.1) or (
                       tot_lat_measurement_duration - float(requested_duration)
                       <= 0.1):
                time.sleep(0.5)
                time_loop_data.update(self.gen_machine.lat_stats())
                # Get statistics after some execution time
                # Only process latency stats when PROX produced a new sample
                # (lat_tsc changed since the previous one).
                if time_loop_data['lat_tsc'] != t2_lat_tsc:
                    single_lat_measurement_duration = (
                        time_loop_data['lat_tsc'] - t2_lat_tsc
                    ) * 1.0 / time_loop_data[
                        'lat_hz']  # time difference between the 2 measurements, expressed in seconds.
                    # A second has passed in between to lat_stats requests. Hence we need to process the results
                    tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                    # Track the global min/max latency over all samples.
                    if iteration_data['lat_min'] > time_loop_data['lat_min']:
                        iteration_data['lat_min'] = time_loop_data['lat_min']
                    if iteration_data['lat_max'] < time_loop_data['lat_max']:
                        iteration_data['lat_max'] = time_loop_data['lat_max']
                    iteration_data['lat_avg'] = iteration_data[
                        'lat_avg'] + time_loop_data[
                            'lat_avg'] * single_lat_measurement_duration  # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                    iteration_data['lat_used'] = iteration_data[
                        'lat_used'] + time_loop_data[
                            'lat_used'] * single_lat_measurement_duration  # and give it more weigth.
                    # Per-sample percentile, same histogram walk as above.
                    sample_count = 0
                    for sample_percentile, bucket in enumerate(
                            time_loop_data['buckets'], start=1):
                        sample_count += bucket
                        if sample_count > sum(
                                time_loop_data['buckets']) * LAT_PERCENTILE:
                            break
                    time_loop_data['lat_perc_max'] = (sample_percentile == len(
                        time_loop_data['buckets']))
                    time_loop_data[
                        'lat_perc'] = sample_percentile * iteration_data[
                            'bucket_size']
                    # Merge this sample's histogram into the iteration total.
                    iteration_data['buckets'] = [
                        iteration_data['buckets'][i] +
                        time_loop_data['buckets'][i]
                        for i in range(len(iteration_data['buckets']))
                    ]
                    t2_lat_tsc = time_loop_data['lat_tsc']
                    lat_avail = True
                t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats(
                )
                # Only process core stats when a new sample is available.
                if t3_tsc != t2_tsc:
                    time_loop_data['actual_duration'] = (
                        t3_tsc - t2_tsc
                    ) * 1.0 / tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                    iteration_data['actual_duration'] = iteration_data[
                        'actual_duration'] + time_loop_data['actual_duration']
                    # Accumulate the per-sample counter deltas.
                    delta_rx = t3_rx - t2_rx
                    tot_rx += delta_rx
                    delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
                    tot_non_dp_rx += delta_non_dp_rx
                    delta_tx = t3_tx - t2_tx
                    tot_tx += delta_tx
                    delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
                    tot_non_dp_tx += delta_non_dp_tx
                    delta_dp_tx = delta_tx - delta_non_dp_tx
                    delta_dp_rx = delta_rx - delta_non_dp_rx
                    time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
                    iteration_data['abs_dropped'] += time_loop_data[
                        'abs_dropped']
                    delta_drop = t3_drop - t2_drop
                    tot_drop += delta_drop
                    # Slide the window: t3 becomes the new t2 baseline.
                    t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
                    core_avail = True
                # Same sliding-window accumulation for the SUT, when present.
                if self.sut_machine != None:
                    t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                    )
                    if t3_sut_tsc != t2_sut_tsc:
                        single_sut_core_measurement_duration = (
                            t3_sut_tsc - t2_sut_tsc
                        ) * 1.0 / sut_tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                        tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                        tot_sut_rx += t3_sut_rx - t2_sut_rx
                        tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                        delta_sut_tx = t3_sut_tx - t2_sut_tx
                        tot_sut_tx += delta_sut_tx
                        delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                        tot_sut_non_dp_tx += delta_sut_non_dp_tx
                        t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                        sut_avail = True
                if self.test['test'] == 'fixed_rate':
                    # For fixed_rate tests, report (and post) each interval
                    # for which both a fresh latency and core sample exist.
                    if lat_avail == core_avail == True:
                        lat_avail = core_avail = False
                        # pps_req_tx: packets the generator core tried to send
                        # (tx + drop - rx), converted to Mpps over the interval.
                        time_loop_data['pps_req_tx'] = (
                            delta_tx + delta_drop - delta_rx
                        ) / time_loop_data['actual_duration'] / 1000000
                        time_loop_data['pps_tx'] = delta_tx / time_loop_data[
                            'actual_duration'] / 1000000
                        if self.sut_machine != None and sut_avail:
                            time_loop_data[
                                'pps_sut_tx'] = delta_sut_tx / single_sut_core_measurement_duration / 1000000
                            sut_avail = False
                        else:
                            time_loop_data['pps_sut_tx'] = None
                        time_loop_data['pps_rx'] = delta_rx / time_loop_data[
                            'actual_duration'] / 1000000
                        time_loop_data['abs_tx'] = delta_dp_tx
                        time_loop_data['abs_rx'] = delta_dp_rx
                        time_loop_prefix = {
                            'speed': '',
                            'lat_avg': '',
                            'lat_perc': '',
                            'lat_max': '',
                            'abs_drop_rate': '',
                            'drop_rate': ''
                        }
                        RapidLog.info(
                            self.report_result(flow_number, size,
                                               time_loop_data,
                                               time_loop_prefix))
                        # Annotate and push this interval's data to the
                        # configured results backend.
                        time_loop_data['test'] = self.test['testname']
                        time_loop_data['environment_file'] = self.test[
                            'environment_file']
                        time_loop_data['Flows'] = flow_number
                        time_loop_data['Size'] = size
                        time_loop_data['RequestedSpeed'] = RapidTest.get_pps(
                            speed, size)
                        _ = self.post_data(time_loop_data)
            # End-of-iteration snapshot of the background generators, used
            # together with start_bg_gen_stats to compute their average rate.
            end_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats(
                )
                bg_gen_stat = {
                    "bg_dp_rx": bg_rx - bg_non_dp_rx,
                    "bg_dp_tx": bg_tx - bg_non_dp_tx,
                    "bg_tsc": bg_tsc,
                    "bg_hz": bg_hz
                }
                end_bg_gen_stats.append(dict(bg_gen_stat))
            self.stop_background_traffic(self.background_machines)
            # Per-machine background rate in Mpps: dataplane rx delta divided
            # by the elapsed time (tsc delta / tsc frequency).
            i = 0
            bg_rates = []
            while i < len(end_bg_gen_stats):
                bg_rates.append(0.000001 *
                                (end_bg_gen_stats[i]['bg_dp_rx'] -
                                 start_bg_gen_stats[i]['bg_dp_rx']) /
                                ((end_bg_gen_stats[i]['bg_tsc'] -
                                  start_bg_gen_stats[i]['bg_tsc']) * 1.0 /
                                 end_bg_gen_stats[i]['bg_hz']))
                i += 1
            if len(bg_rates):
                iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
                RapidLog.debug(
                    'Average Background traffic rate: {:>7.3f} Mpps'.format(
                        iteration_data['avg_bg_rate']))
            else:
                iteration_data['avg_bg_rate'] = None
            #Stop generating
            self.gen_machine.stop_gen_cores()
            time.sleep(3.5)
            self.gen_machine.stop_latency_cores()
            iteration_data['r'] += 1
            # Turn the duration-weighted latency sums into averages.
            iteration_data['lat_avg'] = old_div(
                iteration_data['lat_avg'], float(tot_lat_measurement_duration))
            iteration_data['lat_used'] = old_div(
                iteration_data['lat_used'],
                float(tot_lat_measurement_duration))
            # t4_*: final counters; busy-wait until a sample newer than t2 is
            # available so the post-stop statistics are included.
            t4_tsc = t2_tsc
            while t4_tsc == t2_tsc:
                t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats(
                )
            if self.test['test'] == 'fixed_rate':
                # fixed_rate: take one final latency sample past the last one
                # and report absolute counters only (no rates/durations).
                iteration_data['lat_tsc'] = t2_lat_tsc
                while iteration_data['lat_tsc'] == t2_lat_tsc:
                    iteration_data.update(self.gen_machine.lat_stats())
                sample_count = 0
                for percentile, bucket in enumerate(iteration_data['buckets'],
                                                    start=1):
                    sample_count += bucket
                    if sample_count > sum(
                            iteration_data['buckets']) * LAT_PERCENTILE:
                        break
                iteration_data['lat_perc_max'] = (percentile == len(
                    iteration_data['buckets']))
                iteration_data[
                    'lat_perc'] = percentile * iteration_data['bucket_size']
                delta_rx = t4_rx - t2_rx
                delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
                delta_tx = t4_tx - t2_tx
                delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
                delta_dp_tx = delta_tx - delta_non_dp_tx
                delta_dp_rx = delta_rx - delta_non_dp_rx
                iteration_data['abs_tx'] = delta_dp_tx
                iteration_data['abs_rx'] = delta_dp_rx
                iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
                iteration_data['pps_req_tx'] = None
                iteration_data['pps_tx'] = None
                iteration_data['pps_sut_tx'] = None
                iteration_data['drop_rate'] = 100.0 * (
                    iteration_data['abs_tx'] -
                    iteration_data['abs_rx']) / iteration_data['abs_tx']
                iteration_data['actual_duration'] = None
                break  ## Not really needed since the while loop will stop when evaluating the value of r
            else:
                # Non-fixed_rate: compute the percentile over the merged
                # histogram and the average rates over the whole iteration.
                sample_count = 0
                for percentile, bucket in enumerate(iteration_data['buckets'],
                                                    start=1):
                    sample_count += bucket
                    if sample_count > sum(
                            iteration_data['buckets']) * LAT_PERCENTILE:
                        break
                iteration_data['lat_perc_max'] = (percentile == len(
                    iteration_data['buckets']))
                iteration_data[
                    'lat_perc'] = percentile * iteration_data['bucket_size']
                iteration_data['pps_req_tx'] = (
                    tot_tx + tot_drop - tot_rx
                ) / iteration_data[
                    'actual_duration'] / 1000000.0  # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
                iteration_data['pps_tx'] = tot_tx / iteration_data[
                    'actual_duration'] / 1000000.0  # tot_tx is all generated packets actually accepted by the interface
                iteration_data['pps_rx'] = tot_rx / iteration_data[
                    'actual_duration'] / 1000000.0  # tot_rx is all packets received by the nop task = all packets received in the gen VM
                if self.sut_machine != None and sut_avail:
                    iteration_data[
                        'pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
                else:
                    iteration_data['pps_sut_tx'] = None
                # Absolute dataplane counters over the full t1 -> t4 span.
                iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx -
                                                              t1_non_dp_tx)
                iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx -
                                                              t1_non_dp_rx)
                iteration_data['abs_dropped'] = iteration_data[
                    'abs_tx'] - iteration_data['abs_rx']
                iteration_data['drop_rate'] = 100.0 * iteration_data[
                    'abs_dropped'] / iteration_data['abs_tx']
                # Exit criteria: drop rate below threshold, exactly zero drops
                # when the threshold is zero, or more drops than 'maxz'.
                if ((iteration_data['drop_rate'] <
                     self.test['drop_rate_threshold'])
                        or (iteration_data['abs_dropped'] ==
                            self.test['drop_rate_threshold'] == 0) or
                    (iteration_data['abs_dropped'] > self.test['maxz'])):
                    break
            self.gen_machine.stop_latency_cores()
        iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
        return (iteration_data)
# --- snippet boundary: 示例#14 (example 14) ---
    def parse_config(test_params):
        testconfig = configparser.RawConfigParser()
        testconfig.read(test_params['test_file'])
        test_params['required_number_of_test_machines'] = int(testconfig.get(
            'TestParameters', 'total_number_of_test_machines'))
        test_params['number_of_tests'] = int(testconfig.get('TestParameters',
            'number_of_tests'))
        test_params['TestName'] = testconfig.get('TestParameters', 'name')
        if testconfig.has_option('TestParameters', 'lat_percentile'):
            test_params['lat_percentile'] = old_div(float(
                testconfig.get('TestParameters', 'lat_percentile')),100.0)
        else:
            test_params['lat_percentile'] = 0.99
        RapidLog.info('Latency percentile at {:.0f}%'.format(
            test_params['lat_percentile']*100))
        if testconfig.has_option('TestParameters', 'sleep_time'):
            test_params['sleep_time'] = int(testconfig.get('TestParameters', 'sleep_time'))
            if test_params['sleep_time'] < 2:
                test_params['sleep_time'] = 2
        else:
            test_params['sleep_time'] = 2

        if testconfig.has_option('TestParameters', 'ipv6'):
            test_params['ipv6'] = testconfig.getboolean('TestParameters','ipv6')
        else:
            test_params['ipv6'] = False
        config = configparser.RawConfigParser()
        config.read(test_params['environment_file'])
        test_params['vim_type'] = config.get('Varia', 'vim')
        test_params['user'] = config.get('ssh', 'user')
        if config.has_option('ssh', 'key'):
            test_params['key'] = config.get('ssh', 'key')
            if test_params['user'] in ['rapid']:
                if test_params['key'] != 'rapid_rsa_key':
                    RapidLog.debug(("Key file {} for user {} overruled by key file:"
                            " rapid_rsa_key").format(test_params['key'],
                            test_params['user']))
                    test_params['key'] = 'rapid_rsa_key'
        else:
            test_params['key'] = None
        if config.has_option('ssh', 'password'):
            test_params['password'] = config.get('ssh', 'password')
        else:
            test_params['password'] = None
        test_params['total_number_of_machines'] = int(config.get('rapid',
            'total_number_of_machines'))
        tests = []
        test = {}
        for test_index in range(1, test_params['number_of_tests']+1):
            test.clear()
            section = 'test%d'%test_index
            options = testconfig.options(section)
            for option in options:
                if option in ['imix','imixs','flows', 'warmupimix']:
                    test[option] = ast.literal_eval(testconfig.get(section,
                        option))
                elif option in ['maxframespersecondallingress','stepsize',
                        'flowsize','warmupflowsize','warmuptime', 'steps']:
                    test[option] = int(testconfig.get(section, option))
                elif option in ['startspeed', 'step', 'drop_rate_threshold',
                        'lat_avg_threshold','lat_perc_threshold',
                        'lat_max_threshold','accuracy','maxr','maxz',
                        'ramp_step','warmupspeed','mis_ordered_threshold']:
                    test[option] = float(testconfig.get(section, option))
                else:
                    test[option] = testconfig.get(section, option)
            tests.append(dict(test))
        for test in tests:
            if test['test'] in ['flowsizetest','TST009test']:
                if 'drop_rate_threshold' not in test.keys():
                    test['drop_rate_threshold'] = 0
                latency_thresholds = ['lat_avg_threshold','lat_perc_threshold','lat_max_threshold','mis_ordered_threshold']
                for threshold in latency_thresholds:
                    if threshold not in test.keys():
                        test[threshold] = inf
        test_params['tests'] = tests
        if test_params['required_number_of_test_machines'] > test_params[
                'total_number_of_machines']:
            RapidLog.exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
            raise Exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
        map_info = test_params['machine_map_file'].strip('[]').split(',')
        map_info_length = len(map_info)
        # If map_info is a list where the first entry is numeric, we assume we
        # are dealing with a list of machines and NOT the machine.map file
        if map_info[0].isnumeric():
            if map_info_length < test_params[
                    'required_number_of_test_machines']:
                RapidLog.exception('Not enough machine indices in --map \
                        parameter: {}. Needing {} entries'.format(map_info,
                            test_params['required_number_of_test_machines']))
            machine_index = list(map(int,map_info))
        else:
            machine_map = configparser.RawConfigParser()
            machine_map.read(test_params['machine_map_file'])
            machine_index = []
            for test_machine in range(1,
                    test_params['required_number_of_test_machines']+1):
                machine_index.append(int(machine_map.get(
                    'TestM%d'%test_machine, 'machine_index')))
        machine_map = configparser.RawConfigParser()
        machine_map.read(test_params['machine_map_file'])
        machines = []
        machine = {}
        for test_machine in range(1, test_params[
            'required_number_of_test_machines']+1):
            machine.clear()
            section = 'TestM%d'%test_machine
            options = testconfig.options(section)
            for option in options:
                if option in ['prox_socket','prox_launch_exit','monitor']:
                    machine[option] = testconfig.getboolean(section, option)
                elif option in ['mcore', 'cores', 'gencores','latcores']:
                    machine[option] = ast.literal_eval(testconfig.get(
                        section, option))
                elif option in ['bucket_size_exp']:
                    machine[option] = int(testconfig.get(section, option))
                    if machine[option] < 11:
                        RapidLog.exception(
                                "Minimum Value for bucket_size_exp is 11")
                else:
                    machine[option] = testconfig.get(section, option)
                for key in ['prox_socket','prox_launch_exit']:
                   if key not in machine.keys():
                       machine[key] = True
            if 'monitor' not in machine.keys():
                machine['monitor'] = True
            section = 'M%d'%machine_index[test_machine-1]
            options = config.options(section)
            for option in options:
                machine[option] = config.get(section, option)
            machines.append(dict(machine))
        for machine in machines:
            dp_ports = []
            if 'dest_vm' in machine.keys():
                index = 1
                while True: 
                    dp_ip_key = 'dp_ip{}'.format(index)
                    dp_mac_key = 'dp_mac{}'.format(index)
                    if dp_ip_key in machines[int(machine['dest_vm'])-1].keys() and \
                            dp_mac_key in machines[int(machine['dest_vm'])-1].keys():
                        dp_port = {'ip': machines[int(machine['dest_vm'])-1][dp_ip_key],
                                'mac' : machines[int(machine['dest_vm'])-1][dp_mac_key]}
                        dp_ports.append(dict(dp_port))
                        index += 1
                    else:
                        break
                    machine['dest_ports'] = list(dp_ports)
            gw_ips = []
            if 'gw_vm' in machine.keys():
                index = 1
                while True:
                    gw_ip_key = 'dp_ip{}'.format(index)
                    if gw_ip_key in machines[int(machine['gw_vm'])-1].keys():
                        gw_ip = machines[int(machine['gw_vm'])-1][gw_ip_key]
                        gw_ips.append(gw_ip)
                        index += 1
                    else:
                        break
                    machine['gw_ips'] = list(gw_ips)
        test_params['machines'] = machines
        return (test_params)
示例#15
0
 def run_tests(self, test_params):
     """Parse the configuration, start PROX on every machine, then run all
     configured tests sequentially.

     Args:
         test_params (dict): raw test parameters (CLI / defaults); expanded
             by RapidConfigParser.parse_config() before use.

     Returns:
         bool: True when every executed test passed, False if any failed.

     Raises:
         Exception: when more than one generator or more than one SUT is
             configured with monitor=True.
     """
     test_params = RapidConfigParser.parse_config(test_params)
     RapidLog.debug(test_params)
     monitor_gen = monitor_sut = False
     background_machines = []
     sut_machine = gen_machine = None
     self.machines = []
     for machine_params in test_params['machines']:
         if 'gencores' in machine_params.keys():
             # Machines with generator cores act as traffic generators.
             machine = RapidGeneratorMachine(test_params['key'],
                                             test_params['user'],
                                             test_params['vim_type'],
                                             test_params['rundir'],
                                             machine_params,
                                             test_params['ipv6'])
             if machine_params['monitor']:
                 # Exactly one generator may be monitored; the others
                 # produce background traffic.
                 if monitor_gen:
                     RapidLog.exception("Can only monitor 1 generator")
                     raise Exception("Can only monitor 1 generator")
                 else:
                     monitor_gen = True
                     gen_machine = machine
             else:
                 background_machines.append(machine)
         else:
             machine = RapidMachine(test_params['key'], test_params['user'],
                                    test_params['vim_type'],
                                    test_params['rundir'], machine_params)
             if machine_params['monitor']:
                 if monitor_sut:
                     RapidLog.exception("Can only monitor 1 sut")
                     raise Exception("Can only monitor 1 sut")
                 else:
                     monitor_sut = True
                     if machine_params['prox_socket']:
                         sut_machine = machine
         self.machines.append(machine)
     # Start PROX on all machines in parallel.
     prox_executor = concurrent.futures.ThreadPoolExecutor(
         max_workers=len(self.machines))
     self.future_to_prox = {
         prox_executor.submit(machine.start_prox,
                              test_params['configonly']): machine
         for machine in self.machines
     }
     if test_params['configonly']:
         # Config-only run: wait for PROX deployment, then exit.
         concurrent.futures.wait(self.future_to_prox,
                                 return_when=ALL_COMPLETED)
         sys.exit()
     # Connect to all PROX instances before any test starts.
     with concurrent.futures.ThreadPoolExecutor(
             max_workers=len(self.machines)) as executor:
         future_to_connect_prox = {
             executor.submit(machine.connect_prox): machine
             for machine in self.machines
         }
         concurrent.futures.wait(future_to_connect_prox,
                                 return_when=ALL_COMPLETED)
     result = True
     for test_param in test_params['tests']:
         RapidLog.info(test_param['test'])
         if test_param['test'] in [
                 'flowsizetest', 'TST009test', 'fixed_rate',
                 'increment_till_fail'
         ]:
             test = FlowSizeTest(test_param, test_params['lat_percentile'],
                                 test_params['runtime'],
                                 test_params['TestName'],
                                 test_params['environment_file'],
                                 gen_machine, sut_machine,
                                 background_machines)
         elif test_param['test'] in ['corestats']:
             test = CoreStatsTest(test_param, test_params['runtime'],
                                  test_params['TestName'],
                                  test_params['environment_file'],
                                  self.machines)
         elif test_param['test'] in ['portstats']:
             test = PortStatsTest(test_param, test_params['runtime'],
                                  test_params['TestName'],
                                  test_params['environment_file'],
                                  self.machines)
         elif test_param['test'] in ['impairtest']:
             test = ImpairTest(test_param, test_params['lat_percentile'],
                               test_params['runtime'],
                               test_params['TestName'],
                               test_params['environment_file'], gen_machine,
                               sut_machine)
         elif test_param['test'] in ['irqtest']:
             test = IrqTest(test_param, test_params['runtime'],
                            test_params['TestName'],
                            test_params['environment_file'], self.machines)
         elif test_param['test'] in ['warmuptest']:
             test = WarmupTest(test_param, gen_machine)
         else:
             RapidLog.debug('Test name ({}) is not valid:'.format(
                 test_param['test']))
             # BUGFIX: skip unknown test names. Previously execution fell
             # through to test.run(), raising NameError on the first
             # iteration or silently re-running the previous test.
             continue
         single_test_result = test.run()
         if not single_test_result:
             result = False
     return (result)
示例#16
0
 def run(self):
     """Periodically sample and log core statistics on all PROX machines.

     Every ~0.5s the core counters of each machine are polled via
     machine.core_stats(). A machine only contributes a new table row when
     its tsc counter advanced (i.e. a fresh sample is available); once every
     machine has reported, one second of the configured runtime is consumed.
     Each row is also pushed to the configured data sink via post_data().

     Returns:
         bool: always True — this test only reports, it has no pass/fail
         criterion.
     """
     RapidLog.info(
         "+------------------------------------------------------------------------------------------------------------------+"
     )
     RapidLog.info(
         "| Measuring core statistics on 1 or more PROX instances                                                            |"
     )
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+"
     )
     RapidLog.info(
         "| PROX ID   |    Time   |    RX      |     TX     | non DP RX  | non DP TX  |   TX - RX  | nonDP TX-RX|  DROP TOT  |"
     )
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+"
     )
     duration = self.test['runtime']
     # Per-machine counter lists; index i corresponds to self.machines[i].
     # old_* holds the previous sample, new_* the current one; the logged
     # values are the deltas between the two.
     tot_drop = []
     old_rx = []
     old_non_dp_rx = []
     old_tx = []
     old_non_dp_tx = []
     old_drop = []
     old_tx_fail = []
     old_tsc = []
     new_rx = []
     new_non_dp_rx = []
     new_tx = []
     new_non_dp_tx = []
     new_drop = []
     new_tx_fail = []
     new_tsc = []
     machines_to_go = len(self.machines)
     for machine in self.machines:
         machine.reset_stats()
         tot_drop.append(0)
         old_rx.append(0)
         old_non_dp_rx.append(0)
         old_tx.append(0)
         old_non_dp_tx.append(0)
         old_drop.append(0)
         old_tx_fail.append(0)
         old_tsc.append(0)
         # Take the baseline sample for this machine.
         old_rx[-1], old_non_dp_rx[-1], old_tx[-1], old_non_dp_tx[
             -1], old_drop[-1], old_tx_fail[-1], old_tsc[
                 -1], tsc_hz = machine.core_stats()
         new_rx.append(0)
         new_non_dp_rx.append(0)
         new_tx.append(0)
         new_non_dp_tx.append(0)
         new_drop.append(0)
         new_tx_fail.append(0)
         new_tsc.append(0)
     while (duration > 0):
         time.sleep(0.5)
         # Get statistics after some execution time
         for i, machine in enumerate(self.machines, start=0):
             new_rx[i], new_non_dp_rx[i], new_tx[i], new_non_dp_tx[
                 i], new_drop[i], new_tx_fail[i], new_tsc[
                     i], tsc_hz = machine.core_stats()
             drop = new_drop[i] - old_drop[i]
             rx = new_rx[i] - old_rx[i]
             tx = new_tx[i] - old_tx[i]
             non_dp_rx = new_non_dp_rx[i] - old_non_dp_rx[i]
             non_dp_tx = new_non_dp_tx[i] - old_non_dp_tx[i]
             tsc = new_tsc[i] - old_tsc[i]
             if tsc == 0:
                 # tsc did not advance: no fresh sample for this machine
                 # yet, so skip it and try again on the next poll.
                 continue
             machines_to_go -= 1
             old_drop[i] = new_drop[i]
             old_rx[i] = new_rx[i]
             old_tx[i] = new_tx[i]
             old_non_dp_rx[i] = new_non_dp_rx[i]
             old_non_dp_tx[i] = new_non_dp_tx[i]
             old_tsc[i] = new_tsc[i]
             # Cumulative drop = cumulative (TX - RX) difference.
             tot_drop[i] = tot_drop[i] + tx - rx
             RapidLog.info('|{:>10.0f}'.format(i) +
                           ' |{:>10.0f}'.format(duration) + ' | ' +
                           '{:>10.0f}'.format(rx) + ' | ' +
                           '{:>10.0f}'.format(tx) + ' | ' +
                           '{:>10.0f}'.format(non_dp_rx) + ' | ' +
                           '{:>10.0f}'.format(non_dp_tx) + ' | ' +
                           '{:>10.0f}'.format(tx - rx) + ' | ' +
                           '{:>10.0f}'.format(non_dp_tx - non_dp_rx) +
                           ' | ' + '{:>10.0f}'.format(tot_drop[i]) + ' |')
             variables = {
                 'test': self.test['test'],
                 'environment_file': self.test['environment_file'],
                 'PROXID': i,
                 'StepSize': duration,
                 'Received': rx,
                 'Sent': tx,
                 'NonDPReceived': non_dp_rx,
                 'NonDPSent': non_dp_tx,
                 'Dropped': tot_drop[i]
             }
             self.post_data('rapid_corestatstest', variables)
             if machines_to_go == 0:
                 # All machines reported for this interval: consume one
                 # second of the remaining runtime and rearm the counter.
                 duration = duration - 1
                 machines_to_go = len(self.machines)
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+"
     )
     return (True)
示例#17
0
 def run_iteration(self, requested_duration, flow_number, size, speed):
     """Run one traffic iteration and collect generator/SUT statistics.

     Starts the generator cores, then repeatedly samples core counters and
     latency histogram buckets until requested_duration of valid
     measurement time has been accumulated. Generation is then stopped and
     the results are aggregated. The outer loop retries the whole
     measurement (up to self.test['maxr'] attempts) until the measured
     drop rate satisfies the configured thresholds; 'fixed_rate' tests run
     exactly one attempt.

     Args:
         requested_duration (float): requested measurement time in seconds.
         flow_number: number of flows; only forwarded to report_result().
         size: packet size; only forwarded to report_result().
         speed: requested generator speed; only forwarded to
             report_result().

     Returns:
         tuple: (pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg,
         percentile, percentile_max, lat_max, dp_tx, dp_rx, tot_dp_drop,
         tx_fail, drop_rate, lat_min, used_avg, r,
         tot_core_measurement_duration).
     """
     BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
     LAT_PERCENTILE = self.test['lat_percentile']
     r = 0
     sleep_time = 2
     while (r < self.test['maxr']):
         time.sleep(sleep_time)
         # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
         # t1_*: baseline generator counters taken before traffic starts.
         t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats(
         )
         # Dataplane counters = totals minus non-dataplane (e.g. ARP).
         t1_dp_rx = t1_rx - t1_non_dp_rx
         t1_dp_tx = t1_tx - t1_non_dp_tx
         self.gen_machine.start_gen_cores()
         time.sleep(
             2
         )  ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
         if self.sut_machine != None:
             t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
             )
         t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats(
         )
         tx = t2_tx - t1_tx
         dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx)
         dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
         tot_dp_drop = dp_tx - dp_rx
         if tx == 0:
             RapidLog.critical(
                 "TX = 0. Test interrupted since no packet has been sent.")
         if dp_tx == 0:
             RapidLog.critical(
                 "Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent."
             )
         # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
         # Measure latency statistics per second
         lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
         )
         lat_samples = sum(buckets)
         sample_count = 0
         # Walk the histogram until the requested percentile of samples
         # is covered; the 1-based bucket index is the percentile bucket.
         for sample_percentile, bucket in enumerate(buckets, start=1):
             sample_count += bucket
             if sample_count > (lat_samples * LAT_PERCENTILE):
                 break
         percentile_max = (sample_percentile == len(buckets))
         # Convert the bucket index into a latency value in microseconds.
         sample_percentile = sample_percentile * float(
             2**BUCKET_SIZE_EXP) / (old_div(float(lat_hz), float(10**6)))
         if self.test['test'] == 'fixed_rate':
             RapidLog.info(
                 self.report_result(flow_number, size, speed, None, None,
                                    None, None, lat_avg, sample_percentile,
                                    percentile_max, lat_max, dp_tx, dp_rx,
                                    None, None))
         # Reset accumulators for the per-interval measurement loop below.
         tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
         lat_avg = used_avg = 0
         buckets_total = [0] * 128
         tot_lat_samples = 0
         tot_lat_measurement_duration = float(0)
         tot_core_measurement_duration = float(0)
         tot_sut_core_measurement_duration = float(0)
         tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
         lat_avail = core_avail = sut_avail = False
         # Sample until both core and latency stats cover the requested
         # duration.
         while (tot_core_measurement_duration - float(requested_duration) <=
                0.1) or (tot_lat_measurement_duration -
                         float(requested_duration) <= 0.1):
             time.sleep(0.5)
             lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
             )
             # Get statistics after some execution time
             if t3_lat_tsc != t2_lat_tsc:
                 single_lat_measurement_duration = (
                     t3_lat_tsc - t2_lat_tsc
                 ) * 1.0 / lat_hz  # time difference between the 2 measurements, expressed in seconds.
                 # A second has passed in between to lat_stats requests. Hence we need to process the results
                 tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                 if lat_min > lat_min_sample:
                     lat_min = lat_min_sample
                 if lat_max < lat_max_sample:
                     lat_max = lat_max_sample
                 lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration  # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                 used_avg = used_avg + used_sample * single_lat_measurement_duration  # and give it more weigth.
                 lat_samples = sum(buckets)
                 tot_lat_samples += lat_samples
                 sample_count = 0
                 for sample_percentile, bucket in enumerate(buckets,
                                                            start=1):
                     sample_count += bucket
                     if sample_count > lat_samples * LAT_PERCENTILE:
                         break
                 percentile_max = (sample_percentile == len(buckets))
                 sample_percentile = sample_percentile * float(
                     2**BUCKET_SIZE_EXP) / (old_div(float(lat_hz),
                                                    float(10**6)))
                 buckets_total = [
                     buckets_total[i] + buckets[i]
                     for i in range(len(buckets_total))
                 ]
                 t2_lat_tsc = t3_lat_tsc
                 lat_avail = True
             t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats(
             )
             if t3_tsc != t2_tsc:
                 single_core_measurement_duration = (
                     t3_tsc - t2_tsc
                 ) * 1.0 / tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                 tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
                 delta_rx = t3_rx - t2_rx
                 tot_rx += delta_rx
                 delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
                 tot_non_dp_rx += delta_non_dp_rx
                 delta_tx = t3_tx - t2_tx
                 tot_tx += delta_tx
                 delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
                 tot_non_dp_tx += delta_non_dp_tx
                 delta_dp_tx = delta_tx - delta_non_dp_tx
                 delta_dp_rx = delta_rx - delta_non_dp_rx
                 delta_dp_drop = delta_dp_tx - delta_dp_rx
                 tot_dp_drop += delta_dp_drop
                 delta_drop = t3_drop - t2_drop
                 tot_drop += delta_drop
                 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
                 core_avail = True
             if self.sut_machine != None:
                 t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                 )
                 if t3_sut_tsc != t2_sut_tsc:
                     single_sut_core_measurement_duration = (
                         t3_sut_tsc - t2_sut_tsc
                     ) * 1.0 / tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                     tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                     tot_sut_rx += t3_sut_rx - t2_sut_rx
                     tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                     delta_sut_tx = t3_sut_tx - t2_sut_tx
                     tot_sut_tx += delta_sut_tx
                     delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                     tot_sut_non_dp_tx += delta_sut_non_dp_tx
                     t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                     sut_avail = True
             if self.test['test'] == 'fixed_rate':
                 # fixed_rate: report every interval instead of only once.
                 if lat_avail == core_avail == True:
                     lat_avail = core_avail = False
                     pps_req_tx = (
                         delta_tx + delta_drop - delta_rx
                     ) / single_core_measurement_duration / 1000000
                     pps_tx = delta_tx / single_core_measurement_duration / 1000000
                     if self.sut_machine != None and sut_avail:
                         pps_sut_tx = delta_sut_tx / single_sut_core_measurement_duration / 1000000
                         sut_avail = False
                     else:
                         pps_sut_tx = None
                     pps_rx = delta_rx / single_core_measurement_duration / 1000000
                     RapidLog.info(
                         self.report_result(
                             flow_number, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg_sample,
                             sample_percentile, percentile_max,
                             lat_max_sample, delta_dp_tx, delta_dp_rx,
                             tot_dp_drop, single_core_measurement_duration))
         #Stop generating
         self.gen_machine.stop_gen_cores()
         r += 1
         # Weighted averages over the total latency measurement time.
         lat_avg = old_div(lat_avg, float(tot_lat_measurement_duration))
         used_avg = old_div(used_avg, float(tot_lat_measurement_duration))
         t4_tsc = t2_tsc
         # Busy-wait until a fresh core sample is available after stop.
         while t4_tsc == t2_tsc:
             t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats(
             )
         if self.test['test'] == 'fixed_rate':
             t4_lat_tsc = t2_lat_tsc
             while t4_lat_tsc == t2_lat_tsc:
                 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
                 )
             sample_count = 0
             lat_samples = sum(buckets)
             for percentile, bucket in enumerate(buckets, start=1):
                 sample_count += bucket
                 if sample_count > lat_samples * LAT_PERCENTILE:
                     break
             percentile_max = (percentile == len(buckets))
             percentile = percentile * float(2**BUCKET_SIZE_EXP) / (old_div(
                 float(lat_hz), float(10**6)))
             lat_max = lat_max_sample
             lat_avg = lat_avg_sample
             delta_rx = t4_rx - t2_rx
             delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
             delta_tx = t4_tx - t2_tx
             delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
             delta_dp_tx = delta_tx - delta_non_dp_tx
             delta_dp_rx = delta_rx - delta_non_dp_rx
             dp_tx = delta_dp_tx
             dp_rx = delta_dp_rx
             tot_dp_drop += delta_dp_tx - delta_dp_rx
             pps_req_tx = None
             pps_tx = None
             pps_sut_tx = None
             pps_rx = None
             drop_rate = 100.0 * (dp_tx - dp_rx) / dp_tx
             tot_core_measurement_duration = None
             break  ## Not really needed since the while loop will stop when evaluating the value of r
         else:
             # Percentile over the histogram accumulated across the whole
             # measurement window.
             sample_count = 0
             for percentile, bucket in enumerate(buckets_total, start=1):
                 sample_count += bucket
                 if sample_count > tot_lat_samples * LAT_PERCENTILE:
                     break
             percentile_max = (percentile == len(buckets_total))
             percentile = percentile * float(2**BUCKET_SIZE_EXP) / (old_div(
                 float(lat_hz), float(10**6)))
             pps_req_tx = (
                 tot_tx + tot_drop - tot_rx
             ) / tot_core_measurement_duration / 1000000.0  # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
             pps_tx = tot_tx / tot_core_measurement_duration / 1000000.0  # tot_tx is all generated packets actually accepted by the interface
             pps_rx = tot_rx / tot_core_measurement_duration / 1000000.0  # tot_rx is all packets received by the nop task = all packets received in the gen VM
             if self.sut_machine != None and sut_avail:
                 pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
             else:
                 pps_sut_tx = None
             # Dataplane totals over the full run (t1 baseline to t4).
             dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
             dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
             tot_dp_drop = dp_tx - dp_rx
             drop_rate = 100.0 * tot_dp_drop / dp_tx
             # Stop retrying once the drop rate meets the threshold (or
             # nothing was dropped with a zero threshold, or too many
             # packets were dropped to make further retries useful).
             if ((drop_rate < self.test['drop_rate_threshold']) or
                 (tot_dp_drop == self.test['drop_rate_threshold'] == 0)
                     or (tot_dp_drop > self.test['maxz'])):
                 break
     return (pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, percentile,
             percentile_max, lat_max, dp_tx, dp_rx, tot_dp_drop,
             (t4_tx_fail - t1_tx_fail), drop_rate, lat_min, used_avg, r,
             tot_core_measurement_duration)
示例#18
0
 def run(self):
     """Measure interrupt durations on every monitored PROX machine.

     For each machine, a baseline of the per-core interrupt-duration
     histogram is taken, the test runs for self.runtime seconds, and the
     per-second rate of interrupts per duration bucket is logged as a
     table (one row per core, one column per bucket).

     Returns:
         bool: always True; results are only logged.
     """
     RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------")
     RapidLog.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic   ")
     RapidLog.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and    ")
     RapidLog.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was       ")
     RapidLog.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout   ")
     RapidLog.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 ")
     RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------")
     sys.stdout.flush()
     for machine in self.machines:
         bucket_limits = machine.socket.show_irq_buckets(1)
         print('Measurement ongoing ... ', end='\r')
         machine.stop()
         column_count = len(bucket_limits) + 1
         row_count = len(machine.get_cores()) + 1
         # baseline holds the counters before the run; table becomes the
         # printed result (row 0 = header, column 0 = core label).
         baseline = [[0] * column_count for _ in range(row_count)]
         table = [[0] * column_count for _ in range(row_count)]
         table[0][0] = 'bucket us'
         for col, limit in enumerate(bucket_limits, start=1):
             table[0][col] = '<' + limit
         # Last column is open-ended: everything above the last limit.
         table[0][-1] = '>' + bucket_limits[-2]
         machine.start()
         time.sleep(2)
         for col in range(1, column_count):
             for row, core in enumerate(machine.get_cores(), start=1):
                 baseline[row][col] = machine.socket.irq_stats(core, col - 1)
         time.sleep(float(self.runtime))
         machine.stop()
         for row, core in enumerate(machine.get_cores(), start=1):
             table[row][0] = 'core %s ' % core
             for col in range(1, column_count):
                 delta = machine.socket.irq_stats(core, col - 1) \
                         - baseline[row][col]
                 if delta == 0:
                     table[row][col] = '0'
                 else:
                     # Normalize to interrupts per second over the runtime.
                     table[row][col] = str(
                             round(old_div(delta, float(self.runtime)), 2))
         RapidLog.info('Results for PROX instance %s' % machine.name)
         for line in table:
             RapidLog.info(''.join('{:>12}'.format(cell) for cell in line))
     return True
示例#19
0
 def IsKey(self):
     """Return True (and log) if a keypair named self.key_name exists.

     Returns:
         bool: True when the nova keypair list contains self.key_name,
         False otherwise.
     """
     for keypair in self.nova_client.keypairs.list():
         if keypair.name == self.key_name:
             RapidLog.info('Keypair {} already exists'.format(self.key_name))
             return True
     return False
示例#20
0
 def run(self):
     """Run the impairment test at a fixed requested speed.

     Sends UDP traffic at self.test['startspeed'] for self.test['steps']
     iterations, logging one result row per iteration and posting each
     iteration's data via self.post_data().

     Returns:
         tuple: (True, result_details), where result_details is whatever
         the last self.post_data() call returned (or the initial
         placeholder dict if self.test['steps'] was 0).
     """
     result_details = {'Details': 'Nothing'}
     imix = self.test['imix']
     # The size reported in the table is the mean of the imix size list.
     size = mean(imix)
     flow_number = self.test['flowsize']
     # Number of measurement iterations to perform.
     attempts = self.test['steps']
     self.gen_machine.set_udp_packet_size(imix)
     # set_flows may adjust the requested flow count to a supported value.
     flow_number = self.gen_machine.set_flows(flow_number)
     self.gen_machine.start_latency_cores()
     RapidLog.info('+' + '-' * 188 + '+')
     RapidLog.info(
         ("| Generator is sending UDP ({:>5} flow) packets ({:>5}"
          " bytes) to SUT via GW dropping and delaying packets. SUT sends "
          "packets back.{:>60}").format(flow_number, round(size), '|'))
     RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 + '+' +
                   '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                   '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' +
                   '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+' +
                   '-' * 11 + '+' + '-' * 7 + '+' + '-' * 4 + '+')
     RapidLog.info((
         '| Test   | Speed requested  | Gen by core | Sent by NIC'
         ' | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f} Pcentil'
         '| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|'
         'L.Ratio|Time|').format(self.test['lat_percentile'] * 100))
     RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 + '+' +
                   '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                   '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' +
                   '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+' +
                   '-' * 11 + '+' + '-' * 7 + '+' + '-' * 4 + '+')
     speed = self.test['startspeed']
     self.gen_machine.set_generator_speed(speed)
     while attempts:
         attempts -= 1
         print('Measurement ongoing at speed: ' + str(round(speed, 2)) +
               '%      ',
               end='\r')
         sys.stdout.flush()
         time.sleep(1)
         # Get statistics now that the generation is stable and NO ARP messages any more
         iteration_data = self.run_iteration(float(self.test['runtime']),
                                             flow_number, size, speed)
         iteration_data['speed'] = speed
         # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
         # If the sum is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
         if (iteration_data['drop_rate'] +
                 iteration_data['lat_used'] * 100) < 95:
             lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
                            '{}').format(bcolors.WARNING,
                                         iteration_data['lat_used'] * 100,
                                         bcolors.ENDC)
         else:
             lat_warning = ''
         # NOTE(review): lat_warning is computed above but never appended to
         # the logged row below — looks like an oversight; confirm against
         # report_result's expectations.
         # Empty prefixes: no colour highlighting for this test type.
         iteration_prefix = {
             'speed': '',
             'lat_avg': '',
             'lat_perc': '',
             'lat_max': '',
             'abs_drop_rate': '',
             'drop_rate': ''
         }
         RapidLog.info(
             self.report_result(attempts, size, iteration_data,
                                iteration_prefix))
         # Enrich the iteration record before posting it to the collector.
         iteration_data['test'] = self.test['testname']
         iteration_data['environment_file'] = self.test['environment_file']
         iteration_data['Flows'] = flow_number
         iteration_data['Size'] = size
         iteration_data['RequestedSpeed'] = RapidTest.get_pps(
             iteration_data['speed'], size)
         result_details = self.post_data(iteration_data)
         RapidLog.debug(result_details)
     RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 + '+' +
                   '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                   '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' +
                   '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+' +
                   '-' * 11 + '+' + '-' * 7 + '+' + '-' * 4 + '+')
     self.gen_machine.stop_latency_cores()
     return (True, result_details)
示例#21
0
 def run(self):
     """Sweep imix sizes and flow counts, searching for the max sustainable speed.

     For each entry in self.test['imixs'] and self.test['flows'], the
     generator speed is repeatedly adjusted (self.new_speed) until the
     configured resolution is reached (self.resolution_achieved).  An
     iteration is successful when the measured drop rate and the average,
     percentile and maximum latencies are all below their thresholds from
     self.test.  End results per combination are logged as a table row
     and, when self.test['pushgateway'] is set, pushed as plain-text
     metrics to the pushgateway.

     Returns:
         bool: True when every measured combination reached
         self.test['pass_threshold'] received pps; always False for
         'fixed_rate' tests, which only report numbers.
     """
     self.gen_machine.start_latency_cores()
     TestPassed = True
     for imix in self.test['imixs']:
         # Packet size reported in the table is the mean of the imix list.
         size = mean(imix)
         self.gen_machine.set_udp_packet_size(imix)
         if self.background_machines:
             backgroundinfo = '{}Running {} x background traffic not represented in the table{}'.format(
                 bcolors.FLASH, len(self.background_machines), bcolors.ENDC)
         else:
             backgroundinfo = '{}{}'.format(bcolors.FLASH, bcolors.ENDC)
         self.set_background_size(self.background_machines, imix)
         RapidLog.info(
             "+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+"
         )
         RapidLog.info(
             '| UDP, {:>5} bytes, different number of flows by randomizing SRC & DST UDP port. {:116.116}|'
             .format(size, backgroundinfo))
         RapidLog.info(
             "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
         )
         RapidLog.info(
             '| Flows  | Speed requested  | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f} Pcentil| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|L.Ratio|Time|'
             .format(self.test['lat_percentile'] * 100))
         RapidLog.info(
             "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
         )
         for flow_number in self.test['flows']:
             attempts = 0
             self.gen_machine.reset_stats()
             if self.sut_machine:
                 self.sut_machine.reset_stats()
             # set_flows may round the flow count to a supported value.
             flow_number = self.gen_machine.set_flows(flow_number)
             self.set_background_flows(self.background_machines,
                                       flow_number)
             endspeed = None
             speed = self.get_start_speed_and_init(size)
             while True:
                 attempts += 1
                 endwarning = False
                 print(str(flow_number) +
                       ' flows: Measurement ongoing at speed: ' +
                       str(round(speed, 2)) + '%      ',
                       end='\r')
                 sys.stdout.flush()
                 # Start generating packets at requested speed (in % of a 10Gb/s link)
                 self.gen_machine.set_generator_speed(speed)
                 self.set_background_speed(self.background_machines, speed)
                 self.start_background_traffic(self.background_machines)
                 # Get statistics now that the generation is stable and initial ARP messages are dealt with
                 pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, abs_tx, abs_rx, abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration = self.run_iteration(
                     float(self.test['runtime']), flow_number, size, speed)
                 self.stop_background_traffic(self.background_machines)
                 if r > 1:
                     retry_warning = bcolors.WARNING + ' {:1} retries needed'.format(
                         r) + bcolors.ENDC
                 else:
                     retry_warning = ''
                 # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
                 # If the sum is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
                 if (drop_rate + lat_used * 100) < 95:
                     lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(
                         lat_used * 100) + bcolors.ENDC
                 else:
                     lat_warning = ''
                 if self.test['test'] == 'fixed_rate':
                     endspeed = speed
                     endpps_req_tx = None
                     endpps_tx = None
                     endpps_sut_tx = None
                     endpps_rx = None
                     endlat_avg = lat_avg
                     endlat_perc = lat_perc
                     endlat_perc_max = lat_perc_max
                     endlat_max = lat_max
                     endabs_dropped = abs_dropped
                     enddrop_rate = drop_rate
                     endabs_tx = abs_tx
                     endabs_rx = abs_rx
                     if lat_warning or retry_warning:
                         endwarning = '|        | {:177.177} |'.format(
                             retry_warning + lat_warning)
                     success = True
                     TestPassed = False  # fixed rate testing cannot be True, it is just reported numbers every second
                     speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                 # The following if statement is testing if we pass the success criteria of a certain drop rate, average latency and maximum latency below the threshold
                 # The drop rate success can be achieved in 2 ways: either the drop rate is below a treshold, either we want that no packet has been lost during the test
                 # This can be specified by putting 0 in the .test file
                 elif (
                     (drop_rate < self.test['drop_rate_threshold']) or
                     (abs_dropped == self.test['drop_rate_threshold'] == 0)
                 ) and (lat_avg < self.test['lat_avg_threshold']) and (
                         lat_perc < self.test['lat_perc_threshold']) and (
                             lat_max < self.test['lat_max_threshold']):
                     if (old_div((self.get_pps(speed, size) - pps_tx),
                                 self.get_pps(speed, size))) > 0.01:
                         speed_prefix = bcolors.WARNING
                         if abs_tx_fail > 0:
                             gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(
                                 self.get_pps(speed, size), pps_tx,
                                 abs_tx_fail) + bcolors.ENDC
                         else:
                             gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(
                                 self.get_pps(speed, size),
                                 pps_tx) + bcolors.ENDC
                     else:
                         speed_prefix = bcolors.ENDC
                         gen_warning = ''
                     endspeed = speed
                     endspeed_prefix = speed_prefix
                     endpps_req_tx = pps_req_tx
                     endpps_tx = pps_tx
                     endpps_sut_tx = pps_sut_tx
                     endpps_rx = pps_rx
                     endlat_avg = lat_avg
                     endlat_perc = lat_perc
                     endlat_perc_max = lat_perc_max
                     endlat_max = lat_max
                     endabs_dropped = None
                     enddrop_rate = drop_rate
                     endabs_tx = abs_tx
                     endabs_rx = abs_rx
                     if lat_warning or gen_warning or retry_warning:
                         endwarning = '|        | {:186.186} |'.format(
                             retry_warning + lat_warning + gen_warning)
                     success = True
                     success_message = ' SUCCESS'
                     speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                     # BUGFIX: lat_perc_prefix was missing from this call,
                     # shifting all subsequent positional arguments by one
                     # compared to the other two report_result calls below.
                     RapidLog.debug(
                         self.report_result(
                             -attempts, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg, lat_perc,
                             lat_perc_max, lat_max, abs_tx, abs_rx,
                             abs_dropped, actual_duration, speed_prefix,
                             lat_avg_prefix, lat_perc_prefix,
                             lat_max_prefix, abs_drop_rate_prefix,
                             drop_rate_prefix) +
                         success_message + retry_warning + lat_warning +
                         gen_warning)
                 else:
                     success_message = ' FAILED'
                     abs_drop_rate_prefix = bcolors.ENDC
                     if ((abs_dropped > 0)
                             and (self.test['drop_rate_threshold'] == 0)):
                         abs_drop_rate_prefix = bcolors.FAIL
                     if (drop_rate < self.test['drop_rate_threshold']):
                         drop_rate_prefix = bcolors.ENDC
                     else:
                         drop_rate_prefix = bcolors.FAIL
                     if (lat_avg < self.test['lat_avg_threshold']):
                         lat_avg_prefix = bcolors.ENDC
                     else:
                         lat_avg_prefix = bcolors.FAIL
                     if (lat_perc < self.test['lat_perc_threshold']):
                         lat_perc_prefix = bcolors.ENDC
                     else:
                         lat_perc_prefix = bcolors.FAIL
                     if (lat_max < self.test['lat_max_threshold']):
                         lat_max_prefix = bcolors.ENDC
                     else:
                         lat_max_prefix = bcolors.FAIL
                     if ((old_div((self.get_pps(speed, size) - pps_tx),
                                  self.get_pps(speed, size))) < 0.001):
                         speed_prefix = bcolors.ENDC
                     else:
                         speed_prefix = bcolors.FAIL
                     success = False
                     RapidLog.debug(
                         self.report_result(
                             -attempts, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg, lat_perc,
                             lat_perc_max, lat_max, abs_tx, abs_rx,
                             abs_dropped, actual_duration, speed_prefix,
                             lat_avg_prefix, lat_perc_prefix,
                             lat_max_prefix, abs_drop_rate_prefix,
                             drop_rate_prefix) + success_message +
                         retry_warning + lat_warning)
                 speed = self.new_speed(speed, size, success)
                 if self.resolution_achieved():
                     break
             if endspeed is not None:
                 if TestPassed and (endpps_rx <
                                    self.test['pass_threshold']):
                     TestPassed = False
                 speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                 RapidLog.info(
                     self.report_result(
                         flow_number, size, endspeed, endpps_req_tx,
                         endpps_tx, endpps_sut_tx, endpps_rx, endlat_avg,
                         endlat_perc, endlat_perc_max, endlat_max,
                         endabs_tx, endabs_rx, endabs_dropped,
                         actual_duration, speed_prefix, lat_avg_prefix,
                         lat_perc_prefix, lat_max_prefix,
                         abs_drop_rate_prefix, drop_rate_prefix))
                 if endwarning:
                     RapidLog.info(endwarning)
                 RapidLog.info(
                     "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
                 )
                 if self.test['pushgateway']:
                     URL = self.test[
                         'pushgateway'] + '/metrics/job/' + self.test[
                             'test'] + '/instance/' + self.test[
                                 'environment_file']
                     # endabs_dropped is None for successful runs; push 0.
                     if endabs_dropped is None:
                         ead = 0
                     else:
                         ead = endabs_dropped
                     DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nSent {}\nReceived {}\nLost {}\nLostTotal {}\n'.format(
                         flow_number, size + 4,
                         self.get_pps(endspeed, size), endpps_req_tx,
                         endpps_tx, endpps_sut_tx, endpps_rx, endlat_avg,
                         endlat_max, endabs_tx, endabs_rx, ead, ead)
                     HEADERS = {
                         'X-Requested-With': 'Python requests',
                         'Content-type': 'text/xml'
                     }
                     response = requests.post(url=URL,
                                              data=DATA,
                                              headers=HEADERS)
                     if (response.status_code !=
                             202) and (response.status_code != 200):
                         RapidLog.info(
                             'Cannot send metrics to {}'.format(URL))
                         RapidLog.info(DATA)
             else:
                 RapidLog.info('|{:>7}'.format(str(flow_number)) +
                               " | Speed 0 or close to 0")
     self.gen_machine.stop_latency_cores()
     return (TestPassed)
示例#22
0
    def run(self):
        """Continuously measure impairment-test statistics at a fixed speed.

        Sends UDP traffic at self.test['startspeed'] and logs/posts one
        result row per iteration.

        NOTE(review): the while-loop below has no break; per the banner the
        test is stopped with ctrl-c, so the trailing stop_latency_cores()
        and return statements are never reached in normal flow — confirm
        this is intended.
        """
        imix = self.test['imix']
        # Reported packet size is the mean of the imix size list.
        size = mean(imix)
        flow_number = self.test['flowsize']
        attempts = 0
        self.gen_machine.set_udp_packet_size(imix)
        # set_flows may adjust the requested flow count to a supported value.
        flow_number = self.gen_machine.set_flows(flow_number)
        self.gen_machine.start_latency_cores()
        RapidLog.info(
            "+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+"
        )
        RapidLog.info(
            "| Generator is sending UDP ({:>5} flow) packets ({:>5} bytes) to SUT via GW dropping and delaying packets. SUT sends packets back. Use ctrl-c to stop the test                               |"
            .format(flow_number, size))
        RapidLog.info(
            "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
        )
        RapidLog.info(
            '| Test   | Speed requested  | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f} Pcentil| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|L.Ratio|Time|'
            .format(self.test['lat_percentile'] * 100))
        RapidLog.info(
            "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
        )

        speed = self.test['startspeed']
        self.gen_machine.set_generator_speed(speed)
        while True:
            attempts += 1
            print('Measurement ongoing at speed: ' + str(round(speed, 2)) +
                  '%      ',
                  end='\r')
            sys.stdout.flush()
            time.sleep(1)
            # Get statistics now that the generation is stable and NO ARP messages any more
            pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, abs_tx, abs_rx, abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration = self.run_iteration(
                float(self.test['runtime']), flow_number, size, speed)
            # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
            # If the sum is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
            if (drop_rate + lat_used * 100) < 95:
                lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(
                    lat_used * 100) + bcolors.ENDC
            else:
                lat_warning = ''
            # NOTE(review): lat_warning is computed but not included in the
            # logged row below — confirm whether this is intentional.
            RapidLog.info(
                self.report_result(attempts, size, speed, pps_req_tx, pps_tx,
                                   pps_sut_tx, pps_rx, lat_avg, lat_perc,
                                   lat_perc_max, lat_max, abs_tx, abs_rx,
                                   abs_dropped, actual_duration))
            # Record posted to the results backend via post_data.
            variables = {
                'test': self.test['test'],
                'environment_file': self.test['environment_file'],
                'Flows': flow_number,
                'Size': size,
                'RequestedSpeed': RapidTest.get_pps(speed, size),
                'CoreGenerated': pps_req_tx,
                'SentByNIC': pps_tx,
                'FwdBySUT': pps_sut_tx,
                'RevByCore': pps_rx,
                'AvgLatency': lat_avg,
                'PCTLatency': lat_perc,
                'MaxLatency': lat_max,
                'PacketsLost': abs_dropped,
                'DropRate': drop_rate
            }
            self.post_data('rapid_impairtest', variables)
        self.gen_machine.stop_latency_cores()
        return (True)
 def run(self):
     """Legacy impairment-test loop: log one result row per iteration and
     optionally push metrics to a Prometheus pushgateway.

     NOTE(review): `run_iteration`, `get_pps` and `TestName` are used as
     bare names (not via self) — presumably module-level globals in the
     original script; verify they are in scope here.
     NOTE(review): the while-loop has no break (the banner says to stop
     with ctrl-c), so the trailing stop/return lines are unreachable in
     normal flow.
     """
     #    fieldnames = ['Flows','PacketSize','RequestedPPS','GeneratedPPS','SentPPS','ForwardedPPS','ReceivedPPS','AvgLatencyUSEC','MaxLatencyUSEC','Dropped','DropRate']
     #    writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
     #    writer.writeheader()
     imix = self.test['imix']
     # Reported packet size is the mean of the imix size list.
     size = mean(imix)
     flow_number = self.test['flowsize']
     attempts = 0
     self.gen_machine.set_udp_packet_size(imix)
     # set_flows may adjust the requested flow count to a supported value.
     flow_number = self.gen_machine.set_flows(flow_number)
     self.gen_machine.start_latency_cores()
     RapidLog.info(
         "+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+"
     )
     RapidLog.info(
         "| Generator is sending UDP (" + '{:>5}'.format(flow_number) +
         " flow) packets (" + '{:>5}'.format(size) +
         " bytes) to SUT via GW dropping and delaying packets. SUT sends packets back. Use ctrl-c to stop the test    |"
     )
     RapidLog.info(
         "+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+"
     )
     RapidLog.info(
         "| Test   |  Speed requested   | Sent to NIC    |  Sent by Gen   | Forward by SUT |  Rec. by Gen   |  Avg. Latency  |  Max. Latency  |  Packets Lost  | Loss Ratio |"
     )
     RapidLog.info(
         "+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+"
     )
     speed = self.test['startspeed']
     self.gen_machine.set_generator_speed(speed)
     while True:
         attempts += 1
         print('Measurement ongoing at speed: ' + str(round(speed, 2)) +
               '%      ',
               end='\r')
         sys.stdout.flush()
         time.sleep(1)
         # Get statistics now that the generation is stable and NO ARP messages any more
         pps_req_tx, pps_tx, pps_sut_tx_str, pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, abs_dropped, abs_tx_fail, abs_tx, lat_min, lat_used, r, actual_duration = run_iteration(
             float(self.test['runtime']), flow_number, size, speed)
         # Percentage of transmitted packets that never came back.
         drop_rate = 100.0 * abs_dropped / abs_tx
         # lat_used is the fraction (0..1) of latency samples kept.
         if lat_used < 0.95:
             lat_warning = bcolors.FAIL + ' Potential latency accuracy problem: {:>3.0f}%'.format(
                 lat_used * 100) + bcolors.ENDC
         else:
             lat_warning = ''
         RapidLog.info('|{:>7}'.format(str(attempts)) + " | " +
                       '{:>5.1f}'.format(speed) + '% ' +
                       '{:>6.3f}'.format(get_pps(speed, size)) +
                       ' Mpps | ' + '{:>9.3f}'.format(pps_req_tx) +
                       ' Mpps | ' + '{:>9.3f}'.format(pps_tx) + ' Mpps | ' +
                       '{:>9}'.format(pps_sut_tx_str) + ' Mpps | ' +
                       '{:>9.3f}'.format(pps_rx) + ' Mpps | ' +
                       '{:>9.0f}'.format(lat_avg) + ' us   | ' +
                       '{:>9.0f}'.format(lat_max) + ' us   | ' +
                       '{:>14d}'.format(abs_dropped) + ' |'
                       '{:>9.2f}'.format(drop_rate) + '%  |' + lat_warning)
         #        writer.writerow({'Flows':flow_number,'PacketSize':(size+4),'RequestedPPS':get_pps(speed,size),'GeneratedPPS':pps_req_tx,'SentPPS':pps_tx,'ForwardedPPS':pps_sut_tx_str,'ReceivedPPS':pps_rx,'AvgLatencyUSEC':lat_avg,'MaxLatencyUSEC':lat_max,'Dropped':abs_dropped,'DropRate':drop_rate})
         if self.test['pushgateway']:
             # Push the iteration metrics as pushgateway plain-text lines.
             URL = self.test[
                 'pushgateway'] + '/metrics/job/' + TestName + '/instance/' + self.test[
                     'environment_file']
             DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nDropped {}\nDropRate {}\n'.format(
                 flow_number, size + 4, get_pps(speed, size), pps_req_tx,
                 pps_tx, pps_sut_tx_str, pps_rx, lat_avg, lat_max,
                 abs_dropped, drop_rate)
             HEADERS = {
                 'X-Requested-With': 'Python requests',
                 'Content-type': 'text/xml'
             }
             response = requests.post(url=URL, data=DATA, headers=HEADERS)
             # Pushgateway replies 200/202 on success.
             if (response.status_code != 202) and (response.status_code !=
                                                   200):
                 RapidLog.info('Cannot send metrics to {}'.format(URL))
                 RapidLog.info(DATA)
     self.gen_machine.stop_latency_cores()
     return (True)
 def run(self):
     """Poll port statistics (RX/TX/no-mbufs/errors) on all PROX machines
     and optionally push each sample to a Prometheus pushgateway.

     Every 0.5 s each machine is polled; a machine only produces a table
     row when its TSC advanced since the previous poll.  The remaining
     duration is decremented by one each time every machine has reported.

     NOTE(review): `runtime` and `TestName` are read as bare names —
     presumably module-level globals in the original script; verify they
     are in scope here.

     Returns True when the configured runtime has elapsed.
     """
     #    fieldnames = ['PROXID','Time','Received','Sent','NoMbufs','iErrMiss']
     #    writer = csv.DictWriter(data_csv_file, fieldnames=fieldnames)
     #    writer.writeheader()
     RapidLog.info(
         "+---------------------------------------------------------------------------+"
     )
     RapidLog.info(
         "| Measuring port statistics on 1 or more PROX instances                     |"
     )
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+"
     )
     RapidLog.info(
         "| PROX ID   |    Time   |    RX      |     TX     | no MBUFS   | ierr&imiss |"
     )
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+"
     )
     duration = float(runtime)
     # Per-machine counters from the previous poll ...
     old_rx = []
     old_tx = []
     old_no_mbufs = []
     old_errors = []
     old_tsc = []
     # ... and from the current poll.
     new_rx = []
     new_tx = []
     new_no_mbufs = []
     new_errors = []
     new_tsc = []
     machines_to_go = len(self.machines)
     for machine in self.machines:
         machine.reset_stats()
         old_rx.append(0)
         old_tx.append(0)
         old_no_mbufs.append(0)
         old_errors.append(0)
         old_tsc.append(0)
         # Take an initial baseline sample per machine.
         old_rx[-1], old_tx[-1], old_no_mbufs[-1], old_errors[-1], old_tsc[
             -1] = machine.multi_port_stats()
         new_rx.append(0)
         new_tx.append(0)
         new_no_mbufs.append(0)
         new_errors.append(0)
         new_tsc.append(0)
     while (duration > 0):
         time.sleep(0.5)
         # Get statistics after some execution time
         for i, machine in enumerate(self.machines, start=0):
             new_rx[i], new_tx[i], new_no_mbufs[i], new_errors[i], new_tsc[
                 i] = machine.multi_port_stats()
             rx = new_rx[i] - old_rx[i]
             tx = new_tx[i] - old_tx[i]
             no_mbufs = new_no_mbufs[i] - old_no_mbufs[i]
             errors = new_errors[i] - old_errors[i]
             tsc = new_tsc[i] - old_tsc[i]
             if tsc == 0:
                 # No fresh sample from this machine yet.
                 continue
             machines_to_go -= 1
             old_rx[i] = new_rx[i]
             old_tx[i] = new_tx[i]
             old_no_mbufs[i] = new_no_mbufs[i]
             old_errors[i] = new_errors[i]
             old_tsc[i] = new_tsc[i]
             RapidLog.info('|{:>10.0f}'.format(i) +
                           ' |{:>10.0f}'.format(duration) + ' | ' +
                           '{:>10.0f}'.format(rx) + ' | ' +
                           '{:>10.0f}'.format(tx) + ' | ' +
                           '{:>10.0f}'.format(no_mbufs) + ' | ' +
                           '{:>10.0f}'.format(errors) + ' |')
             #            writer.writerow({'PROXID':i,'Time':duration,'Received':rx,'Sent':tx,'NoMbufs':no_mbufs,'iErrMiss':errors})
             if self.pushgateway:
                 URL = self.pushgateway + '/metrics/job/' + TestName + '/instance/' + self.environment_file + str(
                     i)
                 DATA = 'PROXID {}\nTime {}\n Received {}\nSent {}\nNoMbufs {}\niErrMiss {}\n'.format(
                     i, duration, rx, tx, no_mbufs, errors)
                 HEADERS = {
                     'X-Requested-With': 'Python requests',
                     'Content-type': 'text/xml'
                 }
                 response = requests.post(url=URL,
                                          data=DATA,
                                          headers=HEADERS)
                 # Pushgateway replies 200/202 on success.
                 if (response.status_code != 202) and (response.status_code
                                                       != 200):
                     RapidLog.info('Cannot send metrics to {}'.format(URL))
                     RapidLog.info(DATA)
             if machines_to_go == 0:
                 # All machines reported: one second of test time consumed.
                 duration = duration - 1
                 machines_to_go = len(self.machines)
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+"
     )
     return (True)
 def run(self):
     """Poll port statistics on all PROX machines and post each sample.

     Every 0.5 s each machine is polled; a machine's delta row is logged
     and posted (via self.post_data) only when its TSC advanced since the
     previous poll.  The remaining duration is decremented by one each
     time every machine has produced a row.

     Returns True when the configured runtime has elapsed.
     """
     RapidLog.info(
         "+---------------------------------------------------------------------------+"
     )
     RapidLog.info(
         "| Measuring port statistics on 1 or more PROX instances                     |"
     )
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+"
     )
     RapidLog.info(
         "| PROX ID   |    Time   |    RX      |     TX     | no MBUFS   | ierr&imiss |"
     )
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+"
     )
     duration = float(self.test['runtime'])
     machine_count = len(self.machines)
     # Baseline counters from the previous poll, one slot per machine.
     prev_rx = [0] * machine_count
     prev_tx = [0] * machine_count
     prev_no_mbufs = [0] * machine_count
     prev_errors = [0] * machine_count
     prev_tsc = [0] * machine_count
     # Counters from the current poll.
     cur_rx = [0] * machine_count
     cur_tx = [0] * machine_count
     cur_no_mbufs = [0] * machine_count
     cur_errors = [0] * machine_count
     cur_tsc = [0] * machine_count
     machines_to_go = machine_count
     for idx, machine in enumerate(self.machines):
         machine.reset_stats()
         # Take an initial baseline sample for this machine.
         (prev_rx[idx], prev_tx[idx], prev_no_mbufs[idx], prev_errors[idx],
          prev_tsc[idx]) = machine.multi_port_stats()
     while duration > 0:
         time.sleep(0.5)
         # Poll each machine; only machines whose TSC advanced report a row.
         for idx, machine in enumerate(self.machines):
             (cur_rx[idx], cur_tx[idx], cur_no_mbufs[idx], cur_errors[idx],
              cur_tsc[idx]) = machine.multi_port_stats()
             if cur_tsc[idx] - prev_tsc[idx] == 0:
                 # No fresh sample from this machine yet.
                 continue
             delta_rx = cur_rx[idx] - prev_rx[idx]
             delta_tx = cur_tx[idx] - prev_tx[idx]
             delta_no_mbufs = cur_no_mbufs[idx] - prev_no_mbufs[idx]
             delta_errors = cur_errors[idx] - prev_errors[idx]
             machines_to_go -= 1
             prev_rx[idx] = cur_rx[idx]
             prev_tx[idx] = cur_tx[idx]
             prev_no_mbufs[idx] = cur_no_mbufs[idx]
             prev_errors[idx] = cur_errors[idx]
             prev_tsc[idx] = cur_tsc[idx]
             RapidLog.info('|{:>10.0f}'.format(idx) +
                           ' |{:>10.0f}'.format(duration) + ' | ' +
                           '{:>10.0f}'.format(delta_rx) + ' | ' +
                           '{:>10.0f}'.format(delta_tx) + ' | ' +
                           '{:>10.0f}'.format(delta_no_mbufs) + ' | ' +
                           '{:>10.0f}'.format(delta_errors) + ' |')
             variables = {
                 'test': self.test['test'],
                 'environment_file': self.test['environment_file'],
                 'PROXID': idx,
                 'StepSize': duration,
                 'Received': delta_rx,
                 'Sent': delta_tx,
                 'NoMbufs': delta_no_mbufs,
                 'iErrMiss': delta_errors
             }
             self.post_data('rapid_corestatstest', variables)
             if machines_to_go == 0:
                 # All machines reported: one second of test time consumed.
                 duration = duration - 1
                 machines_to_go = machine_count
     RapidLog.info(
         "+-----------+-----------+------------+------------+------------+------------+"
     )
     return (True)
示例#26
0
 def run(self):
     """Sweep packet sizes (imixs) and flow counts, searching per flow
     count for the highest generator speed that still meets the success
     criteria configured in self.test: drop rate below
     'drop_rate_threshold' (or zero loss when that threshold is 0) and
     average / percentile / maximum latency below their thresholds.
     Each end result is logged as a table row and posted through
     post_data('rapid_flowsizetest', ...).

     Returns:
         tuple: (TestPassed, result_details). TestPassed is False for
         'fixed_rate' tests (they only report numbers) or when an end
         result missed self.test['pass_threshold']; result_details is
         a dict describing the last recorded measurement.
     """
     result_details = {'Details': 'Nothing'}
     self.gen_machine.start_latency_cores()
     TestPassed = True
     for imix in self.test['imixs']:
         # Average packet size of the imix; used for reporting and for
         # speed <-> pps conversions.
         size = mean(imix)
         self.gen_machine.set_udp_packet_size(imix)
         if self.background_machines:
             backgroundinfo = '{}Running {} x background traffic not represented in the table{}'.format(
                 bcolors.FLASH, len(self.background_machines), bcolors.ENDC)
         else:
             backgroundinfo = '{}{}'.format(bcolors.FLASH, bcolors.ENDC)
         self.set_background_size(self.background_machines, imix)
         RapidLog.info(
             "+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+"
         )
         RapidLog.info(
             '| UDP, {:>5} bytes, different number of flows by randomizing SRC & DST UDP port. {:116.116}|'
             .format(size, backgroundinfo))
         RapidLog.info(
             "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
         )
         RapidLog.info(
             '| Flows  | Speed requested  | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f} Pcentil| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|L.Ratio|Time|'
             .format(self.test['lat_percentile'] * 100))
         RapidLog.info(
             "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
         )
         for flow_number in self.test['flows']:
             attempts = 0
             self.gen_machine.reset_stats()
             if self.sut_machine:
                 self.sut_machine.reset_stats()
             # set_flows may round the requested flow count to what the
             # generator supports; use the value it returns.
             flow_number = self.gen_machine.set_flows(flow_number)
             self.set_background_flows(self.background_machines,
                                       flow_number)
             # endspeed stays None until an iteration succeeds (or a
             # fixed_rate iteration completes).
             endspeed = None
             speed = self.get_start_speed_and_init(size)
             self.record_start_time()
             while True:
                 attempts += 1
                 endwarning = False
                 print(str(flow_number) +
                       ' flows: Measurement ongoing at speed: ' +
                       str(round(speed, 2)) + '%      ',
                       end='\r')
                 sys.stdout.flush()
                 # Start generating packets at requested speed (in % of a 10Gb/s link)
                 self.gen_machine.set_generator_speed(speed)
                 self.set_background_speed(self.background_machines, speed)
                 self.start_background_traffic(self.background_machines)
                 # Get statistics now that the generation is stable and initial ARP messages are dealt with
                 pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, abs_tx, abs_rx, abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration, avg_bg_rate, bucket_size, buckets = self.run_iteration(
                     float(self.test['runtime']), flow_number, size, speed)
                 self.stop_background_traffic(self.background_machines)
                 if r > 1:
                     retry_warning = bcolors.WARNING + ' {:1} retries needed'.format(
                         r) + bcolors.ENDC
                 else:
                     retry_warning = ''
                 # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
                 # If the sum is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
                 if (drop_rate + lat_used * 100) < 95:
                     lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(
                         lat_used * 100) + bcolors.ENDC
                 else:
                     lat_warning = ''
                 if self.test['test'] == 'fixed_rate':
                     # fixed_rate: no speed search, just record this
                     # iteration's numbers as the end result.
                     endspeed = speed
                     endpps_req_tx = None
                     endpps_tx = None
                     endpps_sut_tx = None
                     endpps_rx = None
                     endlat_avg = lat_avg
                     endlat_perc = lat_perc
                     endlat_perc_max = lat_perc_max
                     endlat_max = lat_max
                     endbuckets = buckets
                     endabs_dropped = abs_dropped
                     enddrop_rate = drop_rate
                     endabs_tx = abs_tx
                     endabs_rx = abs_rx
                     endavg_bg_rate = avg_bg_rate
                     if lat_warning or retry_warning:
                         endwarning = '|        | {:177.177} |'.format(
                             retry_warning + lat_warning)
                     success = True
                     TestPassed = False  # fixed rate testing cannot be True, it is just reporting numbers every second
                     speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                 # The following if statement is testing if we pass the success criteria of a certain drop rate, average latency and maximum latency below the threshold
                 # The drop rate success can be achieved in 2 ways: either the drop rate is below a treshold, either we want that no packet has been lost during the test
                 # This can be specified by putting 0 in the .test file
                 elif (
                     (drop_rate < self.test['drop_rate_threshold']) or
                     (abs_dropped == self.test['drop_rate_threshold'] == 0)
                 ) and (lat_avg < self.test['lat_avg_threshold']) and (
                         lat_perc < self.test['lat_perc_threshold']) and (
                             lat_max < self.test['lat_max_threshold']):
                     # More than 1% difference between requested and
                     # achieved transmit rate: warn about a generator or
                     # network limit.
                     if (old_div((self.get_pps(speed, size) - pps_tx),
                                 self.get_pps(speed, size))) > 0.01:
                         speed_prefix = bcolors.WARNING
                         if abs_tx_fail > 0:
                             gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(
                                 self.get_pps(speed, size), pps_tx,
                                 abs_tx_fail) + bcolors.ENDC
                         else:
                             gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(
                                 self.get_pps(speed, size),
                                 pps_tx) + bcolors.ENDC
                     else:
                         speed_prefix = bcolors.ENDC
                         gen_warning = ''
                     endspeed = speed
                     endpps_req_tx = pps_req_tx
                     endpps_tx = pps_tx
                     endpps_sut_tx = pps_sut_tx
                     endpps_rx = pps_rx
                     endlat_avg = lat_avg
                     endlat_perc = lat_perc
                     endlat_perc_max = lat_perc_max
                     endlat_max = lat_max
                     endbuckets = buckets
                     endabs_dropped = None
                     enddrop_rate = drop_rate
                     endabs_tx = abs_tx
                     endabs_rx = abs_rx
                     endavg_bg_rate = avg_bg_rate
                     if lat_warning or gen_warning or retry_warning:
                         endwarning = '|        | {:186.186} |'.format(
                             retry_warning + lat_warning + gen_warning)
                     success = True
                     success_message = ' SUCCESS'
                     speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                     # FIX: lat_perc_prefix was missing from this call,
                     # shifting every following prefix argument by one
                     # position compared to the other report_result calls.
                     RapidLog.debug(
                         self.report_result(
                             -attempts, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg, lat_perc,
                             lat_perc_max, lat_max, abs_tx, abs_rx,
                             abs_dropped, actual_duration, speed_prefix,
                             lat_avg_prefix, lat_perc_prefix,
                             lat_max_prefix, abs_drop_rate_prefix,
                             drop_rate_prefix) +
                         success_message + retry_warning + lat_warning +
                         gen_warning)
                 else:
                     # Failure: colour each metric that missed its
                     # threshold in FAIL red for the debug report.
                     success_message = ' FAILED'
                     abs_drop_rate_prefix = bcolors.ENDC
                     if ((abs_dropped > 0)
                             and (self.test['drop_rate_threshold'] == 0)):
                         abs_drop_rate_prefix = bcolors.FAIL
                     if (drop_rate < self.test['drop_rate_threshold']):
                         drop_rate_prefix = bcolors.ENDC
                     else:
                         drop_rate_prefix = bcolors.FAIL
                     if (lat_avg < self.test['lat_avg_threshold']):
                         lat_avg_prefix = bcolors.ENDC
                     else:
                         lat_avg_prefix = bcolors.FAIL
                     if (lat_perc < self.test['lat_perc_threshold']):
                         lat_perc_prefix = bcolors.ENDC
                     else:
                         lat_perc_prefix = bcolors.FAIL
                     if (lat_max < self.test['lat_max_threshold']):
                         lat_max_prefix = bcolors.ENDC
                     else:
                         lat_max_prefix = bcolors.FAIL
                     if ((old_div((self.get_pps(speed, size) - pps_tx),
                                  self.get_pps(speed, size))) < 0.001):
                         speed_prefix = bcolors.ENDC
                     else:
                         speed_prefix = bcolors.FAIL
                     success = False
                     RapidLog.debug(
                         self.report_result(
                             -attempts, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg, lat_perc,
                             lat_perc_max, lat_max, abs_tx, abs_rx,
                             abs_dropped, actual_duration, speed_prefix,
                             lat_avg_prefix, lat_perc_prefix,
                             lat_max_prefix, abs_drop_rate_prefix,
                             drop_rate_prefix) + success_message +
                         retry_warning + lat_warning)
                 # Binary-search style adjustment of the speed; stop when
                 # the configured resolution is reached (or on first
                 # failure for 'increment_till_fail').
                 speed = self.new_speed(speed, size, success)
                 if self.test['test'] == 'increment_till_fail':
                     if not success:
                         break
                 elif self.resolution_achieved():
                     break
             self.record_stop_time()
             if endspeed is not None:
                 if TestPassed and (endpps_rx <
                                    self.test['pass_threshold']):
                     TestPassed = False
                 speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                 RapidLog.info(
                     self.report_result(
                         flow_number, size, endspeed, endpps_req_tx,
                         endpps_tx, endpps_sut_tx, endpps_rx, endlat_avg,
                         endlat_perc, endlat_perc_max, endlat_max,
                         endabs_tx, endabs_rx, endabs_dropped,
                         actual_duration, speed_prefix, lat_avg_prefix,
                         lat_perc_prefix, lat_max_prefix,
                         abs_drop_rate_prefix, drop_rate_prefix))
                 if endavg_bg_rate:
                     tot_avg_rx_rate = endpps_rx + (
                         endavg_bg_rate * len(self.background_machines))
                     endtotaltrafficrate = '|        | Total amount of traffic received by all generators during this test: {:>4.3f} Gb/s {:7.3f} Mpps {} |'.format(
                         RapidTest.get_speed(tot_avg_rx_rate, size),
                         tot_avg_rx_rate, ' ' * 84)
                     RapidLog.info(endtotaltrafficrate)
                 if endwarning:
                     RapidLog.info(endwarning)
                 RapidLog.info(
                     "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
                 )
                 if self.test['test'] != 'fixed_rate':
                     result_details = {
                         'test': self.test['testname'],
                         'environment_file': self.test['environment_file'],
                         'start_date': self.start,
                         'stop_date': self.stop,
                         'Flows': flow_number,
                         'Size': size,
                         'RequestedSpeed':
                         RapidTest.get_pps(endspeed, size),
                         'CoreGenerated': endpps_req_tx,
                         'SentByNIC': endpps_tx,
                         'FwdBySUT': endpps_sut_tx,
                         'RevByCore': endpps_rx,
                         'AvgLatency': endlat_avg,
                         'PCTLatency': endlat_perc,
                         'MaxLatency': endlat_max,
                         'PacketsSent': endabs_tx,
                         'PacketsReceived': endabs_rx,
                         'PacketsLost': endabs_dropped,
                         'bucket_size': bucket_size,
                         'buckets': endbuckets
                     }
                     self.post_data('rapid_flowsizetest', result_details)
             else:
                 RapidLog.info('|{:>7}'.format(str(flow_number)) +
                               " | Speed 0 or close to 0")
     self.gen_machine.stop_latency_cores()
     return (TestPassed, result_details)
示例#27
0
 def run_tests(test_params):
     """Parse the configuration, create all machines, start PROX on them
     and run every test listed in test_params['tests'].

     Exactly one generator machine and at most one SUT machine may be
     flagged with 'monitor'; any additional generator machines become
     background traffic generators.

     Args:
         test_params (dict): parsed test parameters; must contain at
             least 'machines', 'tests', 'key', 'user', 'vim_type',
             'rundir', 'ipv6', 'configonly', 'runtime', 'TestName',
             'environment_file' and 'lat_percentile'.

     Returns:
         bool: False as soon as any single test reports a falsy result,
         True otherwise.

     Raises:
         Exception: when more than one generator or more than one SUT
             is configured for monitoring.
     """
     test_params = RapidConfigParser.parse_config(test_params)
     RapidLog.debug(test_params)
     monitor_gen = monitor_sut = False
     background_machines = []
     sut_machine = gen_machine = None
     machines = []
     for machine_params in test_params['machines']:
         if 'gencores' in machine_params.keys():
             # Machine with generator cores: either the monitored
             # generator or a background traffic generator.
             machine = RapidGeneratorMachine(test_params['key'],
                     test_params['user'], test_params['vim_type'],
                     test_params['rundir'], machine_params,
                     test_params['ipv6'])
             if machine_params['monitor']:
                 if monitor_gen:
                     RapidLog.exception("Can only monitor 1 generator")
                     raise Exception("Can only monitor 1 generator")
                 else:
                     monitor_gen = True
                     gen_machine = machine
             else:
                 background_machines.append(machine)
         else:
             machine = RapidMachine(test_params['key'], test_params['user'],
                     test_params['vim_type'], test_params['rundir'],
                     machine_params)
             if machine_params['monitor']:
                 if monitor_sut:
                     RapidLog.exception("Can only monitor 1 sut")
                     raise Exception("Can only monitor 1 sut")
                 else:
                     monitor_sut = True
                     sut_machine = machine
         machines.append(machine)
     if test_params['configonly']:
         # Configuration-only run: machines are created but no PROX is
         # started and no test is executed.
         sys.exit()
     for machine in machines:
         machine.start_prox()
     result = True
     for test_param in test_params['tests']:
         RapidLog.info(test_param['test'])
         if test_param['test'] in ['flowsizetest', 'TST009test',
                 'fixed_rate', 'increment_till_fail']:
             test = FlowSizeTest(test_param, test_params['lat_percentile'],
                     test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], gen_machine,
                     sut_machine, background_machines)
         elif test_param['test'] in ['corestats']:
             test = CoreStatsTest(test_param, test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], machines)
         elif test_param['test'] in ['portstats']:
             test = PortStatsTest(test_param, test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], machines)
         elif test_param['test'] in ['impairtest']:
             test = ImpairTest(test_param, test_params['lat_percentile'],
                     test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], gen_machine,
                     sut_machine)
         elif test_param['test'] in ['irqtest']:
             test = IrqTest(test_param, test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], machines)
         elif test_param['test'] in ['warmuptest']:
             test = WarmupTest(test_param, gen_machine)
         else:
             RapidLog.debug('Test name ({}) is not valid:'.format(
                 test_param['test']))
             # BUGFIX: skip unknown test names. Without this 'continue',
             # 'test' would be unbound on the first iteration (NameError)
             # or stale from a previous iteration, silently re-running
             # the previous test.
             continue
         single_test_result = test.run()
         # NOTE(review): some tests' run() returns a tuple; a non-empty
         # tuple is always truthy, so those tests can never set result
         # to False here — confirm intended pass/fail handling.
         if not single_test_result:
             result = False
     return (result)
示例#28
0
 def run(self):
     """Sweep packet sizes (imixs) and flow counts, searching per flow
     count for the highest generator speed that still meets the success
     criteria in self.test (drop rate and average / percentile / maximum
     latency thresholds). Iteration data is kept in dicts
     (iteration_data / end_data) rather than individual variables.

     Returns:
         tuple: (TestResult, result_details). TestResult accumulates
         end_data['pps_rx'] over all non-'fixed_rate' measurements;
         result_details is whatever the last post_data call returned.
     """
     result_details = {'Details': 'Nothing'}
     TestResult = 0
     end_data = {}
     iteration_prefix = {}
     self.warm_up()
     for imix in self.test['imixs']:
         # Average packet size of the imix; used for reporting and for
         # speed <-> pps conversions.
         size = mean(imix)
         self.gen_machine.set_udp_packet_size(imix)
         if self.background_machines:
             backgroundinfo = ('{}Running {} x background traffic not '
                 'represented in the table{}').format(bcolors.FLASH,
                         len(self.background_machines),bcolors.ENDC)
         else:
             backgroundinfo = '{}{}'.format(bcolors.FLASH,bcolors.ENDC)
         self.set_background_size(self.background_machines, imix)
         # Table header for this packet size.
         RapidLog.info('+' + '-' * 188 + '+')
         RapidLog.info(("| UDP, {:>5} bytes, different number of flows by "
             "randomizing SRC & DST UDP port. {:116.116}|").
             format(round(size), backgroundinfo))
         RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
                 '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                 '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
                 + '+' + '-' * 11 + '+' + '-' * 11 + '+'  + '-' * 11 +  '+'
                 + '-' * 7 + '+' + '-' * 4 + '+')
         RapidLog.info(('| Flows  | Speed requested  | Gen by core | Sent by'
             ' NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f}'
             ' Pcentil| Max. Lat.|   Sent    |  Received |    Lost   | Total'
             ' Lost|L.Ratio|Time|').format(self.test['lat_percentile']*100))
         RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
                 '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                 '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
                 + '+' + '-' * 11 + '+' + '-' * 11 + '+'  + '-' * 11 +  '+'
                 + '-' * 7 + '+' + '-' * 4 + '+')
         for flow_number in self.test['flows']:
             attempts = 0
             self.gen_machine.reset_stats()
             if self.sut_machine:
                 self.sut_machine.reset_stats()
             # set_flows may round the requested flow count to what the
             # generator supports; use the value it returns.
             flow_number = self.gen_machine.set_flows(flow_number)
             self.set_background_flows(self.background_machines, flow_number)
             # 'speed' is None until an iteration succeeds (or a
             # fixed_rate iteration records its data).
             end_data['speed'] = None
             speed = self.get_start_speed_and_init(size)
             while True:
                 attempts += 1
                 endwarning = False
                 print('{} flows: Measurement ongoing at speed: {}%'.format(
                     str(flow_number), str(round(speed, 2))), end='     \r')
                 sys.stdout.flush()
                 # run_iteration returns a dict of all measurements for
                 # this speed step.
                 iteration_data = self.run_iteration(
                         float(self.test['runtime']),flow_number,size,speed)
                 if iteration_data['r'] > 1:
                     retry_warning = '{} {:1} retries needed{}'.format(
                             bcolors.WARNING, iteration_data['r'],
                             bcolors.ENDC)
                 else:
                     retry_warning = ''
                 # Drop rate is expressed in percentage. lat_used is a ratio
                 # (0 to 1). The sum of these 2 should be 100%.
                 # If the sum is lower than 95, it means that more than 5%
                 # of the latency measurements where dropped for accuracy
                 # reasons.
                 if (iteration_data['drop_rate'] +
                         iteration_data['lat_used'] * 100) < 95:
                     lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
                         '{}').format(bcolors.WARNING,
                                 iteration_data['lat_used'] * 100,
                                 bcolors.ENDC)
                 else:
                     lat_warning = ''
                 # Start with neutral colour prefixes; failure branches
                 # below switch individual metrics to FAIL colouring.
                 iteration_prefix = {'speed' : bcolors.ENDC,
                         'lat_avg' : bcolors.ENDC,
                         'lat_perc' : bcolors.ENDC,
                         'lat_max' : bcolors.ENDC,
                         'abs_drop_rate' : bcolors.ENDC,
                         'drop_rate' : bcolors.ENDC}
                 if self.test['test'] == 'fixed_rate':
                     # fixed_rate: no speed search, just record this
                     # iteration as the end result.
                     end_data = copy.deepcopy(iteration_data)
                     end_prefix = copy.deepcopy(iteration_prefix)
                     if lat_warning or retry_warning:
                         endwarning = '|        | {:177.177} |'.format(
                                 retry_warning + lat_warning)
                     success = True
                     # TestResult = TestResult + iteration_data['pps_rx']
                     # fixed rate testing result is strange: we just report
                     # the pps received
                 # The following if statement is testing if we pass the
                 # success criteria of a certain drop rate, average latency
                 # and maximum latency below the threshold.
                 # The drop rate success can be achieved in 2 ways: either
                 # the drop rate is below a treshold, either we want that no
                 # packet has been lost during the test.
                 # This can be specified by putting 0 in the .test file
                 elif ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped']==self.test['drop_rate_threshold']==0)) and (iteration_data['lat_avg']< self.test['lat_avg_threshold']) and (iteration_data['lat_perc']< self.test['lat_perc_threshold']) and (iteration_data['lat_max'] < self.test['lat_max_threshold']):
                     # More than 1% gap between requested and achieved
                     # transmit rate: warn about generator/network limit.
                     if (old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))>0.01:
                         iteration_prefix['speed'] = bcolors.WARNING
                         if iteration_data['abs_tx_fail'] > 0:
                             gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(self.get_pps(speed,size), iteration_data['pps_tx'], iteration_data['abs_tx_fail']) + bcolors.ENDC
                         else:
                             gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(self.get_pps(speed,size), iteration_data['pps_tx']) + bcolors.ENDC
                     else:
                         iteration_prefix['speed'] = bcolors.ENDC
                         gen_warning = ''
                     end_data = copy.deepcopy(iteration_data)
                     end_prefix = copy.deepcopy(iteration_prefix)
                     if lat_warning or gen_warning or retry_warning:
                         endwarning = '|        | {:186.186} |'.format(retry_warning + lat_warning + gen_warning)
                     success = True
                     success_message=' SUCCESS'
                     RapidLog.debug(self.report_result(-attempts, size,
                         iteration_data, iteration_prefix) + success_message +
                         retry_warning + lat_warning + gen_warning)
                 else:
                     # Failure: colour each metric that missed its
                     # threshold in FAIL red for the debug report.
                     success_message=' FAILED'
                     if ((iteration_data['abs_dropped']>0) and (self.test['drop_rate_threshold'] ==0)):
                         iteration_prefix['abs_drop_rate'] = bcolors.FAIL
                     if (iteration_data['drop_rate'] < self.test['drop_rate_threshold']):
                         iteration_prefix['drop_rate'] = bcolors.ENDC
                     else:
                         iteration_prefix['drop_rate'] = bcolors.FAIL
                     if (iteration_data['lat_avg']< self.test['lat_avg_threshold']):
                         iteration_prefix['lat_avg'] = bcolors.ENDC
                     else:
                         iteration_prefix['lat_avg'] = bcolors.FAIL
                     if (iteration_data['lat_perc']< self.test['lat_perc_threshold']):
                         iteration_prefix['lat_perc'] = bcolors.ENDC
                     else:
                         iteration_prefix['lat_perc'] = bcolors.FAIL
                     if (iteration_data['lat_max']< self.test['lat_max_threshold']):
                         iteration_prefix['lat_max'] = bcolors.ENDC
                     else:
                         iteration_prefix['lat_max'] = bcolors.FAIL
                     if ((old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))<0.001):
                         iteration_prefix['speed'] = bcolors.ENDC
                     else:
                         iteration_prefix['speed'] = bcolors.FAIL
                     success = False
                     RapidLog.debug(self.report_result(-attempts, size,
                         iteration_data, iteration_prefix) +
                         success_message + retry_warning + lat_warning)
                 # Binary-search style adjustment of the speed; stop when
                 # the configured resolution is reached (or on first
                 # failure for 'increment_till_fail').
                 speed = self.new_speed(speed, size, success)
                 if self.test['test'] == 'increment_till_fail':
                     if not success:
                         break
                 elif self.resolution_achieved():
                     break
             # No successful iteration: report the last failed step so
             # the operator still sees data for this flow count.
             if end_data['speed'] is None:
                 end_data = iteration_data
                 end_prefix = iteration_prefix
                 RapidLog.info('|{:>7} | {:<177} |'.format("FAILED","Speed 0 or close to 0, data for last failed step below:"))
             RapidLog.info(self.report_result(flow_number, size,
                 end_data, end_prefix))
             if end_data['avg_bg_rate']:
                 tot_avg_rx_rate = end_data['pps_rx'] + (end_data['avg_bg_rate'] * len(self.background_machines))
                 endtotaltrafficrate = '|        | Total amount of traffic received by all generators during this test: {:>4.3f} Gb/s {:7.3f} Mpps {} |'.format(RapidTest.get_speed(tot_avg_rx_rate,size) , tot_avg_rx_rate, ' '*84)
                 RapidLog.info (endtotaltrafficrate)
             if endwarning:
                 RapidLog.info (endwarning)
             if self.test['test'] != 'fixed_rate':
                 TestResult = TestResult + end_data['pps_rx']
                 end_data['test'] = self.test['testname']
                 end_data['environment_file'] = self.test['environment_file']
                 end_data['Flows'] = flow_number
                 end_data['Size'] = size
                 end_data['RequestedSpeed'] = RapidTest.get_pps(end_data['speed'] ,size)
                 result_details = self.post_data(end_data)
                 RapidLog.debug(result_details)
             RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
                 '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                 '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
                 + '+' + '-' * 11 + '+' + '-' * 11 + '+'  + '-' * 11 +  '+'
                 + '-' * 7 + '+' + '-' * 4 + '+')
     return (TestResult, result_details)
示例#29
0
 def run(self):
     RapidLog.info(
         "+----------------------------------------------------------------------------------------------------------------------------+"
     )
     RapidLog.info(
         "| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic   |"
     )
     RapidLog.info(
         "| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and    |"
     )
     RapidLog.info(
         "| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was       |"
     )
     RapidLog.info(
         "| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout   |"
     )
     RapidLog.info(
         "| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 |"
     )
     RapidLog.info(
         "+----------------------------------------------------------------------------------------------------------------------------+"
     )
     sys.stdout.flush()
     max_loop_duration = 0
     machine_details = {}
     for machine in self.machines:
         buckets = machine.socket.show_irq_buckets(1)
         if max_loop_duration == 0:
             # First time we go through the loop, we need to initialize
             # result_details
             result_details = {
                 'test': self.test['testname'],
                 'environment_file': self.test['environment_file'],
                 'buckets': buckets
             }
         print('Measurement ongoing ... ', end='\r')
         machine.start()  # PROX cores will be started within 0 to 1 seconds
         # That is why we sleep a bit over 1 second to make sure all cores
         # are started
         time.sleep(1.2)
         old_irq = [[0 for x in range(len(buckets))]
                    for y in range(len(machine.get_cores()))]
         irq = [[0 for x in range(len(buckets))]
                for y in range(len(machine.get_cores()))]
         column_names = []
         for bucket in buckets:
             column_names.append('<{}'.format(bucket))
         column_names[-1] = '>{}'.format(buckets[-2])
         for j, bucket in enumerate(buckets):
             for i, irqcore in enumerate(machine.get_cores()):
                 old_irq[i][j] = machine.socket.irq_stats(irqcore, j)
         # Measurements in the loop above, are updated by PROX every second
         # This means that taking the same measurement 0.5 second later
         # might result in the same data or data from the next 1s window
         time.sleep(float(self.test['runtime']))
         row_names = []
         for i, irqcore in enumerate(machine.get_cores()):
             row_names.append(irqcore)
             for j, bucket in enumerate(buckets):
                 diff = machine.socket.irq_stats(irqcore, j) - old_irq[i][j]
                 if diff == 0:
                     irq[i][j] = '0'
                 else:
                     irq[i][j] = str(
                         round(old_div(diff, float(self.test['runtime'])),
                               2))
                     if max_loop_duration < int(bucket):
                         max_loop_duration = int(bucket)
         # Measurements in the loop above, are updated by PROX every second
         # This means that taking the same measurement 0.5 second later
         # might result in the same data or data from the next 1s window
         # Conclusion: we don't know the exact window size.
         # Real measurement windows might be wrong by 1 second
         # This could be fixed in this script by checking this data every
         # 0.5 seconds Not implemented since we can also run this test for
         # a longer time and decrease the error. The absolute number of
         # interrupts is not so important.
         machine.stop()
         core_details = {}
         RapidLog.info('Results for PROX instance %s' % machine.name)
         RapidLog.info(
             '{:>12}'.format('bucket us') +
             ''.join(['{:>12}'.format(item) for item in column_names]))
         for j, row in enumerate(irq):
             RapidLog.info('Core {:>7}'.format(row_names[j]) +
                           ''.join(['{:>12}'.format(item) for item in row]))
             core_details['Core {}'.format(row_names[j])] = row
         machine_details[machine.name] = core_details
     result_details['machine_data'] = machine_details
     result_details = self.post_data(result_details)
     return (500000 - max_loop_duration, result_details)