Ejemplo n.º 1
0
 def start_prox(self, autostart=''):
     """Open the PROX control connection and optionally launch PROX.

     Connects to the target, performs VIM-specific preparation, copies
     the PROX configuration file to the run directory and, unless in
     config-only mode, runs the PROX binary in the foreground.
     """
     if not self.machine_params['prox_socket']:
         return
     self._client = prox_ctrl(self.ip, self.key, self.user,
                              self.password)
     self._client.test_connection()
     # VIM-specific preparation before PROX can be started.
     if self.vim in ['OpenStack']:
         self.devbind()
     if self.vim in ['kubernetes']:
         self.read_cpuset()
         self.read_cpuset_mems()
         self.remap_all_cpus()
     prox_config_file_name = os.path.basename(
         self.machine_params['config_file'])
     if not self.machine_params['prox_launch_exit']:
         return
     self.generate_lua()
     self._client.scp_put(
         self.machine_params['config_file'],
         '{}/{}'.format(self.rundir, prox_config_file_name))
     if self.configonly:
         return
     cmd = 'sudo {}/prox {} -t -o cli -f {}/{}'.format(
         self.rundir, autostart, self.rundir, prox_config_file_name)
     RapidLog.debug("Starting PROX on {}: {}".format(self.name, cmd))
     result = self._client.run_cmd(cmd)
     RapidLog.debug("Finished PROX on {}: {}".format(self.name, cmd))
Ejemplo n.º 2
0
 def start_prox(self, autostart=''):
     """Fork a PROX instance on the target (if requested) and attach to its socket."""
     if self.machine_params['prox_launch_exit']:
         prox_cmd = 'sudo {}/prox {} -t -o cli -f {}/{}'.format(
             self.rundir, autostart, self.rundir,
             self.machine_params['config_file'])
         fork_result = self._client.fork_cmd(
             prox_cmd, 'PROX Testing on {}'.format(self.name))
         RapidLog.debug("Starting PROX on {}: {}, {}".format(
             self.name, prox_cmd, fork_result))
     # Connect regardless of whether PROX was launched by us.
     self.socket = self._client.connect_socket()
Ejemplo n.º 3
0
 def read_cpuset_mems(self):
     """Query the cgroup for the NUMA nodes this machine may allocate memory on."""
     raw_mems = self._client.run_cmd(
         'cat /sys/fs/cgroup/cpuset/cpuset.mems').decode().rstrip()
     RapidLog.debug('{} ({}): Allowed NUMA nodes: {}'.format(
         self.name, self.ip, raw_mems))
     # Expand a cpuset-style range string (e.g. "0-1,3") into a list.
     self.numa_nodes = self.expand_list_format(raw_mems)
     RapidLog.debug('{} ({}): Expanded allowed NUMA nodes: {}'.format(
         self.name, self.ip, self.numa_nodes))
Ejemplo n.º 4
0
 def devbind(self):
     """Bind the data-plane interfaces to the poll mode driver.

     For every data-plane port: upload the devbind helper script, patch
     the port's MAC address into it and — unless running config-only or
     PROX is not to be launched — execute it on the target.
     """
     for index, dp_port in enumerate(self.dp_ports, start=1):
         script_path = '{}/devbind-{}-port{}.sh'.format(
             self.rundir, self.ip, index)
         self._client.scp_put('./devbind.sh', script_path)
         sed_cmd = "sed -i 's/MACADDRESS/{}/' {}".format(
             dp_port['mac'], script_path)
         result = self._client.run_cmd(sed_cmd)
         RapidLog.debug('devbind.sh MAC updated for port {} on {} {}'.format(
             index, self.name, result))
         if (not self.configonly) and self.machine_params['prox_launch_exit']:
             result = self._client.run_cmd(script_path)
             RapidLog.debug('devbind.sh running for port {} on {} {}'.format(
                 index, self.name, result))
Ejemplo n.º 5
0
 def start_prox(self, configonly=False, autostart=''):
     """Connect to the machine, stage the PROX config and optionally run PROX."""
     if not self.machine_params['prox_socket']:
         return
     self._client = prox_ctrl(self.ip, self.key, self.user)
     self._client.connect()
     if self.vim in ['OpenStack']:
         self.devbind(configonly)
     self.generate_lua(self.vim)
     config_file = self.machine_params['config_file']
     self._client.scp_put(
         config_file, '{}/{}'.format(self.rundir, config_file))
     if configonly or not self.machine_params['prox_launch_exit']:
         return
     cmd = 'sudo {}/prox {} -t -o cli -f {}/{}'.format(
         self.rundir, autostart, self.rundir, config_file)
     RapidLog.debug("Starting PROX on {}: {}".format(self.name, cmd))
     result = self._client.run_cmd(cmd, 'PROX Testing on {}'.format(self.name))
     RapidLog.debug("Finished PROX on {}: {}".format(self.name, cmd))
Ejemplo n.º 6
0
 def generate_lua(self, appendix=''):
     """Generate the per-machine Lua parameter file and upload it.

     Writes parameters-<ip>.lua locally with the machine's name, port
     IPs/MACs (also in hex form), EAL options, core lists and gateway
     addresses taken from machine_params, appends *appendix* verbatim,
     then copies it (as parameters.lua) and helper.lua to the run
     directory on the target.
     """
     self.LuaFileName = 'parameters-{}.lua'.format(self.ip)
     with open(self.LuaFileName, "w") as LuaFile:
         LuaFile.write('require "helper"\n')
         LuaFile.write('name="%s"\n' % self.name)
         # One local_ip/local_hex_ip pair per data-plane port, 1-based.
         for index, dp_port in enumerate(self.dp_ports, start=1):
             LuaFile.write('local_ip{}="{}"\n'.format(index, dp_port['ip']))
             LuaFile.write(
                 'local_hex_ip{}=convertIPToHex(local_ip{})\n'.format(
                     index, index))
         # Kubernetes needs explicit EAL options (socket memory, file
         # prefix and PCI whitelist); other VIMs get an empty eal string.
         if self.vim in ['kubernetes']:
             socket_mem_str = self.get_prox_socket_mem_str()
             RapidLog.debug('{} ({}): PROX socket mem str: {}'.format(
                 self.name, self.ip, socket_mem_str))
             LuaFile.write(
                 "eal=\"--socket-mem=%s --file-prefix %s --pci-whitelist %s\"\n"
                 % (socket_mem_str, self.name,
                    self.machine_params['dp_pci_dev']))
         else:
             LuaFile.write("eal=\"\"\n")
         if 'mcore' in self.machine_params.keys():
             LuaFile.write('mcore="%s"\n' %
                           ','.join(map(str, self.machine_params['mcore'])))
         if 'cores' in self.machine_params.keys():
             LuaFile.write('cores="%s"\n' %
                           ','.join(map(str, self.machine_params['cores'])))
         if 'ports' in self.machine_params.keys():
             LuaFile.write('ports="%s"\n' %
                           ','.join(map(str, self.machine_params['ports'])))
         if 'dest_ports' in self.machine_params.keys():
             for index, dest_port in enumerate(
                     self.machine_params['dest_ports'], start=1):
                 LuaFile.write('dest_ip{}="{}"\n'.format(
                     index, dest_port['ip']))
                 LuaFile.write(
                     'dest_hex_ip{}=convertIPToHex(dest_ip{})\n'.format(
                         index, index))
                 # MAC is written space-separated for PROX's hex notation.
                 LuaFile.write('dest_hex_mac{}="{}"\n'.format(
                     index, dest_port['mac'].replace(':', ' ')))
         if 'gw_vm' in self.machine_params.keys():
             for index, gw_ip in enumerate(self.machine_params['gw_ips'],
                                           start=1):
                 LuaFile.write('gw_ip{}="{}"\n'.format(index, gw_ip))
                 LuaFile.write(
                     'gw_hex_ip{}=convertIPToHex(gw_ip{})\n'.format(
                         index, index))
         LuaFile.write(appendix)
     self._client.scp_put(self.LuaFileName, self.rundir + '/parameters.lua')
     self._client.scp_put('helper.lua', self.rundir + '/helper.lua')
Ejemplo n.º 7
0
 def connect_socket(self):
     """Repeatedly try to open the PROX control socket.

     Retries prox_sock() every 2 seconds and returns the connected
     socket object once it succeeds.
     """
     attempts = 1
     RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
             attempt: %d" % (self._ip, attempts))
     sock = None
     while True:
         sock = self.prox_sock()
         if sock is not None:
             break
         attempts += 1
         # NOTE(review): RapidLog.exception is presumed to abort/raise on
         # failure; if it only logs, this loop keeps retrying forever —
         # confirm against RapidLog's implementation (the sibling
         # connect() follows its exception call with an explicit raise).
         if attempts > 20:
             RapidLog.exception("Failed to connect to PROX on %s after %d \
                     attempts" % (self._ip, attempts))
         time.sleep(2)
         RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
                 attempt: %d" % (self._ip, attempts))
     RapidLog.info("Connected to PROX on %s" % self._ip)
     return sock
Ejemplo n.º 8
0
    def remap_all_cpus(self):
        """Convert relative cpu ids for different parameters (gencores, latcores)

        Runs the base-class remapping first, then remaps the generator
        specific core lists through the allocated cpuset mapping.
        """
        super().remap_all_cpus()

        if self.cpu_mapping is None:
            return

        for param in ('gencores', 'latcores'):
            if param not in self.machine_params.keys():
                continue
            cpus_remapped = super().remap_cpus(self.machine_params[param])
            RapidLog.debug('{} ({}): {} {} remapped to {}'.format(
                self.name, self.ip, param, self.machine_params[param],
                cpus_remapped))
            self.machine_params[param] = cpus_remapped
Ejemplo n.º 9
0
 def connect(self):
     """Wait until the machine accepts a test connection.

     Retries test_connect() every 2 seconds while it raises
     RuntimeWarning.
     """
     attempts = 1
     RapidLog.debug("Trying to connect to machine \
             on %s, attempt: %d" % (self._ip, attempts))
     while True:
         try:
             self.test_connect()
             break
         except RuntimeWarning as ex:
             RapidLog.debug("RuntimeWarning %d:\n%s" %
                            (ex.returncode, ex.output.strip()))
             attempts += 1
             # NOTE(review): RapidLog.exception is presumed to abort/raise;
             # otherwise this loop retries past 20 attempts — the sibling
             # connect() adds an explicit raise here. Confirm intent.
             if attempts > 20:
                 RapidLog.exception("Failed to connect to instance after %d\
                         attempts:\n%s" % (attempts, ex))
             time.sleep(2)
             RapidLog.debug("Trying to connect to machine \
                    on %s, attempt: %d" % (self._ip, attempts))
     RapidLog.debug("Connected to machine on %s" % self._ip)
Ejemplo n.º 10
0
    def read_cpuset(self):
        """Read list of cpus on which we allowed to execute

        Reads this machine's cgroup cpuset, expands it into an explicit
        list stored in self.cpu_mapping (cpu_mapping[i] is the absolute
        cpu id backing relative core i) and logs the resulting mapping.
        """
        cmd = 'cat /sys/fs/cgroup/cpuset/cpuset.cpus'
        cpuset_cpus = self._client.run_cmd(cmd).decode().rstrip()
        RapidLog.debug('{} ({}): Allocated cpuset: {}'.format(self.name, self.ip, cpuset_cpus))
        self.cpu_mapping = self.expand_cpuset(cpuset_cpus)
        RapidLog.debug('{} ({}): Expanded cpuset: {}'.format(self.name, self.ip, self.cpu_mapping))

        # Log CPU core mapping for user information. join() replaces the
        # previous quadratic concatenation loop + trailing-separator trim
        # and yields the identical "[0->2], [1->4]" style string.
        cpu_mapping_str = ', '.join(
            '[{}->{}]'.format(i, cpu) for i, cpu in enumerate(self.cpu_mapping))
        RapidLog.debug('{} ({}): CPU mapping: {}'.format(self.name, self.ip, cpu_mapping_str))
Ejemplo n.º 11
0
    def remap_all_cpus(self):
        """Convert relative cpu ids for different parameters (mcore, cores)

        Each configured core list is translated through the cpuset
        mapping read earlier; without a mapping nothing is changed.
        """
        if self.cpu_mapping is None:
            RapidLog.debug('{} ({}): cpu mapping is not defined! Please check the configuration!'.format(self.name, self.ip))
            return

        for param in ('mcore', 'cores'):
            if param not in self.machine_params.keys():
                continue
            cpus_remapped = self.remap_cpus(self.machine_params[param])
            RapidLog.debug('{} ({}): {} {} remapped to {}'.format(
                self.name, self.ip, param, self.machine_params[param],
                cpus_remapped))
            self.machine_params[param] = cpus_remapped
Ejemplo n.º 12
0
 def connect(self):
     """Wait for a just-launched instance to accept a test connection.

     Retries test_connect() every 2 seconds while it raises
     RuntimeWarning; gives up with an Exception after 20 attempts.
     """
     attempts = 1
     RapidLog.debug("Trying to connect to instance which was just launched \
             on %s, attempt: %d" % (self._ip, attempts))
     while True:
         try:
             self.test_connect()
             break
         except RuntimeWarning as ex:
             attempts += 1
             if attempts > 20:
                 # Log through RapidLog, then abort unconditionally with a
                 # plain Exception so callers cannot loop forever.
                 RapidLog.exception("Failed to connect to instance after %d\
                         attempts:\n%s" % (attempts, ex))
                 raise Exception("Failed to connect to instance after %d \
                         attempts:\n%s" % (attempts, ex))
             time.sleep(2)
             RapidLog.debug("Trying to connect to instance which was just \
                     launched on %s, attempt: %d" % (self._ip, attempts))
     RapidLog.debug("Connected to instance on %s" % self._ip)
Ejemplo n.º 13
0
 def run(self):
     """Search, per imix and per flow count, for the highest speed that
     meets the test's drop-rate and latency thresholds.

     For a 'fixed_rate' test the measured numbers are simply recorded;
     otherwise the speed is adjusted via new_speed() until
     resolution_achieved() (or, for 'increment_till_fail', until the
     first failing step). Results are logged through RapidLog and
     posted via post_data().

     Returns a tuple (TestResult, result_details): the sum of received
     pps over all passing flow counts and the last posted details.
     """
     result_details = {'Details': 'Nothing'}
     TestResult = 0
     end_data = {}
     iteration_prefix = {}
     self.warm_up()
     for imix in self.test['imixs']:
         size = mean(imix)
         self.gen_machine.set_udp_packet_size(imix)
         if self.background_machines:
             backgroundinfo = ('{}Running {} x background traffic not '
                 'represented in the table{}').format(bcolors.FLASH,
                         len(self.background_machines),bcolors.ENDC)
         else:
             backgroundinfo = '{}{}'.format(bcolors.FLASH,bcolors.ENDC)
         self.set_background_size(self.background_machines, imix)
         RapidLog.info('+' + '-' * 188 + '+')
         RapidLog.info(("| UDP, {:>5} bytes, different number of flows by "
             "randomizing SRC & DST UDP port. {:116.116}|").
             format(round(size), backgroundinfo))
         RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
                 '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                 '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
                 + '+' + '-' * 11 + '+' + '-' * 11 + '+'  + '-' * 11 +  '+'
                 + '-' * 7 + '+' + '-' * 4 + '+')
         RapidLog.info(('| Flows  | Speed requested  | Gen by core | Sent by'
             ' NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f}'
             ' Pcentil| Max. Lat.|   Sent    |  Received |    Lost   | Total'
             ' Lost|L.Ratio|Time|').format(self.test['lat_percentile']*100))
         RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
                 '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                 '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
                 + '+' + '-' * 11 + '+' + '-' * 11 + '+'  + '-' * 11 +  '+'
                 + '-' * 7 + '+' + '-' * 4 + '+')
         for flow_number in self.test['flows']:
             attempts = 0
             self.gen_machine.reset_stats()
             if self.sut_machine:
                 self.sut_machine.reset_stats()
             flow_number = self.gen_machine.set_flows(flow_number)
             self.set_background_flows(self.background_machines, flow_number)
             end_data['speed'] = None
             speed = self.get_start_speed_and_init(size)
             # Measurement loop: new_speed() picks the next speed to try
             # until one of the stop conditions at the bottom is met.
             while True:
                 attempts += 1
                 endwarning = False
                 print('{} flows: Measurement ongoing at speed: {}%'.format(
                     str(flow_number), str(round(speed, 2))), end='     \r')
                 sys.stdout.flush()
                 iteration_data = self.run_iteration(
                         float(self.test['runtime']),flow_number,size,speed)
                 if iteration_data['r'] > 1:
                     retry_warning = '{} {:1} retries needed{}'.format(
                             bcolors.WARNING, iteration_data['r'],
                             bcolors.ENDC)
                 else:
                     retry_warning = ''
                 # Drop rate is expressed in percentage. lat_used is a ratio
                 # (0 to 1). The sum of these 2 should be 100%.
                 # If the sum is lower than 95, it means that more than 5%
                 # of the latency measurements where dropped for accuracy
                 # reasons.
                 if (iteration_data['drop_rate'] +
                         iteration_data['lat_used'] * 100) < 95:
                     lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
                         '{}').format(bcolors.WARNING,
                                 iteration_data['lat_used'] * 100,
                                 bcolors.ENDC)
                 else:
                     lat_warning = ''
                 iteration_prefix = {'speed' : bcolors.ENDC,
                         'lat_avg' : bcolors.ENDC,
                         'lat_perc' : bcolors.ENDC,
                         'lat_max' : bcolors.ENDC,
                         'abs_drop_rate' : bcolors.ENDC,
                         'drop_rate' : bcolors.ENDC}
                 if self.test['test'] == 'fixed_rate':
                     end_data = copy.deepcopy(iteration_data)
                     end_prefix = copy.deepcopy(iteration_prefix)
                     if lat_warning or retry_warning:
                         endwarning = '|        | {:177.177} |'.format(
                                 retry_warning + lat_warning)
                     success = True
                     # TestResult = TestResult + iteration_data['pps_rx']
                     # fixed rate testing result is strange: we just report
                     # the pps received
                 # The following if statement is testing if we pass the
                 # success criteria of a certain drop rate, average latency
                 # and maximum latency below the threshold.
                 # The drop rate success can be achieved in 2 ways: either
                 # the drop rate is below a treshold, either we want that no
                 # packet has been lost during the test.
                 # This can be specified by putting 0 in the .test file
                 elif ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped']==self.test['drop_rate_threshold']==0)) and (iteration_data['lat_avg']< self.test['lat_avg_threshold']) and (iteration_data['lat_perc']< self.test['lat_perc_threshold']) and (iteration_data['lat_max'] < self.test['lat_max_threshold']):
                     # Warn when the generator delivered noticeably fewer
                     # packets than requested (>1% short).
                     if (old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))>0.01:
                         iteration_prefix['speed'] = bcolors.WARNING
                         if iteration_data['abs_tx_fail'] > 0:
                             gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(self.get_pps(speed,size), iteration_data['pps_tx'], iteration_data['abs_tx_fail']) + bcolors.ENDC
                         else:
                             gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(self.get_pps(speed,size), iteration_data['pps_tx']) + bcolors.ENDC
                     else:
                         iteration_prefix['speed'] = bcolors.ENDC
                         gen_warning = ''
                     end_data = copy.deepcopy(iteration_data)
                     end_prefix = copy.deepcopy(iteration_prefix)
                     if lat_warning or gen_warning or retry_warning:
                         endwarning = '|        | {:186.186} |'.format(retry_warning + lat_warning + gen_warning)
                     success = True
                     success_message=' SUCCESS'
                     RapidLog.debug(self.report_result(-attempts, size,
                         iteration_data, iteration_prefix) + success_message +
                         retry_warning + lat_warning + gen_warning)
                 else:
                     # Failed step: colour each metric that violated its
                     # threshold for the report line.
                     success_message=' FAILED'
                     if ((iteration_data['abs_dropped']>0) and (self.test['drop_rate_threshold'] ==0)):
                         iteration_prefix['abs_drop_rate'] = bcolors.FAIL
                     if (iteration_data['drop_rate'] < self.test['drop_rate_threshold']):
                         iteration_prefix['drop_rate'] = bcolors.ENDC
                     else:
                         iteration_prefix['drop_rate'] = bcolors.FAIL
                     if (iteration_data['lat_avg']< self.test['lat_avg_threshold']):
                         iteration_prefix['lat_avg'] = bcolors.ENDC
                     else:
                         iteration_prefix['lat_avg'] = bcolors.FAIL
                     if (iteration_data['lat_perc']< self.test['lat_perc_threshold']):
                         iteration_prefix['lat_perc'] = bcolors.ENDC
                     else:
                         iteration_prefix['lat_perc'] = bcolors.FAIL
                     if (iteration_data['lat_max']< self.test['lat_max_threshold']):
                         iteration_prefix['lat_max'] = bcolors.ENDC
                     else:
                         iteration_prefix['lat_max'] = bcolors.FAIL
                     if ((old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))<0.001):
                         iteration_prefix['speed'] = bcolors.ENDC
                     else:
                         iteration_prefix['speed'] = bcolors.FAIL
                     success = False 
                     RapidLog.debug(self.report_result(-attempts, size,
                         iteration_data, iteration_prefix) +
                         success_message + retry_warning + lat_warning)
                 speed = self.new_speed(speed, size, success)
                 if self.test['test'] == 'increment_till_fail':
                     if not success:
                         break
                 elif self.resolution_achieved():
                     break
             # No successful step was recorded: report the data of the
             # last (failed) iteration instead.
             if end_data['speed'] is None:
                 end_data = iteration_data
                 end_prefix = iteration_prefix
                 RapidLog.info('|{:>7} | {:<177} |'.format("FAILED","Speed 0 or close to 0, data for last failed step below:"))
             RapidLog.info(self.report_result(flow_number, size,
                 end_data, end_prefix))
             if end_data['avg_bg_rate']:
                 tot_avg_rx_rate = end_data['pps_rx'] + (end_data['avg_bg_rate'] * len(self.background_machines))
                 endtotaltrafficrate = '|        | Total amount of traffic received by all generators during this test: {:>4.3f} Gb/s {:7.3f} Mpps {} |'.format(RapidTest.get_speed(tot_avg_rx_rate,size) , tot_avg_rx_rate, ' '*84)
                 RapidLog.info (endtotaltrafficrate)
             if endwarning:
                 RapidLog.info (endwarning)
             if self.test['test'] != 'fixed_rate':
                 TestResult = TestResult + end_data['pps_rx']
                 end_data['test'] = self.test['testname']
                 end_data['environment_file'] = self.test['environment_file']
                 end_data['Flows'] = flow_number
                 end_data['Size'] = size
                 end_data['RequestedSpeed'] = RapidTest.get_pps(end_data['speed'] ,size)
                 result_details = self.post_data(end_data)
                 RapidLog.debug(result_details)
             RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
                 '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                 '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
                 + '+' + '-' * 11 + '+' + '-' * 11 + '+'  + '-' * 11 +  '+'
                 + '-' * 7 + '+' + '-' * 4 + '+')
     return (TestResult, result_details)
Ejemplo n.º 14
0
 def run(self):
     """Run a fixed number of measurement steps at a fixed speed.

     The generator sends UDP traffic to the SUT via a gateway that
     drops and delays packets; each step's statistics are reported
     through RapidLog and posted via post_data().

     Returns a tuple (True, result_details) with the last posted
     details.
     """
     result_details = {'Details': 'Nothing'}
     imix = self.test['imix']
     size = mean(imix)
     flow_number = self.test['flowsize']
     # 'steps' is used as the countdown of measurement iterations.
     attempts = self.test['steps']
     self.gen_machine.set_udp_packet_size(imix)
     flow_number = self.gen_machine.set_flows(flow_number)
     self.gen_machine.start_latency_cores()
     RapidLog.info('+' + '-' * 188 + '+')
     RapidLog.info(
         ("| Generator is sending UDP ({:>5} flow) packets ({:>5}"
          " bytes) to SUT via GW dropping and delaying packets. SUT sends "
          "packets back.{:>60}").format(flow_number, round(size), '|'))
     RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 + '+' +
                   '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                   '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' +
                   '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+' +
                   '-' * 11 + '+' + '-' * 7 + '+' + '-' * 4 + '+')
     RapidLog.info((
         '| Test   | Speed requested  | Gen by core | Sent by NIC'
         ' | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f} Pcentil'
         '| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|'
         'L.Ratio|Time|').format(self.test['lat_percentile'] * 100))
     RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 + '+' +
                   '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                   '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' +
                   '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+' +
                   '-' * 11 + '+' + '-' * 7 + '+' + '-' * 4 + '+')
     speed = self.test['startspeed']
     self.gen_machine.set_generator_speed(speed)
     while attempts:
         attempts -= 1
         print('Measurement ongoing at speed: ' + str(round(speed, 2)) +
               '%      ',
               end='\r')
         sys.stdout.flush()
         time.sleep(1)
         # Get statistics now that the generation is stable and NO ARP messages any more
         iteration_data = self.run_iteration(float(self.test['runtime']),
                                             flow_number, size, speed)
         iteration_data['speed'] = speed
         # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
         # If the sum is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
         if (iteration_data['drop_rate'] +
                 iteration_data['lat_used'] * 100) < 95:
             lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
                            '{}').format(bcolors.WARNING,
                                         iteration_data['lat_used'] * 100,
                                         bcolors.ENDC)
         else:
             lat_warning = ''
         # No colour prefixes: this test only reports measurements, it
         # does not judge pass/fail per step.
         iteration_prefix = {
             'speed': '',
             'lat_avg': '',
             'lat_perc': '',
             'lat_max': '',
             'abs_drop_rate': '',
             'drop_rate': ''
         }
         RapidLog.info(
             self.report_result(attempts, size, iteration_data,
                                iteration_prefix))
         iteration_data['test'] = self.test['testname']
         iteration_data['environment_file'] = self.test['environment_file']
         iteration_data['Flows'] = flow_number
         iteration_data['Size'] = size
         iteration_data['RequestedSpeed'] = RapidTest.get_pps(
             iteration_data['speed'], size)
         result_details = self.post_data(iteration_data)
         RapidLog.debug(result_details)
     RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 + '+' +
                   '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
                   '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' +
                   '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+' +
                   '-' * 11 + '+' + '-' * 7 + '+' + '-' * 4 + '+')
     self.gen_machine.stop_latency_cores()
     return (True, result_details)
Ejemplo n.º 15
0
 def run(self):
     result_details = {'Details': 'Nothing'}
     self.gen_machine.start_latency_cores()
     TestPassed = True
     for imix in self.test['imixs']:
         size = mean(imix)
         self.gen_machine.set_udp_packet_size(imix)
         if self.background_machines:
             backgroundinfo = '{}Running {} x background traffic not represented in the table{}'.format(
                 bcolors.FLASH, len(self.background_machines), bcolors.ENDC)
         else:
             backgroundinfo = '{}{}'.format(bcolors.FLASH, bcolors.ENDC)
         self.set_background_size(self.background_machines, imix)
         RapidLog.info(
             "+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+"
         )
         RapidLog.info(
             '| UDP, {:>5} bytes, different number of flows by randomizing SRC & DST UDP port. {:116.116}|'
             .format(size, backgroundinfo))
         RapidLog.info(
             "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
         )
         RapidLog.info(
             '| Flows  | Speed requested  | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f} Pcentil| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|L.Ratio|Time|'
             .format(self.test['lat_percentile'] * 100))
         RapidLog.info(
             "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
         )
         for flow_number in self.test['flows']:
             attempts = 0
             self.gen_machine.reset_stats()
             if self.sut_machine:
                 self.sut_machine.reset_stats()
             flow_number = self.gen_machine.set_flows(flow_number)
             self.set_background_flows(self.background_machines,
                                       flow_number)
             endspeed = None
             speed = self.get_start_speed_and_init(size)
             self.record_start_time()
             while True:
                 attempts += 1
                 endwarning = False
                 print(str(flow_number) +
                       ' flows: Measurement ongoing at speed: ' +
                       str(round(speed, 2)) + '%      ',
                       end='\r')
                 sys.stdout.flush()
                 # Start generating packets at requested speed (in % of a 10Gb/s link)
                 self.gen_machine.set_generator_speed(speed)
                 self.set_background_speed(self.background_machines, speed)
                 self.start_background_traffic(self.background_machines)
                 # Get statistics now that the generation is stable and initial ARP messages are dealt with
                 pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, abs_tx, abs_rx, abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration, avg_bg_rate, bucket_size, buckets = self.run_iteration(
                     float(self.test['runtime']), flow_number, size, speed)
                 self.stop_background_traffic(self.background_machines)
                 if r > 1:
                     retry_warning = bcolors.WARNING + ' {:1} retries needed'.format(
                         r) + bcolors.ENDC
                 else:
                     retry_warning = ''
                 # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
                 # If the sum is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
                 if (drop_rate + lat_used * 100) < 95:
                     lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(
                         lat_used * 100) + bcolors.ENDC
                 else:
                     lat_warning = ''
                 if self.test['test'] == 'fixed_rate':
                     endspeed = speed
                     endpps_req_tx = None
                     endpps_tx = None
                     endpps_sut_tx = None
                     endpps_rx = None
                     endlat_avg = lat_avg
                     endlat_perc = lat_perc
                     endlat_perc_max = lat_perc_max
                     endlat_max = lat_max
                     endbuckets = buckets
                     endabs_dropped = abs_dropped
                     enddrop_rate = drop_rate
                     endabs_tx = abs_tx
                     endabs_rx = abs_rx
                     endavg_bg_rate = avg_bg_rate
                     if lat_warning or retry_warning:
                         endwarning = '|        | {:177.177} |'.format(
                             retry_warning + lat_warning)
                     success = True
                     TestPassed = False  # fixed rate testing cannot be True, it is just reporting numbers every second
                     speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                 # The following if statement is testing if we pass the success criteria of a certain drop rate, average latency and maximum latency below the threshold
                 # The drop rate success can be achieved in 2 ways: either the drop rate is below a treshold, either we want that no packet has been lost during the test
                 # This can be specified by putting 0 in the .test file
                 elif (
                     (drop_rate < self.test['drop_rate_threshold']) or
                     (abs_dropped == self.test['drop_rate_threshold'] == 0)
                 ) and (lat_avg < self.test['lat_avg_threshold']) and (
                         lat_perc < self.test['lat_perc_threshold']) and (
                             lat_max < self.test['lat_max_threshold']):
                     if (old_div((self.get_pps(speed, size) - pps_tx),
                                 self.get_pps(speed, size))) > 0.01:
                         speed_prefix = bcolors.WARNING
                         if abs_tx_fail > 0:
                             gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(
                                 self.get_pps(speed, size), pps_tx,
                                 abs_tx_fail) + bcolors.ENDC
                         else:
                             gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(
                                 self.get_pps(speed, size),
                                 pps_tx) + bcolors.ENDC
                     else:
                         speed_prefix = bcolors.ENDC
                         gen_warning = ''
                     endspeed = speed
                     endspeed_prefix = speed_prefix
                     endpps_req_tx = pps_req_tx
                     endpps_tx = pps_tx
                     endpps_sut_tx = pps_sut_tx
                     endpps_rx = pps_rx
                     endlat_avg = lat_avg
                     endlat_perc = lat_perc
                     endlat_perc_max = lat_perc_max
                     endlat_max = lat_max
                     endbuckets = buckets
                     endabs_dropped = None
                     enddrop_rate = drop_rate
                     endabs_tx = abs_tx
                     endabs_rx = abs_rx
                     endavg_bg_rate = avg_bg_rate
                     if lat_warning or gen_warning or retry_warning:
                         endwarning = '|        | {:186.186} |'.format(
                             retry_warning + lat_warning + gen_warning)
                     success = True
                     success_message = ' SUCCESS'
                     speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                     RapidLog.debug(
                         self.report_result(
                             -attempts, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg, lat_perc,
                             lat_perc_max, lat_max, abs_tx, abs_rx,
                             abs_dropped, actual_duration, speed_prefix,
                             lat_avg_prefix, lat_max_prefix,
                             abs_drop_rate_prefix, drop_rate_prefix) +
                         success_message + retry_warning + lat_warning +
                         gen_warning)
                 else:
                     success_message = ' FAILED'
                     abs_drop_rate_prefix = bcolors.ENDC
                     if ((abs_dropped > 0)
                             and (self.test['drop_rate_threshold'] == 0)):
                         abs_drop_rate_prefix = bcolors.FAIL
                     if (drop_rate < self.test['drop_rate_threshold']):
                         drop_rate_prefix = bcolors.ENDC
                     else:
                         drop_rate_prefix = bcolors.FAIL
                     if (lat_avg < self.test['lat_avg_threshold']):
                         lat_avg_prefix = bcolors.ENDC
                     else:
                         lat_avg_prefix = bcolors.FAIL
                     if (lat_perc < self.test['lat_perc_threshold']):
                         lat_perc_prefix = bcolors.ENDC
                     else:
                         lat_perc_prefix = bcolors.FAIL
                     if (lat_max < self.test['lat_max_threshold']):
                         lat_max_prefix = bcolors.ENDC
                     else:
                         lat_max_prefix = bcolors.FAIL
                     if ((old_div((self.get_pps(speed, size) - pps_tx),
                                  self.get_pps(speed, size))) < 0.001):
                         speed_prefix = bcolors.ENDC
                     else:
                         speed_prefix = bcolors.FAIL
                     success = False
                     RapidLog.debug(
                         self.report_result(
                             -attempts, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg, lat_perc,
                             lat_perc_max, lat_max, abs_tx, abs_rx,
                             abs_dropped, actual_duration, speed_prefix,
                             lat_avg_prefix, lat_perc_prefix,
                             lat_max_prefix, abs_drop_rate_prefix,
                             drop_rate_prefix) + success_message +
                         retry_warning + lat_warning)
                 speed = self.new_speed(speed, size, success)
                 if self.test['test'] == 'increment_till_fail':
                     if not success:
                         break
                 elif self.resolution_achieved():
                     break
             self.record_stop_time()
             if endspeed is not None:
                 if TestPassed and (endpps_rx <
                                    self.test['pass_threshold']):
                     TestPassed = False
                 speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                 RapidLog.info(
                     self.report_result(
                         flow_number, size, endspeed, endpps_req_tx,
                         endpps_tx, endpps_sut_tx, endpps_rx, endlat_avg,
                         endlat_perc, endlat_perc_max, endlat_max,
                         endabs_tx, endabs_rx, endabs_dropped,
                         actual_duration, speed_prefix, lat_avg_prefix,
                         lat_perc_prefix, lat_max_prefix,
                         abs_drop_rate_prefix, drop_rate_prefix))
                 if endavg_bg_rate:
                     tot_avg_rx_rate = endpps_rx + (
                         endavg_bg_rate * len(self.background_machines))
                     endtotaltrafficrate = '|        | Total amount of traffic received by all generators during this test: {:>4.3f} Gb/s {:7.3f} Mpps {} |'.format(
                         RapidTest.get_speed(tot_avg_rx_rate, size),
                         tot_avg_rx_rate, ' ' * 84)
                     RapidLog.info(endtotaltrafficrate)
                 if endwarning:
                     RapidLog.info(endwarning)
                 RapidLog.info(
                     "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
                 )
                 if self.test['test'] != 'fixed_rate':
                     result_details = {
                         'test': self.test['testname'],
                         'environment_file': self.test['environment_file'],
                         'start_date': self.start,
                         'stop_date': self.stop,
                         'Flows': flow_number,
                         'Size': size,
                         'RequestedSpeed':
                         RapidTest.get_pps(endspeed, size),
                         'CoreGenerated': endpps_req_tx,
                         'SentByNIC': endpps_tx,
                         'FwdBySUT': endpps_sut_tx,
                         'RevByCore': endpps_rx,
                         'AvgLatency': endlat_avg,
                         'PCTLatency': endlat_perc,
                         'MaxLatency': endlat_max,
                         'PacketsSent': endabs_tx,
                         'PacketsReceived': endabs_rx,
                         'PacketsLost': endabs_dropped,
                         'bucket_size': bucket_size,
                         'buckets': endbuckets
                     }
                     self.post_data('rapid_flowsizetest', result_details)
             else:
                 RapidLog.info('|{:>7}'.format(str(flow_number)) +
                               " | Speed 0 or close to 0")
     self.gen_machine.stop_latency_cores()
     return (TestPassed, result_details)
Ejemplo n.º 16
0
    def run_iteration(self, requested_duration, flow_number, size, speed):
        """Run one traffic iteration at *speed* and collect statistics.

        Starts the generator (and any background generators) at the requested
        speed — optionally ramping up in 'ramp_step' increments — then samples
        generator/SUT core statistics and latency histograms roughly every
        0.5 s until both measurements cover *requested_duration*. The whole
        iteration is retried (up to self.test['maxr'] times) while the
        drop-rate criteria are not met; 'fixed_rate' tests run one iteration
        and report per-sample results as they arrive.

        Parameters:
            requested_duration: measurement duration in seconds.
            flow_number: number of flows (reporting only).
            size: packet size (reporting and speed/pps conversion).
            speed: requested generator speed.

        Returns a 20-tuple:
            (pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, percentile,
             percentile_max, lat_max, dp_tx, dp_rx, tot_dp_drop,
             abs_tx_fail_delta, drop_rate, lat_min, used_avg, retries,
             tot_core_measurement_duration, avg_bg_rate, bucket_size,
             buckets)
        """
        BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
        LAT_PERCENTILE = self.test['lat_percentile']
        r = 0  # retry counter
        sleep_time = 2
        while (r < self.test['maxr']):
            time.sleep(sleep_time)
            # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
            # t1_*: baseline counters taken before traffic generation starts.
            t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats(
            )
            t1_dp_rx = t1_rx - t1_non_dp_rx
            t1_dp_tx = t1_tx - t1_non_dp_tx
            self.gen_machine.set_generator_speed(0)
            self.gen_machine.start_gen_cores()
            if self.background_machines:
                self.set_background_speed(self.background_machines, 0)
                self.start_background_traffic(self.background_machines)
            # Optionally ramp up in 'ramp_step' increments instead of jumping
            # straight to the requested speed.
            if 'ramp_step' in self.test.keys():
                ramp_speed = self.test['ramp_step']
            else:
                ramp_speed = speed
            while ramp_speed < speed:
                self.gen_machine.set_generator_speed(ramp_speed)
                if self.background_machines:
                    self.set_background_speed(self.background_machines,
                                              ramp_speed)
                time.sleep(2)
                ramp_speed = ramp_speed + self.test['ramp_step']
            self.gen_machine.set_generator_speed(speed)
            if self.background_machines:
                self.set_background_speed(self.background_machines, speed)
            time.sleep(
                2
            )  ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
            # Baseline dataplane counters for the background generators.
            start_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats(
                )
                bg_gen_stat = {
                    "bg_dp_rx": bg_rx - bg_non_dp_rx,
                    "bg_dp_tx": bg_tx - bg_non_dp_tx,
                    "bg_tsc": bg_tsc
                }
                start_bg_gen_stats.append(dict(bg_gen_stat))
            # t2_*: counters at the start of the measurement window.
            if self.sut_machine != None:
                t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                )
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats(
            )
            tx = t2_tx - t1_tx
            dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx)
            dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
            tot_dp_drop = dp_tx - dp_rx
            if tx == 0:
                RapidLog.critical(
                    "TX = 0. Test interrupted since no packet has been sent.")
            if dp_tx == 0:
                RapidLog.critical(
                    "Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent."
                )
            # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
            # Measure latency statistics per second
            lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
            )
            # Walk the latency histogram until LAT_PERCENTILE of all samples
            # are covered; the 1-based bucket index then maps to a latency.
            lat_samples = sum(buckets)
            sample_count = 0
            for sample_percentile, bucket in enumerate(buckets, start=1):
                sample_count += bucket
                if sample_count > (lat_samples * LAT_PERCENTILE):
                    break
            percentile_max = (sample_percentile == len(buckets))
            sample_percentile = sample_percentile * float(
                2**BUCKET_SIZE_EXP) / (old_div(float(lat_hz), float(10**6)))
            if self.test['test'] == 'fixed_rate':
                RapidLog.info(
                    self.report_result(flow_number, size, speed, None, None,
                                       None, None, lat_avg, sample_percentile,
                                       percentile_max, lat_max, dp_tx, dp_rx,
                                       None, None))
            # Accumulators for the measurement loop below.
            tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
            lat_avg = used_avg = 0
            buckets_total = buckets
            tot_lat_samples = sum(buckets)
            tot_lat_measurement_duration = float(0)
            tot_core_measurement_duration = float(0)
            tot_sut_core_measurement_duration = float(0)
            tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
            lat_avail = core_avail = sut_avail = False
            # Sample statistics every 0.5 s until both the core counters and
            # the latency measurements cover the requested duration.
            while (tot_core_measurement_duration - float(requested_duration) <=
                   0.1) or (tot_lat_measurement_duration -
                            float(requested_duration) <= 0.1):
                time.sleep(0.5)
                lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
                )
                # Get statistics after some execution time
                if t3_lat_tsc != t2_lat_tsc:
                    single_lat_measurement_duration = (
                        t3_lat_tsc - t2_lat_tsc
                    ) * 1.0 / lat_hz  # time difference between the 2 measurements, expressed in seconds.
                    # A second has passed in between to lat_stats requests. Hence we need to process the results
                    tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                    if lat_min > lat_min_sample:
                        lat_min = lat_min_sample
                    if lat_max < lat_max_sample:
                        lat_max = lat_max_sample
                    lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration  # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                    used_avg = used_avg + used_sample * single_lat_measurement_duration  # and give it more weigth.
                    lat_samples = sum(buckets)
                    tot_lat_samples += lat_samples
                    sample_count = 0
                    for sample_percentile, bucket in enumerate(buckets,
                                                               start=1):
                        sample_count += bucket
                        if sample_count > lat_samples * LAT_PERCENTILE:
                            break
                    percentile_max = (sample_percentile == len(buckets))
                    bucket_size = float(2**BUCKET_SIZE_EXP) / (old_div(
                        float(lat_hz), float(10**6)))
                    sample_percentile = sample_percentile * bucket_size
                    buckets_total = [
                        buckets_total[i] + buckets[i]
                        for i in range(len(buckets_total))
                    ]
                    t2_lat_tsc = t3_lat_tsc
                    lat_avail = True
                t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats(
                )
                if t3_tsc != t2_tsc:
                    # Fresh generator core statistics: accumulate the deltas
                    # and roll the t2_* snapshot forward to t3_*.
                    single_core_measurement_duration = (
                        t3_tsc - t2_tsc
                    ) * 1.0 / tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                    tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
                    delta_rx = t3_rx - t2_rx
                    tot_rx += delta_rx
                    delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
                    tot_non_dp_rx += delta_non_dp_rx
                    delta_tx = t3_tx - t2_tx
                    tot_tx += delta_tx
                    delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
                    tot_non_dp_tx += delta_non_dp_tx
                    delta_dp_tx = delta_tx - delta_non_dp_tx
                    delta_dp_rx = delta_rx - delta_non_dp_rx
                    delta_dp_drop = delta_dp_tx - delta_dp_rx
                    tot_dp_drop += delta_dp_drop
                    delta_drop = t3_drop - t2_drop
                    tot_drop += delta_drop
                    t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
                    core_avail = True
                if self.sut_machine != None:
                    t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                    )
                    if t3_sut_tsc != t2_sut_tsc:
                        single_sut_core_measurement_duration = (
                            t3_sut_tsc - t2_sut_tsc
                        ) * 1.0 / tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                        tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                        tot_sut_rx += t3_sut_rx - t2_sut_rx
                        tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                        delta_sut_tx = t3_sut_tx - t2_sut_tx
                        tot_sut_tx += delta_sut_tx
                        delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                        tot_sut_non_dp_tx += delta_sut_non_dp_tx
                        t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                        sut_avail = True
                if self.test['test'] == 'fixed_rate':
                    # fixed_rate tests report and push each per-sample result.
                    if lat_avail == core_avail == True:
                        lat_avail = core_avail = False
                        pps_req_tx = (
                            delta_tx + delta_drop - delta_rx
                        ) / single_core_measurement_duration / 1000000
                        pps_tx = delta_tx / single_core_measurement_duration / 1000000
                        if self.sut_machine != None and sut_avail:
                            pps_sut_tx = delta_sut_tx / single_sut_core_measurement_duration / 1000000
                            sut_avail = False
                        else:
                            pps_sut_tx = None
                        pps_rx = delta_rx / single_core_measurement_duration / 1000000
                        RapidLog.info(
                            self.report_result(
                                flow_number, size, speed, pps_req_tx, pps_tx,
                                pps_sut_tx, pps_rx, lat_avg_sample,
                                sample_percentile, percentile_max,
                                lat_max_sample, delta_dp_tx, delta_dp_rx,
                                tot_dp_drop, single_core_measurement_duration))
                        variables = {
                            'Flows': flow_number,
                            'Size': size,
                            'RequestedSpeed': self.get_pps(speed, size),
                            'CoreGenerated': pps_req_tx,
                            'SentByNIC': pps_tx,
                            'FwdBySUT': pps_sut_tx,
                            'RevByCore': pps_rx,
                            'AvgLatency': lat_avg_sample,
                            'PCTLatency': sample_percentile,
                            'MaxLatency': lat_max_sample,
                            'PacketsSent': delta_dp_tx,
                            'PacketsReceived': delta_dp_rx,
                            'PacketsLost': tot_dp_drop,
                            'bucket_size': bucket_size,
                            'buckets': buckets
                        }

                        self.post_data('rapid_flowsizetest', variables)
            # Compute the average background-generator receive rate (Mpps)
            # over the whole measurement window.
            end_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats(
                )
                bg_gen_stat = {
                    "bg_dp_rx": bg_rx - bg_non_dp_rx,
                    "bg_dp_tx": bg_tx - bg_non_dp_tx,
                    "bg_tsc": bg_tsc,
                    "bg_hz": bg_hz
                }
                end_bg_gen_stats.append(dict(bg_gen_stat))
            i = 0
            bg_rates = []
            while i < len(end_bg_gen_stats):
                bg_rates.append(0.000001 *
                                (end_bg_gen_stats[i]['bg_dp_rx'] -
                                 start_bg_gen_stats[i]['bg_dp_rx']) /
                                ((end_bg_gen_stats[i]['bg_tsc'] -
                                  start_bg_gen_stats[i]['bg_tsc']) * 1.0 /
                                 end_bg_gen_stats[i]['bg_hz']))
                i += 1
            if len(bg_rates):
                avg_bg_rate = sum(bg_rates) / len(bg_rates)
                RapidLog.debug(
                    'Average Background traffic rate: {:>7.3f} Mpps'.format(
                        avg_bg_rate))
            else:
                avg_bg_rate = None
            #Stop generating
            self.gen_machine.stop_gen_cores()
            r += 1
            # lat_avg/used_avg were accumulated weighted by the per-sample
            # duration; normalize by the total latency measurement duration.
            lat_avg = old_div(lat_avg, float(tot_lat_measurement_duration))
            used_avg = old_div(used_avg, float(tot_lat_measurement_duration))
            # Busy-wait until the generator publishes one more (fresh) core
            # statistics sample after the generator cores were stopped.
            t4_tsc = t2_tsc
            while t4_tsc == t2_tsc:
                t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats(
                )
            if self.test['test'] == 'fixed_rate':
                t4_lat_tsc = t2_lat_tsc
                while t4_lat_tsc == t2_lat_tsc:
                    lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats(
                    )
                sample_count = 0
                lat_samples = sum(buckets)
                for percentile, bucket in enumerate(buckets, start=1):
                    sample_count += bucket
                    if sample_count > lat_samples * LAT_PERCENTILE:
                        break
                percentile_max = (percentile == len(buckets))
                percentile = percentile * bucket_size
                lat_max = lat_max_sample
                lat_avg = lat_avg_sample
                delta_rx = t4_rx - t2_rx
                delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
                delta_tx = t4_tx - t2_tx
                delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
                delta_dp_tx = delta_tx - delta_non_dp_tx
                delta_dp_rx = delta_rx - delta_non_dp_rx
                dp_tx = delta_dp_tx
                dp_rx = delta_dp_rx
                tot_dp_drop += delta_dp_tx - delta_dp_rx
                pps_req_tx = None
                pps_tx = None
                pps_sut_tx = None
                pps_rx = None
                drop_rate = 100.0 * (dp_tx - dp_rx) / dp_tx
                tot_core_measurement_duration = None
                break  ## Not really needed since the while loop will stop when evaluating the value of r
            else:
                # Derive overall rates and the latency percentile over the
                # combined histogram of all samples in this iteration.
                sample_count = 0
                buckets = buckets_total
                for percentile, bucket in enumerate(buckets_total, start=1):
                    sample_count += bucket
                    if sample_count > tot_lat_samples * LAT_PERCENTILE:
                        break
                percentile_max = (percentile == len(buckets_total))
                percentile = percentile * bucket_size
                pps_req_tx = (
                    tot_tx + tot_drop - tot_rx
                ) / tot_core_measurement_duration / 1000000.0  # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
                pps_tx = tot_tx / tot_core_measurement_duration / 1000000.0  # tot_tx is all generated packets actually accepted by the interface
                pps_rx = tot_rx / tot_core_measurement_duration / 1000000.0  # tot_rx is all packets received by the nop task = all packets received in the gen VM
                if self.sut_machine != None and sut_avail:
                    pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
                else:
                    pps_sut_tx = None
                dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
                dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
                tot_dp_drop = dp_tx - dp_rx
                drop_rate = 100.0 * tot_dp_drop / dp_tx
                # Stop retrying once a drop-rate criterion is met or the
                # absolute drop count exceeds 'maxz'.
                if ((drop_rate < self.test['drop_rate_threshold']) or
                    (tot_dp_drop == self.test['drop_rate_threshold'] == 0)
                        or (tot_dp_drop > self.test['maxz'])):
                    break
        return (pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, percentile,
                percentile_max, lat_max, dp_tx, dp_rx, tot_dp_drop,
                (t4_tx_fail - t1_tx_fail), drop_rate, lat_min, used_avg, r,
                tot_core_measurement_duration, avg_bg_rate, bucket_size,
                buckets)
Ejemplo n.º 17
0
 def run_tests(self, test_params):
     """Parse the test configuration, start PROX everywhere and run the tests.

     Builds the machine objects (generator, SUT, background generators) from
     the parsed configuration, starts PROX on all of them in parallel,
     connects the control sockets and then runs each configured test
     sequentially.

     Parameters:
         test_params: dict with the raw test parameters; completed in place
             by RapidConfigParser.parse_config.

     Returns:
         (result, result_details): the sum of all single-test results and
         the details dict of the last test, or (None, None) when no test ran
         or the run was interrupted.
     """
     test_params = RapidConfigParser.parse_config(test_params)
     RapidLog.debug(test_params)
     monitor_gen = monitor_sut = False
     background_machines = []
     sut_machine = gen_machine = None
     configonly = test_params['configonly']
     for machine_params in test_params['machines']:
         if 'gencores' in machine_params:
             # Machines that define generator cores act as traffic generators.
             machine = RapidGeneratorMachine(
                 test_params['key'], test_params['user'],
                 test_params['password'], test_params['vim_type'],
                 test_params['rundir'], test_params['resultsdir'],
                 machine_params, configonly, test_params['ipv6'])
             if machine_params['monitor']:
                 if monitor_gen:
                     RapidLog.exception("Can only monitor 1 generator")
                     raise Exception("Can only monitor 1 generator")
                 else:
                     monitor_gen = True
                     gen_machine = machine
             else:
                 # Non-monitored generators produce background traffic.
                 background_machines.append(machine)
         else:
             machine = RapidMachine(test_params['key'], test_params['user'],
                                    test_params['password'],
                                    test_params['vim_type'],
                                    test_params['rundir'],
                                    test_params['resultsdir'],
                                    machine_params, configonly)
             if machine_params['monitor']:
                 if monitor_sut:
                     RapidLog.exception("Can only monitor 1 sut")
                     raise Exception("Can only monitor 1 sut")
                 else:
                     monitor_sut = True
                     if machine_params['prox_socket']:
                         sut_machine = machine
         self.machines.append(machine)
     # Initialize up front so neither the return statement nor the except
     # handler can hit an unbound local: with an empty 'tests' list
     # result_details would otherwise never be assigned, and an interrupt
     # arriving before the executors are created would otherwise raise a
     # NameError inside the handler.
     result = result_details = None
     prox_executor = socket_executor = None
     try:
         # Start PROX on all machines in parallel; each future maps back to
         # its machine.
         prox_executor = concurrent.futures.ThreadPoolExecutor(
             max_workers=len(self.machines))
         self.future_to_prox = {
             prox_executor.submit(machine.start_prox): machine
             for machine in self.machines
         }
         if configonly:
             # Configuration-only run: wait for deployment and exit.
             concurrent.futures.wait(self.future_to_prox,
                                     return_when=ALL_COMPLETED)
             sys.exit()
         # Connect the PROX control sockets, again in parallel.
         socket_executor = concurrent.futures.ThreadPoolExecutor(
             max_workers=len(self.machines))
         future_to_connect_prox = {
             socket_executor.submit(machine.connect_prox): machine
             for machine in self.machines
         }
         concurrent.futures.wait(future_to_connect_prox,
                                 return_when=ALL_COMPLETED)
         result = 0
         for test_param in test_params['tests']:
             RapidLog.info(test_param['test'])
             if test_param['test'] in [
                     'flowsizetest', 'TST009test', 'fixed_rate',
                     'increment_till_fail'
             ]:
                 test = FlowSizeTest(
                     test_param, test_params['lat_percentile'],
                     test_params['runtime'], test_params['TestName'],
                     test_params['environment_file'], gen_machine,
                     sut_machine, background_machines,
                     test_params['sleep_time'])
             elif test_param['test'] in ['corestatstest']:
                 test = CoreStatsTest(test_param, test_params['runtime'],
                                      test_params['TestName'],
                                      test_params['environment_file'],
                                      self.machines)
             elif test_param['test'] in ['portstatstest']:
                 test = PortStatsTest(test_param, test_params['runtime'],
                                      test_params['TestName'],
                                      test_params['environment_file'],
                                      self.machines)
             elif test_param['test'] in ['impairtest']:
                 test = ImpairTest(
                     test_param, test_params['lat_percentile'],
                     test_params['runtime'], test_params['TestName'],
                     test_params['environment_file'], gen_machine,
                     sut_machine, background_machines)
             elif test_param['test'] in ['irqtest']:
                 test = IrqTest(test_param, test_params['runtime'],
                                test_params['TestName'],
                                test_params['environment_file'],
                                self.machines)
             elif test_param['test'] in ['warmuptest']:
                 test = WarmupTest(test_param, gen_machine)
             else:
                 RapidLog.debug('Test name ({}) is not valid:'.format(
                     test_param['test']))
             single_test_result, result_details = test.run()
             result = result + single_test_result
         for machine in self.machines:
             machine.close_prox()
         concurrent.futures.wait(self.future_to_prox,
                                 return_when=ALL_COMPLETED)
     except (ConnectionError, KeyboardInterrupt) as e:
         result = result_details = None
         # Tear down whichever executors already exist; clearing the
         # (private) thread bookkeeping stops atexit from joining worker
         # threads that may be blocked on dead connections.
         for executor in (socket_executor, prox_executor):
             if executor is not None:
                 executor.shutdown(wait=False)
                 executor._threads.clear()
         concurrent.futures.thread._threads_queues.clear()
         RapidLog.error("Test interrupted: {} {}".format(
             type(e).__name__, e))
     return (result, result_details)
Ejemplo n.º 18
0
    def parse_config(test_params):
        """Parse the test and environment configuration files.

        Reads the test definition file (test_params['test_file']) and the
        environment file (test_params['environment_file']) and augments
        test_params in place with, amongst others, the list of tests to run
        ('tests') and the list of machine descriptions ('machines').

        Args:
            test_params (dict): must contain at least 'test_file',
                'environment_file' and 'machine_map_file' entries.

        Returns:
            dict: the same test_params dict, augmented.

        Raises:
            Exception: when more test machines are required than available.
        """
        testconfig = configparser.RawConfigParser()
        testconfig.read(test_params['test_file'])
        test_params['required_number_of_test_machines'] = int(testconfig.get(
            'TestParameters', 'total_number_of_test_machines'))
        test_params['number_of_tests'] = int(testconfig.get('TestParameters',
            'number_of_tests'))
        test_params['TestName'] = testconfig.get('TestParameters', 'name')
        # lat_percentile is configured as a percentage; store it as a fraction.
        if testconfig.has_option('TestParameters', 'lat_percentile'):
            test_params['lat_percentile'] = old_div(float(
                testconfig.get('TestParameters', 'lat_percentile')),100.0)
        else:
            test_params['lat_percentile'] = 0.99
        RapidLog.info('Latency percentile at {:.0f}%'.format(
            test_params['lat_percentile']*100))
        # Enforce a minimum sleep_time of 2 seconds: measurements need some
        # settle time in between tests.
        if testconfig.has_option('TestParameters', 'sleep_time'):
            test_params['sleep_time'] = int(testconfig.get('TestParameters',
                'sleep_time'))
            if test_params['sleep_time'] < 2:
                test_params['sleep_time'] = 2
        else:
            test_params['sleep_time'] = 2
        if testconfig.has_option('TestParameters', 'ipv6'):
            test_params['ipv6'] = testconfig.getboolean('TestParameters','ipv6')
        else:
            test_params['ipv6'] = False
        config = configparser.RawConfigParser()
        config.read(test_params['environment_file'])
        test_params['vim_type'] = config.get('Varia', 'vim')
        test_params['user'] = config.get('ssh', 'user')
        if config.has_option('ssh', 'key'):
            test_params['key'] = config.get('ssh', 'key')
            # The 'rapid' user is always deployed with rapid_rsa_key: any
            # other configured key file is overruled.
            if test_params['user'] in ['rapid']:
                if test_params['key'] != 'rapid_rsa_key':
                    RapidLog.debug(("Key file {} for user {} overruled by key file:"
                            " rapid_rsa_key").format(test_params['key'],
                            test_params['user']))
                    test_params['key'] = 'rapid_rsa_key'
        else:
            test_params['key'] = None
        if config.has_option('ssh', 'password'):
            test_params['password'] = config.get('ssh', 'password')
        else:
            test_params['password'] = None
        test_params['total_number_of_machines'] = int(config.get('rapid',
            'total_number_of_machines'))
        tests = []
        test = {}
        for test_index in range(1, test_params['number_of_tests']+1):
            test.clear()
            section = 'test%d'%test_index
            options = testconfig.options(section)
            for option in options:
                # Convert each option to its proper type; anything not listed
                # below stays a plain string.
                if option in ['imix','imixs','flows', 'warmupimix']:
                    test[option] = ast.literal_eval(testconfig.get(section,
                        option))
                elif option in ['maxframespersecondallingress','stepsize',
                        'flowsize','warmupflowsize','warmuptime', 'steps']:
                    test[option] = int(testconfig.get(section, option))
                elif option in ['startspeed', 'step', 'drop_rate_threshold',
                        'lat_avg_threshold','lat_perc_threshold',
                        'lat_max_threshold','accuracy','maxr','maxz',
                        'ramp_step','warmupspeed','mis_ordered_threshold']:
                    test[option] = float(testconfig.get(section, option))
                else:
                    test[option] = testconfig.get(section, option)
            tests.append(dict(test))
        for test in tests:
            # Default thresholds for rate-searching tests: no drops allowed
            # and no latency limits unless explicitly configured.
            if test['test'] in ['flowsizetest','TST009test']:
                if 'drop_rate_threshold' not in test.keys():
                    test['drop_rate_threshold'] = 0
                latency_thresholds = ['lat_avg_threshold',
                        'lat_perc_threshold', 'lat_max_threshold',
                        'mis_ordered_threshold']
                for threshold in latency_thresholds:
                    if threshold not in test.keys():
                        test[threshold] = inf
        test_params['tests'] = tests
        if test_params['required_number_of_test_machines'] > test_params[
                'total_number_of_machines']:
            # Bug fix: the original interpolated undefined local names here,
            # raising NameError instead of the intended exception message.
            message = ("Not enough VMs for this test: %d needed and only %d "
                    "available" % (
                        test_params['required_number_of_test_machines'],
                        test_params['total_number_of_machines']))
            RapidLog.exception(message)
            raise Exception(message)
        map_info = test_params['machine_map_file'].strip('[]').split(',')
        map_info_length = len(map_info)
        # If map_info is a list where the first entry is numeric, we assume we
        # are dealing with a list of machines and NOT the machine.map file
        if map_info[0].isnumeric():
            if map_info_length < test_params[
                    'required_number_of_test_machines']:
                RapidLog.exception('Not enough machine indices in --map \
                        parameter: {}. Needing {} entries'.format(map_info,
                            test_params['required_number_of_test_machines']))
            machine_index = list(map(int,map_info))
        else:
            machine_map = configparser.RawConfigParser()
            machine_map.read(test_params['machine_map_file'])
            machine_index = []
            for test_machine in range(1,
                    test_params['required_number_of_test_machines']+1):
                machine_index.append(int(machine_map.get(
                    'TestM%d'%test_machine, 'machine_index')))
        machines = []
        machine = {}
        for test_machine in range(1, test_params[
            'required_number_of_test_machines']+1):
            machine.clear()
            section = 'TestM%d'%test_machine
            options = testconfig.options(section)
            for option in options:
                if option in ['prox_socket','prox_launch_exit','monitor']:
                    machine[option] = testconfig.getboolean(section, option)
                elif option in ['mcore', 'cores', 'gencores','latcores']:
                    machine[option] = ast.literal_eval(testconfig.get(
                        section, option))
                elif option in ['bucket_size_exp']:
                    machine[option] = int(testconfig.get(section, option))
                    if machine[option] < 11:
                        RapidLog.exception(
                                "Minimum Value for bucket_size_exp is 11")
                else:
                    machine[option] = testconfig.get(section, option)
            # Default prox_socket, prox_launch_exit and monitor to True when
            # not configured. (Moved out of the option loop above, where it
            # was needlessly re-evaluated for every option.)
            for key in ['prox_socket','prox_launch_exit']:
                if key not in machine.keys():
                    machine[key] = True
            if 'monitor' not in machine.keys():
                machine['monitor'] = True
            # Merge in the environment-file section of the mapped machine.
            section = 'M%d'%machine_index[test_machine-1]
            options = config.options(section)
            for option in options:
                machine[option] = config.get(section, option)
            machines.append(dict(machine))
        for machine in machines:
            # Resolve the dataplane IPs/MACs of the destination VM (if any)
            # into a 'dest_ports' list on this machine.
            dp_ports = []
            if 'dest_vm' in machine.keys():
                dest_machine = machines[int(machine['dest_vm'])-1]
                index = 1
                while True:
                    dp_ip_key = 'dp_ip{}'.format(index)
                    dp_mac_key = 'dp_mac{}'.format(index)
                    if dp_ip_key in dest_machine.keys() and \
                            dp_mac_key in dest_machine.keys():
                        dp_port = {'ip': dest_machine[dp_ip_key],
                                'mac' : dest_machine[dp_mac_key]}
                        dp_ports.append(dict(dp_port))
                        index += 1
                    else:
                        break
                # Bug fix: assign after the loop. The original assigned
                # inside the loop body, so a machine whose dest_vm exposed
                # no dp_ip1/dp_mac1 pair never got a 'dest_ports' key.
                machine['dest_ports'] = list(dp_ports)
            # Same resolution for the gateway VM's dataplane IPs.
            gw_ips = []
            if 'gw_vm' in machine.keys():
                gw_machine = machines[int(machine['gw_vm'])-1]
                index = 1
                while True:
                    gw_ip_key = 'dp_ip{}'.format(index)
                    if gw_ip_key in gw_machine.keys():
                        gw_ips.append(gw_machine[gw_ip_key])
                        index += 1
                    else:
                        break
                # Same fix as for dest_ports above.
                machine['gw_ips'] = list(gw_ips)
        test_params['machines'] = machines
        return (test_params)
Ejemplo n.º 19
0
    def run_iteration(self, requested_duration, flow_number, size, speed):
        """Run one measurement iteration at the requested generator speed.

        Starts latency and generator cores (optionally ramping the speed up
        in 'ramp_step' increments), measures generator, SUT and background
        generator statistics for at least requested_duration seconds, then
        stops traffic and aggregates results. The whole cycle is repeated up
        to self.test['maxr'] times until the drop-rate conditions at the end
        of the loop are satisfied (or unconditionally once for 'fixed_rate').

        Args:
            requested_duration: minimum measurement duration, in seconds.
            flow_number: number of flows being generated (reporting only).
            size: packet size being generated (reporting only).
            speed: requested generator speed.

        Returns:
            dict: iteration statistics (abs_tx/abs_rx/abs_dropped counters,
            lat_* latency figures, pps_* rates, drop_rate, avg_bg_rate, ...).
        """
        BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
        sleep_time = self.test['sleep_time']
        LAT_PERCENTILE = self.test['lat_percentile']
        # iteration_data accumulates over the whole iteration; time_loop_data
        # holds the per-sample figures of the inner measurement loop.
        iteration_data = {}
        time_loop_data = {}
        # 'r' counts the retries of the complete measurement cycle.
        iteration_data['r'] = 0

        while (iteration_data['r'] < self.test['maxr']):
            self.gen_machine.start_latency_cores()
            time.sleep(sleep_time)
            # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
            # t1: baseline generator counters, taken before traffic starts.
            t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats(
            )
            t1_dp_rx = t1_rx - t1_non_dp_rx
            t1_dp_tx = t1_tx - t1_non_dp_tx
            # Start generating at speed 0, then ramp up to the target speed
            # in 'ramp_step' increments when configured.
            self.gen_machine.set_generator_speed(0)
            self.gen_machine.start_gen_cores()
            self.set_background_speed(self.background_machines, 0)
            self.start_background_traffic(self.background_machines)
            if 'ramp_step' in self.test.keys():
                ramp_speed = self.test['ramp_step']
            else:
                ramp_speed = speed
            while ramp_speed < speed:
                self.gen_machine.set_generator_speed(ramp_speed)
                self.set_background_speed(self.background_machines, ramp_speed)
                time.sleep(2)
                ramp_speed = ramp_speed + self.test['ramp_step']
            self.gen_machine.set_generator_speed(speed)
            self.set_background_speed(self.background_machines, speed)
            iteration_data['speed'] = speed
            time_loop_data['speed'] = speed
            time.sleep(
                2
            )  ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
            # Snapshot the background generators at the start of the
            # measurement window, to compute their average rate later on.
            start_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats(
                )
                bg_gen_stat = {
                    "bg_dp_rx": bg_rx - bg_non_dp_rx,
                    "bg_dp_tx": bg_tx - bg_non_dp_tx,
                    "bg_tsc": bg_tsc
                }
                start_bg_gen_stats.append(dict(bg_gen_stat))
            # t2: counters at the start of the measured window (SUT and
            # generator).
            if self.sut_machine != None:
                t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                )
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats(
            )
            # Sanity checks on the ramp-up phase (t1 -> t2): dataplane
            # packets must actually have been sent.
            tx = t2_tx - t1_tx
            iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx)
            iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx -
                                                        t1_non_dp_rx)
            iteration_data['abs_dropped'] = iteration_data[
                'abs_tx'] - iteration_data['abs_rx']
            if tx == 0:
                RapidLog.critical(
                    "TX = 0. Test interrupted since no packet has been sent.")
            if iteration_data['abs_tx'] == 0:
                RapidLog.critical(
                    "Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent."
                )
            # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
            # Measure latency statistics per second
            iteration_data.update(self.gen_machine.lat_stats())
            t2_lat_tsc = iteration_data['lat_tsc']
            # Walk the latency histogram until LAT_PERCENTILE of all samples
            # is covered; the bucket index gives the percentile latency.
            sample_count = 0
            for sample_percentile, bucket in enumerate(
                    iteration_data['buckets'], start=1):
                sample_count += bucket
                if sample_count > sum(
                        iteration_data['buckets']) * LAT_PERCENTILE:
                    break
            # lat_perc_max flags that the percentile fell in the last bucket,
            # i.e. the reported percentile latency is only a lower bound.
            iteration_data['lat_perc_max'] = (sample_percentile == len(
                iteration_data['buckets']))
            # Bucket width in microseconds, derived from the bucket size
            # exponent and the latency core's TSC frequency.
            iteration_data['bucket_size'] = float(2**BUCKET_SIZE_EXP) / (
                old_div(float(iteration_data['lat_hz']), float(10**6)))
            time_loop_data['bucket_size'] = iteration_data['bucket_size']
            iteration_data[
                'lat_perc'] = sample_percentile * iteration_data['bucket_size']
            if self.test['test'] == 'fixed_rate':
                # For fixed_rate, the ramp-up figures are reported as-is;
                # rates are not meaningful yet at this point.
                iteration_data['pps_req_tx'] = None
                iteration_data['pps_tx'] = None
                iteration_data['pps_sut_tx'] = None
                iteration_data['pps_rx'] = None
                iteration_data['lat_perc'] = None
                iteration_data['actual_duration'] = None
                iteration_prefix = {
                    'speed': '',
                    'lat_avg': '',
                    'lat_perc': '',
                    'lat_max': '',
                    'abs_drop_rate': '',
                    'drop_rate': ''
                }
                RapidLog.info(
                    self.report_result(flow_number, size, iteration_data,
                                       iteration_prefix))
            # Reset the accumulators for the main measurement loop.
            tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
            iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
            tot_lat_measurement_duration = float(0)
            iteration_data['actual_duration'] = float(0)
            tot_sut_core_measurement_duration = float(0)
            tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
            lat_avail = core_avail = sut_avail = False
            # Main measurement loop: sample latency, generator and SUT stats
            # every 0.5s until both the core and the latency measurements
            # cover the requested duration.
            while (iteration_data['actual_duration'] -
                   float(requested_duration) <= 0.1) or (
                       tot_lat_measurement_duration - float(requested_duration)
                       <= 0.1):
                time.sleep(0.5)
                time_loop_data.update(self.gen_machine.lat_stats())
                # Get statistics after some execution time
                if time_loop_data['lat_tsc'] != t2_lat_tsc:
                    single_lat_measurement_duration = (
                        time_loop_data['lat_tsc'] - t2_lat_tsc
                    ) * 1.0 / time_loop_data[
                        'lat_hz']  # time difference between the 2 measurements, expressed in seconds.
                    # A second has passed in between to lat_stats requests. Hence we need to process the results
                    tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                    if iteration_data['lat_min'] > time_loop_data['lat_min']:
                        iteration_data['lat_min'] = time_loop_data['lat_min']
                    if iteration_data['lat_max'] < time_loop_data['lat_max']:
                        iteration_data['lat_max'] = time_loop_data['lat_max']
                    # lat_avg/lat_used are accumulated weighted by sample
                    # duration; they are divided by the total duration after
                    # the loop to get the average.
                    iteration_data['lat_avg'] = iteration_data[
                        'lat_avg'] + time_loop_data[
                            'lat_avg'] * single_lat_measurement_duration  # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                    iteration_data['lat_used'] = iteration_data[
                        'lat_used'] + time_loop_data[
                            'lat_used'] * single_lat_measurement_duration  # and give it more weigth.
                    # Percentile latency for this sample (same histogram walk
                    # as above).
                    sample_count = 0
                    for sample_percentile, bucket in enumerate(
                            time_loop_data['buckets'], start=1):
                        sample_count += bucket
                        if sample_count > sum(
                                time_loop_data['buckets']) * LAT_PERCENTILE:
                            break
                    time_loop_data['lat_perc_max'] = (sample_percentile == len(
                        time_loop_data['buckets']))
                    time_loop_data[
                        'lat_perc'] = sample_percentile * iteration_data[
                            'bucket_size']
                    # Merge this sample's histogram into the iteration total.
                    iteration_data['buckets'] = [
                        iteration_data['buckets'][i] +
                        time_loop_data['buckets'][i]
                        for i in range(len(iteration_data['buckets']))
                    ]
                    t2_lat_tsc = time_loop_data['lat_tsc']
                    lat_avail = True
                # t3: generator counters for this sample; deltas vs t2 are
                # accumulated and t2 is advanced to t3.
                t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats(
                )
                if t3_tsc != t2_tsc:
                    time_loop_data['actual_duration'] = (
                        t3_tsc - t2_tsc
                    ) * 1.0 / tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                    iteration_data['actual_duration'] = iteration_data[
                        'actual_duration'] + time_loop_data['actual_duration']
                    delta_rx = t3_rx - t2_rx
                    tot_rx += delta_rx
                    delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
                    tot_non_dp_rx += delta_non_dp_rx
                    delta_tx = t3_tx - t2_tx
                    tot_tx += delta_tx
                    delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
                    tot_non_dp_tx += delta_non_dp_tx
                    delta_dp_tx = delta_tx - delta_non_dp_tx
                    delta_dp_rx = delta_rx - delta_non_dp_rx
                    time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
                    iteration_data['abs_dropped'] += time_loop_data[
                        'abs_dropped']
                    delta_drop = t3_drop - t2_drop
                    tot_drop += delta_drop
                    t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
                    core_avail = True
                # Same sampling for the SUT machine, when one is monitored.
                if self.sut_machine != None:
                    t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats(
                    )
                    if t3_sut_tsc != t2_sut_tsc:
                        single_sut_core_measurement_duration = (
                            t3_sut_tsc - t2_sut_tsc
                        ) * 1.0 / sut_tsc_hz  # time difference between the 2 measurements, expressed in seconds.
                        tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                        tot_sut_rx += t3_sut_rx - t2_sut_rx
                        tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                        delta_sut_tx = t3_sut_tx - t2_sut_tx
                        tot_sut_tx += delta_sut_tx
                        delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                        tot_sut_non_dp_tx += delta_sut_non_dp_tx
                        t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                        sut_avail = True
                if self.test['test'] == 'fixed_rate':
                    # fixed_rate reports (and posts) every sample for which
                    # both a latency and a core measurement are available.
                    if lat_avail == core_avail == True:
                        lat_avail = core_avail = False
                        # Rates divided by 1e6: reported in millions of
                        # packets per second.
                        time_loop_data['pps_req_tx'] = (
                            delta_tx + delta_drop - delta_rx
                        ) / time_loop_data['actual_duration'] / 1000000
                        time_loop_data['pps_tx'] = delta_tx / time_loop_data[
                            'actual_duration'] / 1000000
                        if self.sut_machine != None and sut_avail:
                            time_loop_data[
                                'pps_sut_tx'] = delta_sut_tx / single_sut_core_measurement_duration / 1000000
                            sut_avail = False
                        else:
                            time_loop_data['pps_sut_tx'] = None
                        time_loop_data['pps_rx'] = delta_rx / time_loop_data[
                            'actual_duration'] / 1000000
                        time_loop_data['abs_tx'] = delta_dp_tx
                        time_loop_data['abs_rx'] = delta_dp_rx
                        time_loop_prefix = {
                            'speed': '',
                            'lat_avg': '',
                            'lat_perc': '',
                            'lat_max': '',
                            'abs_drop_rate': '',
                            'drop_rate': ''
                        }
                        RapidLog.info(
                            self.report_result(flow_number, size,
                                               time_loop_data,
                                               time_loop_prefix))
                        time_loop_data['test'] = self.test['testname']
                        time_loop_data['environment_file'] = self.test[
                            'environment_file']
                        time_loop_data['Flows'] = flow_number
                        time_loop_data['Size'] = size
                        time_loop_data['RequestedSpeed'] = RapidTest.get_pps(
                            speed, size)
                        _ = self.post_data(time_loop_data)
            # End of measurement window: snapshot the background generators
            # again and derive their average dataplane rx rate.
            end_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats(
                )
                bg_gen_stat = {
                    "bg_dp_rx": bg_rx - bg_non_dp_rx,
                    "bg_dp_tx": bg_tx - bg_non_dp_tx,
                    "bg_tsc": bg_tsc,
                    "bg_hz": bg_hz
                }
                end_bg_gen_stats.append(dict(bg_gen_stat))
            self.stop_background_traffic(self.background_machines)
            i = 0
            bg_rates = []
            while i < len(end_bg_gen_stats):
                bg_rates.append(0.000001 *
                                (end_bg_gen_stats[i]['bg_dp_rx'] -
                                 start_bg_gen_stats[i]['bg_dp_rx']) /
                                ((end_bg_gen_stats[i]['bg_tsc'] -
                                  start_bg_gen_stats[i]['bg_tsc']) * 1.0 /
                                 end_bg_gen_stats[i]['bg_hz']))
                i += 1
            if len(bg_rates):
                iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
                RapidLog.debug(
                    'Average Background traffic rate: {:>7.3f} Mpps'.format(
                        iteration_data['avg_bg_rate']))
            else:
                iteration_data['avg_bg_rate'] = None
            #Stop generating
            self.gen_machine.stop_gen_cores()
            time.sleep(3.5)
            self.gen_machine.stop_latency_cores()
            iteration_data['r'] += 1
            # Turn the duration-weighted sums into averages.
            iteration_data['lat_avg'] = old_div(
                iteration_data['lat_avg'], float(tot_lat_measurement_duration))
            iteration_data['lat_used'] = old_div(
                iteration_data['lat_used'],
                float(tot_lat_measurement_duration))
            # t4: final counters; poll until a fresh sample (new tsc) arrives.
            t4_tsc = t2_tsc
            while t4_tsc == t2_tsc:
                t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats(
                )
            if self.test['test'] == 'fixed_rate':
                # fixed_rate: final figures cover only the tail window
                # (t2 -> t4); wait for a fresh latency sample as well.
                iteration_data['lat_tsc'] = t2_lat_tsc
                while iteration_data['lat_tsc'] == t2_lat_tsc:
                    iteration_data.update(self.gen_machine.lat_stats())
                sample_count = 0
                for percentile, bucket in enumerate(iteration_data['buckets'],
                                                    start=1):
                    sample_count += bucket
                    if sample_count > sum(
                            iteration_data['buckets']) * LAT_PERCENTILE:
                        break
                iteration_data['lat_perc_max'] = (percentile == len(
                    iteration_data['buckets']))
                iteration_data[
                    'lat_perc'] = percentile * iteration_data['bucket_size']
                delta_rx = t4_rx - t2_rx
                delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
                delta_tx = t4_tx - t2_tx
                delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
                delta_dp_tx = delta_tx - delta_non_dp_tx
                delta_dp_rx = delta_rx - delta_non_dp_rx
                iteration_data['abs_tx'] = delta_dp_tx
                iteration_data['abs_rx'] = delta_dp_rx
                iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
                iteration_data['pps_req_tx'] = None
                iteration_data['pps_tx'] = None
                iteration_data['pps_sut_tx'] = None
                iteration_data['drop_rate'] = 100.0 * (
                    iteration_data['abs_tx'] -
                    iteration_data['abs_rx']) / iteration_data['abs_tx']
                iteration_data['actual_duration'] = None
                break  ## Not really needed since the while loop will stop when evaluating the value of r
            else:
                # Non fixed_rate tests: compute the aggregated percentile
                # latency and the average rates over the whole window.
                sample_count = 0
                for percentile, bucket in enumerate(iteration_data['buckets'],
                                                    start=1):
                    sample_count += bucket
                    if sample_count > sum(
                            iteration_data['buckets']) * LAT_PERCENTILE:
                        break
                iteration_data['lat_perc_max'] = (percentile == len(
                    iteration_data['buckets']))
                iteration_data[
                    'lat_perc'] = percentile * iteration_data['bucket_size']
                iteration_data['pps_req_tx'] = (
                    tot_tx + tot_drop - tot_rx
                ) / iteration_data[
                    'actual_duration'] / 1000000.0  # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
                iteration_data['pps_tx'] = tot_tx / iteration_data[
                    'actual_duration'] / 1000000.0  # tot_tx is all generated packets actually accepted by the interface
                iteration_data['pps_rx'] = tot_rx / iteration_data[
                    'actual_duration'] / 1000000.0  # tot_rx is all packets received by the nop task = all packets received in the gen VM
                if self.sut_machine != None and sut_avail:
                    iteration_data[
                        'pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
                else:
                    iteration_data['pps_sut_tx'] = None
                # Absolute counters cover the full t1 -> t4 window,
                # including the ramp-up phase.
                iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx -
                                                              t1_non_dp_tx)
                iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx -
                                                              t1_non_dp_rx)
                iteration_data['abs_dropped'] = iteration_data[
                    'abs_tx'] - iteration_data['abs_rx']
                iteration_data['drop_rate'] = 100.0 * iteration_data[
                    'abs_dropped'] / iteration_data['abs_tx']
                # Stop retrying when the drop rate is below the threshold,
                # when zero drops were required and achieved, or when more
                # than maxz packets were dropped.
                if ((iteration_data['drop_rate'] <
                     self.test['drop_rate_threshold'])
                        or (iteration_data['abs_dropped'] ==
                            self.test['drop_rate_threshold'] == 0) or
                    (iteration_data['abs_dropped'] > self.test['maxz'])):
                    break
            self.gen_machine.stop_latency_cores()
        iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
        return (iteration_data)
Ejemplo n.º 20
0
 def run_tests(test_params):
     """Parse the configuration, start PROX everywhere and run all tests.

     Builds generator / SUT / background machine objects from the parsed
     configuration, starts PROX on each of them, then instantiates and runs
     every configured test in order.

     Args:
         test_params (dict): raw test parameters; they are first passed
             through RapidConfigParser.parse_config().

     Returns:
         bool: True when every test passed, False otherwise.

     Raises:
         Exception: when more than one generator or SUT is marked for
             monitoring.
     """
     test_params = RapidConfigParser.parse_config(test_params)
     RapidLog.debug(test_params)
     monitor_gen = monitor_sut = False
     background_machines = []
     sut_machine = gen_machine = None
     machines = []
     for machine_params in test_params['machines']:
         # Machines configured with generator cores are traffic generators;
         # everything else is a plain (potentially monitored SUT) machine.
         if 'gencores' in machine_params.keys():
             machine = RapidGeneratorMachine(test_params['key'],
                     test_params['user'], test_params['vim_type'],
                     test_params['rundir'], machine_params,
                     test_params['ipv6'])
             if machine_params['monitor']:
                 if monitor_gen:
                     RapidLog.exception("Can only monitor 1 generator")
                     raise Exception("Can only monitor 1 generator")
                 else:
                     monitor_gen = True
                     gen_machine = machine
             else:
                 # Unmonitored generators only produce background traffic.
                 background_machines.append(machine)
         else:
             machine = RapidMachine(test_params['key'], test_params['user'],
                     test_params['vim_type'], test_params['rundir'],
                     machine_params)
             if machine_params['monitor']:
                 if monitor_sut:
                     RapidLog.exception("Can only monitor 1 sut")
                     raise Exception("Can only monitor 1 sut")
                 else:
                     monitor_sut = True
                     sut_machine = machine
         machines.append(machine)
     if test_params['configonly']:
         sys.exit()
     for machine in machines:
         machine.start_prox()
     result = True
     for test_param in test_params['tests']:
         RapidLog.info(test_param['test'])
         if test_param['test'] in ['flowsizetest', 'TST009test',
                 'fixed_rate', 'increment_till_fail']:
             test = FlowSizeTest(test_param, test_params['lat_percentile'],
                     test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], gen_machine,
                     sut_machine, background_machines)
         elif test_param['test'] in ['corestats']:
             test = CoreStatsTest(test_param, test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], machines)
         elif test_param['test'] in ['portstats']:
             test = PortStatsTest(test_param, test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], machines)
         elif test_param['test'] in ['impairtest']:
             test = ImpairTest(test_param, test_params['lat_percentile'],
                     test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], gen_machine,
                     sut_machine)
         elif test_param['test'] in ['irqtest']:
             test = IrqTest(test_param, test_params['runtime'],
                     test_params['TestName'],
                     test_params['environment_file'], machines)
         elif test_param['test'] in ['warmuptest']:
             test = WarmupTest(test_param, gen_machine)
         else:
             # Bug fix: the original fell through here and still called
             # test.run() with 'test' undefined (NameError) or stale from
             # a previous loop pass. Skip unknown test names instead.
             RapidLog.debug('Test name ({}) is not valid:'.format(
                 test_param['test']))
             continue
         single_test_result = test.run()
         if not single_test_result:
             result = False
     return (result)
Ejemplo n.º 21
0
 def run_tests(self, test_params):
     """Parse the test configuration, start PROX on every machine and run
     the configured list of tests.

     Args:
         test_params (dict): raw parameters; normalised through
             RapidConfigParser.parse_config() before use.

     Returns:
         bool: True when every executed test passed, False otherwise.

     Raises:
         Exception: when more than one generator or more than one SUT
             machine is marked for monitoring.
     """
     test_params = RapidConfigParser.parse_config(test_params)
     RapidLog.debug(test_params)
     monitor_gen = monitor_sut = False
     background_machines = []
     sut_machine = gen_machine = None
     self.machines = []
     for machine_params in test_params['machines']:
         if 'gencores' in machine_params.keys():
             # A machine with generator cores acts as a traffic generator.
             machine = RapidGeneratorMachine(test_params['key'],
                                             test_params['user'],
                                             test_params['vim_type'],
                                             test_params['rundir'],
                                             machine_params,
                                             test_params['ipv6'])
             if machine_params['monitor']:
                 if monitor_gen:
                     RapidLog.exception("Can only monitor 1 generator")
                     raise Exception("Can only monitor 1 generator")
                 else:
                     monitor_gen = True
                     gen_machine = machine
             else:
                 # Non-monitored generators produce background traffic.
                 background_machines.append(machine)
         else:
             machine = RapidMachine(test_params['key'], test_params['user'],
                                    test_params['vim_type'],
                                    test_params['rundir'], machine_params)
             if machine_params['monitor']:
                 if monitor_sut:
                     RapidLog.exception("Can only monitor 1 sut")
                     raise Exception("Can only monitor 1 sut")
                 else:
                     monitor_sut = True
                     if machine_params['prox_socket']:
                         sut_machine = machine
         self.machines.append(machine)
     # Launch PROX on all machines in parallel. NOTE(review): the executor
     # is deliberately NOT shut down here — start_prox presumably runs for
     # the lifetime of the PROX process, so the futures (kept in
     # self.future_to_prox) must stay alive while the tests execute.
     prox_executor = concurrent.futures.ThreadPoolExecutor(
         max_workers=len(self.machines))
     self.future_to_prox = {
         prox_executor.submit(machine.start_prox,
                              test_params['configonly']): machine
         for machine in self.machines
     }
     if test_params['configonly']:
         # Configuration-only run: wait for generation to finish and exit.
         concurrent.futures.wait(self.future_to_prox,
                                 return_when=ALL_COMPLETED)
         sys.exit()
     # Connect to the PROX control sockets on all machines in parallel.
     with concurrent.futures.ThreadPoolExecutor(
             max_workers=len(self.machines)) as executor:
         future_to_connect_prox = {
             executor.submit(machine.connect_prox): machine
             for machine in self.machines
         }
         concurrent.futures.wait(future_to_connect_prox,
                                 return_when=ALL_COMPLETED)
     result = True
     for test_param in test_params['tests']:
         RapidLog.info(test_param['test'])
         if test_param['test'] in [
                 'flowsizetest', 'TST009test', 'fixed_rate',
                 'increment_till_fail'
         ]:
             test = FlowSizeTest(test_param, test_params['lat_percentile'],
                                 test_params['runtime'],
                                 test_params['TestName'],
                                 test_params['environment_file'],
                                 gen_machine, sut_machine,
                                 background_machines)
         elif test_param['test'] in ['corestats']:
             test = CoreStatsTest(test_param, test_params['runtime'],
                                  test_params['TestName'],
                                  test_params['environment_file'],
                                  self.machines)
         elif test_param['test'] in ['portstats']:
             test = PortStatsTest(test_param, test_params['runtime'],
                                  test_params['TestName'],
                                  test_params['environment_file'],
                                  self.machines)
         elif test_param['test'] in ['impairtest']:
             test = ImpairTest(test_param, test_params['lat_percentile'],
                               test_params['runtime'],
                               test_params['TestName'],
                               test_params['environment_file'], gen_machine,
                               sut_machine)
         elif test_param['test'] in ['irqtest']:
             test = IrqTest(test_param, test_params['runtime'],
                            test_params['TestName'],
                            test_params['environment_file'], self.machines)
         elif test_param['test'] in ['warmuptest']:
             test = WarmupTest(test_param, gen_machine)
         else:
             # Bug fix: skip unknown test names. Previously execution fell
             # through to test.run(), raising NameError (or re-running the
             # previous test object) for an invalid test name.
             RapidLog.debug('Test name ({}) is not valid:'.format(
                 test_param['test']))
             continue
         single_test_result = test.run()
         if not single_test_result:
             result = False
     return (result)
Ejemplo n.º 22
0
 def run(self):
     """Run the flow-size test sweep.

     For every imix packet-size mix and every requested flow count, the
     generator speed is adjusted iteratively (via new_speed /
     resolution_achieved) until the configured drop-rate and latency
     thresholds are met, printing one result-table row per flow count and
     optionally pushing the final numbers to a Prometheus pushgateway.

     Returns:
         bool: True when every measured end speed reached
         self.test['pass_threshold']; always False for 'fixed_rate'
         tests, which only report numbers.
     """
     self.gen_machine.start_latency_cores()
     TestPassed = True
     for imix in self.test['imixs']:
         size = mean(imix)
         self.gen_machine.set_udp_packet_size(imix)
         if self.background_machines:
             backgroundinfo = '{}Running {} x background traffic not represented in the table{}'.format(
                 bcolors.FLASH, len(self.background_machines), bcolors.ENDC)
         else:
             backgroundinfo = '{}{}'.format(bcolors.FLASH, bcolors.ENDC)
         self.set_background_size(self.background_machines, imix)
         RapidLog.info(
             "+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+"
         )
         RapidLog.info(
             '| UDP, {:>5} bytes, different number of flows by randomizing SRC & DST UDP port. {:116.116}|'
             .format(size, backgroundinfo))
         RapidLog.info(
             "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
         )
         RapidLog.info(
             '| Flows  | Speed requested  | Gen by core | Sent by NIC | Fwrd by SUT | Rec. by core           | Avg. Lat.|{:.0f} Pcentil| Max. Lat.|   Sent    |  Received |    Lost   | Total Lost|L.Ratio|Time|'
             .format(self.test['lat_percentile'] * 100))
         RapidLog.info(
             "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
         )
         for flow_number in self.test['flows']:
             attempts = 0
             self.gen_machine.reset_stats()
             if self.sut_machine:
                 self.sut_machine.reset_stats()
             # set_flows may round the requested flow count to what the
             # generator actually supports.
             flow_number = self.gen_machine.set_flows(flow_number)
             self.set_background_flows(self.background_machines,
                                       flow_number)
             endspeed = None
             speed = self.get_start_speed_and_init(size)
             while True:
                 attempts += 1
                 endwarning = False
                 print(str(flow_number) +
                       ' flows: Measurement ongoing at speed: ' +
                       str(round(speed, 2)) + '%      ',
                       end='\r')
                 sys.stdout.flush()
                 # Start generating packets at requested speed (in % of a 10Gb/s link)
                 self.gen_machine.set_generator_speed(speed)
                 self.set_background_speed(self.background_machines, speed)
                 self.start_background_traffic(self.background_machines)
                 # Get statistics now that the generation is stable and initial ARP messages are dealt with
                 pps_req_tx, pps_tx, pps_sut_tx, pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, abs_tx, abs_rx, abs_dropped, abs_tx_fail, drop_rate, lat_min, lat_used, r, actual_duration = self.run_iteration(
                     float(self.test['runtime']), flow_number, size, speed)
                 self.stop_background_traffic(self.background_machines)
                 if r > 1:
                     retry_warning = bcolors.WARNING + ' {:1} retries needed'.format(
                         r) + bcolors.ENDC
                 else:
                     retry_warning = ''
                 # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
                 # If the sum is lower than 95, it means that more than 5% of the latency measurements where dropped for accuracy reasons.
                 if (drop_rate + lat_used * 100) < 95:
                     lat_warning = bcolors.WARNING + ' Latency accuracy issue?: {:>3.0f}%'.format(
                         lat_used * 100) + bcolors.ENDC
                 else:
                     lat_warning = ''
                 if self.test['test'] == 'fixed_rate':
                     # Fixed-rate runs always record the current iteration.
                     endspeed = speed
                     endpps_req_tx = None
                     endpps_tx = None
                     endpps_sut_tx = None
                     endpps_rx = None
                     endlat_avg = lat_avg
                     endlat_perc = lat_perc
                     endlat_perc_max = lat_perc_max
                     endlat_max = lat_max
                     endabs_dropped = abs_dropped
                     enddrop_rate = drop_rate
                     endabs_tx = abs_tx
                     endabs_rx = abs_rx
                     if lat_warning or retry_warning:
                         endwarning = '|        | {:177.177} |'.format(
                             retry_warning + lat_warning)
                     success = True
                     TestPassed = False  # fixed rate testing cannot be True, it is just reported numbers every second
                     speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                 # The following if statement is testing if we pass the success criteria of a certain drop rate, average latency and maximum latency below the threshold
                 # The drop rate success can be achieved in 2 ways: either the drop rate is below a treshold, either we want that no packet has been lost during the test
                 # This can be specified by putting 0 in the .test file
                 elif (
                     (drop_rate < self.test['drop_rate_threshold']) or
                     (abs_dropped == self.test['drop_rate_threshold'] == 0)
                 ) and (lat_avg < self.test['lat_avg_threshold']) and (
                         lat_perc < self.test['lat_perc_threshold']) and (
                             lat_max < self.test['lat_max_threshold']):
                     # Warn when the generator delivered noticeably fewer
                     # packets than requested (> 1% short).
                     if (old_div((self.get_pps(speed, size) - pps_tx),
                                 self.get_pps(speed, size))) > 0.01:
                         speed_prefix = bcolors.WARNING
                         if abs_tx_fail > 0:
                             gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(
                                 self.get_pps(speed, size), pps_tx,
                                 abs_tx_fail) + bcolors.ENDC
                         else:
                             gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(
                                 self.get_pps(speed, size),
                                 pps_tx) + bcolors.ENDC
                     else:
                         speed_prefix = bcolors.ENDC
                         gen_warning = ''
                     endspeed = speed
                     endspeed_prefix = speed_prefix
                     endpps_req_tx = pps_req_tx
                     endpps_tx = pps_tx
                     endpps_sut_tx = pps_sut_tx
                     endpps_rx = pps_rx
                     endlat_avg = lat_avg
                     endlat_perc = lat_perc
                     endlat_perc_max = lat_perc_max
                     endlat_max = lat_max
                     endabs_dropped = None
                     enddrop_rate = drop_rate
                     endabs_tx = abs_tx
                     endabs_rx = abs_rx
                     if lat_warning or gen_warning or retry_warning:
                         endwarning = '|        | {:186.186} |'.format(
                             retry_warning + lat_warning + gen_warning)
                     success = True
                     success_message = ' SUCCESS'
                     speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                     # Bug fix: lat_perc_prefix was missing from this call,
                     # shifting the trailing prefix arguments by one
                     # position relative to the FAILED-branch and final
                     # report_result() calls.
                     RapidLog.debug(
                         self.report_result(
                             -attempts, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg, lat_perc,
                             lat_perc_max, lat_max, abs_tx, abs_rx,
                             abs_dropped, actual_duration, speed_prefix,
                             lat_avg_prefix, lat_perc_prefix,
                             lat_max_prefix, abs_drop_rate_prefix,
                             drop_rate_prefix) +
                         success_message + retry_warning + lat_warning +
                         gen_warning)
                 else:
                     # Failure: colour each metric that broke its threshold.
                     success_message = ' FAILED'
                     abs_drop_rate_prefix = bcolors.ENDC
                     if ((abs_dropped > 0)
                             and (self.test['drop_rate_threshold'] == 0)):
                         abs_drop_rate_prefix = bcolors.FAIL
                     if (drop_rate < self.test['drop_rate_threshold']):
                         drop_rate_prefix = bcolors.ENDC
                     else:
                         drop_rate_prefix = bcolors.FAIL
                     if (lat_avg < self.test['lat_avg_threshold']):
                         lat_avg_prefix = bcolors.ENDC
                     else:
                         lat_avg_prefix = bcolors.FAIL
                     if (lat_perc < self.test['lat_perc_threshold']):
                         lat_perc_prefix = bcolors.ENDC
                     else:
                         lat_perc_prefix = bcolors.FAIL
                     if (lat_max < self.test['lat_max_threshold']):
                         lat_max_prefix = bcolors.ENDC
                     else:
                         lat_max_prefix = bcolors.FAIL
                     if ((old_div((self.get_pps(speed, size) - pps_tx),
                                  self.get_pps(speed, size))) < 0.001):
                         speed_prefix = bcolors.ENDC
                     else:
                         speed_prefix = bcolors.FAIL
                     success = False
                     RapidLog.debug(
                         self.report_result(
                             -attempts, size, speed, pps_req_tx, pps_tx,
                             pps_sut_tx, pps_rx, lat_avg, lat_perc,
                             lat_perc_max, lat_max, abs_tx, abs_rx,
                             abs_dropped, actual_duration, speed_prefix,
                             lat_avg_prefix, lat_perc_prefix,
                             lat_max_prefix, abs_drop_rate_prefix,
                             drop_rate_prefix) + success_message +
                         retry_warning + lat_warning)
                 speed = self.new_speed(speed, size, success)
                 if self.resolution_achieved():
                     break
             if endspeed is not None:
                 if TestPassed and (endpps_rx <
                                    self.test['pass_threshold']):
                     TestPassed = False
                 speed_prefix = lat_avg_prefix = lat_perc_prefix = lat_max_prefix = abs_drop_rate_prefix = drop_rate_prefix = bcolors.ENDC
                 RapidLog.info(
                     self.report_result(
                         flow_number, size, endspeed, endpps_req_tx,
                         endpps_tx, endpps_sut_tx, endpps_rx, endlat_avg,
                         endlat_perc, endlat_perc_max, endlat_max,
                         endabs_tx, endabs_rx, endabs_dropped,
                         actual_duration, speed_prefix, lat_avg_prefix,
                         lat_perc_prefix, lat_max_prefix,
                         abs_drop_rate_prefix, drop_rate_prefix))
                 if endwarning:
                     RapidLog.info(endwarning)
                 RapidLog.info(
                     "+--------+------------------+-------------+-------------+-------------+------------------------+----------+----------+----------+-----------+-----------+-----------+-----------+-------+----+"
                 )
                 if self.test['pushgateway']:
                     # Push the final row for this flow count to the
                     # Prometheus pushgateway, if one is configured.
                     URL = self.test[
                         'pushgateway'] + '/metrics/job/' + self.test[
                             'test'] + '/instance/' + self.test[
                                 'environment_file']
                     if endabs_dropped is None:
                         ead = 0
                     else:
                         ead = endabs_dropped
                     DATA = 'Flows {}\nPacketSize {}\nRequestedPPS {}\nGeneratedPPS {}\nSentPPS {}\nForwardedPPS {}\nReceivedPPS {}\nAvgLatencyUSEC {}\nMaxLatencyUSEC {}\nSent {}\nReceived {}\nLost {}\nLostTotal {}\n'.format(
                         flow_number, size + 4,
                         self.get_pps(endspeed, size), endpps_req_tx,
                         endpps_tx, endpps_sut_tx, endpps_rx, endlat_avg,
                         endlat_max, endabs_tx, endabs_rx, ead, ead)
                     HEADERS = {
                         'X-Requested-With': 'Python requests',
                         'Content-type': 'text/xml'
                     }
                     response = requests.post(url=URL,
                                              data=DATA,
                                              headers=HEADERS)
                     if response.status_code not in (200, 202):
                         RapidLog.info(
                             'Cannot send metrics to {}'.format(URL))
                         RapidLog.info(DATA)
             else:
                 RapidLog.info('|{:>7}'.format(str(flow_number)) +
                               " | Speed 0 or close to 0")
     self.gen_machine.stop_latency_cores()
     return (TestPassed)