def __get_valgrind_params(self):
    """
    Get Valgrind command as list.

    The selected tool (self.valgrind: 'memcheck', 'callgrind' or 'massif')
    determines the tool-specific options, and the output routing flags
    (self.valgrind_console / self.valgrind_xml) determine where the report
    is written. Extra user-supplied parameters are appended last.

    :return: list of command-line arguments (empty when Valgrind is disabled)
    """
    valgrind = []
    if not self.valgrind:
        return valgrind
    valgrind.append('valgrind')
    if self.valgrind == 'memcheck':
        valgrind.extend(['--tool=memcheck', '--leak-check=full'])
        if self.valgrind_track_origins:
            valgrind.append('--track-origins=yes')
        if self.valgrind_console:
            # Just dump the default output, which is text dumped to console;
            # no extra flags needed.
            pass
        elif self.valgrind_xml:
            valgrind.extend([
                '--xml=yes',
                '--xml-file=' + LogManager.get_testcase_logfilename(
                    self.name + '_valgrind_mem.xml', prepend_tc_name=True)
            ])
        else:
            valgrind.append(
                '--log-file=' + LogManager.get_testcase_logfilename(
                    self.name + '_valgrind_mem.txt'))
    elif self.valgrind == 'callgrind':
        valgrind.extend([
            '--tool=callgrind',
            '--dump-instr=yes',
            '--simulate-cache=yes',
            '--collect-jumps=yes'])
        if self.valgrind_console:
            # Just dump the default output, which is text dumped to console;
            # no extra flags needed.
            pass
        elif self.valgrind_xml:
            valgrind.extend([
                '--xml=yes',
                '--xml-file=' + LogManager.get_testcase_logfilename(
                    self.name + '_valgrind_calls.xml', prepend_tc_name=True)
            ])
        else:
            valgrind.append(
                '--callgrind-out-file=' + LogManager.get_testcase_logfilename(
                    self.name + '_valgrind_calls.data'))
    elif self.valgrind == 'massif':
        valgrind.append('--tool=massif')
        valgrind.append(
            '--massif-out-file=' + LogManager.get_testcase_logfilename(
                self.name + '_valgrind_massif.data'))
    # This allows one to specify misc params to valgrind,
    # eg. "--threshold=0.4" to get some more data from massif.
    # Truthiness test (instead of != '') so that None — the attribute's
    # initial value — cannot reach .split() and raise AttributeError.
    if self.valgrind_extra_params:
        valgrind.extend(self.valgrind_extra_params.split())
    return valgrind
def test_configs_merge(self):
    """A user config file must be merged into LogManager.LOGGING_CONFIG."""
    config_path = os.path.abspath(
        os.path.join(__file__, os.path.pardir, "tests", "logging_config.json"))
    LogManager._read_config(config_path)
    merged_entry = LogManager.LOGGING_CONFIG.get("test_logger")
    self.assertDictEqual(merged_entry, {"level": "ERROR"})
def __init__(self):
    """
    Constructor for IceteaManager. Appends libraries to sys.path,
    loads the test case metadata schema, parses arguments and
    initializes logging.
    """
    # Directory containing this file; added to sys.path so bundled
    # libraries are importable.
    self.libpath = os.sep.join(
        os.path.abspath(__file__).split(os.sep)[:-1])
    sys.path.append(self.libpath)
    # Parent directory of libpath is added as well.
    libpath2 = os.sep.join(self.libpath.split(os.sep)[:-1])
    sys.path.append(libpath2)
    # Initialize TCMetaSchema with correct libpath
    TCMetaSchema(self.libpath)
    self.args, self.unknown = IceteaManager._parse_arguments()
    # If called with --clean, clean up logs.
    # Log cleanup runs before base logging is initialized — presumably so
    # the freshly created log directory is not wiped; TODO confirm.
    if self.args.clean:
        _cleanlogs(silent=self.args.silent, log_location=self.args.log)
    # File logging is suppressed when only listing test cases or suites.
    LogManager.init_base_logging(
        self.args.log,
        verbose=self.args.verbose,
        silent=self.args.silent,
        color=self.args.color,
        no_file=(self.args.list or self.args.listsuites),
        truncate=not self.args.disable_log_truncate)
    self.logger = LogManager.get_logger("icetea")
    self.pluginmanager = None
    self.resourceprovider = ResourceProvider(self.args)
    self._init_pluginmanager()
    self.resourceprovider.set_pluginmanager(self.pluginmanager)
def test_configs_read(self):
    """Missing config file raises IOError; a valid one is read cleanly."""
    missing_path = os.path.abspath(
        os.path.join(__file__, os.path.pardir, "tests", "does_not_exist.json"))
    with self.assertRaises(IOError):
        LogManager.init_base_logging(config_location=missing_path)
    # An existing, valid configuration file must not raise.
    valid_path = os.path.abspath(
        os.path.join(__file__, os.path.pardir, "tests", "logging_config.json"))
    LogManager._read_config(valid_path)
def test_configs_schema_validation(self):
    """Schema-invalid config raises ValidationError; a valid one passes."""
    invalid_path = os.path.abspath(
        os.path.join(__file__, os.path.pardir, "tests",
                     "erroneous_logging_config.json"))
    with self.assertRaises(ValidationError):
        LogManager._read_config(invalid_path)
    # The well-formed configuration must be accepted without raising.
    valid_path = os.path.abspath(
        os.path.join(__file__, os.path.pardir, "tests", "logging_config.json"))
    LogManager._read_config(valid_path)
def __init__(self, dut):
    """
    :param dut: dut object this instance is bound to
    """
    # A bench logger may not have been registered yet; in that case fall
    # back to no logger at all.
    try:
        bench_logger = LogManager.get_bench_logger()
    except KeyError:
        bench_logger = None
    self.logger = bench_logger
    self.response = None
    self.dut = dut
def __init__(self):
    """
    Initialize live-capture packet manager state.

    :raises ImportError: if the pyshark package is not available.
    """
    # Fail fast: without pyshark no capture is possible, so check before
    # running any other initialization side effects.
    if not pyshark:
        raise ImportError("Pyshark not installed.")
    NwPacketManager.__init__(self)
    self.logger = LogManager.get_bench_logger("bench", "WS")
    # Capture worker threads are created lazily when capturing starts.
    self.__captureThreadLive = None
    self.__captureThreadFile = None
def test_benchformatter_formats_timestamp(self):  # pylint: disable=invalid-name
    """Verify BenchFormatter emits ISO-8601 style UTC timestamps."""
    # NOTE(review): assertRegexpMatches is a deprecated alias removed in
    # Python 3.12; migrate to assertRegex once Python 2 support is dropped.
    formatter = LogManager.BenchFormatter(
        "%(asctime)s | %(source)s %(type)s %(threadName)s: "
        "%(message)s",
        "%Y-%m-%dT%H:%M:%S.%FZ")
    record = create_log_record("test_message")
    time_str = formatter.formatTime(record, "%Y-%m-%dT%H:%M:%S.%FZ")
    # Check that T exists, should be in between date and time.
    self.assertTrue(time_str.rfind("T") > 0)
    # Check that date format matches ISO-8601
    date = time_str[:time_str.rfind("T")]
    self.assertRegexpMatches(
        date,
        r"^([0-9]{4})(-?)(1[0-2]|0[1-9])\2(3[01]|0[1-9]|[12][0-9])$")
    # Chech that time format matches ISO-8601
    time_ending = time_str[time_str.rfind("T") + 1:time_str.rfind(".")]
    self.assertRegexpMatches(
        time_ending,
        r"^(2[0-3]|[01][0-9]):?([0-5][0-9]):?([0-5][0-9])$")
    # Check that time_str ends with Z to show it's UTC
    self.assertTrue(time_str.endswith("Z"))
    # Check that milliseconds exist
    millis = time_str[time_str.rfind(".") + 1:time_str.rfind("Z")]
    self.assertRegexpMatches(millis, r"([0-9][0-9][0-9])")
def __init__(self, resource_configuration, resources, configurations, **kwargs):
    """
    Initialize BenchFunctions.

    :param resource_configuration: resource configuration collaborator
    :param resources: resources collaborator
    :param configurations: configurations collaborator
    :param kwargs: forwarded to the superclass constructor
    """
    super(BenchFunctions, self).__init__(**kwargs)
    # Use a dummy logger until a real one is attached elsewhere.
    self._logger = LogManager.get_dummy_logger()
    self._resources = resources
    self._configurations = configurations
    self._resource_configuration = resource_configuration
def get_nw_log_filename(self):  # pylint: disable=no-self-use
    """
    Resolve the file name used for the network capture log.

    :return: string path of the pcap log file
    """
    filename = LogManager.get_testcase_logfilename("network.nw.pcap")
    return filename
def __init__(self, name, cmd=None, path=None, logger=None):
    """
    Initialize process handle state.

    :param name: name used for logging and Valgrind output file names
    :param cmd: command to execute, defaults to None
    :param path: working path for the command, defaults to None
    :param logger: logger to use; a bench logger is created when omitted
    """
    self.name = name
    # Subprocess handle; created when the process is started.
    self.proc = None
    self.logger = logger
    self.cmd = None
    self.cmd_arr = None
    self.path = None
    # Debugger-related flags (gdb / gdbserver / vgdb).
    self.gdb = False
    self.gdbs = False
    self.vgdb = False
    self.gdbs_port = None
    self.nobuf = False
    # Valgrind configuration; self.valgrind holds the tool name
    # ('memcheck', 'callgrind', 'massif') or None when disabled.
    self.valgrind = None
    self.valgrind_xml = None
    self.valgrind_console = None
    self.valgrind_track_origins = None
    self.valgrind_extra_params = None
    self.__print_io = True
    self.__valgrind_log_basename = None
    # Thread that reads process output; created when the process starts.
    self.read_thread = None
    self.__ignore_return_code = False
    self.default_retcode = 0
    if not self.logger:
        self.logger = LogManager.get_bench_logger(name, 'GP', False)
    self.cmd = cmd
    self.path = path
def __init__(self, name, params=None):
    """
    Initialize Dut state and register it in the class-level dut list.

    :param name: name of this dut, used for logging
    :param params: optional parameters stored as-is
    """
    # Class-level list of all duts; created lazily on first construction.
    if Dut._dutlist is None:
        Dut._dutlist = []
    self.testcase = ''
    self.version = {}
    self.dutinformation = None
    self.name = name
    self.dut_name = name
    self.stopped = False
    self.comport = False  # TODO: Move to DutProcess?
    # TODO: Move to MeshCommands
    self.MAC = None  # pylint: disable=invalid-name
    self.location = Location(0, 0)
    self.logger = LogManager.get_bench_logger('Dut.%s' % name, name)
    # Time source defaults to time.time.
    self.get_time = time.time
    self.query = None  # Query to node
    self.query_timeout = 0
    self.query_async_expected = None  # Expected retcode for async cmd
    self.query_async_response = None  # Async response to fullfill when response is available
    self.waiting_for_response = None
    self.response_coming_in = None  # Response coming in
    self.prev = None  # Previous command, stored for logging purposes
    self.traces = []  # All traces
    self.response_traces = []  # Incoming response lines
    # Event starts set — presumably meaning "no response pending";
    # TODO confirm against command-sending code.
    self.response_received = Event()
    self.response_received.set()
    self.config = {}
    self.init_cli_cmds = None
    self.post_cli_cmds = None
    self.params = params
    self.index = None
    self.init_done = Event()
    self.init_event_matcher = None
    self.init_wait_timeout = None
    Dut._dutlist.append(self)
def __init__(self, json_configuration=None, logger=None):
    """
    :param json_configuration: JSON configuration data, defaults to None
    :param logger: logger to use; a dummy logger is created when omitted
    """
    self.json_config = json_configuration
    self._dut_requirements = []
    self._sim_config = None
    # Guarantee a usable logger so logging calls never fail.
    self.logger = logger if logger is not None else LogManager.get_dummy_logger()
    self._counts = {"total": 0, "hardware": 0, "process": 0}
def get_current_filename(self, extension, basename="result."):
    """
    Generate filename for a report.

    :param extension: Extension for file name
    :param basename: Base file name
    :return: path to basename.extension
    """
    filename = basename + extension
    return os.path.join(LogManager.get_base_dir(), filename)
def get_latest_filename(self, extension, basename="../latest."):
    """
    Generate filename with 'latest.' prefix.

    :param extension: Extension for file
    :param basename: Base file name
    :return: path to latest.basename.extension.
    """
    filename = basename + extension
    return os.path.join(LogManager.get_base_dir(), filename)
def __init__(self, args):
    """
    :param args: parsed command-line arguments
    """
    # @TODO: Refactor args into some separate configuration class maybe?
    self.args = args
    self.allocator = None
    self.jsonconf = None
    # File logging is disabled while only listing test cases or suites.
    listing_only = self.args.list or self.args.listsuites
    no_file = not listing_only
    self.logger = LogManager.get_resourceprovider_logger(
        "ResourceProvider", "RSP", no_file)
    self._pluginmanager = None
def __init__(self, resources, configurations, args, logger=None, **kwargs):
    """
    :param resources: resources collaborator
    :param configurations: configurations collaborator
    :param args: parsed command-line arguments
    :param logger: logger to use; a dummy logger is created when omitted
    :param kwargs: forwarded to the superclass constructor
    """
    super(NetworkSniffer, self).__init__(**kwargs)
    self._logger = logger if logger else LogManager.get_dummy_logger()
    self._resources = resources
    self._configurations = configurations
    self._args = args
    # No capture is running until the sniffer is started.
    self.__sniffing = False
    self.__wshark = None
    self.__capture_file = None
    self.__tshark_arguments = {}
def __init__(self):
    """Initialize an empty response holder."""
    # Prefer the registered bench logger; if none exists yet fall back to
    # a plain "bench" logger.
    try:
        bench_logger = LogManager.get_bench_logger()
    except KeyError:
        bench_logger = logging.getLogger("bench")
    self.logger = bench_logger
    self.retcode = None
    self.parsed = None
    self.timeout = False
    self.timedelta = 0
    self.lines = []
    self.traces = []
def resolve_configuration(self, conf, resource_configuration):
    """
    Resolve the configuration from given JSON encoded configuration data.

    :param conf: JSON encoded configuration
    :param resource_configuration: ResourceConfig object
    """
    # Lazily create a logger when none has been set up yet.
    if not self.logger:
        self.logger = LogManager.get_resourceprovider_logger(
            "ResourceProvider", "RSP")
    self.jsonconf = conf
    # Delegate the actual resolution to the ResourceConfig object.
    resource_configuration.resolve_configuration(conf)
def append_result(self, tc_file=None):
    """
    Append a new fully constructed Result to the internal ResultList.

    :param tc_file: Test case file path
    :return: Nothing
    """
    result = Result()
    result.set_tc_metadata(self._configuration.config)
    # Attach git revision information of the test case file.
    tc_rev = get_git_info(self._configuration.get_tc_abspath(tc_file),
                          verbose=self._args.verbose)
    if self._logger:
        self._logger.debug(tc_rev)
    result.set_tc_git_info(tc_rev)
    result.component = self._configuration.get_test_component()
    result.feature = self._configuration.get_features_under_test()
    result.skip_reason = self._configuration.skip_reason() if self._configuration.skip() else ''
    result.fail_reason = self._failreason
    result.logpath = os.path.abspath(LogManager.get_base_dir())
    result.logfiles = LogManager.get_logfiles()
    result.retcode = self.retcode
    result.set_dutinformation(self._resources.dutinformations)
    # pylint: disable=unused-variable
    for platform, serialnumber in zip(self._resources.get_platforms(),
                                      self._resources.get_serialnumbers()):
        # Zipping done to keep platforms and serial numbers aligned in case some sn:s are
        # missing
        result.dut_vendor.append('')
        result.dut_resource_id.append(serialnumber)
    result.dut_count = self._resources.get_dut_count()
    result.duts = self._resources.resource_configuration.get_dut_configuration()
    # Classify the dut type; hardware takes precedence over process.
    if self._resources.resource_configuration.count_hardware() > 0:
        result.dut_type = 'hw'
    elif self._resources.resource_configuration.count_process() > 0:
        result.dut_type = 'process'
    else:
        result.dut_type = None
    self._result_list.append(result)
def open_node_terminal(self, k='*', wait=True):
    """
    Open Putty (/or kitty if exists)

    :param k: number 1.<max duts> or '*' to open putty to all devices
    :param wait: wait while putty is closed before continue testing
    :return: Nothing
    """
    # '*' fans out to every dut index and then returns.
    if k == '*':
        for ind in self._resource_configuration.get_dut_range():
            self.open_node_terminal(ind, wait)
        return
    # Skip duts that do not belong to this process.
    if not self._resources.is_my_dut_index(k):
        return
    # Serial parameters for putty/kitty; k is 1-based, duts list 0-based.
    params = '-serial ' + self._resources.duts[
        k - 1].comport + ' -sercfg ' + str(
            self._resources.duts[k - 1].serialBaudrate)
    putty_exe = self._configurations.env['extApps']['puttyExe']
    # Prefer kitty over putty when it is installed.
    if os.path.exists(self._configurations.env['extApps']['kittyExe']):
        putty_exe = self._configurations.env['extApps']['kittyExe']
    if "kitty.exe" in putty_exe:
        # kitty supports a window title and session logging.
        params = params + ' -title "' + self._resources.duts[k - 1].comport
        params += ' - ' + self._configurations.test_name
        params += ' | DUT' + str(k) + ' ' + self._resources.get_dut_nick(
            k) + '"'
        params += ' -log "' + LogManager.get_testcase_logfilename(
            'DUT%d.manual' % k) + '"'
    if os.path.exists(putty_exe):
        command = putty_exe + ' ' + params
        self._logger.info(command)
        if wait:
            # Release the dut for the duration of the manual session,
            # block until the terminal closes, then reopen the dut.
            if self._resources.is_my_dut_index(k):
                self._resources.duts[k - 1].close_dut()
                self._resources.duts[k - 1].close_connection()
                self._resources.resource_provider.allocator.release(
                    dut=self._resources.duts[k - 1])
                process = subprocess.Popen(command)
                time.sleep(2)
                process.wait()
                self._resources.duts[k - 1].open_dut()
        else:
            # Fire-and-forget terminal; testing continues immediately.
            subprocess.Popen(command, close_fds=True)
    else:
        self._logger.warning('putty not exists in path: %s', putty_exe)
def start_dut_thread(self):  # pylint: disable=no-self-use
    """
    Start Dut thread.

    Lazily initializes the class-level worker state (run flag, semaphore,
    signalled-dut queue, logger) and spawns the worker thread; does
    nothing if the thread already exists.

    :return: Nothing
    """
    if Dut._th is None:
        # Shared class-level state consumed by Dut.run.
        Dut._run = True
        Dut._sem = Semaphore(0)
        Dut._signalled_duts = deque()
        Dut._logger = LogManager.get_bench_logger('Dut')
        # Daemon thread so it cannot block interpreter shutdown.
        Dut._th = Thread(target=Dut.run, name='DutThread')
        Dut._th.daemon = True
        Dut._th.start()
def __start_sniffer(self):
    """
    Start network sniffer capturing pcap to a file.

    :return: Nothing
    :raises TestStepError: when no capture interface is available or the
        capture backend cannot be imported.
    """
    iface = self.__get_nw_interface()
    if not iface:
        raise TestStepError("Cannot capture wireshark log")
    try:
        self.__create_wshark_object()
    except ImportError as error:
        # Surface a missing capture backend as a test step failure.
        raise TestStepError(error)
    self.__capture_file = LogManager.get_testcase_logfilename(
        "network.nw.pcap")
    # NOTE(review): the lines below read self.capture_file / self.wshark
    # while the assignments use the name-mangled self.__capture_file /
    # self.__wshark — presumably matching properties exist elsewhere in
    # the class; verify, otherwise these raise AttributeError.
    self._logger.debug('Start wireshark capture: %s', self.capture_file)
    # Add self.tshark_preferences to parameters
    # when pyshark starts supporting the -o tshark argument
    self.wshark.startCapture(iface,
                             self.__capture_file,
                             self.__tshark_arguments)
    self.__sniffing = True
def __init__(self):
    """Initialize packet-manager state and the wireshark bench logger."""
    NwPacketManager.__init__(self)
    # Capture worker threads are created only when capturing starts.
    self.__captureThreadLive = None
    self.__captureThreadFile = None
    self.logger = LogManager.get_bench_logger("bench", "WS")
def __init__(self, args=None, logger=None, **kwargs):
    """
    :param args: parsed command-line arguments, defaults to None
    :param logger: logger to use; a dummy logger is created when omitted
    :param kwargs: forwarded to _parse_config
    """
    super(Configurations, self).__init__()
    self._config, self._integer_keys_found = self._parse_config(**kwargs)
    self._env_cfg = None
    self._args = args
    # Guarantee a usable logger so logging calls never fail.
    self._logger = logger if logger else LogManager.get_dummy_logger()
def run(self, forceflash=False):
    """
    Runs the testcase associated with this container.

    :param forceflash: boolean, True if forceflash should be used
    :return: Result
    """
    # A finished container is being repeated: build a fresh bench instance.
    if self.status == TestStatus.FINISHED:
        self.logger.debug("Creating new bench instance for repeat.")
        self._instance = self._create_new_bench_instance(self._modulename)
        self.set_final_config()
    self.status = TestStatus.RUNNING
    self.logger.debug("Starting test case {}".format(self.tcname))
    tc_instance = self.get_instance()
    # Skip check: returns a ready-made Result when the case is skipped.
    result = self._check_skip(tc_instance)
    if result:
        self.logger.debug("Skipping test case {}".format(self.tcname))
        self._result = result
        self.status = TestStatus.FINISHED
        return result
    # Check if version checking is enabled in cli
    # and if the bench has the compatible key in it's config.
    result = self._check_version(tc_instance)
    if result is not None:
        self.logger.debug(
            "Version check triggered, skipping test case {}.".format(
                self.tcname))
        self._result = result
        self.status = TestStatus.FINISHED
        return result
    # Re-parse command-line arguments for this test case run.
    parser = get_tc_arguments(get_base_arguments(get_parser()))
    args, unknown = parser.parse_known_args()
    if len(unknown) > 0:
        for para in unknown:
            self.logger.warning(
                "Icetea received unknown parameter {}".format(para))
        # Unknown parameters are fatal unless explicitly ignored.
        if not args.ignore_invalid_params:
            self.logger.error(
                "Unknown parameters received, exiting. To ignore this add --ignore_invalid_params flag."
            )
            parser.print_help()
            result = tc_instance.get_result()
            result.set_verdict(verdict="inconclusive", retcode=-1, duration=0)
            self.status = TestStatus.FINISHED
            return result
    args.forceflash = forceflash
    self.status = TestStatus.RUNNING
    tc_instance.set_args(args)
    self.logger.info("")
    self.logger.info("START TEST CASE EXECUTION: '%s'" % tc_instance.get_test_name())
    self.logger.info("")
    # Timestamps around the bench run; duration is computed from these.
    a = datetime.datetime.now()
    try:
        retcode = tc_instance.run()
        self.logger.debug(
            "Test bench returned return code {}".format(retcode))
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt and
        # SystemExit here; -9999 marks an unexpected bench crash.
        traceback.print_exc()
        retcode = -9999
    b = datetime.datetime.now()
    result = tc_instance.get_result(tc_file=self._filepath)
    # Force garbage collection
    gc.collect()
    # cleanup Testcase
    tc_instance = None
    LogManager.finish_testcase_logging()
    self.status = TestStatus.FINISHED
    if isinstance(result, ResultList):
        self.logger.debug("Received a list of results from test bench.")
        return result
    # Give the user a short window to abort the whole run with a second
    # CTRL + C after a user-aborted test case.
    if result.retcode == ReturnCodes.RETCODE_FAIL_ABORTED_BY_USER:
        print("Press CTRL + C again if you want to abort test run")
        try:
            time.sleep(5)
        except KeyboardInterrupt:
            self.status = TestStatus.FINISHED
            raise
    c = b - a
    duration = c.total_seconds()
    self.logger.debug("Duration: {} seconds".format(duration))
    # Map the bench return code onto a verdict string.
    verdict = None
    if retcode == 0:
        verdict = "pass"
    elif retcode in ReturnCodes.INCONCLUSIVE_RETCODES:
        verdict = "inconclusive"
    elif retcode == ReturnCodes.RETCODE_SKIP:
        verdict = "skip"
    else:
        verdict = "fail"
    result.set_verdict(verdict=verdict, retcode=retcode, duration=duration)
    self._result = result
    return result
def __init__(self, benchapi, logger=None, **kwargs):
    """
    :param benchapi: bench API object the state machine drives
    :param logger: logger to use; a dummy logger is created when omitted
    :param kwargs: forwarded to the superclass constructor
    """
    super(RunnerSM, self).__init__(**kwargs)
    self._benchapi = benchapi
    # The state machine itself is built later.
    self.machine = None
    self.logger = logger if logger else LogManager.get_dummy_logger()
def __init__(self, dut):
    """
    :param dut: dut object this instance is bound to
    """
    # get_bench_logger raises KeyError when no bench logger has been
    # registered yet; fall back to None in that case, consistent with the
    # sibling class in this module that guards the same call.
    try:
        self.logger = LogManager.get_bench_logger()
    except KeyError:
        self.logger = None
    self.response = None
    self.dut = dut
def __init__(self):
    """Set up empty packet/mark storage, locking and the bench logger."""
    self.logger = LogManager.get_bench_logger("bench", "WS")
    self.__lock = Lock()
    self.__packets = []
    self.__marks = []
    # Place an initial 'start' mark at the head of the packet list.
    self.setMarkForHead('start')