def wait_udev_event(action='add', match_dict=None, subsystem=None, devtype=None, devicepath=None):
    """
    Block until a udev event matching the given criteria is observed.

    :param action: udev action to wait for: 'add', 'remove' or 'change'
    :param match_dict: dict of udev properties that must all match the event
    :param devicepath: string matched against the event's DEVLINKS/DEVNAME
    :param subsystem: optional udev subsystem filter
    :param devtype: optional udev devtype filter (requires subsystem)
    :raises LAVABug: on invalid arguments
    """
    if action not in ['add', 'remove', 'change']:
        # BUG FIX: the previous message omitted 'change' even though it is
        # accepted by the check above.
        raise LAVABug(
            "Invalid action for udev to wait for: %s, expected 'add', 'remove' or 'change'" % action)
    if match_dict:
        if not isinstance(match_dict, dict):
            raise LAVABug("match_dict was not a dict")
    else:
        if devicepath:
            if not isinstance(devicepath, str):
                raise LAVABug("devicepath was not a string")
            match_dict = {}
        else:
            raise LAVABug("Neither match_dict nor devicepath were set")
    if devtype and not subsystem:
        raise LAVABug("Cannot filter udev by devtype without a subsystem")
    # The requested action is part of the match criteria.
    match_dict['ACTION'] = action
    context = pyudev.Context()
    monitor = pyudev.Monitor.from_netlink(context)
    if devtype and subsystem:
        monitor.filter_by(subsystem, devtype)
    else:
        if subsystem:
            monitor.filter_by(subsystem)
    # Poll until an event matches every key in match_dict (and, when given,
    # the devicepath appears in the device's DEVLINKS or DEVNAME).
    for device in iter(monitor.poll, None):
        same = _dict_compare(dict(device), match_dict)
        if same == set(match_dict.keys()):
            if devicepath:
                if devicepath in dict(device).get('DEVLINKS', '') or \
                        devicepath in dict(device).get('DEVNAME', ''):
                    break
            else:
                break
def wait_udev_event_setup(devicepath, devtype, match_dict, subsystem):
    """
    Setup pyudev internals for use by wait_udev_event and
    wait_udev_change_event methods

    :param devicepath:
    :param devtype:
    :param match_dict:
    :param subsystem:
    :return: (context, match_dict, monitor)
             context is a pyudev.Context instance
             match_dict from input parameter (initialized to dict if unset)
             monitor is pyudev.Monitor instance
    """
    # Validate the match criteria using guard clauses.
    if not match_dict:
        if not devicepath:
            raise LAVABug("Neither match_dict nor devicepath were set")
        if not isinstance(devicepath, str):
            raise LAVABug("devicepath was not a string")
        match_dict = {}
    elif not isinstance(match_dict, dict):
        raise LAVABug("match_dict was not a dict")
    if devtype and not subsystem:
        raise LAVABug("Cannot filter udev by devtype without a subsystem")
    # Create and configure the monitor; devtype implies subsystem here,
    # since the devtype-without-subsystem case raised above.
    context = pyudev.Context()
    monitor = pyudev.Monitor.from_netlink(context)
    if subsystem:
        if devtype:
            monitor.filter_by(subsystem, devtype)
        else:
            monitor.filter_by(subsystem)
    return context, match_dict, monitor
def __init__(self, command, lava_timeout, logger=None, cwd=None, window=2000):
    """
    Spawn *command* under pexpect with LAVA-specific defaults.

    :param command: shell command string to spawn
    :param lava_timeout: Timeout instance providing the pexpect timeout duration
    :param logger: logger, wrapped in a ShellLogger to capture raw I/O
    :param cwd: working directory for the spawned process
    :param window: maxread buffer size in bytes; may be a numeric string
                   (constants need to be stored as strings)
    :raises LAVABug: on an invalid window size, missing/invalid timeout
                     or missing logger
    """
    if isinstance(window, str):
        # constants need to be stored as strings.
        try:
            window = int(window)
        except ValueError as exc:
            # BUG FIX: chain the original exception so the cause of the
            # bad window value remains visible in the traceback.
            raise LAVABug("ShellCommand was passed an invalid window size of %s bytes." % window) from exc
    if not lava_timeout or not isinstance(lava_timeout, Timeout):
        raise LAVABug("ShellCommand needs a timeout set by the calling Action")
    if not logger:
        raise LAVABug("ShellCommand needs a logger")
    pexpect.spawn.__init__(
        self,
        command,
        timeout=lava_timeout.duration,
        cwd=cwd,
        logfile=ShellLogger(logger),
        encoding='utf-8',
        # Data before searchwindowsize point is preserved, but not searched.
        searchwindowsize=None,  # pattern match the entire buffer
        maxread=window,  # limit the size of the buffer. 1 to turn off buffering
        codec_errors='replace'
    )
    self.name = "ShellCommand"
    self.logger = logger
    # set a default newline character, but allow actions to override as necessary
    self.linesep = LINE_SEPARATOR
    self.lava_timeout = lava_timeout
def disconnect(self, reason):
    """Send the protocol-appropriate quit sequence and close this connection."""
    logger = logging.getLogger("dispatcher")
    # Refuse to disconnect unless the session type is recognized either by
    # tag or by connection name.
    known_tags = set(RECOGNIZED_TAGS) & set(self.tags or [])
    if not self.tags or (self.name not in self.recognized_names and not known_tags):
        raise LAVABug("'disconnect' not implemented")
    try:
        if "telnet" in self.tags:
            logger.info("Disconnecting from telnet: %s", reason)
            self.sendcontrol("]")
            self.sendline("quit", disconnecting=True)
        elif "ssh" in self.tags:
            logger.info("Disconnecting from ssh: %s", reason)
            for line in ("", "~."):
                self.sendline(line, disconnecting=True)
        elif self.name == "LxcSession":
            logger.info("Disconnecting from lxc: %s", reason)
            for line in ("", "exit"):
                self.sendline(line, disconnecting=True)
        elif self.name == "QemuSession":
            logger.info("Disconnecting from qemu: %s", reason)
        else:
            raise LAVABug("'disconnect' not supported for %s" % self.tags)
    except ValueError:
        # protection against file descriptor == -1
        logger.debug("Already disconnected")
    self.connected = False
    if self.raw_connection:
        self.raw_connection.close(force=True)
        self.raw_connection = None
def disconnect(self, reason):
    """Close the session, issuing the quit sequence for the connection type."""
    logger = logging.getLogger('dispatcher')
    if not self.tags:
        raise LAVABug("'disconnect' not implemented")
    try:
        if 'telnet' in self.tags:
            logger.info("Disconnecting from telnet: %s", reason)
            self.sendcontrol(']')
            self.sendline('quit', disconnecting=True)
        elif 'ssh' in self.tags:
            logger.info("Disconnecting from ssh: %s", reason)
            for line in ('', '~.'):
                self.sendline(line, disconnecting=True)
        elif self.name == "LxcSession":
            logger.info("Disconnecting from lxc: %s", reason)
            for line in ('', 'exit'):
                self.sendline(line, disconnecting=True)
        elif self.name == 'QemuSession':
            logger.info("Disconnecting from qemu: %s", reason)
        else:
            raise LAVABug("'disconnect' not supported for %s" % self.tags)
    except ValueError:
        # protection against file descriptor == -1
        logger.debug("Already disconnected")
    self.connected = False
    if self.raw_connection:
        self.raw_connection.close(force=True)
        self.raw_connection = None
def run(self, connection, max_end_time):
    """Create the compressed overlay tarball from the prepared overlay tree."""
    tarball_path = os.path.join(self.mkdtemp(), "overlay-%s.tar.gz" % self.level)
    overlay_location = self.get_namespace_data(action='test', label='shared', key='location')
    results_dir = self.get_namespace_data(
        action='test', label='results', key='lava_test_results_dir')
    # Publish the output path before attempting the archive.
    self.set_namespace_data(action='test', label='shared', key='output', value=tarball_path)
    if not overlay_location:
        raise LAVABug("Missing lava overlay location")
    if not os.path.exists(overlay_location):
        raise LAVABug("Unable to find overlay location")
    if not self.valid:
        self.logger.error(self.errors)
        return connection
    connection = super().run(connection, max_end_time)
    with chdir(overlay_location):
        try:
            with tarfile.open(tarball_path, "w:gz") as tar:
                tar.add(".%s" % results_dir)
                # ssh authorization support
                if os.path.exists('./root/'):
                    tar.add(".%s" % '/root/')
        except tarfile.TarError as exc:
            raise InfrastructureError(
                "Unable to create lava overlay tarball: %s" % exc)
    self.set_namespace_data(action=self.name, label='output', key='file', value=tarball_path)
    return connection
def _api_select(self, data, action=None):
    """Service 'pre-os-command' / 'pre-power-command' protocol requests."""
    if not data:
        raise TestError("[%s] Protocol called without any data." % self.name)
    if not action:
        raise LAVABug('LXC protocol needs to be called from an action.')

    def run_all(command):
        # Normalise to a list and execute each command, failing hard on error.
        if not isinstance(command, list):
            command = [command]
        for cmd in command:
            if not action.run_command(cmd.split(' '), allow_silent=True):
                raise InfrastructureError("%s failed" % cmd)

    for item in data:
        if 'request' not in item:
            raise LAVABug("[%s] Malformed protocol request data." % self.name)
        request = item['request']
        if 'pre-os-command' in request:
            action.logger.info("[%s] Running pre OS command via protocol.", self.name)
            command = action.job.device.pre_os_command
            if not command:
                raise JobError("No pre OS command is defined for this device.")
            run_all(command)
        elif 'pre-power-command' in request:
            action.logger.info("[%s] Running pre-power-command via protocol.", self.name)
            command = action.job.device.pre_power_command
            if not command:
                raise JobError("No pre power command is defined for this device.")
            run_all(command)
        else:
            raise JobError("[%s] Unrecognised protocol request: %s" % (self.name, item))
def _check_action(self, action):  # pylint: disable=no-self-use
    """
    Validate that *action* can be added to a pipeline.

    :param action: candidate object to add
    :raises LAVABug: if action is falsy or not an Action instance
    """
    # isinstance() already covers subclasses - the previous
    # issubclass(type(action), Action) was a long-hand equivalent.
    if not action or not isinstance(action, Action):
        raise LAVABug("Only actions can be added to a pipeline: %s" % action)
    # if isinstance(action, DiagnosticAction):
    #     raise LAVABug("Diagnostic actions need to be triggered, not added to a pipeline.")
    # NOTE: a trailing "if not action" re-check was removed - it was
    # unreachable because any falsy action already raised above.
def __init__(self, parent=None, job=None, parameters=None):
    """
    Build a pipeline, optionally nested under a parent Action.

    :param parent: owning Action for an internal pipeline, or None
    :param job: the Job this pipeline belongs to
    :param parameters: parameter dict (defaults to empty)
    :raises LAVABug: if parent is not an Action or has no level set
    """
    self.actions = []
    self.parent = None
    self.parameters = parameters if parameters is not None else {}
    self.job = job
    if parent is None:
        return
    # Internal pipelines hang off an Action that already has a level.
    if not isinstance(parent, Action):
        raise LAVABug("Internal pipelines need an Action as a parent")
    if not parent.level:
        raise LAVABug(
            "Tried to create a pipeline using a parent action with no level set."
        )
    self.parent = parent
def run(self, connection, max_end_time):
    """Fake action: pause briefly, then record a failed result."""
    if connection:
        raise LAVABug("Fake action not meant to have a real connection")
    # Simulate some work before reporting failure.
    time.sleep(3)
    self.results = dict(status="failed")
    return connection
def run(self, connection, max_end_time):
    """Erase the VExpress flash via the MCC flash menu and return to the main menu."""
    if not connection:
        raise LAVABug("%s started without a connection already in use" % self.name)
    connection = super().run(connection, max_end_time)

    def send_and_wait(cmd, prompt):
        # Optionally send a command, switch the expected prompt, then wait.
        if cmd is not None:
            connection.sendline(cmd)
        self.logger.debug("Changing prompt to '%s'", prompt)
        connection.prompt_str = prompt
        self.wait(connection)

    # From Versatile Express MCC, enter flash menu
    send_and_wait(self.flash_enter_cmd, self.flash_prompt)
    # Issue flash erase command, wait for the erase-started message
    send_and_wait(self.flash_erase_cmd, self.flash_erase_msg)
    # Once we know the erase is underway.. wait for the prompt
    send_and_wait(None, self.flash_prompt)
    # If flash erase command has completed, return to MCC main menu
    send_and_wait(self.flash_exit_cmd, self.mcc_prompt)
    return connection
def run(self, connection, max_end_time):
    """Interrupt VExpress autorun (when enabled) and settle on the MCC prompt."""
    if not connection:
        raise LAVABug("%s started without a connection already in use" % self.name)
    connection = super().run(connection, max_end_time)
    # Possible prompts from device config
    connection.prompt_str = [self.autorun_prompt, self.mcc_prompt, self.mcc_reset_msg]
    self.logger.debug("Changing prompt to '%s'", connection.prompt_str)
    index = self.wait(connection)
    matched = connection.prompt_str[index]
    if matched == self.autorun_prompt:
        # Autorun is active: interrupt and wait for the MCC prompt (or reset).
        self.logger.debug("Autorun enabled: interrupting..")
        connection.sendline(self.interrupt_char)
        connection.prompt_str = [self.mcc_prompt, self.mcc_reset_msg]
        index = self.wait(connection)
        matched = connection.prompt_str[index]
    elif matched == self.mcc_prompt:
        self.logger.debug(
            "Already at MCC prompt: autorun looks to be disabled")
    # A reset message at any point means the interrupt failed.
    if matched == self.mcc_reset_msg:
        raise InfrastructureError("MCC: Unable to interrupt auto-run")
    return connection
def run_cmd(self, command_list, allow_fail=False, error_msg=None, cwd=None): """ Run the given command on the dispatcher. If the command fail, a JobError will be raised unless allow_fail is set to True. The command output will be visible (almost) in real time. :param: command_list - the command to run (as a list) :param: allow_fail - if True, do not raise a JobError when the command fail (return non 0) :param: error_msg - the exception message. :param: cwd - the current working directory for this command """ # Build the command list (adding 'nice' at the front) if isinstance(command_list, str): command_list = shlex.split(command_list) elif not isinstance(command_list, list): raise LAVABug("commands to run_cmd need to be a list or a string") command_list = [str(s) for s in command_list] # Build the error message log_error_msg = "Unable to run 'nice' '%s'" % "' '".join(command_list) if error_msg is None: error_msg = log_error_msg # Start the subprocess self.logger.debug("Calling: 'nice' '%s'", "' '".join(command_list)) start = time.time() cmd_logger = CommandLogger(self.logger) ret = None try: proc = pexpect.spawn( "nice", command_list, cwd=cwd, encoding="utf-8", codec_errors="replace", logfile=cmd_logger, timeout=self.timeout.duration, searchwindowsize=10, ) proc.expect(pexpect.EOF) # wait for the process and record the return value ret = proc.wait() except pexpect.TIMEOUT: self.logger.error("Timed out after %s seconds", int(time.time() - start)) proc.terminate() proc.wait() except pexpect.ExceptionPexpect as exc: self.logger.error("Unable to run: %s", exc) cmd_logger.flush(force=True) if ret is not None: self.logger.debug( "Returned %d in %s seconds", ret, int(time.time() - start) ) # Check the return value if ret != 0 and not allow_fail: self.logger.error("Unable to run 'nice' '%s'", command_list) raise self.command_exception(error_msg)
def run(self, connection, max_end_time):
    """Push the unpacked overlay to the device via adb and open up permissions."""
    connection = super().run(connection, max_end_time)
    if not connection:
        raise LAVABug("Cannot transfer overlay, no connection available.")
    overlay_file = self.get_namespace_data(action="compress-overlay", label="output", key="file")
    if not overlay_file:
        raise JobError("No overlay file identified for the transfer.")
    serial = self.job.device["adb_serial_number"]
    staging = self.mkdtemp()
    target_dir = "/data/local"
    untar_file(overlay_file, staging)
    push_src = os.path.join(staging, "data/local/tmp")
    output = self.run_command(["adb", "-s", serial, "push", push_src, target_dir])
    if output and "pushed" not in output.lower():
        raise JobError("Unable to push overlay files with adb: %s" % output)
    chmod_cmd = [
        "adb", "-s", serial, "shell",
        "/system/bin/chmod", "-R", "0777",
        os.path.join(target_dir, "tmp"),
    ]
    output = self.run_command(chmod_cmd)
    # NOTE(review): this mirrors the push check above; chmod output would
    # not normally contain 'pushed' - confirm the intended success marker.
    if output and "pushed" not in output.lower():
        raise JobError("Unable to chmod overlay files with adb: %s" % output)
    return connection
def __set_parameters__(self, data):
    """
    Merge *data* into the action parameters and apply derived settings:
    timeout overrides, retry counts and per-section character delay.

    :param data: mapping of parameters to merge
    :raises LAVABug: if data cannot update a dictionary
    :raises JobError: if both failure_retry and repeat are requested
    """
    try:
        self.__parameters__.update(data)
    except (ValueError, TypeError) as exc:
        # BUG FIX: dict.update raises TypeError for non-mapping input and
        # ValueError for malformed key/value sequences - catch both and
        # chain the original cause.
        raise LAVABug("Action parameters need to be a dictionary") from exc
    # Override the duration if needed
    if "timeout" in self.parameters:
        # preserve existing overrides
        if self.timeout.duration == Timeout.default_duration():
            self.timeout.duration = Timeout.parse(
                self.parameters["timeout"])
    if "connection_timeout" in self.parameters:
        self.connection_timeout.duration = Timeout.parse(
            self.parameters["connection_timeout"])
    # only unit tests should have actions without a pointer to the job.
    if "failure_retry" in self.parameters and "repeat" in self.parameters:
        raise JobError(
            "Unable to use repeat and failure_retry, use a repeat block")
    if "failure_retry" in self.parameters:
        self.max_retries = self.parameters["failure_retry"]
    if "repeat" in self.parameters:
        self.max_retries = self.parameters["repeat"]
    if self.job:
        if self.job.device:
            if "character_delays" in self.job.device:
                self.character_delay = self.job.device[
                    "character_delays"].get(self.section, 0)
def run(self, connection, max_end_time):
    """Interrupt the bootloader when required, then record that the prompt was reached."""
    if not connection:
        raise LAVABug("%s started without a connection already in use" % self.name)
    connection = super().run(connection, max_end_time)
    if not self.needs_interrupt:
        # No interrupt needed; just wait for the bootloader prompt itself.
        self.logger.info(
            "Not interrupting bootloader, waiting for bootloader prompt")
        connection.prompt_str = [self.bootloader_prompt]
        self.wait(connection)
    else:
        connection.prompt_str = [self.interrupt_prompt]
        self.wait(connection)
        # Send either control characters or the interrupt character,
        # with or without a trailing newline.
        if self.interrupt_control_chars:
            for ctrl in self.interrupt_control_chars:
                connection.sendcontrol(ctrl)
        elif self.interrupt_newline:
            connection.sendline(self.interrupt_char)
        else:
            connection.send(self.interrupt_char)
    self.set_namespace_data(
        action="interrupt",
        label="interrupt",
        key="at_bootloader_prompt",
        value=True,
    )
    return connection
def run(self, connection, max_end_time):
    """Fetch and unpack the overlay on the target using the configured commands."""
    connection = super().run(connection, max_end_time)
    if not connection:
        raise LAVABug("Cannot transfer overlay, no connection available.")
    overlay_full_path = self.get_namespace_data(action="compress-overlay", label="output", key="file")
    if not overlay_full_path:
        raise JobError("No overlay file identified for the transfer.")
    if not overlay_full_path.startswith(DISPATCHER_DOWNLOAD_DIR):
        raise ConfigurationError(
            "overlay should already be in DISPATCHER_DOWNLOAD_DIR")
    # Path relative to the dispatcher download dir, and the bare filename.
    rel_path = overlay_full_path[len(DISPATCHER_DOWNLOAD_DIR) + 1:]
    overlay = os.path.basename(rel_path)
    # Remove any stale copy, download a fresh one, then unpack it.
    connection.sendline("rm %s" % overlay)
    connection.wait()
    dl_cmd = self.parameters["transfer_overlay"]["download_command"]
    ip_addr = dispatcher_ip(self.job.parameters["dispatcher"], "http")
    connection.sendline("%s http://%s/tmp/%s" % (dl_cmd, ip_addr, rel_path))
    connection.wait()
    unpack_cmd = self.parameters["transfer_overlay"]["unpack_command"]
    connection.sendline(unpack_cmd + " " + overlay)
    connection.wait()
    return connection
def select(cls, device, parameters):
    """Return the highest-priority test strategy subclass accepting device/parameters."""
    willing = []
    rejections = {}
    for candidate in cls.__subclasses__():
        verdict = candidate.accepts(device, parameters)
        if not isinstance(verdict, tuple):
            raise LAVABug(
                "class %s accept function did not return a tuple" % candidate.__name__)
        if verdict[0]:
            willing.append(candidate)
        else:
            label = candidate.name if hasattr(candidate, "name") else candidate.__name__
            rejections[label] = verdict[1]
    if not willing:
        # Collate every rejection reason into a single error report.
        reasons = "".join("%s: %s\n" % (name, why) for name, why in rejections.items())
        raise JobError(
            "None of the test strategies accepted your test parameters, reasons given:\n%s"
            % reasons)
    willing.sort(key=lambda x: x.priority, reverse=True)
    return willing[0]
def run(self, connection, max_end_time):
    """Push the decompressed overlay to the device with adb and chmod it."""
    connection = super().run(connection, max_end_time)
    if not connection:
        raise LAVABug("Cannot transfer overlay, no connection available.")
    overlay_file = self.get_namespace_data(action='compress-overlay', label='output', key='file')
    if not overlay_file:
        raise JobError("No overlay file identified for the transfer.")
    serial = self.job.device['adb_serial_number']
    staging = self.mkdtemp()
    target_dir = '/data/local'
    untar_file(overlay_file, staging)
    source_dir = os.path.join(staging, 'data/local/tmp')
    output = self.run_command(['adb', '-s', serial, 'push', source_dir, target_dir])
    if output and 'pushed' not in output:
        raise JobError("Unable to push overlay files with adb: %s" % output)
    output = self.run_command([
        'adb', '-s', serial, 'shell',
        '/system/bin/chmod', '-R', '0777',
        os.path.join(target_dir, 'tmp')
    ])
    if output and 'pushed' not in output:
        raise JobError("Unable to chmod overlay files with adb: %s" % output)
    return connection
def run(self, connection, max_end_time):
    """Fake action: pause briefly, then record a passed result."""
    if connection:
        raise LAVABug("Fake action not meant to have a real connection")
    # Simulate some work before reporting success.
    time.sleep(5)
    self.results = dict(status="passed")
    return connection
def validate(self):
    """Check test_name and the vcs binary, then register this action's uuid."""
    if "test_name" not in self.parameters:
        self.errors = "Unable to determine test_name"
        return
    # Inline repos carry their definition in the job; others need a vcs tool.
    if not isinstance(self, InlineRepoAction):
        if self.vcs is None:
            raise LAVABug(
                "RepoAction validate called super without setting the vcs"
            )
        if not os.path.exists(self.vcs.binary):
            self.errors = "%s is not installed on the dispatcher." % self.vcs.binary
    super().validate()
    # FIXME: unused
    # list of levels involved in the repo actions for this overlay
    uuid_list = self.get_namespace_data(
        action="repo-action", label="repo-action", key="uuid-list"
    )
    if not uuid_list:
        uuid_list = [self.uuid]
    elif self.uuid not in uuid_list:
        uuid_list.append(self.uuid)
    self.set_namespace_data(
        action="repo-action", label="repo-action", key="uuid-list", value=uuid_list
    )
def dispatcher_ip(dispatcher_config, protocol=None):
    """
    Retrieves the IP address of the interface associated
    with the current default gateway.

    Resolution order: explicit per-protocol config key, then the generic
    'dispatcher_ip' key, then the address of the default-gateway interface.

    :param protocol: 'http', 'tftp' or 'nfs'
    :raises LAVABug: if protocol is not one of VALID_DISPATCHER_IP_PROTOCOLS
    :raises InfrastructureError: if no default gateway can be found
    """
    if protocol:
        if protocol not in VALID_DISPATCHER_IP_PROTOCOLS:
            raise LAVABug("protocol should be one of %s" % VALID_DISPATCHER_IP_PROTOCOLS)
        # Prefer an explicit per-protocol override from the config, if any.
        with contextlib.suppress(KeyError, TypeError):
            return dispatcher_config["dispatcher_%s_ip" % protocol]
    # Fall back to the generic configured address (TypeError is suppressed
    # so a None/odd dispatcher_config falls through to auto-detection).
    with contextlib.suppress(KeyError, TypeError):
        return dispatcher_config["dispatcher_ip"]
    gateways = netifaces.gateways()
    if "default" not in gateways:
        raise InfrastructureError(
            "Unable to find dispatcher 'default' gateway")
    iface = gateways["default"][netifaces.AF_INET][1]
    iface_addr = None
    try:
        addr = netifaces.ifaddresses(iface)
        iface_addr = addr[netifaces.AF_INET][0]["addr"]
    except KeyError:
        # TODO: This only handles first alias interface can be extended
        # to review all alias interfaces.
        addr = netifaces.ifaddresses(iface + ":0")
        iface_addr = addr[netifaces.AF_INET][0]["addr"]
    return iface_addr
def run(self, connection, max_end_time):
    """Build the guest image containing the overlay; record its path and blkid UUID."""
    overlay_file = self.get_namespace_data(action="compress-overlay", label="output", key="file")
    if not overlay_file:
        raise LAVABug("Unable to find the overlay")
    self.logger.debug("Overlay: %s", overlay_file)
    guest_file = os.path.join(self.mkdtemp(), self.guest_filename)
    self.set_namespace_data(action=self.name, label="guest", key="filename", value=guest_file)
    # Guest size comes from the device's deploy/image/guest parameters.
    guest_params = self.job.device["actions"]["deploy"]["methods"]["image"]["parameters"]["guest"]
    blkid = prepare_guestfs(guest_file, overlay_file, guest_params["size"])
    self.results = {"success": blkid}
    self.set_namespace_data(action=self.name, label="guest", key="UUID", value=blkid)
    return connection
def run(self, connection, max_end_time):
    """
    Download the provided test definition file into tmpdir.

    :raises LAVABug: if the target directory already contains files
                     (a duplicate Action already untarred there)
    :raises JobError: if the test definition YAML cannot be opened
    """
    super().run(connection, max_end_time)
    runner_path = self.get_namespace_data(action="uuid", label="overlay_path", key=self.parameters["test_name"])
    fname = self.get_namespace_data(action="download-action", label=self.action_key, key="file")
    self.logger.debug("Runner path : %s", runner_path)
    # BUG FIX: the original raised when the directory was EMPTY
    # (os.listdir(...) == []), contradicting its own message. A duplicate
    # Action leaves the directory populated, so raise when it is non-empty.
    if os.path.exists(runner_path) and os.listdir(runner_path) != []:
        raise LAVABug(
            "Directory already exists and is not empty - duplicate Action?"
        )
    self.logger.info("Untar tests from file %s to directory %s", fname, runner_path)
    untar_file(fname, runner_path)
    # now read the YAML to create a testdef dict to retrieve metadata
    yaml_file = os.path.join(runner_path, self.parameters["path"])
    self.logger.debug("Tests stored (tmp) in %s", yaml_file)
    try:
        with open(yaml_file, "r") as test_file:
            testdef = yaml_safe_load(test_file)
    except IOError as exc:
        # chain the cause so the original OS error stays visible
        raise JobError("Unable to open test definition '%s': %s" % (self.parameters["path"], str(exc))) from exc
    # set testdef metadata in base class
    self.store_testdef(testdef, "url")
    return connection
def _run(self):
    """
    Run the pipeline under the run() wrapper that will catch
    the exceptions

    :raises LAVABug: if a protocol set_up fails unexpectedly
    :raises JobError: if a protocol reports errors after set_up
    """
    self.started = True
    # Setup the protocols
    for protocol in self.protocols:
        try:
            protocol.set_up()
        except LAVAError:
            raise
        except Exception as exc:
            self.logger.error("Unable to setup the protocols")
            self.logger.exception(traceback.format_exc())
            # BUG FIX: chain the original exception so its traceback
            # is preserved alongside the LAVABug.
            raise LAVABug(exc) from exc
        if not protocol.valid:
            msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
            self.logger.exception(msg)
            raise JobError(msg)
    # Run the pipeline and wait for exceptions
    with self.timeout(None, None) as max_end_time:
        self.pipeline.run_actions(self.connection, max_end_time)
def select(cls, device, parameters):
    """
    Return the highest-priority boot strategy that accepts this device
    and these parameters.

    :raises LAVABug: if a subclass accepts() does not return a tuple
    :raises JobError: if no strategy is willing, with all rejection reasons
    """
    cls.boot_check(device, parameters)
    candidates = cls.__subclasses__()  # pylint: disable=no-member
    replies = {}
    willing = []
    for c in candidates:
        res = c.accepts(device, parameters)
        if not isinstance(res, tuple):
            raise LAVABug(
                'class %s accept function did not return a tuple' % c.__name__)
        if res[0]:
            willing.append(c)
        else:
            class_name = c.name if hasattr(c, 'name') else c.__name__
            replies[class_name] = res[1]
    # idiom fix: truthiness test instead of len(...) == 0
    if not willing:
        replies_string = ""
        for name, reply in replies.items():
            replies_string += ("%s: %s\n" % (name, reply))
        raise JobError(
            "None of the boot strategies accepted your boot parameters, reasons given:\n%s"
            % replies_string)
    willing.sort(key=lambda x: x.priority, reverse=True)
    return willing[0]
def validate(self):
    """
    Public wrapper for the pipeline validation.
    Send a "fail" results if needed.

    :raises LAVABug: if validation fails with a non-LAVA exception
    """
    label = "lava-dispatcher, installed at version: %s" % debian_package_version(
        pkg="lava-dispatcher")
    self.logger.info(label)
    self.logger.info("start: 0 validate")
    start = time.time()
    success = False
    try:
        self._validate()
        success = True
    except LAVAError:
        raise
    except Exception as exc:
        # provide useful info on command line, e.g. failed unit tests.
        self.logger.exception(traceback.format_exc())
        # BUG FIX: chain the original exception so its traceback is
        # preserved alongside the LAVABug.
        raise LAVABug(exc) from exc
    finally:
        self.logger.info("validate duration: %.02f", time.time() - start)
        self.logger.results({
            "definition": "lava",
            "case": "validate",
            "result": "pass" if success else "fail",
        })
        if not success:
            self.cleanup(connection=None)
def run(self, connection, max_end_time):
    """
    Extract the downloaded ramdisk cpio archive into a temporary
    directory, optionally stripping a u-boot header and decompressing
    first, and publish the extracted paths in the namespace data.

    :param connection: current Connection, returned unchanged
    :param max_end_time: remaining time before block timeout
    :return: the received Connection
    """
    if not self.parameters.get('ramdisk'):  # idempotency
        return connection
    ramdisk = self.get_namespace_data(action='download-action', label='ramdisk', key='file')
    if self.skip:
        # Extraction disabled: publish the original (untouched) ramdisk
        # under the tftp suffix path instead.
        self.logger.info("Not extracting ramdisk.")
        suffix = self.get_namespace_data(action='tftp-deploy', label='tftp', key='suffix')
        filename = os.path.join(suffix, "ramdisk", os.path.basename(ramdisk))
        # declare the original ramdisk as the name to be used later.
        self.set_namespace_data(action='compress-ramdisk', label='file', key='ramdisk', value=filename)
        return connection
    connection = super().run(connection, max_end_time)
    ramdisk_dir = self.mkdtemp()
    extracted_ramdisk = os.path.join(ramdisk_dir, 'ramdisk')
    os.mkdir(extracted_ramdisk, 0o755)
    compression = self.parameters['ramdisk'].get('compression')
    suffix = ""
    if compression:
        suffix = ".%s" % compression
    ramdisk_compressed_data = os.path.join(ramdisk_dir, RAMDISK_FNAME + suffix)
    if self.parameters['ramdisk'].get('header') == 'u-boot':
        # Strip the u-boot legacy image header by skipping the first
        # UBOOT_DEFAULT_HEADER_LENGTH bytes with dd.
        cmd = ('dd if=%s of=%s ibs=%s skip=1' % (ramdisk, ramdisk_compressed_data,
                                                 UBOOT_DEFAULT_HEADER_LENGTH)).split(' ')
        try:
            self.run_command(cmd)
        except Exception:
            # NOTE(review): broad except - any failure here is treated as
            # a dispatcher bug; confirm narrower handling is not needed.
            raise LAVABug('Unable to remove uboot header: %s' % ramdisk)
    else:
        # give the file a predictable name
        shutil.move(ramdisk, ramdisk_compressed_data)
    ramdisk_data = decompress_file(ramdisk_compressed_data, compression)
    # cpio extracts relative to the current directory.
    with chdir(extracted_ramdisk):
        cmd = ('cpio -iud -F %s' % ramdisk_data).split(' ')
        if not self.run_command(cmd):
            raise JobError(
                'Unable to extract cpio archive: %s - missing header definition (i.e. u-boot)?' % ramdisk_data)
    # tell other actions where the unpacked ramdisk can be found
    self.set_namespace_data(action=self.name, label='extracted_ramdisk', key='directory', value=extracted_ramdisk)
    self.set_namespace_data(action=self.name, label='ramdisk_file', key='file', value=ramdisk_data)
    return connection
def run(self, connection, max_end_time):
    """
    Write a lava-test-runner.conf file for every test stage.

    :param connection: Connection object, if any.
    :param max_end_time: remaining time before block timeout.
    :return: the received Connection.
    """
    location = self.get_namespace_data(
        action="test", label="shared", key="location"
    )
    lava_test_results_dir = self.get_namespace_data(
        action="test", label="results", key="lava_test_results_dir"
    )
    if not location:
        raise LAVABug("Missing lava overlay location")
    if not os.path.exists(location):
        raise LAVABug("Unable to find overlay location")
    self.logger.info("Loading test definitions")
    # overlay_path is the location of the files before boot
    overlay_base = os.path.abspath("%s/%s" % (location, lava_test_results_dir))
    self.set_namespace_data(
        action="test",
        label="test-definition",
        key="overlay_dir",
        value=overlay_base,
    )
    connection = super().run(connection, max_end_time)
    self.logger.info("Creating lava-test-runner.conf files")
    for stage in range(self.stages):
        stage_path = "%s/%s" % (overlay_base, stage)
        self.logger.debug(
            "Using lava-test-runner path: %s for stage %d", stage_path, stage
        )
        conf_name = "%s/%s/lava-test-runner.conf" % (overlay_base, stage)
        with open(conf_name, "a") as runner_conf:
            # Append each RepoAction belonging to this stage.
            for handler in self.pipeline.actions:
                if isinstance(handler, RepoAction) and handler.stage == stage:
                    self.logger.debug("- %s", handler.parameters["test_name"])
                    runner_conf.write(handler.runner)
    return connection
def run(self, connection, max_end_time):
    """
    Send a newline on an existing connection to catch the first prompt.

    :raises LAVABug: if no connection is available
    """
    connection = super().run(connection, max_end_time)
    if not connection:
        # BUG FIX: the format string previously had no argument, so the
        # message contained a literal '%s'.
        raise LAVABug("%s needs a Connection" % self.name)
    connection.check_char = "\n"
    connection.sendline("\n")  # to catch the first prompt (remove for PDU?)
    return connection