def run(self, connection, max_end_time, args=None):
    """Replace the recovery image on the Versatile Express USB mass storage device.

    Erases the current contents of the mounted USB MSD and copies the
    extracted recovery image into it.

    :param connection: current Connection object, passed through unchanged
    :param max_end_time: job-wide deadline, forwarded to super().run()
    :param args: optional extra arguments for super().run()
    :raises InfrastructureError: if the mount point or the recovery image
        source directory cannot be found on disk
    :raises JobError: if erasing or copying the recovery image fails
    :return: the connection
    """
    connection = super(DeployVExpressRecoveryImage, self).run(connection, max_end_time, args)
    mount_point = self.get_namespace_data(
        action='mount-vexpress-usbmsd', label='vexpress-fw', key='mount-point')
    # os.path.realpath() does not raise OSError for a missing path, so the
    # original try/except was a no-op check; test existence explicitly.
    if not os.path.exists(mount_point):
        raise InfrastructureError("Unable to locate mount point: %s" % mount_point)
    src_dir = self.get_namespace_data(
        action='extract-vexpress-recovery-image', label='file', key='recovery_image')
    if not os.path.exists(src_dir):
        raise InfrastructureError("Unable to locate recovery image source directory: %s" % src_dir)
    self.logger.debug("Removing existing recovery image from Versatile Express mass storage device..")
    try:
        remove_directory_contents(mount_point)
    except Exception:  # narrowed from bare except: - do not swallow SystemExit/KeyboardInterrupt
        raise JobError("Failed to erase old recovery image")
    self.logger.debug("Transferring new recovery image to Versatile Express mass storage device..")
    try:
        copy_directory_contents(src_dir, mount_point)
    except Exception:  # narrowed from bare except:
        raise JobError("Failed to deploy recovery image to %s" % mount_point)
    return connection
def run(self, connection, args=None):
    """Unpack the compressed LAVA overlay into the LXC container rootfs.

    Skips silently when no overlay was produced. Also copies the special
    'lava-test-runner' script into the container's bin directory.

    :param connection: current Connection object
    :param args: optional extra arguments for super().run()
    :raises JobError: if the rootfs is missing, untarring fails, or the
        lava-test-runner copy fails
    :return: the connection
    """
    connection = super(ApplyLxcOverlay, self).run(connection, args)
    overlay_file = self.data['compress-overlay'].get('output')
    if overlay_file is None:
        self.logger.debug("skipped %s", self.name)
        return connection
    lxc_path = os.path.join(LXC_PATH, self.get_common_data('lxc', 'name'), "rootfs")
    if not os.path.exists(lxc_path):
        raise JobError("Lxc container rootfs not found")
    tar_cmd = ['tar', '--warning', 'no-timestamp', '-C', lxc_path, '-xaf', overlay_file]
    command_output = self.run_command(tar_cmd)
    # was 'command_output is not '''; identity comparison against a string
    # literal is implementation-dependent - use equality
    if command_output and command_output != '':
        raise JobError("Unable to untar overlay: %s" % command_output)  # FIXME: JobError needs a unit test
    # FIXME: Avoid copying this special 'lava-test-runner' which does not
    #        have 'sync' in cleanup. This should be handled during the
    #        creation of the overlay instead. Make a special case to copy
    #        lxc specific scripts, with distro specific versions.
    fname = os.path.join(self.lava_test_dir, 'lava-test-runner')
    output_file = '%s/bin/%s' % (lxc_path, os.path.basename(fname))
    self.logger.debug("Copying %s", output_file)
    try:
        shutil.copy(fname, output_file)
    except IOError:
        raise JobError("Unable to copy: %s" % output_file)
    return connection
def run(self, connection, args=None):
    """Unpack the downloaded root filesystem into a scratch directory.

    Supports plain tarballs (self.use_tarfile) and LZMA-compressed
    tarballs (self.use_lzma). Records the extraction directory in the
    common data under self.file_key.

    :param connection: current Connection object
    :param args: optional extra arguments for super().run()
    :raises JobError: if the archive cannot be unpacked
    :raises RuntimeError: if neither decompression method applies
    :return: the connection
    """
    if not self.parameters.get(self.param_key, None):  # idempotency
        return connection
    connection = super(ExtractRootfs, self).run(connection, args)
    root = self.data['download_action'][self.param_key]['file']
    root_dir = mkdtemp(basedir=DISPATCHER_DOWNLOAD_DIR)
    if self.use_tarfile:
        try:
            # context manager ensures the tarball is closed even when
            # extractall() raises (the original leaked the handle on error)
            with tarfile.open(root) as tar:
                tar.extractall(root_dir)
        except tarfile.TarError as exc:
            raise JobError("Unable to unpack %s: '%s' - %s" % (self.param_key, os.path.basename(root), exc))
    elif self.use_lzma:
        with contextlib.closing(lzma.LZMAFile(root)) as xz:
            with tarfile.open(fileobj=xz) as tarball:
                try:
                    tarball.extractall(root_dir)
                except tarfile.TarError as exc:
                    raise JobError(
                        "Unable to unpack %s: '%s' - %s" % (self.param_key, os.path.basename(root), exc))
    else:
        raise RuntimeError("Unable to decompress %s: '%s'" % (self.param_key, os.path.basename(root)))
    self.set_common_data('file', self.file_key, root_dir)
    # lazy %-args instead of eager string formatting in the debug call
    self.logger.debug("Extracted %s to %s", self.file_key, root_dir)
    return connection
def run(self, connection, args=None):
    """Determine the byte offset of the test partition inside the image.

    Runs parted on the downloaded image, matches the partition number
    configured for the device, and stores the offset in self.data.
    Idempotent: returns immediately if the offset is already known.
    """
    if 'download_action' not in self.data:
        raise RuntimeError("Missing download action")
    download = self.data['download_action'][self.key]
    if 'offset' in download:
        # idempotency
        return connection
    connection = super(OffsetAction, self).run(connection, args)
    image = download['file']
    if not os.path.exists(image):
        raise JobError("Not able to mount %s: file does not exist" % image)
    parted_output = self.run_command(
        ['/sbin/parted', image, '-m', '-s', 'unit', 'b', 'print'])
    if not parted_output:
        raise JobError("Unable to identify offset")
    deploy_params = self.job.device['actions']['deploy']['methods']['image']['parameters']
    part_number = deploy_params[
        self.parameters['deployment_data']['lava_test_results_part_attr']]
    # parted -m prints machine-readable lines like '2:1048576B:...'
    matcher = re.compile('%d:([0-9]+)B:' % part_number)
    for row in parted_output.splitlines():
        hit = matcher.match(row)
        if hit:
            download['offset'] = hit.group(1)
    if 'offset' not in download:
        raise JobError(  # FIXME: JobError needs a unit test
            "Unable to determine offset for %s" % image)
    return connection
def run(self, connection, max_end_time, args=None):
    """Determine the byte offset of the test partition inside the image.

    Runs parted on the downloaded image, matches the partition number
    configured for the device, and records the offset in the namespace
    data. Idempotent: returns immediately if the offset is already set.
    """
    if self.get_namespace_data(action='download-action', label=self.key, key='offset'):
        # idempotency
        return connection
    connection = super(OffsetAction, self).run(connection, max_end_time, args)
    image = self.get_namespace_data(action='download-action', label=self.key, key='file')
    if not os.path.exists(image):
        raise JobError("Not able to mount %s: file does not exist" % image)
    parted_output = self.run_command(
        ['/sbin/parted', image, '-m', '-s', 'unit', 'b', 'print'])
    if not parted_output:
        raise JobError("Unable to identify offset")
    deploy_params = self.job.device['actions']['deploy']['methods']['image']['parameters']
    part_number = deploy_params[
        self.parameters['deployment_data']['lava_test_results_part_attr']]
    # parted -m prints machine-readable lines like '2:1048576B:...'
    matcher = re.compile('%d:([0-9]+)B:' % part_number)
    for row in parted_output.splitlines():
        hit = matcher.match(row)
        if hit:
            self.set_namespace_data(
                action=self.name, label=self.key, key='offset', value=hit.group(1))
    if not self.get_namespace_data(action=self.name, label=self.key, key='offset'):
        raise JobError(  # FIXME: JobError needs a unit test
            "Unable to determine offset for %s" % image)
    return connection
def run(self, connection, args=None):
    """Determine the byte offset of the test partition inside the image.

    Runs parted on the downloaded image and stores the matched offset in
    self.data['download_action']. Idempotent when the offset exists.
    """
    if 'download_action' not in self.data:
        raise RuntimeError("Missing download action")
    download = self.data['download_action']
    if 'offset' in download:
        # idempotency
        return connection
    image = download['file']
    if not os.path.exists(image):
        raise RuntimeError("Not able to mount %s: file does not exist" % image)
    parted_output = self._run_command(
        ['/sbin/parted', image, '-m', '-s', 'unit', 'b', 'print'])
    if not parted_output:
        raise JobError("Unable to identify offset")
    # FIXME: identify the partitions from the image, not from the device configuration
    part_number = self.job.device.parameters[
        self.parameters['deployment_data']['lava_test_results_part_attr']]
    matcher = re.compile('%d:([0-9]+)B:' % part_number)
    for row in parted_output.splitlines():
        hit = matcher.match(row)
        if hit:
            download['offset'] = hit.group(1)
    # more reliable than checking if offset exists as offset can be zero
    if 'offset' not in download:
        raise JobError(  # FIXME: JobError needs a unit test
            "Unable to determine offset for %s" % image)
    return connection
def run(self, connection, max_end_time, args=None):
    """Reboot the device via 'fastboot reboot' run inside the LXC container.

    :param connection: current Connection object
    :param max_end_time: job-wide deadline, forwarded to super().run()
    :param args: optional extra arguments for super().run()
    :raises JobError: if no LXC is available or the reboot fails
    :return: the connection
    """
    connection = super(FastbootRebootAction, self).run(connection, max_end_time, args)
    # this is the device namespace - the lxc namespace is not accessible
    # next(..., None) avoids the IndexError the original [0] raised when
    # the job has no LXC protocol, which made the 'if protocol' guard dead.
    protocol = next(
        (protocol for protocol in self.job.protocols
         if protocol.name == LxcProtocol.name), None)
    lxc_name = protocol.lxc_name if protocol else None
    if not lxc_name:
        raise JobError("Unable to use fastboot")
    self.logger.debug("[%s] lxc name: %s", self.parameters['namespace'], lxc_name)
    serial_number = self.job.device['fastboot_serial_number']
    fastboot_opts = self.job.device['fastboot_options']
    fastboot_cmd = [
        'lxc-attach', '-n', lxc_name, '--', 'fastboot', '-s', serial_number,
        'reboot'
    ] + fastboot_opts
    command_output = self.run_command(fastboot_cmd)
    if command_output and 'rebooting' not in command_output:
        raise JobError("Unable to fastboot reboot: %s" % command_output)
    if command_output:
        # guard against IndexError when fastboot prints no 'finished' line
        finished = [status.strip() for status in command_output.split('\n')
                    if 'finished' in status]
        if finished:
            self.results = {'status': finished[0]}
    self.set_namespace_data(action='shared', label='shared', key='connection', value=connection)
    return connection
def run(self, connection, args=None):
    """Assemble and launch the QEMU command line for an ISO installation.

    Builds the command from the prepared sub_command, the downloaded ISO
    image argument and the preseed append arguments, starts it as a
    ShellCommand and wraps it in a ShellSession which is then passed to
    super().run().

    :raises JobError: if the preseed filename is unknown or the shell
        command exits with a non-zero status
    :return: the ShellSession wrapping the running command
    """
    # substitutions
    substitutions = {'{emptyimage}': self.get_common_data('prepare-empty-image', 'output')}
    sub_command = self.get_common_data('prepare-qemu-commands', 'sub_command')
    sub_command = substitute(sub_command, substitutions)
    command_line = ' '.join(sub_command)
    commands = []
    # get the download args in run()
    image_arg = self.data['download_action']['iso'].get('image_arg', None)
    action_arg = self.data['download_action']['iso'].get('file', None)
    substitutions["{%s}" % 'iso'] = action_arg
    commands.append(image_arg)
    # NOTE(review): no explicit space is inserted between command_line and
    # the joined image argument here - confirm image_arg carries its own
    # leading separator, otherwise the two tokens concatenate.
    command_line += ' '.join(substitute(commands, substitutions))
    preseed_file = self.get_common_data('file', 'preseed')
    if not preseed_file:
        raise JobError("Unable to identify downloaded preseed filename.")
    substitutions = {'{preseed}': preseed_file}
    append_args = self.get_common_data('prepare-qemu-commands', 'append')
    append_args = substitute([append_args], substitutions)
    command_line += ' '.join(append_args)
    self.logger.info(command_line)
    shell = ShellCommand(command_line, self.timeout, logger=self.logger)
    if shell.exitstatus:
        raise JobError("%s command exited %d: %s" % (sub_command[0], shell.exitstatus, shell.readlines()))
    self.logger.debug("started a shell command")
    shell_connection = ShellSession(self.job, shell)
    shell_connection.prompt_str = self.get_common_data('prepare-qemu-commands', 'prompts')
    shell_connection = super(IsoCommandLine, self).run(shell_connection, args)
    return shell_connection
def run(self, connection, args=None):
    """Download the provided test definition file into tmpdir.

    Creates the destination directory if needed, then validates the test
    name from the testdef metadata (no shell metacharacters, ASCII only).

    :raises JobError: if the directory cannot be created or the test name
        is invalid
    :return: the connection
    """
    super(UrlRepoAction, self).run(connection, args)
    runner_path = os.path.join(self.data['test-definition']['overlay_dir'], 'tests', self.parameters['test_name'])
    try:
        if not os.path.isdir(runner_path):
            self.logger.debug("Creating directory to download the url file into.")
            os.makedirs(runner_path)
        # we will not use 'testdef_file' here, we can get this info from URL
        # testdef_file = download_image(testdef_repo, context, urldir)
        # FIXME: this handler uses DownloaderAction.run()
    except OSError as exc:
        raise JobError('Unable to get test definition from url\n' + str(exc))
    # log success only on the success path; the original logged this from a
    # 'finally' block, i.e. even when the directory could not be created
    self.logger.info("Downloaded test definition file to %s.", runner_path)
    # reject shell metacharacters anywhere in the test name
    i = []
    for elem in " $&()\"'<>/\\|;`":
        i.extend(indices(self.testdef["metadata"]["name"], elem))
    if i:
        msg = "Test name contains invalid symbol(s) at position(s): %s" % ", ".join(str(x) for x in i)
        raise JobError(msg)
    try:
        self.testdef["metadata"]["name"].encode()
    except UnicodeEncodeError as encode:
        msg = "Test name contains non-ascii symbols: %s" % encode
        raise JobError(msg)
    return connection
def run(self):
    """
    Top level routine for the entire life of the Job, using the job level timeout.
    Python only supports one alarm on SIGALRM - any Action without a connection
    will have a default timeout which will use SIGALRM. So the overarching Job timeout
    can only stop processing actions if the job wide timeout is exceeded.

    :return: 0 on success, otherwise the number of pipeline errors (1 on
        cancellation during protocol set-up)
    :raises JobError: if protocol set-up fails or a protocol is invalid
    """
    # bring up each protocol before any actions run
    for protocol in self.protocols:
        try:
            protocol.set_up()
        except KeyboardInterrupt:
            # user cancellation: clean up any partial state, report as one error
            self.pipeline.cleanup_actions(connection=None, message="Canceled")
            self.logger.info("Canceled")
            return 1  # equivalent to len(self.pipeline.errors)
        except (JobError, RuntimeError, KeyError, TypeError) as exc:
            # normalise setup failures into a JobError
            raise JobError(exc)
        if not protocol.valid:
            msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
            self.logger.exception(msg)
            raise JobError(msg)
    # execute the pipeline itself
    self.pipeline.run_actions(self.connection)
    if self.pipeline.errors:
        self.logger.exception(self.pipeline.errors)
        return len(self.pipeline.errors)
    return 0
def poll(self, message, timeout=None):
    """
    Blocking, synchronous polling of the Coordinator on the configured port.
    Single send operations greater than 0xFFFF are rejected to prevent truncation.

    :param msg_str: The message to send to the Coordinator, as a JSON string.
    :param timeout: optional duration in seconds; defaults to
        self.poll_timeout.duration, floats are truncated to int
    :raises RuntimeError: if timeout is neither int nor float
    :raises JobError: if the message is too long or the poll times out
    :return: a JSON string of the response to the poll
    """
    if not timeout:
        timeout = self.poll_timeout.duration
    if isinstance(timeout, float):
        timeout = int(timeout)
    elif not isinstance(timeout, int):
        raise RuntimeError("Invalid timeout duration type: %s %s" % (type(timeout), timeout))
    msg_len = len(message)
    if msg_len > 0xFFFE:
        raise JobError("Message was too long to send!")
    c_iter = 0  # cumulative seconds waited, advanced by poll_delay per loop
    response = None
    delay = self.settings['poll_delay']
    self.logger.debug(
        "Connecting to LAVA Coordinator on %s:%s timeout=%d seconds.",
        self.settings['coordinator_hostname'], self.settings['port'], timeout)
    while True:
        c_iter += self.settings['poll_delay']
        # back off by 2 seconds on each failed connection attempt
        if self._connect(delay):
            delay = self.settings['poll_delay']
        else:
            delay += 2
            continue
        # log progress roughly every 10 * poll_delay seconds
        if not c_iter % int(10 * self.settings['poll_delay']):
            self.logger.debug(
                "sending message: %s waited %s of %s seconds",
                json.loads(message)['request'], c_iter, timeout)
        # blocking synchronous call
        if not self._send_message(message):
            continue
        self.sock.shutdown(socket.SHUT_WR)
        response = self._recv_message()
        self.sock.close()
        try:
            json_data = json.loads(response)
        except ValueError:
            # non-JSON reply ends the protocol; return the raw response
            self.logger.debug("response starting '%s' was not JSON", response[:42])
            self.finalise_protocol()
            break
        if json_data['response'] != 'wait':
            break
        else:
            # coordinator asked us to wait - sleep then retry
            time.sleep(delay)
        # apply the default timeout to each poll operation.
        if c_iter > timeout:
            self.finalise_protocol()
            raise JobError("protocol %s timed out" % self.name)
    return response
def run(self, connection, max_end_time, args=None):
    """Boot the downloaded boot image via 'fastboot boot' inside the LXC.

    Records the boot result in the namespace data and optionally runs the
    device's pre OS command when no LXC protocol is active.

    :param connection: current Connection object
    :param max_end_time: job-wide deadline, forwarded to super().run()
    :param args: optional extra arguments for super().run()
    :raises JobError: if no boot image is available or fastboot fails
    :raises InfrastructureError: if the pre OS command fails
    :return: the connection
    """
    connection = super(FastbootBootAction, self).run(connection, max_end_time, args)
    # this is the device namespace - the lxc namespace is not accessible
    # next(..., None) avoids the IndexError the original [0] raised when
    # the job has no LXC protocol, which made the 'if protocol' guard dead.
    protocol = next(
        (protocol for protocol in self.job.protocols
         if protocol.name == LxcProtocol.name), None)
    lxc_name = protocol.lxc_name if protocol else None
    if not lxc_name:
        self.errors = "Unable to use fastboot"
        return connection
    self.logger.debug("[%s] lxc name: %s", self.parameters['namespace'], lxc_name)
    serial_number = self.job.device['fastboot_serial_number']
    boot_img = self.get_namespace_data(action='download-action', label='boot', key='file')
    if not boot_img:
        raise JobError("Boot image not found, unable to boot")
    boot_img = os.path.join('/', os.path.basename(boot_img))
    fastboot_cmd = [
        'lxc-attach', '-n', lxc_name, '--', 'fastboot', '-s', serial_number,
        'boot', boot_img
    ] + self.job.device['fastboot_options']
    command_output = self.run_command(fastboot_cmd)
    if command_output and 'booting' not in command_output:
        raise JobError("Unable to boot with fastboot: %s" % command_output)
    if command_output:
        # guard against IndexError when fastboot prints no 'finished' line
        finished = [status.strip() for status in command_output.split('\n')
                    if 'finished' in status]
        if finished:
            self.results = {'status': finished[0]}
    res = 'failed' if self.errors else 'success'
    self.set_namespace_data(action='boot', label='shared', key='boot-result', value=res)
    self.set_namespace_data(action='shared', label='shared', key='connection', value=connection)
    lxc_active = any([
        protocol for protocol in self.job.protocols
        if protocol.name == LxcProtocol.name
    ])
    if self.job.device.pre_os_command and not lxc_active:
        self.logger.info("Running pre OS command.")
        command = self.job.device.pre_os_command
        if not self.run_command(command.split(' '), allow_silent=True):
            raise InfrastructureError("%s failed" % command)
    return connection
def _api_select(self, data):
    """Dispatch an API request received over the protocol.

    :param data: dict carrying the API call; must contain a 'request' key
    :raises TestError: if no data was supplied
    :raises JobError: if the request key is missing or unrecognised
    :return: None
    """
    if not data:
        raise TestError("Protocol called without any data")
    if 'request' not in data:
        raise JobError("Bad API call over protocol - missing request")
    if data['request'] != 'deploy_vlans':
        raise JobError("Unrecognised API call in request.")
    self.deploy_vlans()
    return None
def run(self, connection, args=None):
    """Unpack the LAVA overlay and push it onto a Nexell device over adb.

    Untars the compressed overlay into NEXELL_PATH, then uses adb push to
    transfer the overlay contents and the lava-test-runner script to the
    device.

    :param connection: current Connection object
    :param args: optional extra arguments for super().run()
    :raises JobError: if the Nexell path is missing or untarring fails
    :return: the connection
    """
    connection = super(ApplyNexellOverlay, self).run(connection, args)
    overlay_file = self.data['compress-overlay'].get('output')
    adb_serial_number = self.job.device['adb_serial_number']
    self.logger.debug("SUKER: deploy/apply_overlay.py: " + str(adb_serial_number))
    if overlay_file is None:
        self.logger.debug("skipped %s", self.name)
        self.logger.debug("[SEOJI] skipped %s", self.name)
        # NOTE(review): unlike other overlay actions, this does not return
        # here (return is commented out) and continues with overlay_file
        # set to None - confirm this is intentional.
        #return connection
    nexell_path = os.path.join(NEXELL_PATH)
    if not os.path.exists(nexell_path):
        raise JobError("Nexell path not found")
    tar_cmd = ['tar', '--warning', 'no-timestamp', '-C', nexell_path, '-xaf', overlay_file]
    command_output = self.run_command(tar_cmd)
    # was 'command_output is not '''; identity comparison against a string
    # literal is implementation-dependent - use equality
    if command_output and command_output != '':
        raise JobError("Unable to untar overlay: %s" % command_output)  # FIXME: JobError needs a unit test
    # FIXME: Avoid copying this special 'lava-test-runner' which does not
    #        have 'sync' in cleanup. This should be handled during the
    #        creation of the overlay instead. Make a special case to copy
    #        lxc specific scripts, with distro specific versions.
    fname = os.path.join(self.lava_test_dir, 'lava-test-runner')
    output_file = '%s/bin/%s' % (nexell_path, os.path.basename(fname))
    self.logger.debug("SUKER: deploy/apply_overlay.py output_file: " + str(output_file))
    self.logger.debug("SUKER: deploy/apply_overlay.py nexell_path: " + str(nexell_path))
    lava_test_results_dir = self.data['lava_test_results_dir']
    self.logger.debug(
        "SUKER: deploy/apply_overlay.py var/lib/nexell path: " + str(lava_test_results_dir))
    # adb push
    nexell_real_path = nexell_path + lava_test_results_dir
    adb_cmd = [
        '/opt/android-sdk-linux/platform-tools/adb', '-s', adb_serial_number,
        'push', nexell_real_path, '/'
    ]
    self.logger.debug("SUKER: apply_overlay.py: " + str(adb_cmd))
    command_output = self.run_command(adb_cmd)
    adb_cmd = [
        '/opt/android-sdk-linux/platform-tools/adb', '-s', adb_serial_number,
        'push', fname, lava_test_results_dir + '/bin/'
    ]
    self.logger.debug("SUKER: apply_overlay.py: " + str(adb_cmd))
    command_output = self.run_command(adb_cmd)
    return connection
def run(self, connection, args=None):
    """Add configured host devices to the LXC container and attach to it.

    If a device_path list is configured for the device, waits for USB
    devices to appear and adds each one to the container via lxc-device,
    then opens a connection with lxc-attach and records the boot result.

    :raises JobError: if device_path is not a list or lxc-attach exits
        with a non-zero status
    :return: the connection to the container (or the unchanged connection
        when no LXC was requested)
    """
    lxc_name = self.get_common_data('lxc', 'name')
    if not lxc_name:
        self.logger.debug("No LXC device requested")
        return connection
    if 'device_path' in list(self.job.device.keys()):
        device_path = self.job.device['device_path']
        if not isinstance(device_path, list):
            raise JobError("device_path should be a list")
        if device_path:
            # Wait USB_SHOW_UP_TIMEOUT seconds for usb device to show up
            self.logger.info(
                "[%s] Wait %d seconds for usb device to show up",
                self.name, USB_SHOW_UP_TIMEOUT)
            sleep(USB_SHOW_UP_TIMEOUT)
            for path in device_path:
                path = os.path.realpath(path)
                # a directory means multiple device nodes; add each of them
                if os.path.isdir(path):
                    devices = os.listdir(path)
                else:
                    devices = [path]
                for device in devices:
                    self.logger.debug('adding %s at %s', device, path)
                    device = os.path.join(path, device)
                    lxc_cmd = ['lxc-device', '-n', lxc_name, 'add', device]
                    log = self.run_command(lxc_cmd)
                    self.logger.debug(log)
                    self.logger.debug("%s: devices added from %s", lxc_name, path)
        else:
            self.logger.warning("device_path is None")
    else:
        self.logger.error("No device path defined for this device.")
    cmd = "lxc-attach -n {0}".format(lxc_name)
    self.logger.info("%s Connecting to device using '%s'", self.name, cmd)
    signal.alarm(0)  # clear the timeouts used without connections.
    # ShellCommand executes the connection command
    shell = self.shell_class("%s\n" % cmd, self.timeout, logger=self.logger)
    if shell.exitstatus:
        raise JobError("%s command exited %d: %s" % (cmd, shell.exitstatus, shell.readlines()))
    # ShellSession monitors the pexpect
    connection = self.session_class(self.job, shell)
    connection.connected = True
    connection = super(ConnectLxc, self).run(connection, args)
    connection.prompt_str = self.parameters['prompts']
    self.data['boot-result'] = 'failed' if self.errors else 'success'
    return connection
def _validate(self, simulate):
    """
    Validate the pipeline and raise an exception (that inherit from
    LAVAError) if it fails.
    If simulate is True, then print the pipeline description.

    :param simulate: when True, dump the pipeline description to stdout
        before running validation
    :raises JobError: on cancellation or an invalid protocol
    :raises LAVABug: for unexpected (non-LAVAError) failures
    """
    label = "lava-dispatcher, installed at version: %s" % debian_package_version(split=False)
    self.logger.info(label)
    self.logger.info("start: 0 validate")
    start = time.time()
    # configure each protocol against this device and job first
    for protocol in self.protocols:
        try:
            protocol.configure(self.device, self)
        except KeyboardInterrupt:
            self.logger.info("Canceled")
            raise JobError("Canceled")
        except LAVAError:
            # already a well-formed LAVA error - log and propagate as-is
            self.logger.error("Configuration failed for protocol %s", protocol.name)
            raise
        except Exception as exc:
            # anything else is an internal error, wrapped as LAVABug
            self.logger.error("Configuration failed for protocol %s", protocol.name)
            self.logger.exception(traceback.format_exc())
            raise LAVABug(exc)
        if not protocol.valid:
            msg = "protocol %s has errors: %s" % (protocol.name, protocol.errors)
            self.logger.exception(msg)
            raise JobError(msg)
    if simulate:
        # output the content and then any validation errors (python3 compatible)
        print(yaml.dump(self.describe()))  # pylint: disable=superfluous-parens
    try:
        self.pipeline.validate_actions()
    except KeyboardInterrupt:
        self.logger.info("Canceled")
        raise JobError("Canceled")
    except LAVAError as exc:
        self.logger.error("Invalid job definition")
        self.logger.exception(str(exc))
        # This should be re-raised to end the job
        raise
    except Exception as exc:
        self.logger.error("Validation failed")
        self.logger.exception(traceback.format_exc())
        raise LAVABug(exc)
    finally:
        # always record how long validation took, pass or fail
        self.logger.info("validate duration: %.02f", time.time() - start)
def run(self, connection, max_end_time, args=None):
    """
    iterate through the menu sequence:
    items: select
    prompt: prompt_str
    enter: <str> & Ctrl-M
    escape: Ctrl-[ through pexpect.sendcontrol

    :param menu: list of menus
    :param connection: Connection to use to interact with the menu
    :param logger: Action logger
    :return: connection
    """
    connection = super(SelectorMenuAction, self).run(connection, max_end_time, args)
    if not connection:
        self.logger.error("%s called without a Connection", self.name)
        return connection
    for block in self.items:
        if 'select' in block:
            change_prompt = False
            # ensure the prompt is changed just before sending the action to allow it to be matched.
            if 'wait' in block['select']:
                connection.prompt_str = block['select']['wait']
                change_prompt = True
            if 'items' in block['select']:
                for selector in block['select']['items']:
                    # match the selector against the menu text captured so far
                    menu_text = connection.raw_connection.before
                    action = self.selector.select(menu_text, selector)
                    if action:
                        self.logger.debug("Selecting option %s", action)
                    elif 'fallback' in block['select']:
                        # selector did not match; try the configured fallback
                        action = self.selector.select(menu_text, block['select']['fallback'])
                    if not action:
                        raise JobError("No selection was made")
                    connection.sendline(action, delay=self.character_delay)
                    self._change_prompt(connection, change_prompt)
            if 'escape' in block['select']:
                self.logger.debug("Sending escape")
                connection.raw_connection.sendcontrol('[')
                self._change_prompt(connection, change_prompt)
            if 'enter' in block['select']:
                self.logger.debug("Sending %s Ctrl-M", block['select']['enter'])
                connection.raw_connection.send(block['select']['enter'], delay=self.character_delay)
                connection.raw_connection.sendcontrol('M')
                self._change_prompt(connection, change_prompt)
        else:
            raise JobError("Unable to recognise selection %s" % block['select'])
    return connection
def get_deployment_data(distro):
    """
    Returns the deployment data by name, for the cases where we actually
    need that.

    :param distro: name of the operating system, used to look up the
        matching attribute in this module
    :raises JobError: if distro is empty or not a supported distribution
    """
    # '==' not 'is': identity comparison against a string literal is
    # implementation-dependent and can fail for an equal but distinct string
    if distro == '':
        raise JobError(
            "Missing 'os' value for deployment - unable to identify operating system for deployment data."
        )
    this_module = sys.modules[__name__]
    try:
        return getattr(this_module, distro)
    except AttributeError:
        raise JobError("%s is not a supported distribution" % distro)
def check_timeout(self, duration, data):
    """Validate and apply the timeout for a vland protocol request.

    :param duration: requested timeout in seconds
    :param data: dict carrying the API call; must contain a 'request' key
    :raises TestError: if no data was supplied
    :raises JobError: if the request key is missing or the duration is too
        short for deploy_vlans
    :return: True if the timeout was applied, False for other requests
    """
    if not data:
        raise TestError("Protocol called without any data")
    if 'request' not in data:
        raise JobError("Bad API call over protocol - missing request")
    if data['request'] == 'deploy_vlans':
        if duration < VLAND_DEPLOY_TIMEOUT:
            # the original passed duration as a second constructor argument
            # (logging style), so the message was never formatted
            raise JobError(
                "Timeout of %s is insufficient for deploy_vlans" % duration)
        self.logger.info("Setting vland base timeout to %s seconds", duration)
        self.poll_timeout.duration = duration
        return True
    return False
def reader(self):
    """Generator yielding chunks of the HTTP download.

    Streams the configured URL and yields HTTP_DOWNLOAD_CHUNK_SIZE byte
    chunks; the response is always closed on exit.

    :raises JobError: on a non-OK status code or any requests error
    """
    response = None
    try:
        response = requests.get(
            self.url.geturl(), allow_redirects=True, stream=True,
            timeout=HTTP_DOWNLOAD_TIMEOUT)
        if response.status_code != requests.codes.OK:  # pylint: disable=no-member
            raise JobError("Unable to download '%s'" % (self.url.geturl()))
        for chunk in response.iter_content(HTTP_DOWNLOAD_CHUNK_SIZE):
            yield chunk
    except requests.RequestException as exc:
        # TODO: improve error reporting
        raise JobError(exc)
    finally:
        if response is not None:
            response.close()
def run(self, connection, args=None):
    """Put the device into fastboot mode, via adb or fastboot.

    First checks whether the device is visible to adb and, if so, reboots
    it into the bootloader with adb. Otherwise, if the device is already
    visible to fastboot, reboots the bootloader with fastboot and records
    the 'finished' status line.

    :raises JobError: if either reboot-bootloader command reports an error
    :return: the connection
    """
    connection = super(EnterFastbootAction, self).run(connection, args)
    lxc_name = self.get_common_data('lxc', 'name')
    fastboot_serial_number = self.job.device['fastboot_serial_number']
    # Try to enter fastboot mode with adb.
    adb_serial_number = self.job.device['adb_serial_number']
    adb_cmd = ['lxc-attach', '-n', lxc_name, '--', 'adb', '-s', adb_serial_number, 'devices']
    command_output = self.run_command(adb_cmd)
    if command_output and adb_serial_number in command_output:
        self.logger.debug("Device is in adb: %s", command_output)
        adb_cmd = ['lxc-attach', '-n', lxc_name, '--', 'adb', '-s', adb_serial_number, 'reboot-bootloader']
        command_output = self.run_command(adb_cmd)
        if command_output and 'error' in command_output:
            raise JobError(
                "Unable to enter fastboot: %s" % command_output)  # FIXME: JobError needs a unit test
        return connection
    # Enter fastboot mode with fastboot.
    fastboot_cmd = ['lxc-attach', '-n', lxc_name, '--', 'fastboot', '-s', fastboot_serial_number, 'devices']
    command_output = self.run_command(fastboot_cmd)
    if command_output and fastboot_serial_number in command_output:
        self.logger.debug("Device is in fastboot: %s", command_output)
        fastboot_cmd = ['lxc-attach', '-n', lxc_name, '--', 'fastboot', '-s', fastboot_serial_number, 'reboot-bootloader']
        command_output = self.run_command(fastboot_cmd)
        if command_output and 'OKAY' not in command_output:
            raise JobError(
                "Unable to enter fastboot: %s" % command_output)  # FIXME: JobError needs a unit test
        else:
            # NOTE(review): [0] raises IndexError if fastboot prints no
            # 'finished' line - confirm fastboot always emits one on success
            status = [status.strip() for status in command_output.split('\n') if 'finished' in status][0]
            self.results = {'status': status}
    return connection
def collate(self, reply, params):
    """
    Retrieve values from reply to the call for this action
    possibly multiple key:value pairs.
    Arguments:
        reply - self.get_common_data(protocol.name, self.name)
        params - dict containing the message to match to the reply
    params will not be modified, the return value is a *tuple* where the first value
    is the identifier to be used by other actions wanting this data (typically the API call or messageID)
    and the second value is the collated data from the call to the protocol.

    :raises JobError: if the reply is empty/not a dict or no matching
        messageID data could be collated
    """
    retval = {}
    if reply == {} or not isinstance(reply, dict):
        msg = "Unable to identify replaceable values in the parameters: %s" % params
        self.logger.error(msg)
        raise JobError(msg)
    self.logger.debug({
        "Retrieving replaceable values from": "%s" % json.dumps(reply),
        "params": "%s" % json.dumps(params)})
    if 'message' in params and reply:
        # keys whose value starts with '$' are placeholders to be replaced
        replaceables = [key for key, value in params['message'].items()
                        if key != 'yaml_line' and value.startswith('$')]
        for item in replaceables:
            if 'message' in reply:
                # pick the reply entry matching this action's target
                target_list = [val for val in reply['message'].items()
                               if self.parameters['target'] in val]
            else:
                target_list = [val for val in list(reply.items())]
            data = target_list[0][1]
            if item not in data:
                self.logger.warning("Skipping %s - not found in %s", item, data)
                continue
            # setdefault: only the first matching value per messageID is kept
            retval.setdefault(params['messageID'], {item: data[item]})
    if 'messageID' in params:
        ret_key = params['messageID']
        if ret_key in retval:
            ret_value = retval[ret_key]
            return ret_key, ret_value
    msg = "Unable to identify replaceable values in the parameters: %s" % params
    self.logger.error(msg)
    raise JobError(msg)
def run(self, connection, args=None):
    """Open an SSH connection to the device, unless already connected.

    The host address comes either from prepare-scp-overlay common data
    (secondary connection) or from the device data (primary connection).

    :param connection: an existing connection is returned unchanged
    :param args: optional extra arguments for super().run()
    :raises JobError: if no host address can be determined or the ssh
        command exits with a non-zero status
    :return: the established SshSession
    """
    if connection:
        self.logger.debug("Already connected")
        return connection
    signal.alarm(0)  # clear the timeouts used without connections.
    # ShellCommand executes the connection command
    params = self._check_params()
    command = self.command[:]  # local copy for idempotency
    overrides = self.get_common_data("prepare-scp-overlay", self.key)
    host_address = None
    if overrides:
        host_address = str(self.get_common_data("prepare-scp-overlay", overrides[0]))
    if host_address:
        self.logger.info(
            "Using common data to retrieve host_address for secondary connection.")
        command_str = " ".join(str(item) for item in command)
        self.logger.info("%s Connecting to device %s using '%s'",
                         self.name, host_address, command_str)
        command.append("%s@%s" % (self.ssh_user, host_address))
    elif self.host and self.primary:
        self.logger.info("Using device data host_address for primary connection.")
        command_str = " ".join(str(item) for item in command)
        self.logger.info("%s Connecting to device %s using '%s'",
                         self.name, self.host, command_str)
        command.append("%s@%s" % (self.ssh_user, self.host))
    else:
        # the original passed self.primary as a second constructor argument
        # (logging style), so the message was never formatted
        raise JobError("Unable to identify host address. Primary? %s" % self.primary)
    command_str = " ".join(str(item) for item in command)
    shell = ShellCommand("%s\n" % command_str, self.timeout, logger=self.logger)
    if shell.exitstatus:
        raise JobError("%s command exited %d: %s" % (self.command, shell.exitstatus, shell.readlines()))
    # SshSession monitors the pexpect
    connection = SShSession(self.job, shell)
    connection = super(ConnectSsh, self).run(connection, args)
    connection.sendline('export PS1="%s"' % DEFAULT_SHELL_PROMPT)
    connection.prompt_str = [DEFAULT_SHELL_PROMPT]
    connection.connected = True
    self.wait(connection)
    self.data["boot-result"] = 'failed' if self.errors else 'success'
    return connection
def _run(self):
    """
    Run the pipeline under the run() wrapper that will catch the exceptions
    """
    # Install cancellation handlers before anything long-running starts.
    signal.signal(signal.SIGINT, self.cancelling_handler)
    signal.signal(signal.SIGTERM, self.cancelling_handler)
    # Bring up every protocol; any failure aborts the job.
    for proto in self.protocols:
        try:
            proto.set_up()
        except LAVAError:
            raise
        except Exception as exc:
            self.logger.error("Unable to setup the protocols")
            self.logger.exception(traceback.format_exc())
            raise LAVABug(exc)
        if not proto.valid:
            error_msg = "protocol %s has errors: %s" % (proto.name, proto.errors)
            self.logger.exception(error_msg)
            raise JobError(error_msg)
    # Execute the pipeline under the job-wide timeout.
    with self.timeout() as max_end_time:
        self.pipeline.run_actions(self.connection, max_end_time)
def copy_overlay_to_sparse_fs(image, overlay):
    """copy_overlay_to_sparse_fs

    Convert an Android sparse image to raw ext4, mount it, unpack the
    gzipped overlay into it, verify free space remains, then convert it
    back to sparse format in place.

    :param image: path to the sparse image to modify
    :param overlay: path to the gzip-compressed overlay tarball
    :raises JobError: if the image has no space left after the overlay
    """
    mnt_dir = mkdtemp()
    ext4_img = image + '.ext4'
    logger = logging.getLogger('dispatcher')
    subprocess.check_output(['/usr/bin/simg2img', image, ext4_img],
                            stderr=subprocess.STDOUT)
    subprocess.check_output(['/bin/mount', '-o', 'loop', ext4_img, mnt_dir],
                            stderr=subprocess.STDOUT)
    # remove any stale decompressed overlay (overlay minus the '.gz' suffix)
    if os.path.exists(overlay[:-3]):
        os.unlink(overlay[:-3])
    decompressed_overlay = decompress_file(overlay, 'gz')
    untar_file(decompressed_overlay, mnt_dir)
    # Check if we have space left on the mounted image.
    # NOTE(review): Popen without universal_newlines returns bytes on
    # Python 3, which would break split("\n") - confirm the target runtime.
    df = subprocess.Popen(['df', '-k', mnt_dir], stdout=subprocess.PIPE)
    output = df.communicate()[0]
    logger.debug(output)
    device, size, used, available, percent, mountpoint = output.split("\n")[1].split()
    subprocess.check_output(['/bin/umount', mnt_dir], stderr=subprocess.STDOUT)
    # '==' replaces 'is': identity comparison against int/str literals is
    # implementation-dependent and could silently skip this check
    if int(available) == 0 or percent == '100%':
        raise JobError("No space in image after applying overlay: %s" % image)
    subprocess.check_output(['/usr/bin/img2simg', ext4_img, image],
                            stderr=subprocess.STDOUT)
    os.remove(ext4_img)
def run(self, connection, args=None):
    """
    Clones the git repo into a directory name constructed from the mount_path,
    lava-$hostname prefix, tests, $index_$test_name elements. e.g.
    /tmp/tmp.234Ga213/lava-kvm01/tests/3_smoke-tests-basic
    Also updates some basic metadata about the test definition.

    :raises RuntimeError: if the destination is already populated (likely a
        duplicate Action) or the clone fails
    :raises JobError: if the test definition YAML is missing after clone
    :return: the connection
    """
    # use the base class to populate the runner_path and overlay_path data into the context
    connection = super(GitRepoAction, self).run(connection, self.parameters)
    # NOTE: the runner_path dir must remain empty until after the VCS clone, so let the VCS clone create the final dir
    runner_path = self.data['test'][self.uuid]['overlay_path'][self.parameters['test_name']]
    # the original tested 'os.listdir(runner_path) == []' (i.e. exists AND
    # empty), which contradicts the error message and misses the duplicate
    # case - a duplicate Action leaves a populated directory behind
    if os.path.exists(runner_path) and os.listdir(runner_path) != []:
        raise RuntimeError("Directory already exists and is not empty - duplicate Action?")
    commit_id = self.vcs.clone(runner_path, self.parameters.get('revision', None))
    if commit_id is None:
        raise RuntimeError("Unable to get test definition from %s (%s)" % (self.vcs.binary, self.parameters))
    self.results = {'success': commit_id}
    # now read the YAML to create a testdef dict to retrieve metadata
    self.logger.debug(os.path.join(runner_path, self.parameters['path']))
    yaml_file = os.path.join(runner_path, self.parameters['path'])
    if not os.path.exists(yaml_file):
        raise JobError("Unable to find test definition YAML: %s" % yaml_file)
    with open(yaml_file, 'r') as test_file:
        testdef = yaml.safe_load(test_file)
    # set testdef metadata in base class
    self.store_testdef(testdef, 'git', commit_id)
    return connection
def run(self, connection, args=None):
    """
    Clone the bazar repository into a directory
    """
    connection = super(BzrRepoAction, self).run(connection, self.parameters)

    # NOTE: the runner_path dir must remain empty until after the VCS clone, so let the VCS clone create the final dir
    overlay_dir = self.data['test-definition']['overlay_dir']
    runner_path = os.path.join(overlay_dir, 'tests', self.parameters['test_name'])

    revision = self.parameters.get('revision', None)
    commit_id = self.vcs.clone(runner_path, revision)
    if commit_id is None:
        raise RuntimeError("Unable to get test definition from %s (%s)" % (self.vcs.binary, self.parameters))
    self.results = {'success': commit_id}

    # now read the YAML to create a testdef dict to retrieve metadata
    yaml_file = os.path.join(runner_path, self.parameters['path'])
    if not os.path.exists(yaml_file):
        raise JobError("Unable to find test definition YAML: %s" % yaml_file)
    with open(yaml_file, 'r') as test_file:
        self.testdef = yaml.safe_load(test_file)
    # set testdef metadata in base class
    self.store_testdef(self.testdef, 'bzr', commit_id)
    return connection
def run(self, connection, args=None):
    """Unpack the kernel modules tarball into the ramdisk and/or NFS root."""
    if not self.parameters.get('modules', None):  # idempotency
        return connection
    connection = super(ExtractModules, self).run(connection, args)
    modules = self.data['download_action']['modules']['file']
    # Without a ramdisk or an NFS rootfs there is nowhere to unpack into.
    if not self.parameters.get('ramdisk', None) and not self.parameters.get('nfsrootfs', None):
        raise JobError(
            "Unable to identify a location for the unpacked modules")
    # if both NFS and ramdisk are specified, apply modules to both
    # as the kernel may need some modules to raise the network and
    # will need other modules to support operations within the NFS
    if self.parameters.get('nfsrootfs', None):
        nfs_root = self.get_common_data('file', 'nfsroot')
        self.logger.info("extracting modules file %s to %s", modules, nfs_root)
        untar_file(modules, nfs_root)
    if self.parameters.get('ramdisk', None):
        ramdisk_root = self.data['extract-overlay-ramdisk']['extracted_ramdisk']
        self.logger.info("extracting modules file %s to %s", modules, ramdisk_root)
        untar_file(modules, ramdisk_root)
    # The tarball has been consumed; leaving it behind would pollute the
    # deployment, so failure to remove it is treated as an infrastructure bug.
    try:
        os.unlink(modules)
    except OSError as exc:
        raise RuntimeError("Unable to remove tarball: '%s' - %s" % (modules, exc))
    return connection
def run(self, connection, args=None):
    """Write the incidental per-test files (testdef copy, uuid, metadata)."""
    connection = super(TestOverlayAction, self).run(connection, args)
    test_data = self.data['test'][self.test_uuid]
    runner_path = test_data['overlay_path'][self.parameters['test_name']]

    # now read the YAML to create a testdef dict to retrieve metadata
    yaml_file = os.path.join(runner_path, self.parameters['path'])
    # FIXME: check the existence at the same time as the open.
    if not os.path.exists(yaml_file):
        raise JobError("Unable to find test definition YAML: %s" % yaml_file)
    with open(yaml_file, 'r') as test_file:
        testdef = yaml.safe_load(test_file)

    # FIXME: change lava-test-runner to accept a variable instead of duplicating the YAML?
    with open("%s/testdef.yaml" % runner_path, 'w') as run_file:
        yaml.safe_dump(testdef, run_file)

    # write out the UUID of each test definition.
    # FIXME: is this necessary any longer?
    with open('%s/uuid' % runner_path, 'w') as uuid_file:
        uuid_file.write(self.test_uuid)

    # FIXME: does this match old-world test-shell & is it needed?
    with open('%s/testdef_metadata' % runner_path, 'w') as metadata_file:
        metadata_file.write(yaml.safe_dump(test_data['testdef_metadata']))

    # Need actions for the run.sh script (calling parameter support in base class)
    # and install script (also calling parameter support here.)
    # this run then only does the incidental files.
    self.results = {'success': self.test_uuid}
    return connection
def run(self, connection, args=None):
    """
    Extracts the provided encoded tar archive into tmpdir.

    :param connection: current connection object, passed through unchanged.
    :param args: passed to the base class run().
    :raises JobError: if the archive cannot be written or extracted.
    """
    connection = super(TarRepoAction, self).run(connection, args)
    runner_path = os.path.join(self.data['test-definition']['overlay_dir'], 'tests', self.parameters['test_name'])
    temp_tar = os.path.join(self.data['test-definition']['overlay_dir'], "tar-repo.tar")

    try:
        if not os.path.isdir(runner_path):
            self.logger.debug("Creating directory to extract the tar archive into.")
            os.makedirs(runner_path)

        # FIX: the original piped io.StringIO objects through base64.decode()
        # and wrote the result via a text-mode file. base64.decode() produces
        # bytes, which StringIO/text files reject (or corrupt) on Python 3.
        # Decode the payload directly and write the tarball in binary mode.
        tar_bytes = base64.b64decode(self.parameters['repository'])
        with open(temp_tar, "wb") as write_tar:
            write_tar.write(tar_bytes)

        with tarfile.open(temp_tar) as tar:
            tar.extractall(path=runner_path)
    except (OSError, tarfile.TarError) as ex:
        raise JobError("Error extracting the tar archive.\n" + str(ex))
    finally:
        # Remove the temporary created tar file after it has been extracted.
        if os.path.isfile(temp_tar):
            os.unlink(temp_tar)
    return connection