def wait(self):
    """Block until the subprocess exits, capturing its output.

    Writes stdout/stderr into the configured sinks (decoding for
    StringIO sinks), records the exit status, and raises
    CommandFailedError when check_status is set and the command
    exited non-zero.
    """
    if self.finished:
        # The process already exited; communicate() would complain
        # about the std* streams being closed, so just re-check status.
        if self.check_status and self.exitstatus != 0:
            raise CommandFailedError(self.args, self.exitstatus)
        return
    out, err = self.subproc.communicate()
    # StringIO sinks need text; any other sink gets the raw bytes.
    for sink, data in ((self.stdout, out), (self.stderr, err)):
        if isinstance(sink, StringIO):
            sink.write(data.decode(errors='ignore'))
        else:
            sink.write(data)
    self.exitstatus = self.returncode = self.subproc.returncode
    if self.exitstatus != 0:
        # Mirror the failing command's output on our own stderr.
        sys.stderr.write(six.ensure_str(out))
        sys.stderr.write(six.ensure_str(err))
        if self.check_status:
            raise CommandFailedError(self.args, self.exitstatus)
def wait(self):
    """Block until the subprocess exits and record its exit status.

    Raises CommandFailedError when check_status is set and the
    command exited non-zero.
    """
    if self.finished:
        # Avoid calling communicate() on a dead process because it'll
        # give you stick about std* already being closed.
        #
        # Bug fix: honor check_status here too; previously this branch
        # raised even when the caller passed check_status=False, which
        # is inconsistent with the not-finished path below.
        if self.check_status and self.exitstatus != 0:
            raise CommandFailedError(self.args, self.exitstatus)
        else:
            return
    out, err = self.subproc.communicate()
    self.stdout.write(out)
    self.stderr.write(err)
    self.exitstatus = self.returncode = self.subproc.returncode
    if self.exitstatus != 0:
        # Mirror the failing command's output on our own stderr.
        sys.stderr.write(out)
        sys.stderr.write(err)
    if self.check_status and self.exitstatus != 0:
        raise CommandFailedError(self.args, self.exitstatus)
def run_tests(self):
    """Run the integration test suite via tox, streaming output to the log.

    Raises CommandFailedError if tox exits non-zero.
    """
    self.log.info("Running tests...")
    command = "tox -e integration %s" % self.inventory
    child_log = LoggerFile(self.log.getChild('tests'), logging.INFO)
    out, status = pexpect.run(
        command,
        cwd=self.repo_path,
        logfile=child_log,
        withexitstatus=True,
        timeout=None,  # let the suite run as long as it needs
    )
    if status != 0:
        raise CommandFailedError(command, status)
def test_wait_for_ready_sentinel(self, sentinel_present):
    """_wait_for_ready() checks for the sentinel file and fails without it."""
    config.fog['sentinel_file'] = '/a_file'
    obj = self.klass('name.fqdn', 'type', '1.0')
    if sentinel_present:
        obj._wait_for_ready()
    else:
        # Simulate the remote check failing because the file is absent.
        self.mocks['m_Remote_run'].side_effect = [
            CommandFailedError(command='cmd', exitstatus=1)
        ]
        with raises(CommandFailedError):
            obj._wait_for_ready()
    run_calls = self.mocks['m_Remote_run'].call_args_list
    assert len(run_calls) == 1
    assert "'/a_file'" in run_calls[0][1]['args']
def _handle_failure(self, command, status):
    """Translate a failed ansible run into a rich exception.

    Prefers AnsibleFailedError built from the parsed failure log,
    falling back to CommandFailedError when the log is empty.

    :param command: the command that failed
    :param status:  its exit status
    :raises: AnsibleFailedError or CommandFailedError
    """
    failures = None
    with open(self.failure_log.name, 'r') as fail_log:
        try:
            failures = yaml.safe_load(fail_log)
        except yaml.YAMLError as e:
            # Bug fix: catch the whole YAML error hierarchy (parser
            # *and* scanner errors), not just ParserError, and fall
            # back to the log's text rather than the file object
            # itself, which would be closed by the time a caller
            # inspected the exception.
            log.error("Failed to parse ansible failure log: {0} ({1})".format(
                self.failure_log.name, e,
            ))
            fail_log.seek(0)
            failures = fail_log.read().replace('\n', '')
    if failures:
        self._archive_failures()
        raise AnsibleFailedError(failures)
    raise CommandFailedError(command, status)
def _handle_failure(self, command, status):
    """Mark the run dead, then raise the most informative error available."""
    self._set_status('dead')
    failures = None
    with open(self.failure_log.name, 'r') as fail_log:
        try:
            failures = yaml.safe_load(fail_log)
        except yaml.YAMLError as e:
            msg = "Failed to parse ansible failure log: {0} ({1})".format(
                self.failure_log.name, e)
            log.error(msg)
            # Fall back to the raw log text, flattened to one line.
            failures = fail_log.read().replace('\n', '')
    if not failures:
        raise CommandFailedError(command, status)
    self._archive_failures()
    raise AnsibleFailedError(failures)
def check_status(self):
    """
    Check to see if the process has exited.

    :returns: The exit status, if any
    :raises: CommandFailedError, if the process was run with
             check_status=True
    """
    proc = self.remote.run(
        args=self.show_cmd + ' | grep -i state',
        stdout=StringIO(),
    )

    def parse_line(line):
        key, value = line.strip().split('=', 1)
        return {key.strip(): value.strip()}

    show_dict = dict()
    for line in proc.stdout.readlines():
        show_dict.update(parse_line(line))
    active_state = show_dict['ActiveState']
    sub_state = show_dict['SubState']
    if active_state == 'active':
        return None
    self.log.info("State is: %s/%s", active_state, sub_state)
    proc = self.remote.run(
        # This will match a line like:
        #     Main PID: 13394 (code=exited, status=1/FAILURE)
        # Or (this is wrapped):
        #     Apr 26 21:29:33 ovh083 systemd[1]: [email protected]:
        #     Main process exited, code=exited, status=1/FAILURE
        args=self.status_cmd + " | grep 'Main.*code=exited'",
        stdout=StringIO(),
    )
    line = proc.stdout.readlines()[-1]
    # Bug fix: use a raw string for the regex; '\d' inside a plain
    # string literal is an invalid escape sequence (DeprecationWarning
    # today, a SyntaxError in future Python versions).
    exit_code = int(re.match(r'.*status=(\d+).*', line).groups()[0])
    if exit_code:
        self.remote.run(
            args=self.output_cmd
        )
        raise CommandFailedError(
            self.start_cmd,
            exit_code,
            self.remote,
        )
    return exit_code
def _execute_ceph_commands(self):
    """Run the configured 'exec' commands on the first mon, with retries.

    Each command is attempted up to 3 times, 5 seconds apart; if every
    attempt fails, the last CommandFailedError is re-raised.
    """
    mon_node = self.ceph_first_mon
    cmds = self.config.get('exec', list())
    for cmd in cmds:
        # NOTE(review): substring check, so any command mentioning
        # 'sudo' anywhere is left untouched — preserved as-is.
        if 'sudo' not in cmd:
            cmd = "sudo {}".format(cmd)
        log.info('Executing CEPH command : {}'.format(cmd))
        attempts = 3
        for attempt in range(1, attempts + 1):
            try:
                out = mon_node.sh(cmd)
                log.info(out)
                break
            except CommandFailedError:
                # Bug fix: the old loop decremented its counter *after*
                # checking for exhaustion, so after the final attempt it
                # fell out of the while-loop without raising, silently
                # swallowing persistent failures. It also re-wrapped the
                # error as CommandFailedError(err), mangling the
                # exception's arguments; re-raise the original instead.
                if attempt == attempts:
                    raise
                time.sleep(5)
def _raise_for_status(self): if self.returncode is None: self._get_exitstatus() if self.check_status: if self.returncode in (None, -1): # command either died due to a signal, or the connection # was lost transport = self.client.get_transport() if transport is None or not transport.is_active(): # look like we lost the connection raise ConnectionLostError(command=self.command, node=self.hostname) # connection seems healthy still, assuming it was a # signal; sadly SSH does not tell us which signal raise CommandCrashedError(command=self.command) if self.returncode != 0: raise CommandFailedError(command=self.command, exitstatus=self.returncode, node=self.hostname, label=self.label)