def _run_ip_monitor(self, module):
    """Launch the IP-monitor helper script as a root process.

    :param module: Python module whose source file is executed; its
        ``__file__`` is normalized into a runnable path.
    :returns: the started ``AsyncProcess`` instance.
    """
    script = self._normalize_module_name(module.__file__)
    monitor = async_process.AsyncProcess(
        [script, self.temp_file, str(self.namespace)],
        run_as_root=True)
    # Block until the child is confirmed running before handing it back.
    monitor.start(block=True)
    return monitor
def _start_async_dhclient(self):
    """Run dhclient for the test port as a respawning background process."""
    command = [
        'dhclient',
        '-sf', self.NO_RESOLV_CONF_DHCLIENT_SCRIPT_PATH,
        '--no-pid',
        '-d', self.port.name,
    ]
    # respawn_interval keeps the client alive across transient exits.
    self.dhclient_async = async_process.AsyncProcess(
        command, run_as_root=True, respawn_interval=5,
        namespace=self.namespace)
    self.dhclient_async.start()
def start(self):
    """Start gobgpd for this test, optionally teeing output to a log file.

    Builds a per-test log directory, locates the gobgpd binary on PATH
    and launches it (via ``sh -c ... > log 2>&1`` when GOBGPD_LOG is set).

    :raises RuntimeError: if the gobgpd executable cannot be found.
    """
    test_name = base.sanitize_log_path(self.test_name)
    log_dir = os.path.join(neutron_base.DEFAULT_LOG_DIR, test_name)
    fileutils.ensure_tree(log_dir, mode=0o755)
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S-%f")
    # Use os.path.join instead of hand-building the path with "%s/%s".
    log_file = os.path.join(
        log_dir, "%s--%s.log" % (self.process_name, timestamp))
    gobgpd_exec = spawn.find_executable(self.exec_name)
    if not gobgpd_exec:
        # RuntimeError instead of the over-broad bare Exception; callers
        # catching Exception still catch this.
        raise RuntimeError("can't find gobgpd executable in PATH (%s, %s)" %
                           (self.exec_name, os.environ['PATH']))
    cmd = [
        gobgpd_exec,
        '-t', 'json',
        '-f', self.config_filenames[0],
        '--log-level=debug',
        # we don't need this management API:
        '--api-hosts=0.0.0.0:%s' % random.randint(20000, 30000)
    ]
    if self.GOBGPD_LOG:
        # Redirect both stdout and stderr into the per-test log file.
        cmd = ['sh', '-c', ('%s > %s 2>&1') % (' '.join(cmd), log_file)]
    self.process = async_process.AsyncProcess(cmd,
                                              namespace=self.namespace)
    self.process.start()
def setUp(self):
    """Create an AsyncProcess wrapping a deliberately failing script."""
    super(TestFailingAsyncProcess, self).setUp()
    path = self.get_temp_file_path('async.tmp', self.get_new_temp_dir())
    # respawn_interval=0 restarts the failing process immediately.
    self.process = async_process.AsyncProcess(
        ['python', failing_process.__file__, path],
        respawn_interval=0)
def wait_for_dscp_marked_packet(sender_vm, receiver_vm, dscp_mark):
    """Assert that traffic from sender to receiver carries a DSCP mark.

    Runs tcpdump inside the receiver's namespace, pings the receiver from
    the sender, then inspects tcpdump's stderr summary.  Returns as soon
    as a non-zero "packets received by filter" count is seen.

    :raises TcpdumpException: when no matching packet was captured.
    """
    cmd = ["tcpdump", "-i", receiver_vm.port.name, "-nlt",
           "src", sender_vm.ip, 'and', 'dst', receiver_vm.ip]
    if dscp_mark:
        # DSCP occupies the top 6 bits of the IP TOS byte (ip[1]).
        cmd += ["and", "(ip[1] & 0xfc == %s)" % (dscp_mark << 2)]
    tcpdump_async = async_process.AsyncProcess(
        cmd, run_as_root=True, namespace=receiver_vm.namespace)
    tcpdump_async.start(block=True)
    sender_vm.block_until_ping(receiver_vm.ip)
    try:
        tcpdump_async.stop(kill_signal=signal.SIGINT)
    except async_process.AsyncProcessException:
        # If it was already stopped then we don't care about it.
        pass
    pattern = r"(?P<packets_count>^\d+) packets received by filter"
    stderr_lines = []
    for line in tcpdump_async.iter_stderr():
        match = re.match(pattern, line)
        if match and int(match.group("packets_count")) != 0:
            return
        stderr_lines.append(line)
    stdout_lines = list(tcpdump_async.iter_stdout())
    LOG.debug("Captured output lines from tcpdump. Stdout: %s; Stderr: %s",
              stdout_lines, stderr_lines)
    raise TcpdumpException(
        "No packets marked with DSCP = %(dscp_mark)s received from %(src)s "
        "to %(dst)s" % {'dscp_mark': dscp_mark,
                        'src': sender_vm.ip,
                        'dst': receiver_vm.ip})
def test_get_cmdline_from_pid_and_pid_invoked_with_cmdline(self):
    """Both cmdline helpers agree on a live process; bad pid yields []."""
    cmd = ['tail', '-f', self.test_file_path]
    proc = async_process.AsyncProcess(cmd)
    proc.start(block=True)
    self.addCleanup(proc.stop)

    pid = proc.pid
    self.assertEqual(cmd, utils.get_cmdline_from_pid(pid))
    self.assertTrue(utils.pid_invoked_with_cmdline(pid, cmd))
    # A pid that cannot exist produces an empty command line.
    self.assertEqual([], utils.get_cmdline_from_pid(-1))
def test_stopping_async_process_lifecycle(self):
    """Stopping the process kills it (SIGKILL) and joins its watchers."""
    tail_cmd = ['tail', '-f', self.test_file_path]
    proc = async_process.AsyncProcess(tail_cmd)
    self.addCleanup(self._safe_stop, proc)
    proc.start(block=True)
    self._check_stdout(proc)
    proc.stop(block=True)

    # Ensure that the process and greenthreads have stopped.
    proc._process.wait()
    self.assertEqual(-9, proc._process.returncode)
    for watcher in proc._watchers:
        watcher.wait()
def _test_process(self, run_as_root):
    """find_parent_pid walks from the spawned bash back to this test.

    :param run_as_root: whether the child is launched via the root helper.
    """
    expected_pid = str(os.getppid())
    proc = async_process.AsyncProcess(['bash', '-c', '(sleep 10)'],
                                      run_as_root=run_as_root)
    proc.start()
    self.addCleanup(self._stop_process, proc)
    # Spawning is asynchronous; wait until the child pid is available.
    common_utils.wait_until_true(lambda: proc._process.pid,
                                 sleep=0.5, timeout=10)

    bash_pid = utils.find_parent_pid(proc._process.pid)
    testcase_pid = utils.find_parent_pid(bash_pid)
    self.assertEqual(expected_pid, testcase_pid)
def _start_async_dhclient(self, port_id, version=constants.IP_VERSION_4):
    """Run dhclient for the port in the background, respawning on exit.

    :param port_id: names the per-port lease file in the host temp dir.
    :param version: IP version flag passed to dhclient (``-4`` / ``-6``).
    """
    lease_file = '%s/%s.lease' % (self.host.neutron_config.temp_dir,
                                  port_id)
    command = [
        'dhclient',
        '-%s' % version,
        '-lf', lease_file,
        '-sf', self.NO_RESOLV_CONF_DHCLIENT_SCRIPT_PATH,
        '--no-pid',
        '-d', self.port.name,
    ]
    self.dhclient_async = async_process.AsyncProcess(
        command, run_as_root=True, respawn_interval=5,
        namespace=self.namespace)
    self.dhclient_async.start()
def test_async_process_respawns(self):
    """A SIGKILLed process with respawn_interval=0 returns with a new pid."""
    proc = async_process.AsyncProcess(['tail', '-f', self.test_file_path],
                                      respawn_interval=0)
    self.addCleanup(self._safe_stop, proc)
    proc.start()

    # Ensure that the same output is read twice.
    self._check_stdout(proc)
    old_pid = proc.pid
    utils.execute(['kill', '-9', old_pid])
    common_utils.wait_until_true(
        lambda: proc.is_active() and old_pid != proc.pid,
        timeout=5, sleep=0.01,
        exception=RuntimeError(_("Async process didn't respawn")))
    self._check_stdout(proc)
def test_root_process(self):
    """Spawning 20 sleep processes is visible in the process count."""
    cmd = ['sleep', '100']
    processes = []
    for _ in range(20):
        proc = async_process.AsyncProcess(cmd)
        proc.start()
        processes.append(proc)
    for proc in processes:
        common_utils.wait_until_true(lambda: proc._process.pid,
                                     sleep=0.5, timeout=5)
    self.addCleanup(self._stop_processes, processes)

    number_of_sleep = utils.get_process_count_by_name('sleep')
    # NOTE(ralonsoh): other tests can spawn sleep processes too, but at
    # this point we know there are, at least, 20 "sleep" processes running.
    self.assertLessEqual(20, number_of_sleep)
def test_get_root_helper_child_pid_returns_first_child(self):
    """Test that the first child, not lowest child pid is returned.

    Test creates following process tree:
        sudo +
             |
             +--rootwrap +
                         |
                         +--bash+
                                |
                                +--sleep 100

    and tests that pid of `bash' command is returned.
    """

    def wait_for_sleep_is_spawned(parent_pid):
        # Poll pstree output until the deepest process under parent_pid
        # is the final `sleep' leaf of the tree above.
        proc_tree = utils.execute(['pstree', parent_pid],
                                  check_exit_code=False)
        processes = [command.strip() for command in proc_tree.split('---')
                     if command]
        if processes:
            return 'sleep' == processes[-1]

    cmd = ['bash', '-c', '(sleep 100)']
    proc = async_process.AsyncProcess(cmd, run_as_root=True)
    proc.start()

    # root helpers spawn their child processes asynchronously, and we
    # don't want to use proc.start(block=True) as that uses
    # get_root_helper_child_pid (The method under test) internally.
    sudo_pid = proc._process.pid
    common_utils.wait_until_true(functools.partial(
        wait_for_sleep_is_spawned, sudo_pid), sleep=0.1)

    child_pid = utils.get_root_helper_child_pid(sudo_pid, cmd,
                                                run_as_root=True)
    self.assertIsNotNone(
        child_pid,
        "get_root_helper_child_pid is expected to return the pid of the "
        "bash process")
    self._addcleanup_sleep_process(child_pid)
    # Verify via /proc that the returned pid really is the bash process,
    # i.e. the first child, not the deepest descendant (`sleep').
    with open('/proc/%s/cmdline' % child_pid, 'r') as f_proc_cmdline:
        cmdline = f_proc_cmdline.readline().split('\0')[0]
    self.assertIn('bash', cmdline)
def start(self):
    """Start the service, logging into a per-test timestamped file."""
    test_name = base.sanitize_log_path(self.test_name)
    log_dir = os.path.join(fullstack_base.DEFAULT_LOG_DIR, test_name)
    common_utils.ensure_dir(log_dir)

    timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S-%f")
    log_file = "%s--%s.log" % (self.process_name, timestamp)
    cmd = [spawn.find_executable(self.exec_name),
           '--log-dir', log_dir,
           '--log-file', log_file]
    for conf_file in self.config_filenames:
        cmd.extend(['--config-file', conf_file])

    # Only namespaced processes need the root helper.
    run_as_root = bool(self.namespace)
    self.process = async_process.AsyncProcess(
        cmd, run_as_root=run_as_root, namespace=self.namespace)
    self.process.start(block=True)
def start(self):
    """Start the fullstack process, logging to a timestamped file."""
    test_name = base.sanitize_log_path(self.test_name)
    log_dir = os.path.join(fullstack_base.DEFAULT_LOG_DIR, test_name)
    fileutils.ensure_tree(log_dir, mode=0o755)

    timestamp = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S-%f")
    log_file = "%s--%s.log" % (self.process_name, timestamp)
    run_as_root = bool(self.namespace)
    if run_as_root:
        # Raw executable name when running as root; otherwise resolve
        # it on PATH here.
        exec_name = self.exec_name
    else:
        exec_name = spawn.find_executable(self.exec_name)

    cmd = [exec_name, '--log-dir', log_dir, '--log-file', log_file]
    for conf_file in self.config_filenames:
        cmd.extend(['--config-file', conf_file])
    self.process = async_process.AsyncProcess(
        cmd, run_as_root=run_as_root, namespace=self.namespace)
    self.process.start(block=True)
    LOG.debug("Process started: %s", self.process_name)
def test_find_child_pids(self):
    """A newly spawned process shows up in the recursive child listing."""
    pid = os.getppid()
    children_before = utils.find_child_pids(pid)
    recursive_before = utils.find_child_pids(pid, recursive=True)
    for child in children_before:
        self.assertIn(child, recursive_before)

    process = async_process.AsyncProcess(['sleep', '100'])
    process.start()
    common_utils.wait_until_true(lambda: process._process.pid,
                                 sleep=0.5, timeout=10)
    self.addCleanup(self._stop_process, process)

    children_after = utils.find_child_pids(pid)
    recursive_after = utils.find_child_pids(pid, recursive=True)
    # Direct children are unchanged; the new pid appears recursively.
    self.assertEqual(children_before, children_after)
    for child in children_before + [process.pid]:
        self.assertIn(child, recursive_after)
def test_construtor_raises_exception_for_negative_respawn_interval(self):
    """A negative respawn_interval is rejected at construction time."""
    self.assertRaises(ValueError,
                      async_process.AsyncProcess,
                      ['fake'],
                      respawn_interval=-1)
def test__read_stderr_returns_none_on_error(self):
    """_read_stderr returns None when die_on_error is enabled."""
    proc = async_process.AsyncProcess(['fakecmd'], die_on_error=True)
    with mock.patch.object(proc, '_read', return_value='fakedata'):
        with mock.patch.object(proc, '_process'):
            self.assertIsNone(proc._read_stderr())
def setUp(self):
    """Provide a minimal AsyncProcess instance for each test."""
    super(TestAsyncProcess, self).setUp()
    self.proc = async_process.AsyncProcess(['fake'])
def _test__read_stderr_logging(self, enable):
    """Error logging from _read_stderr follows the log_output flag.

    :param enable: expected value of both log_output and the logger call.
    """
    proc = async_process.AsyncProcess(['fake'], log_output=enable)
    with mock.patch.object(proc, '_read', return_value='fakedata'):
        with mock.patch.object(proc, '_process'):
            proc._read_stderr()
            self.assertEqual(enable, self.log_mock.error.called)
def test_cmd(self):
    """The cmd property renders the command list as a space-joined string."""
    cases = [
        (['ls', '-l', 'file'], 'ls -l file'),
        (['fake'], 'fake'),
    ]
    for cmd, expected in cases:
        proc = async_process.AsyncProcess(cmd)
        self.assertEqual(expected, proc.cmd)
def start(self):
    """Launch the executable as root inside the configured namespace."""
    executable = spawn.find_executable(self.exec_name)
    self.process = async_process.AsyncProcess(
        [executable], run_as_root=True, namespace=self.namespace)
    self.process.start()