def execute(self, server): super(PythonTest, self).execute(server) # Track the same process metrics as part of another test. sampler.register_process(server.process.pid, self.id, server.name) new_globals = dict(locals(), test_run_current_test=self, **server.__dict__) with open(self.name) as f: code = compile(f.read(), self.name, 'exec') exec(code, new_globals) # crash was detected (possibly on non-default server) if server.current_test.is_crash_reported: raise TestExecutionError
def run_server(execs, cwd, server, logfile, retval, test_id):
    """Spawn a server process, wait for it to finish and collect output.

    :param execs: argv list for the server process.
    :param cwd: working directory for the child process.
    :param server: server object; ``server.process`` is set for the
        duration of the run and reset to None afterwards.
    :param logfile: path that receives the child's stderr (appended).
    :param retval: dict-like out-parameter; ``retval['returncode']`` is
        set to the child's exit status (dict is used because this is
        typically run in a separate thread/greenlet).
    :param test_id: id used to attribute process metrics to a test.
    """
    os.putenv("LISTEN", server.iproto)
    server.process = Popen(execs, stdout=PIPE, stderr=PIPE, cwd=cwd)
    sampler.register_process(server.process.pid, test_id, server.name)
    test_timeout = Options().args.test_timeout
    # Watchdog: kill the child if it runs longer than --test-timeout.
    timer = Timer(test_timeout, timeout_handler, (server.process,
                                                  test_timeout))
    timer.start()
    try:
        stdout, stderr = server.process.communicate()
    finally:
        # Always cancel the watchdog, even when communicate() is
        # interrupted (e.g. by KeyboardInterrupt); otherwise the timer
        # thread would fire later and kill an unrelated process.
        timer.cancel()
    sys.stdout.write_bytes(stdout)
    with open(logfile, 'ab') as f:
        f.write(stderr)
    retval['returncode'] = server.process.wait()
    server.process = None
def execute(self, server):
    """Run this test as a standalone child process of *server*.

    Marks the test as the server's current test, launches the prepared
    command with stderr folded into stdout, registers the child in the
    sampler, and forwards the combined output to our stdout.
    """
    server.current_test = self
    cmd = server.prepare_args()
    child = Popen(cmd, cwd=server.vardir, stdout=PIPE, stderr=STDOUT)
    sampler.register_process(child.pid, self.id, server.name)
    output, _ = child.communicate()
    sys.stdout.write_bytes(output)
def start(self, silent=True, wait=True, wait_load=True, rais=True, args=[],
          **kwargs):
    """Start the tarantool server process.

    :param silent: suppress the "already started" message.
    :param wait: block until the server reports it is started.
    :param wait_load: passed through to wait_until_started().
    :param rais: re-raise TarantoolStartError on startup failure
        (used for non-default servers).
    :param args: extra command-line arguments, merged by prepare_args().

    NOTE(review): the mutable default ``args=[]`` is benign here only
    because ``args`` is immediately rebound and never mutated — keep it
    that way.
    """
    if self._start_against_running:
        return
    if self.status == 'started':
        if not silent:
            color_stdout('The server is already started.\n', schema='lerror')
        return
    args = self.prepare_args(args)
    self.pidfile = '%s.pid' % self.name
    self.logfile = '%s.log' % self.name
    path = self.script_dst if self.script else \
        os.path.basename(self.binary)
    color_log('DEBUG: [Instance {}] Starting the server...\n'.format(
        self.name), schema='info')
    color_log(' | ' + path + '\n', schema='path')
    color_log(prefix_each_line(' | ', self.version()) + '\n',
              schema='version')
    # Connection endpoints are passed to the child via environment.
    os.putenv("LISTEN", self.iproto.uri)
    os.putenv("ADMIN", self.admin.uri)
    if self.rpl_master:
        os.putenv("MASTER", self.rpl_master.iproto.uri)
    self.logfile_pos = self.logfile
    # This is strange, but tarantooctl leans on the PWD
    # environment variable, not a real current working
    # directory, when it performs search for the
    # .tarantoolctl configuration file.
    os.environ['PWD'] = self.vardir
    # redirect stdout from tarantoolctl and tarantool
    os.putenv("TEST_WORKDIR", self.vardir)
    self.process = subprocess.Popen(args,
                                    cwd=self.vardir,
                                    stdout=self.log_des,
                                    stderr=self.log_des)
    # The descriptor is handed to the child; drop our reference.
    del(self.log_des)
    # Restore the actual PWD value.
    os.environ['PWD'] = os.getcwd()
    # Track non-default server metrics as part of current
    # test.
    if self.current_test:
        sampler.register_process(self.process.pid, self.current_test.id,
                                 self.name)
    # gh-19 crash detection
    self.crash_detector = TestRunGreenlet(self.crash_detect)
    self.crash_detector.info = "Crash detector: %s" % self.process
    self.crash_detector.start()
    if wait:
        try:
            self.wait_until_started(wait_load)
        except TarantoolStartError:
            # Python tests expect we raise an exception when non-default
            # server fails
            if self.crash_expected:
                raise
            if not (self.current_test and
                    self.current_test.is_crash_reported):
                if self.current_test:
                    self.current_test.is_crash_reported = True
                color_stdout('\n[Instance "{0.name}"] Tarantool server '
                             'failed to start\n'.format(self),
                             schema='error')
                self.print_log(15)
            # Raise exception when caller ask for it (e.g. in case of
            # non-default servers)
            if rais:
                raise
            # if the server fails before any test started, we should inform
            # a caller by the exception
            if not self.current_test:
                raise
            self.kill_current_test()
    # Reconnect the admin console to the freshly started instance.
    port = self.admin.port
    self.admin.disconnect()
    self.admin = CON_SWITCH[self.tests_type]('localhost', port)
    self.status = 'started'
    # Verify that the schema actually was not upgraded.
    if self.disable_schema_upgrade:
        expected_version = extract_schema_from_snapshot(self.snapshot_path)
        actual_version = tuple(yaml.safe_load(self.admin.execute(
            'box.space._schema:get{"version"}'))[0][1:])
        if expected_version != actual_version:
            color_stdout('Schema version check fails: expected '
                         '{}, got {}\n'.format(expected_version,
                                               actual_version),
                         schema='error')
            raise TarantoolStartError(self.name)
def execute(self, server): super(LuaTest, self).execute(server) # Track the same process metrics as part of another test. sampler.register_process(server.process.pid, self.id, server.name) cls_name = server.__class__.__name__.lower() if 'gdb' in cls_name or 'lldb' in cls_name or 'strace' in cls_name: # don't propagate gdb/lldb/strace mixin to non-default servers, # it doesn't work properly for now # TODO: strace isn't interactive, so it's easy to make it works for # non-default server create_server = TarantoolServer else: # propagate valgrind mixin to non-default servers create_server = server.__class__ ts = TestState( self.suite_ini, server, create_server, self.run_params ) self.inspector.set_parser(ts) lua = TestRunGreenlet(self.exec_loop, ts) self.current_test_greenlet = lua lua.start() try: save_join(lua, timeout=Options().args.test_timeout) except KeyboardInterrupt: # prevent tests greenlet from writing to the real stdout lua.kill() raise except TarantoolStartError as e: color_stdout('\n[Instance "{0}"] Failed to start tarantool ' 'instance "{1}"\n'.format(server.name, e.name), schema='error') server.kill_current_test() finally: # Stop any servers created by the test, except the # default one. # # The stop_nondefault() method calls # TarantoolServer.stop() under the hood. It sends # SIGTERM (if another signal is not passed), waits # for 5 seconds for a process termination and, if # nothing occurs, sends SIGKILL and continue waiting # for the termination. # # Look, 5 seconds (plus some delay for waiting) for # each instance if it does not follow SIGTERM[^1]. # It is unacceptable, because the difference between # --test-timeout (110 seconds by default) and # --no-output-timeout (120 seconds by default) may # be lower than (5 seconds + delay) * (non-default # instance count). # # That's why we send SIGKILL for residual instances # right away. 
# # Hitting --no-output-timeout is undesirable, because # in the current implementation it is the show-stopper # for a testing: test-run doesn't restart fragile # tests, doesn't continue processing of other tests, # doesn't save artifacts at the end of the testing. # # [^1]: See gh-4127 and gh-5573 for problems of this # kind. ts.stop_nondefault(signal=signal.SIGKILL)