def _freeze_support(): # we want to catch the two processes that are spawned by the # multiprocessing code: # - the semaphore tracker, which cleans up named semaphores in # the spawn multiprocessing mode # - the fork server, which keeps track of worker processes in # forkserver mode. # both of these processes are started by spawning a new copy of the # running executable, passing it the flags from # _args_from_interpreter_flags and then "-c" and an import statement. # look for those flags and the import statement, then exec() the # code ourselves. if len(sys.argv) >= 2 and \ set(sys.argv[1:-2]) == set(_args_from_interpreter_flags()) and \ sys.argv[-2] == '-c' and \ (sys.argv[-1].startswith('from multiprocessing.semaphore_tracker import main') or \ sys.argv[-1].startswith('from multiprocessing.forkserver import main')): exec(sys.argv[-1]) sys.exit() if spawn.is_forking(sys.argv): kwds = {} for arg in sys.argv[2:]: name, value = arg.split('=') if value == 'None': kwds[name] = None else: kwds[name] = int(value) spawn.spawn_main(**kwds) sys.exit()
def _use_sqlite_cli(self, env):
    """Pipes the test case into the "sqlite3" executable.

    The method _has_sqlite_cli MUST be called before this method
    is called.

    PARAMETERS:
    env -- mapping; represents shell environment variables. Primarily,
           this allows modifications to PATH to check the current
           directory first.

    RETURNS:
    (test, expected, result), where

    test     -- str; test input that is piped into sqlite3
    expected -- str; the expected output, for display purposes
    result   -- str; the actual output from piping input into sqlite3
    """
    # Split the recorded session into the SQL to feed to sqlite3 ("test")
    # and the expected output lines ("expected").
    test = []
    expected = []
    for line in self._setup + self._code + self._teardown:
        if isinstance(line, interpreter.CodeAnswer):
            expected.extend(line.output)
        elif line.startswith(self.PS1):
            # Strip the primary prompt; the remainder is input.
            test.append(line[len(self.PS1):])
        elif line.startswith(self.PS2):
            # Strip the continuation prompt; the remainder is input.
            test.append(line[len(self.PS2):])
    test = '\n'.join(test)
    result, error = (None, None)
    process = None
    args = ['sqlite3']
    sqlite_shell = get_sqlite_shell()
    if sqlite_shell:
        if self.timeout is None:
            # No timeout requested: run the bundled sqlite shell in-process,
            # wiring stdin/stdout/stderr to StringIO buffers.
            (stdin, stdout, stderr) = (io.StringIO(test), io.StringIO(), io.StringIO())
            sqlite_shell.main(*args, stdin=stdin, stdout=stdout, stderr=stderr)
            result, error = (stdout.getvalue(), stderr.getvalue())
        else:
            # A timeout is needed, so we must run in a subprocess we can
            # kill: re-invoke the bundled shell under the current
            # interpreter (with its flags reproduced).
            args[:] = [sys.executable] + subprocess._args_from_interpreter_flags() + ["--", sqlite_shell.__file__] + args[1:]
    # result is still None when we did not (or could not) run in-process,
    # either because there is no bundled shell (use the real "sqlite3"
    # binary) or because a timeout forces a subprocess.
    if result is None:
        process = subprocess.Popen(args, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    if process:
        try:
            result, error = process.communicate(test, timeout=self.timeout)
        except subprocess.TimeoutExpired as e:
            # Kill the runaway process before reporting the timeout.
            process.kill()
            print('# Error: evaluation exceeded {} seconds.'.format(self.timeout))
            raise interpreter.ConsoleException(exceptions.Timeout(self.timeout))
    # Combine stderr and stdout so errors are surfaced alongside output.
    return test, '\n'.join(expected), (error + '\n' + result).strip()
async def spawn(worker_spec, **kwargs):
    """
    Spawn a new python based worker process that looks very close to the
    current one. For safety and compatibility reasons we do not use a
    fork-without-exec pattern; Instead the current python env is inspected
    and used as a reference for a new python execution. Arguments and
    context are serialized and passed to a bootloader function that turns
    them back into python types.

    :param worker_spec: specification object handed to ``WorkerProcess``.
    :param kwargs: extra keyword arguments forwarded to ``WorkerProcess``.
    :returns: the started ``WorkerProcess`` instance.
    """
    pycmd = sys.executable
    # Reproduce the current interpreter's flags so the child behaves alike.
    pyflags = subprocess._args_from_interpreter_flags()
    wp = WorkerProcess(worker_spec, pycmd, pyflags, **kwargs)
    # Fix: use lazy %-style logging args instead of eager "%" interpolation,
    # so the message is only formatted when the log level is enabled.
    logger.debug("Spawning new worker: %s", wp)
    await wp.start()
    logger.info("Spawned: %s", wp)
    return wp
def _get_interpreter_argv(): """Retrieve current Python interpreter's arguments. Returns empty tuple in case of frozen mode, uses built-in arguments reproduction function otherwise. Frozen mode is possible for the app has been packaged into a binary executable using py2exe. In this case the interpreter's arguments are already built-in into that executable. :seealso: https://github.com/cherrypy/cherrypy/issues/1526 Ref: https://pythonhosted.org/PyInstaller/runtime-information.html """ return ([] if getattr(sys, 'frozen', False) else subprocess._args_from_interpreter_flags())
def get_stack_trace(self, source=None, script=None, breakpoint=BREAKPOINT_FN,
                    cmds_after_breakpoint=None,
                    import_site=False):
    """
    Run 'python -c SOURCE' under gdb with a breakpoint.

    Support injecting commands after the breakpoint is reached

    Returns the stdout from gdb

    cmds_after_breakpoint: if provided, a list of strings: gdb commands
    """
    # Build the gdb command script: set a pending breakpoint, hide
    # addresses from the backtrace, and start the program.
    gdb_cmds = [
        'set breakpoint pending yes',
        'break %s' % breakpoint,
        'set print address off',
        'run',
    ]
    if (gdb_major_version, gdb_minor_version) >= (7, 4):
        # gdb >= 7.4 would otherwise print 'v@entry=...' annotations.
        gdb_cmds.append('set print entry-values no')
    gdb_cmds += cmds_after_breakpoint or ['backtrace']

    # Turn the script into gdb command-line arguments, then append the
    # python invocation being debugged.
    args = ['--eval-command=%s' % cmd for cmd in gdb_cmds]
    args += ['--args', sys.executable]
    args += subprocess._args_from_interpreter_flags()
    if not import_site:
        args.append('-S')
    if source:
        args += ['-c', source]
    elif script:
        args.append(script)

    out, err = run_gdb(*args, PYTHONHASHSEED=PYTHONHASHSEED)

    # Discard benign stderr noise; anything else fails the test.
    benign = (
        'Function "%s" not defined.' % breakpoint,
        'Do you need "set solib-search-path" or "set sysroot"?',
        'BFD: ',
        'warning: ')
    unexpected = [line for line in err.splitlines()
                  if line and not line.startswith(benign)]
    self.assertEqual(unexpected, [])
    return out
def restart() -> None:
    """Re-exec the current program in place, replacing this process.

    Rebuilds the interpreter invocation (flags + argv), cleans up file
    descriptors and child processes, then calls os.execv. Only returns
    (and hard-exits) if execv itself fails.
    """
    python_args = []
    if not getattr(sys, "frozen", False):
        # Not a frozen binary: reproduce the interpreter's flags so the
        # re-executed process behaves like the current one.
        python_args = subprocess._args_from_interpreter_flags()
    args = python_args + sys.argv
    path_prefix = "." + os.pathsep
    python_path = os.environ.get("PYTHONPATH", "")
    if sys.path[0] == "" and not python_path.startswith(path_prefix):
        # The script was run with the current directory on sys.path;
        # preserve that for the re-executed process via PYTHONPATH.
        os.environ["PYTHONPATH"] = path_prefix + python_path
    try:
        close_fds()
    except Exception:
        # Best effort only - log and continue with the restart.
        log.exception("Failed to FD_CLOEXEC all file descriptors")
    kill_children(SIGTERM, ensure_death=True)
    # Restore the original working directory so relative paths resolve
    # the same way they did at first startup.
    os.chdir(initial_dir)
    os.execv(sys.executable, [sys.executable] + args)
    # execv does not return on success; reaching here means it failed.
    log.fatal("Failed to restart - exiting")
    os._exit(1)
def get_stack_trace(self, source=None, script=None,
                    breakpoint=BREAKPOINT_FN,
                    cmds_after_breakpoint=None,
                    import_site=False):
    '''
    Run 'python -c SOURCE' under gdb with a breakpoint.

    Support injecting commands after the breakpoint is reached

    Returns the stdout from gdb

    cmds_after_breakpoint: if provided, a list of strings: gdb commands
    '''
    # "set breakpoint pending yes" avoids the interactive
    #   Make breakpoint pending on future shared library load? (y or [n])
    # prompt that appears when python is dynamically linked and the
    # breakpoint target lives in a not-yet-loaded shared library.
    # "set print address off" keeps program-counter addresses out of the
    # printed frames, since gdb does not guarantee their absence and the
    # tests match frame lines textually.
    gdb_script = [
        'set breakpoint pending yes',
        'break %s' % breakpoint,
        'set print address off',
        'run',
    ]
    # gdb >= 7.4 distinguishes a variable's value at function entry from
    # its current value ('v@entry=()'), which breaks the selftests'
    # expected output - disable that feature.
    if (gdb_major_version, gdb_minor_version) >= (7, 4):
        gdb_script.append('set print entry-values no')
    if cmds_after_breakpoint:
        gdb_script.extend(cmds_after_breakpoint)
    else:
        gdb_script.append('backtrace')

    # Translate the gdb script into command-line arguments, then append
    # the python command line being debugged.
    invocation = ['--eval-command=%s' % line for line in gdb_script]
    invocation += ["--args", sys.executable]
    invocation += subprocess._args_from_interpreter_flags()
    if not import_site:
        # -S suppresses the default 'import site'
        invocation.append("-S")
    if source:
        invocation += ["-c", source]
    elif script:
        invocation.append(script)

    # Invoke gdb, capturing stdout and stderr.
    out, err = run_gdb(*invocation, PYTHONHASHSEED=PYTHONHASHSEED)

    # Ignore some benign messages on stderr; anything left over is
    # treated as a test failure.
    benign_prefixes = (
        'Function "%s" not defined.' % breakpoint,
        'Do you need "set solib-search-path" or '
        '"set sysroot"?',
        # BFD: /usr/lib/debug/(...): unable to initialize decompress
        # status for section .debug_aranges
        'BFD: ',
        # ignore all warnings
        'warning: ',
    )
    leftovers = [line for line in err.splitlines()
                 if line and not line.startswith(benign_prefixes)]
    self.assertEqual(leftovers, [])
    return out
def get_stack_trace(self, source=None, script=None,
                    breakpoint=BREAKPOINT_FN,
                    cmds_after_breakpoint=None,
                    import_site=False):
    '''
    Run 'python -c SOURCE' under gdb with a breakpoint.

    Support injecting commands after the breakpoint is reached

    Returns the stdout from gdb

    cmds_after_breakpoint: if provided, a list of strings: gdb commands
    '''
    # Pending breakpoints avoid the interactive y/n prompt that shows up
    # when python is dynamically linked and the target symbol lives in a
    # shared library that is not loaded yet; "set print address off"
    # strips program-counter addresses so frame lines match textually.
    cmds = ['set breakpoint pending yes',
            'break %s' % breakpoint,
            'set print address off',
            'run']
    if (gdb_major_version, gdb_minor_version) >= (7, 4):
        # gdb >= 7.4 would print 'v@entry=...' annotations that the
        # selftests do not expect.
        cmds.append('set print entry-values no')
    if cmds_after_breakpoint:
        if CET_PROTECTION:
            # bpo-32962: with -mcet -fcf-protection, function arguments
            # are unusable before the first instruction of the entry
            # point executes; one 'next' makes that first step.
            cmds.append('next')
        cmds.extend(cmds_after_breakpoint)
    else:
        cmds.append('backtrace')

    # Build the gdb invocation from the command script plus the python
    # command line being debugged.
    args = ['--eval-command=%s' % cmd for cmd in cmds]
    args += ["--args", sys.executable]
    args += subprocess._args_from_interpreter_flags()
    if not import_site:
        # -S suppresses the default 'import site'
        args.append("-S")
    if source:
        args += ["-c", source]
    elif script:
        args.append(script)

    out, err = run_gdb(*args, PYTHONHASHSEED=PYTHONHASHSEED)
    # Mirror gdb's stderr for debugging the test run itself.
    for line in err.splitlines():
        print(line, file=sys.stderr)

    # bpo-34007: optimised shared libraries may lack the Program Counter,
    # preventing gdb from walking the frames; skip rather than fail.
    if "PC not saved" in err:
        raise unittest.SkipTest("gdb cannot walk the frame object"
                                " because the Program Counter is"
                                " not present")

    # bpo-40019: skip if gdb could not read debug information because the
    # python binary is optimized.
    for pattern in (
        '(frame information optimized out)',
        'Unable to read information on python frame',
    ):
        if pattern in out:
            raise unittest.SkipTest(f"{pattern!r} found in gdb output")

    return out
def args_from_interpreter_flags():
    """Return command-line arguments that reproduce the current
    settings in sys.flags."""
    # Delegate to the (private) subprocess helper that already knows how
    # to map sys.flags back onto interpreter options.
    from subprocess import _args_from_interpreter_flags
    return _args_from_interpreter_flags()
def get_stack_trace(self, source=None, script=None,
                    breakpoint=BREAKPOINT_FN,
                    cmds_after_breakpoint=None,
                    import_site=False):
    '''
    Run 'python -c SOURCE' under gdb with a breakpoint.

    Support injecting commands after the breakpoint is reached

    Returns the stdout from gdb

    cmds_after_breakpoint: if provided, a list of strings: gdb commands
    '''
    # Assemble the gdb command script step by step.  A pending breakpoint
    # sidesteps the interactive prompt gdb raises when python is
    # dynamically linked and the symbol is in an unloaded shared library.
    commands = ['set breakpoint pending yes']
    commands.append('break %s' % breakpoint)
    # The tests assume backtrace frames carry no program counter, which
    # gdb does not guarantee - switch address printing off.
    commands.append('set print address off')
    commands.append('run')
    if (gdb_major_version, gdb_minor_version) >= (7, 4):
        # Avoid 'v@entry=()' entry-value annotations (gdb >= 7.4) that
        # would break the selftests' expected output.
        commands.append('set print entry-values no')
    commands.extend(cmds_after_breakpoint if cmds_after_breakpoint
                    else ['backtrace'])

    # Convert the script into --eval-command options, followed by the
    # python command line to debug.
    args = ['--eval-command=%s' % cmd for cmd in commands]
    args.extend(["--args", sys.executable])
    args.extend(subprocess._args_from_interpreter_flags())
    if not import_site:
        # -S suppresses the default 'import site'
        args.append("-S")
    if source:
        args.extend(["-c", source])
    elif script:
        args.append(script)

    # Run gdb, capturing stdout and stderr.
    out, err = run_gdb(*args, PYTHONHASHSEED=PYTHONHASHSEED)

    # Some stderr chatter is expected and harmless; filter it out and
    # fail on whatever remains.
    ignore_patterns = (
        'Function "%s" not defined.' % breakpoint,
        'Do you need "set solib-search-path" or '
        '"set sysroot"?',
        # BFD: /usr/lib/debug/(...): unable to initialize decompress
        # status for section .debug_aranges
        'BFD: ',
        # ignore all warnings
        'warning: ',
    )
    unexpected_errlines = []
    for line in err.splitlines():
        if line and not line.startswith(ignore_patterns):
            unexpected_errlines.append(line)
    self.assertEqual(unexpected_errlines, [])
    return out
def python_cmd():
    """Return the argv prefix for re-running the current interpreter,
    including its flags and optimization options."""
    runtime_flags = subprocess._args_from_interpreter_flags()
    optim_flags = subprocess._optim_args_from_interpreter_flags()
    return [sys.executable, *runtime_flags, *optim_flags]
def update_event(self, inp=-1):
    # Publish the current interpreter's reproduced flag list on output 0.
    flags = subprocess._args_from_interpreter_flags()
    self.set_output_val(0, flags)
def _is_executing_inline_python_code(): return \ len(sys.argv) >= 2 and \ set(sys.argv[1:-2]) == set(_args_from_interpreter_flags()) and sys.argv[-2] == '-c'