def code_to_run():
    import debuggee
    import sys
    import subprocess
    import os

    debuggee.setup()
    if sys.platform == "win32":
        args = ["dir", "-c", "."]
    else:
        args = ["ls", "-c", "-la"]
    p = subprocess.Popen(
        args,
        shell=True,
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
        cwd=os.path.dirname(os.path.abspath(__file__)),
    )
    stdout, _stderr = p.communicate()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode("utf-8")
    if "code_to_run.py" not in stdout:
        raise AssertionError(
            'Did not find "code_to_run.py" when listing this dir with subprocess. Contents: %s'
            % (stdout,)
        )

def code_to_debug():
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    a = 1
    backchannel.send(a)  # @bp

def script1():
    import debuggee

    debuggee.setup()

    def do_something():
        print("do something")  # @bp

def script2():
    import debuggee
    import script1

    debuggee.setup()
    script1.do_something()  # @bp
    print("Done")

def code_to_debug():
    import debuggee
    import debugpy
    import sys
    import time
    from debuggee import backchannel, scratchpad

    debuggee.setup()
    _, host, port, wait_for_client, is_client_connected, stop_method = sys.argv
    port = int(port)
    debugpy.listen(address=(host, port))

    if wait_for_client:
        backchannel.send("wait_for_client")
        debugpy.wait_for_client()

    if is_client_connected:
        backchannel.send("is_client_connected")
        while not debugpy.is_client_connected():
            print("looping until is_client_connected()")
            time.sleep(0.1)

    if stop_method == "breakpoint":
        backchannel.send("breakpoint?")
        assert backchannel.receive() == "proceed"
        debugpy.breakpoint()
        print("break")  # @breakpoint
    else:
        scratchpad["paused"] = False
        backchannel.send("loop?")
        assert backchannel.receive() == "proceed"
        while not scratchpad["paused"]:
            print("looping until paused")
            time.sleep(0.1)

def code_to_debug():
    import debuggee

    debuggee.setup()
    import this  # @bp

    assert this

def code_to_debug():
    import sys
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.send(sys.executable)

def code_to_debug():
    import debuggee

    debuggee.setup()
    for i in range(0, 10):
        print(i)  # @bp
    ()  # @wait_for_output

def parent(q, a):
    from debuggee import backchannel

    debuggee.setup()

    print("spawning child")
    p = multiprocessing.Process(target=child, args=(q, a))
    p.start()
    print("child spawned")

    q.put("foo?")
    foo = a.get()
    assert isinstance(foo, Foo), repr(foo)

    q.put("child_pid?")
    what, child_pid = a.get()
    assert what == "child_pid"
    backchannel.send(child_pid)

    q.put("grandchild_pid?")
    what, grandchild_pid = a.get()
    assert what == "grandchild_pid"
    backchannel.send(grandchild_pid)

    assert backchannel.receive() == "continue"
    q.put("exit!")
    p.join()

def code_to_debug():
    import debuggee
    import debugpy

    debuggee.setup()
    debugpy.breakpoint()
    print()

def code_to_debug():
    import debuggee

    debuggee.setup()
    a = "\t".join(("Hello", "World"))
    print(a)
    ()  # @wait_for_output

def code_to_debug():
    import debuggee

    debuggee.setup()
    # fmt: off
    b = True
    while b:  # @bp1-expected
        pass  # @bp1-requested
        break

    print()  # @bp2-expected
    [  # @bp2-requested
        1, 2, 3,  # @bp3-expected
    ]  # @bp3-requested

    # Python 2.7 only.
    print()  # @bp4-expected
    print(
        1,  # @bp4-requested-1
        2, 3,  # @bp4-requested-2
        4, 5, 6)

def code_to_debug():
    import debuggee

    debuggee.setup()
    a = 1
    b = {"one": 1, 2: "two"}
    print(a, b)  # @bp

def code_to_debug():
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.receive()  # @ bp1
    print("ok")  # @ bp2

def code_to_debug():
    import debuggee
    import threading
    import time
    import sys

    debuggee.setup()

    stop = False

    def worker(tid, offset):
        i = 0
        global stop
        while not stop:
            time.sleep(0.01)
            i += 1

    threads = []
    if sys.argv[1] != "1":
        for i in [111, 222]:
            thread = threading.Thread(target=worker, args=(i, len(threads)))
            threads.append(thread)
            thread.start()

    print("check here")  # @bp
    stop = True

def code_to_debug():
    import debuggee
    import debugpy

    debuggee.setup()

    def func(expected_tracing):
        print(1)  # @inner1

        # Test nested change/restore. Going from False to True only works entirely
        # correctly on Python 3.6+; on earlier versions, if tracing wasn't enabled
        # when the function is entered, re-enabling it later will not cause the
        # breakpoints in this function to light up. However, it will allow hitting
        # breakpoints in functions called from here.
        def inner2():
            print(2)  # @inner2

        debugpy.trace_this_thread(not expected_tracing)
        inner2()
        debugpy.trace_this_thread(expected_tracing)

        print(3)  # @inner3

    debugpy.trace_this_thread(False)
    print(0)  # @outer1
    func(False)

    debugpy.trace_this_thread(True)
    print(0)  # @outer2
    func(True)

def code_to_debug():
    import sys
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.send(sys.stdin == sys.__stdin__)

def code_to_debug():
    import os
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.send(dict(os.environ))

def code_to_debug():
    import debuggee

    debuggee.setup()
    a = 1
    b = {"one": 1, 2: "two"}
    c = 3
    print([a, b, c])  # @bp

def code_to_debug():
    import debuggee
    import sys

    debuggee.setup()
    exit_code = eval(sys.argv[1])
    print("sys.exit(%r)" % (exit_code,))
    sys.exit(exit_code)

def code_to_debug():
    import sys
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.send([sys.flags.optimize, sys.flags.dont_write_bytecode])

def code_to_debug():
    import os
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.send(os.getenv("DEBUGPY_CUSTOM_PYTHON"))

def test_code():
    import debuggee

    debuggee.setup()
    from module1 import do_something

    do_something()

def code_to_debug():
    import sys
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.send([sys.executable, sys.flags.optimize, sys.flags.verbose])

def code_to_debug():
    import os
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.send(os.path.abspath(__file__))
    print("done")  # @bp

def code_to_debug():
    import os
    import debuggee
    from debuggee import backchannel

    debuggee.setup()
    backchannel.send(os.getenv("DEBUGPY_SUDO", "0"))

def code_to_debug():
    import debuggee

    debuggee.setup()
    for i in [111, 222, 333, 444]:
        print(i)
    ()  # @wait_for_output

def code_to_debug():
    """
    The idea here is that a secondary thread does the processing of instructions,
    so, when all threads are stopped, doing an evaluation for:

        processor.process('xxx')

    would be locked until secondary threads start running.
    See: https://github.com/microsoft/debugpy/issues/157
    """
    import debuggee
    import threading
    from debugpy.common.compat import queue

    debuggee.setup()

    class EchoThread(threading.Thread):
        def __init__(self, queue):
            threading.Thread.__init__(self)
            self._queue = queue

        def run(self):
            while True:
                obj = self._queue.get()
                if obj == "finish":
                    break

                print("processed", obj.value)
                obj.event.set()

    class NotificationObject(object):
        def __init__(self, value):
            self.value = value
            self.event = threading.Event()

    class Processor(object):
        def __init__(self, queue):
            self._queue = queue

        def process(self, i):
            obj = NotificationObject(i)
            self._queue.put(obj)
            assert obj.event.wait()

        def finish(self):
            self._queue.put("finish")

    if __name__ == "__main__":
        q = queue.Queue()
        echo_thread = EchoThread(q)
        processor = Processor(q)
        echo_thread.start()

        processor.process(1)
        processor.process(2)  # @bp
        processor.process(3)
        processor.finish()

def code_to_debug():
    import debuggee
    import debugpy
    from debuggee import backchannel

    debuggee.setup()
    a = 1
    debugpy.breakpoint()
    backchannel.send(a)

def code_to_debug():
    import debuggee
    import sys

    debuggee.setup()
    filename = sys.argv[1]  # @bp
    # Disconnect happens here; subsequent lines should not run.
    with open(filename, "w") as f:
        f.write("failed")