def __init__(self, suspended_frames_manager, py_db):
    """Initialize the per-suspension tracking state (frames, threads, variables)."""
    self._suspended_frames_manager = suspended_frames_manager
    self.py_db = py_db

    # frame id -> frame object for every tracked frame.
    self._frame_id_to_frame = {}

    # Note that a given frame may appear in multiple threads when we have
    # custom frames added, but as those are coroutines, this map will point
    # to the actual main thread (which is the one that needs to be
    # suspended for us to get the variables).
    self._frame_id_to_main_thread_id = {}

    # suspended thread id -> list(frame ids) -- note that frame ids are
    # kept in order (the first one is the suspended frame).
    self._thread_id_to_frame_ids = {}

    # Lines where it's suspended (needed for exceptions, where the frame
    # lineno is not correct).
    self._frame_id_to_lineno = {}

    # The main suspended thread (if this is a coroutine this isn't the id
    # of the coroutine thread, it's the id of the actual suspended thread).
    self._main_thread_id = None

    # Helper to know if it was already untracked.
    self._untracked = False

    # Guards the state above -- we need to be thread-safe!
    self._lock = threading.Lock()

    # variable reference -> variable object.
    self._variable_reference_to_variable = {}
def __init__(self, func_name, condition, expression, suspend_policy="NONE", hit_condition=None, is_logpoint=False):
    """Store function-breakpoint settings and initialize hit-count bookkeeping."""
    self.func_name = func_name
    self.condition = condition
    self.expression = expression
    self.suspend_policy = suspend_policy
    self.hit_condition = hit_condition
    self.is_logpoint = is_logpoint

    # Hit counter plus its own lock (breakpoints may be hit concurrently
    # from multiple threads).
    self._hit_count = 0
    self._hit_condition_lock = threading.Lock()
def __init__(self, py_db):
    """Initialize the daemon thread: wake-up event, handle list and its lock."""
    PyDBDaemonThread.__init__(self, py_db)
    self._event = threading.Event()
    self._handles = []

    # We could probably make things valid without this lock so that it's
    # possible to add handles while processing, but the implementation would
    # also be harder to follow, so, for now, we're either processing or
    # adding handles, not both at the same time.
    self._lock = threading.Lock()
def __init__(self):
    """Initialize exec-queue/console state and matplotlib patching helpers."""
    self.interruptable = False
    self.exec_queue = _queue.Queue(0)
    self.buffer = None
    self.mpl_modules_for_patching = {}
    self.init_mpl_modules_for_patching()

    # `vars_lock` synchronizes commands which modify and read variable
    # values. It's needed because `getFrame()` and `add_exec()` are called
    # from different threads, which can lead to incorrect variables
    # presentation in the UI.
    self.vars_lock = threading.Lock()
def __init__(self, line, condition, func_name, expression, suspend_policy="NONE", hit_condition=None, is_logpoint=False):
    """Store line-breakpoint settings and initialize hit-count bookkeeping."""
    self.line = line
    self.condition = condition
    self.func_name = func_name
    self.expression = expression
    self.suspend_policy = suspend_policy
    self.hit_condition = hit_condition
    self.is_logpoint = is_logpoint

    # Hit counter plus its own lock (breakpoints may be hit concurrently
    # from multiple threads).
    self._hit_count = 0
    self._hit_condition_lock = threading.Lock()

    # Needed for frame evaluation: code objects whose bytecode was modified
    # by this breakpoint.
    self.code_objects = set()
def ForkSafeLock(rlock=False):
    """Create and return a new lock.

    :param rlock: when True, a reentrant lock (``threading.RLock``) is
        returned; otherwise a plain (non-reentrant) ``threading.Lock``.
    """
    return threading.RLock() if rlock else threading.Lock()
def __init__(self):
    """Initialize the registry: lock, module map and sequential id factory."""
    self._lock = threading.Lock()
    self._modules = {}
    # Zero-argument callable yielding 0, 1, 2, ... on successive calls.
    self._next_id = partial(next, itertools.count(0))
Gets the topmost frame for the given thread. Note that
    it may be None and callers should remove the reference to the frame as soon as possible
    to avoid disturbing user code.
    '''
    # NOTE(review): fragment -- the enclosing method's `def` line and the
    # docstring's opening quotes are before this view; only the tail is visible.
    # sys._current_frames(): dictionary with thread id -> topmost frame
    current_frames = _current_frames()
    return current_frames.get(thread.ident)

    def __str__(self):
        # Compact, human-readable dump of this thread's stepping state
        # (useful for debug logging).
        return 'State:%s Stop:%s Cmd: %s Kill:%s' % (
            self.pydev_state, self.pydev_step_stop, self.pydev_step_cmd, self.pydev_notify_kill)


from _pydev_imps._pydev_saved_modules import threading

# Guards the lazy creation of `thread.additional_info` in
# set_additional_thread_info below.
_set_additional_thread_info_lock = threading.Lock()


def set_additional_thread_info(thread):
    # Ensure `thread.additional_info` exists (EAFP: the fast path is that
    # it's already set and non-None).
    try:
        additional_info = thread.additional_info
        if additional_info is None:
            raise AttributeError()
    except:
        with _set_additional_thread_info_lock:
            # If it's not there, set it within a lock to avoid any racing
            # conditions.
            additional_info = getattr(thread, 'additional_info', None)
            if additional_info is None:
                additional_info = PyDBAdditionalThreadInfo()
                thread.additional_info = additional_info
# Note: those are now inlined on cython.
CMD_STEP_INTO = 107
CMD_STEP_INTO_MY_CODE = 144
CMD_STEP_RETURN = 109
CMD_STEP_RETURN_MY_CODE = 160
# ENDIF

# Cache where we should keep that we completely skipped entering some context.
# It needs to be invalidated when:
# - Breakpoints are changed
# It can be used when running regularly (without step over/step in/step return)
global_cache_skips = {}
global_cache_frame_skips = {}

_global_notify_skipped_step_in = False
_global_notify_skipped_step_in_lock = threading.Lock()


def notify_skipped_step_in_because_of_filters(py_db, frame):
    """Notify the debugger, at most once globally, that a step-in was skipped due to filters."""
    global _global_notify_skipped_step_in
    with _global_notify_skipped_step_in_lock:
        # Re-check the flag with the lock held (callers should actually have
        # checked before without the lock in place due to performance).
        if not _global_notify_skipped_step_in:
            _global_notify_skipped_step_in = True
            py_db.notify_skipped_step_in_because_of_filters(frame)
# IFDEF CYTHON
def __init__(self, py_db):
    """Initialize with no thread yet, a lock, and a weak reference to py_db."""
    self._thread = None
    self._lock = threading.Lock()
    # Weak reference -- presumably so this helper doesn't keep the debugger
    # alive on its own; confirm against the owner of py_db.
    self._py_db = weakref.ref(py_db)