Example #1
def _is_django_render_call(frame, debug=False):
    try:
        name = frame.f_code.co_name
        if name != 'render':
            return False

        if 'self' not in frame.f_locals:
            return False

        cls = frame.f_locals['self'].__class__

        inherits_node = _inherits(cls, 'Node')

        if not inherits_node:
            return False

        clsname = cls.__name__
        if IS_DJANGO19:
            # In Django 1.9 we need to save a flag indicating that there is an included template.
            if clsname == 'IncludeNode':
                if 'context' in frame.f_locals:
                    context = frame.f_locals['context']
                    context._has_included_template = True

        return clsname != 'TextNode' and clsname != 'NodeList'
    except:
        pydev_log.exception()
        return False
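The `_inherits` helper used above is not shown in this example. A minimal sketch of a check-by-class-name helper with that behavior (the real pydevd implementation may differ) is:

import inspect


def _inherits(cls, *names):
    # Compare by class *name* instead of importing Django types directly,
    # so the check works without importing Django in the debugger itself.
    if cls.__name__ in names:
        return True
    return any(base.__name__ in names for base in inspect.getmro(cls))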
Example #2
def frame_vars_to_xml(frame_f_locals, hidden_ns=None):
    """ dumps frame variables to XML
    <var name="var_name" scope="local" type="type" value="value"/>
    """
    xml = ""

    keys = dict_keys(frame_f_locals)
    if hasattr(keys, 'sort'):
        keys.sort()  # Jython 2.1 does not have the sorted() builtin, so sort in place when possible.
    else:
        keys = sorted(keys)  # Python 3 dict views do not have .sort().

    return_values_xml = ''

    for k in keys:
        try:
            v = frame_f_locals[k]
            eval_full_val = should_evaluate_full_value(v)

            if k == RETURN_VALUES_DICT:
                for name, val in dict_iter_items(v):
                    return_values_xml += var_to_xml(val, name, additional_in_xml=' isRetVal="True"')

            else:
                if hidden_ns is not None and k in hidden_ns:
                    xml += var_to_xml(v, str(k), additional_in_xml=' isIPythonHidden="True"',
                                      evaluate_full_value=eval_full_val)
                else:
                    xml += var_to_xml(v, str(k), evaluate_full_value=eval_full_val)
        except Exception:
            pydev_log.exception("Unexpected error, recovered safely.")

    # Show return values as the first entry.
    return return_values_xml + xml
Example #3
    def make_thread_stack_str(self, frame, frame_id_to_lineno=None):
        '''
        :param frame_id_to_lineno:
            If available, the line number for the frame will be gotten from this dict,
            otherwise frame.f_lineno will be used (needed for unhandled exceptions as
            the place where we report may be different from the place where it's raised).
        '''
        if frame_id_to_lineno is None:
            frame_id_to_lineno = {}
        make_valid_xml_value = pydevd_xml.make_valid_xml_value
        cmd_text_list = []
        append = cmd_text_list.append

        curr_frame = frame
        frame = None  # Clear frame reference
        try:
            py_db = get_global_debugger()
            for frame_id, frame, method_name, _original_filename, filename_in_utf8, lineno in self._iter_visible_frames_info(
                    py_db, curr_frame, frame_id_to_lineno
                ):

                # print("file is ", filename_in_utf8)
                # print("line is ", lineno)

                # Note: variables are all gotten 'on-demand'.
                append('<frame id="%s" name="%s" ' % (frame_id , make_valid_xml_value(method_name)))
                append('file="%s" line="%s">' % (quote(make_valid_xml_value(filename_in_utf8), '/>_= \t'), lineno))
                append("</frame>")
        except:
            pydev_log.exception()

        curr_frame = None  # Clear frame reference
        return ''.join(cmd_text_list)
Example #4
    def _get_type(self, o, type_object, type_name):
        resolver = self._type_to_resolver_cache.get(type_object)
        if resolver is not None:
            return type_object, type_name, resolver

        if not self._initialized:
            self._initialize()

        try:
            for resolver in self._resolve_providers:
                if resolver.can_provide(type_object, type_name):
                    # Cache it
                    self._type_to_resolver_cache[type_object] = resolver
                    return type_object, type_name, resolver

            for t in self._default_type_map:
                if isinstance(o, t[0]):
                    # Cache it
                    resolver = t[1]
                    self._type_to_resolver_cache[type_object] = resolver
                    return (type_object, type_name, resolver)
        except:
            pydev_log.exception()

        # No match return default (and cache it).
        resolver = pydevd_resolver.defaultResolver
        self._type_to_resolver_cache[type_object] = resolver
        return type_object, type_name, resolver
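The resolve providers iterated above only need a `can_provide(type_object, type_name)` method plus the resolver interface the debugger uses later (`resolve`/`get_dictionary`). A hypothetical provider following that contract (the class and its behavior are illustrative, not part of pydevd):

class DequeResolveProvider(object):
    # Hypothetical provider: claims collections.deque objects by type name.

    def can_provide(self, type_object, type_name):
        return type_name == 'deque'

    def resolve(self, obj, attribute):
        return obj[int(attribute)]

    def get_dictionary(self, obj):
        return dict((str(i), v) for i, v in enumerate(obj))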
Example #5
    def _schedule_callback(prev, next):
        '''
        Called when a context is stopped or a new context is made runnable.
        '''
        try:
            if not prev and not next:
                return

            if next:
                register_tasklet_info(next)

                # Ok, making next runnable: set the tracing facility in it.
                debugger = get_global_debugger()
                if debugger is not None and next.frame:
                    if hasattr(next.frame, 'f_trace'):
                        next.frame.f_trace = debugger.get_thread_local_trace_func()
                debugger = None

            if prev:
                register_tasklet_info(prev)

            try:
                for tasklet_ref, tasklet_info in dict_items(_weak_tasklet_registered_to_info):  # Make sure it's a copy!
                    tasklet = tasklet_ref()
                    if tasklet is None or not tasklet.alive:
                        # Garbage-collected already!
                        try:
                            del _weak_tasklet_registered_to_info[tasklet_ref]
                        except KeyError:
                            pass
                        if tasklet_info.frame_id is not None:
                            remove_custom_frame(tasklet_info.frame_id)
                    else:
                        if tasklet.paused or tasklet.blocked or tasklet.scheduled:
                            if tasklet.frame and tasklet.frame.f_back:
                                f_back = tasklet.frame.f_back
                                abs_real_path_and_base = get_abs_path_real_path_and_base_from_frame(f_back)
                                debugger = get_global_debugger()  # `debugger` may be None/unset at this point.
                                if debugger is not None and debugger.get_file_type(abs_real_path_and_base) is None:
                                    if tasklet_info.frame_id is None:
                                        tasklet_info.frame_id = add_custom_frame(f_back, tasklet_info.tasklet_name, tasklet.thread_id)
                                    else:
                                        update_custom_frame(tasklet_info.frame_id, f_back, tasklet.thread_id)

                        elif tasklet.is_current:
                            if tasklet_info.frame_id is not None:
                                # Remove info about stackless suspended when it starts to run.
                                remove_custom_frame(tasklet_info.frame_id)
                                tasklet_info.frame_id = None

            finally:
                tasklet = None
                tasklet_info = None
                f_back = None

        except:
            pydev_log.exception()

        if _application_set_schedule_callback is not None:
            return _application_set_schedule_callback(prev, next)
Example #6
def _is_jinja2_render_call(frame):
    try:
        name = frame.f_code.co_name
        if "__jinja_template__" in frame.f_globals and name in ("root", "loop", "macro") or name.startswith("block_"):
            return True
        return False
    except:
        pydev_log.exception()
        return False
Example #7
 def create_signature(self, frame, filename, with_args=True):
     try:
         _, modulename, funcname = self.file_module_function_of(frame)
         signature = Signature(filename, funcname)
         if with_args:
             signature.set_args(frame, recursive=True)
         return signature
     except:
         pydev_log.exception()
Example #8
    def getCompletions(self, text, act_tok):
        try:
            from _pydev_bundle._pydev_completer import Completer

            completer = Completer(self.namespace, None)
            return completer.complete(act_tok)
        except:
            pydev_log.exception()
            return []
Example #9
def _is_django_context_get_call(frame):
    try:
        if 'self' not in frame.f_locals:
            return False

        cls = frame.f_locals['self'].__class__

        return _inherits(cls, 'BaseContext')
    except:
        pydev_log.exception()
        return False
Example #10
 def get_func_name(self, frame):
     code_obj = frame.f_code
     func_name = code_obj.co_name
     try:
         cls_name = get_clsname_for_code(code_obj, frame)
         if cls_name is not None:
             return "%s.%s" % (cls_name, func_name)
         else:
             return func_name
     except:
         pydev_log.exception()
         return func_name
Example #11
 def get_extension_classes(self, extension_type):
     self._ensure_loaded()
     if extension_type in self.type_to_instance:
         return self.type_to_instance[extension_type]
     handlers = self.type_to_instance.setdefault(extension_type, [])
     for attr_name, attr in self._iter_attr():
         if isinstance(attr, type) and issubclass(attr, extension_type) and attr is not extension_type:
             try:
                 handlers.append(attr())
             except:
                 pydev_log.exception('Unable to load extension class: %s', attr_name)
     return handlers
Example #12
    def remove_python_exception_breakpoint(self, py_db, exception):
        try:
            cp = py_db.break_on_uncaught_exceptions.copy()
            cp.pop(exception, None)
            py_db.break_on_uncaught_exceptions = cp

            cp = py_db.break_on_caught_exceptions.copy()
            cp.pop(exception, None)
            py_db.break_on_caught_exceptions = cp
        except:
            pydev_log.exception("Error while removing exception %s", sys.exc_info()[0])

        py_db.on_breakpoints_changed(removed=True)
Example #13
    def get_dictionary(self, obj):
        ret = {}

        declaredFields = obj.__class__.getDeclaredFields()
        for i in xrange(len(declaredFields)):
            name = declaredFields[i].getName()
            try:
                declaredFields[i].setAccessible(True)
                ret[name] = declaredFields[i].get(obj)
            except:
                pydev_log.exception()

        return ret
Example #14
def test_pydevd_log():
    from _pydev_bundle import pydev_log
    try:
        import StringIO as io
    except:
        import io
    from _pydev_bundle.pydev_log import log_context

    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.critical('always')
        pydev_log.info('never')

    assert stream.getvalue() == 'always\n'

    stream = io.StringIO()
    with log_context(1, stream=stream):
        pydev_log.critical('always')
        pydev_log.info('this too')

    assert stream.getvalue() == 'always\nthis too\n'

    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.critical('always %s', 1)

    assert stream.getvalue() == 'always 1\n'

    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.critical('always %s %s', 1, 2)

    assert stream.getvalue() == 'always 1 2\n'

    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.critical('always %s %s', 1)

    # Even if there's an error in the formatting, don't fail, just print the message and args.
    assert stream.getvalue() == 'always %s %s - (1,)\n'

    stream = io.StringIO()
    with log_context(0, stream=stream):
        try:
            raise RuntimeError()
        except:
            pydev_log.exception('foo')

        assert 'foo\n' in stream.getvalue()
        assert 'raise RuntimeError()' in stream.getvalue()
Example #15
    def remove_return_values(self, main_debugger, frame):
        try:
            try:
                # Showing return values was turned off, so we should remove them from the locals dict.
                # The values can be in the current frame or in the caller's frame.
                frame.f_locals.pop(RETURN_VALUES_DICT, None)

                f_locals_back = getattr(frame.f_back, "f_locals", None)
                if f_locals_back is not None:
                    f_locals_back.pop(RETURN_VALUES_DICT, None)
            except:
                pydev_log.exception()
        finally:
            f_locals_back = None
Example #16
 def show_return_values(self, frame, arg):
     try:
         try:
             f_locals_back = getattr(frame.f_back, "f_locals", None)
             if f_locals_back is not None:
                 return_values_dict = f_locals_back.get(RETURN_VALUES_DICT, None)
                 if return_values_dict is None:
                     return_values_dict = {}
                     f_locals_back[RETURN_VALUES_DICT] = return_values_dict
                 name = self.get_func_name(frame)
                 return_values_dict[name] = arg
         except:
             pydev_log.exception()
     finally:
         f_locals_back = None
Example #17
def update_custom_frame(frame_custom_thread_id, frame, thread_id, name=None):
    with CustomFramesContainer.custom_frames_lock:
        if DEBUG:
            sys.stderr.write('update_custom_frame: %s\n' % frame_custom_thread_id)
        try:
            old = CustomFramesContainer.custom_frames[frame_custom_thread_id]
            if name is not None:
                old.name = name
            old.mod_time += 1
            old.thread_id = thread_id
        except:
            sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_custom_thread_id,))
            pydev_log.exception()

        CustomFramesContainer._py_db_command_thread_event.set()
Example #18
def _is_django_resolve_call(frame):
    try:
        name = frame.f_code.co_name
        if name != '_resolve_lookup':
            return False

        if 'self' not in frame.f_locals:
            return False

        cls = frame.f_locals['self'].__class__

        clsname = cls.__name__
        return clsname == 'Variable'
    except:
        pydev_log.exception()
        return False
Example #19
def process_exec_queue(interpreter):
    init_mpl_in_console(interpreter)
    from pydev_ipython.inputhook import get_inputhook
    try:
        kill_if_pid_not_alive = int(os.environ.get('PYDEV_ECLIPSE_PID', '-1'))
    except:
        kill_if_pid_not_alive = -1

    while 1:
        if kill_if_pid_not_alive != -1:
            if not pid_exists(kill_if_pid_not_alive):
                exit()

        # Running the request may have changed the inputhook in use
        inputhook = get_inputhook()

        if _ProcessExecQueueHelper._debug_hook:
            _ProcessExecQueueHelper._debug_hook()

        if inputhook:
            try:
                # Note: it'll block here until return_control returns True.
                inputhook()
            except:
                pydev_log.exception()
        try:
            try:
                code_fragment = interpreter.exec_queue.get(block=True, timeout=1 / 20.)  # 20 calls/second
            except _queue.Empty:
                continue

            if callable(code_fragment):
                # It can be a callable (i.e.: something that must run in the main
                # thread can be put in the queue for later execution).
                code_fragment()
            else:
                more = interpreter.add_exec(code_fragment)
        except KeyboardInterrupt:
            interpreter.buffer = None
            continue
        except SystemExit:
            raise
        except:
            type, value, tb = sys.exc_info()
            traceback.print_exception(type, value, tb, file=sys.__stderr__)
            exit()
Example #20
def replace_builtin_property(new_property=None):
    if new_property is None:
        new_property = DebugProperty
    original = property
    if IS_PY2:
        try:
            import __builtin__
            __builtin__.__dict__['property'] = new_property
        except:
            pydev_log.exception()  # @Reimport
    else:
        try:
            import builtins  # Python 3.0 does not have the __builtin__ module @UnresolvedImport
            builtins.__dict__['property'] = new_property
        except:
            pydev_log.exception()  # @Reimport
    return original
Example #21
def _get_smart_step_into_targets(code):
    '''
    :return list(Target)
    '''
    b = bytecode.Bytecode.from_code(code)
    cfg = bytecode_cfg.ControlFlowGraph.from_bytecode(b)

    ret = []

    for block in cfg:
        if DEBUG:
            print('\nStart block----')
        stack = _StackInterpreter(block)
        for instr in block:
            try:
                func_name = 'on_%s' % (instr.name, )
                func = getattr(stack, func_name, None)
                if func is None:
                    if STRICT_MODE:
                        raise AssertionError('%s not found.' % (func_name, ))
                    else:
                        continue
                if DEBUG:
                    print('\nWill handle: ', instr, '>>',
                          stack._getname(instr), '<<')
                func(instr)
                if DEBUG:
                    for entry in stack._stack:
                        print('    arg:', stack._getname(entry), '(', entry,
                              ')')
            except:
                if STRICT_MODE:
                    raise  # Error in strict mode.
                else:
                    # In non-strict mode, log it (if in verbose mode) and keep on going.
                    if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 2:
                        pydev_log.exception(
                            'Exception computing step into targets (handled).')

        ret.extend(stack.function_calls)
        ret.extend(stack.load_attrs.values())

    return ret
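The snippet above relies on the third-party `bytecode` package to build a control-flow graph. A small standalone sketch of just that part of the flow (without the `_StackInterpreter` bookkeeping):

from bytecode import Bytecode, ControlFlowGraph


def dump_blocks(func):
    # Build a CFG from the function's code object and print the instruction
    # names per basic block (the names the on_<instr.name> dispatch keys on).
    cfg = ControlFlowGraph.from_bytecode(Bytecode.from_code(func.__code__))
    for i, block in enumerate(cfg):
        print('block %s: %s' % (i, [getattr(instr, 'name', instr) for instr in block]))


def example(a, b):
    return min(a, b) + max(a, b)


dump_blocks(example)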
Example #22
def resolve_compound_var_object_fields(var, attrs):
    """
    Resolve compound variable by its object and attributes

    :param var: the variable object
    :param attrs: a sequence of the variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
    :return: a dictionary of the variable's fields
    """
    attr_list = attrs.split('\t')

    for k in attr_list:
        type, _type_name, resolver = get_type(var)
        var = resolver.resolve(var, k)

    try:
        type, _type_name, resolver = get_type(var)
        return resolver.get_dictionary(var)
    except:
        pydev_log.exception()
Example #23
def frame_vars_to_xml(frame_f_locals, hidden_ns=None):
    """ dumps frame variables to XML
    <var name="var_name" scope="local" type="type" value="value"/>
    """
    xml = ""

    keys = dict_keys(frame_f_locals)
    if hasattr(keys, 'sort'):
        keys.sort()  # Jython 2.1 does not have the sorted() builtin, so sort in place when possible.
    else:
        keys = sorted(keys)  # Python 3 dict views do not have .sort().

    return_values_xml = ''

    for k in keys:
        try:
            v = frame_f_locals[k]
            eval_full_val = should_evaluate_full_value(v)

            if k == '_pydev_stop_at_break':
                continue

            if k == RETURN_VALUES_DICT:
                for name, val in dict_iter_items(v):
                    return_values_xml += var_to_xml(
                        val, name, additional_in_xml=' isRetVal="True"')

            else:
                if hidden_ns is not None and k in hidden_ns:
                    xml += var_to_xml(
                        v,
                        str(k),
                        additional_in_xml=' isIPythonHidden="True"',
                        evaluate_full_value=eval_full_val)
                else:
                    xml += var_to_xml(v,
                                      str(k),
                                      evaluate_full_value=eval_full_val)
        except Exception:
            pydev_log.exception("Unexpected error, recovered safely.")

    # Show return values as the first entry.
    return return_values_xml + xml
Example #24
        def _get_path_with_real_case(filename):
            # Note: this previously made:
            # convert_to_long_pathname(convert_to_short_pathname(filename))
            # but this is no longer done because we can't rely on getting the shortname
            # consistently (there are settings to disable it on Windows).
            # So, using approach which resolves by listing the dir.

            if IS_PY2 and isinstance(filename, unicode):  # noqa
                filename = filename.encode(getfilesystemencoding())

            if '~' in filename:
                filename = convert_to_long_pathname(filename)

            if filename.startswith('<') or not os_path_exists(filename):
                return filename  # Not much we can do.

            drive, parts = os.path.splitdrive(os.path.normpath(filename))
            drive = drive.upper()
            while parts.startswith(os.path.sep):
                parts = parts[1:]
                drive += os.path.sep
            parts = parts.lower().split(os.path.sep)

            try:
                return _resolve_listing(drive, iter(parts))
            except FileNotFoundError:
                _listdir_cache.clear()
                # Retry once after clearing the cache we have.
                try:
                    return _resolve_listing(drive, iter(parts))
                except FileNotFoundError:
                    if os_path_exists(filename):
                        # This is really strange, ask the user to report as error.
                        pydev_log.critical(
                            'pydev debugger: critical: unable to get real case for file. Details:\n'
                            'filename: %s\ndrive: %s\nparts: %s\n'
                            '(please create a ticket in the tracker to address this).',
                            filename, drive, parts
                        )
                        pydev_log.exception()
                    # Don't fail, just return the original file passed.
                    return filename
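`_resolve_listing` and `_listdir_cache` are defined elsewhere; assuming the approach the comments describe (recover the real case by listing each directory), a simplified, cache-less sketch of such a helper could look like:

import os


def _resolve_listing_sketch(resolved, lowercase_parts):
    # Walk the path one component at a time and pick the directory entry whose
    # lowercase form matches; raise FileNotFoundError if no entry matches.
    for part in lowercase_parts:
        if not part:
            continue
        for entry in os.listdir(resolved):
            if entry.lower() == part:
                resolved = os.path.join(resolved, entry)
                break
        else:
            raise FileNotFoundError(os.path.join(resolved, part))
    return resolved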
Example #25
def get_text_list_for_frame(frame):
    # partial copy-paste from make_thread_suspend_str
    curFrame = frame
    cmdTextList = []
    try:
        while curFrame:
            # print cmdText
            myId = str(id(curFrame))
            # print "id is ", myId

            if curFrame.f_code is None:
                break  # Iron Python sometimes does not have it!

            myName = curFrame.f_code.co_name  # method name (if in method) or ? if global
            if myName is None:
                break  # Iron Python sometimes does not have it!

            # print "name is ", myName

            filename = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(curFrame)[1]

            my_file, _applied_mapping = pydevd_file_utils.norm_file_to_client(filename)

            # print "file is ", my_file
            # my_file = inspect.getsourcefile(curFrame) or inspect.getfile(frame)

            myLine = str(curFrame.f_lineno)
            # print "line is ", myLine

            # the variables are all gotten 'on-demand'
            # variables = pydevd_xml.frame_vars_to_xml(curFrame.f_locals)

            variables = ''
            cmdTextList.append('<frame id="%s" name="%s" ' % (myId , pydevd_xml.make_valid_xml_value(myName)))
            cmdTextList.append('file="%s" line="%s">' % (quote(my_file, '/>_= \t'), myLine))
            cmdTextList.append(variables)
            cmdTextList.append("</frame>")
            curFrame = curFrame.f_back
    except:
        pydev_log.exception()

    return cmdTextList
Example #26
def get_text_list_for_frame(frame):
    # partial copy-paste from make_thread_suspend_str
    curFrame = frame
    cmdTextList = []
    try:
        while curFrame:
            # print cmdText
            myId = str(id(curFrame))
            # print "id is ", myId

            if curFrame.f_code is None:
                break  # Iron Python sometimes does not have it!

            myName = curFrame.f_code.co_name  # method name (if in method) or ? if global
            if myName is None:
                break  # Iron Python sometimes does not have it!

            # print "name is ", myName

            filename = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(curFrame)[1]

            myFile = pydevd_file_utils.norm_file_to_client(filename)

            # print "file is ", myFile
            # myFile = inspect.getsourcefile(curFrame) or inspect.getfile(frame)

            myLine = str(curFrame.f_lineno)
            # print "line is ", myLine

            # the variables are all gotten 'on-demand'
            # variables = pydevd_xml.frame_vars_to_xml(curFrame.f_locals)

            variables = ''
            cmdTextList.append('<frame id="%s" name="%s" ' % (myId , pydevd_xml.make_valid_xml_value(myName)))
            cmdTextList.append('file="%s" line="%s">' % (quote(myFile, '/>_= \t'), myLine))
            cmdTextList.append(variables)
            cmdTextList.append("</frame>")
            curFrame = curFrame.f_back
    except:
        pydev_log.exception()

    return cmdTextList
Example #27
def resolve_compound_variable_fields(dbg, thread_id, frame_id, scope, attrs):
    """
    Resolve compound variable in debugger scopes by its name and attributes

    :param thread_id: id of the variable's thread
    :param frame_id: id of the variable's frame
    :param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
    :param attrs: after reaching the proper scope, we have to get the attributes until we find
            the proper location (i.e.: obj\tattr1\tattr2)
    :return: a dictionary of the variable's fields
    """

    var = getVariable(dbg, thread_id, frame_id, scope, attrs)

    try:
        _type, type_name, resolver = get_type(var)
        return type_name, resolver.get_dictionary(var)
    except:
        pydev_log.exception('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s.',
            thread_id, frame_id, scope, attrs)
Example #28
def _load_python_helper_lib_uncached():
    if (not IS_CPYTHON or sys.version_info[:2] > (3, 10)
            or hasattr(sys, 'gettotalrefcount') or LOAD_NATIVE_LIB_FLAG in ENV_FALSE_LOWER_VALUES):
        pydev_log.info('Helper lib to set tracing to all threads not loaded.')
        return None

    try:
        filename = get_python_helper_lib_filename()
        if filename is None:
            return None
        # Load as pydll so that we don't release the gil.
        lib = ctypes.pydll.LoadLibrary(filename)
        pydev_log.info('Successfully Loaded helper lib to set tracing to all threads.')
        return lib
    except:
        if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
            # Only show message if tracing is on (we don't have pre-compiled
            # binaries for all architectures -- i.e.: ARM).
            pydev_log.exception('Error loading: %s', filename)
        return None
Example #29
    def exec_on_timeout(self):
        # Note: lock should already be obtained when executing this function.
        kwargs = self.kwargs
        on_timeout = self.on_timeout

        if not self.disposed:
            self.disposed = True
            self.kwargs = None
            self.on_timeout = None

            try:
                if _DEBUG:
                    pydev_log.critical(
                        'pydevd_timeout: Calling on timeout: %s with kwargs: %s',
                        on_timeout, kwargs)

                on_timeout(**kwargs)
            except Exception:
                pydev_log.exception(
                    'pydevd_timeout: Exception on callback timeout.')
Example #30
def _get_source_django_18_or_lower(frame):
    # This method is usable only for the Django <= 1.8
    try:
        node = frame.f_locals['self']
        if hasattr(node, 'source'):
            return node.source
        else:
            if IS_DJANGO18:
                # The debug setting was changed since Django 1.8
                pydev_log.error_once("WARNING: Template path is not available. Set the 'debug' option in the OPTIONS of a DjangoTemplates "
                                     "backend.")
            else:
                # The debug setting for Django < 1.8
                pydev_log.error_once("WARNING: Template path is not available. Please set TEMPLATE_DEBUG=True in your settings.py to make "
                                     "django template breakpoints working")
            return None

    except:
        pydev_log.exception()
        return None
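For reference, the two settings those warnings point at are standard Django configuration (shown here as a reminder, not as pydevd code):

# Django >= 1.8: enable template debug info through the backend OPTIONS.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {'debug': True},
    },
]

# Django < 1.8: the old-style flag.
TEMPLATE_DEBUG = True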
Example #31
def custom_operation(dbg, thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
    """
    We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.

    code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
    operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
    """
    expressionValue = getVariable(dbg, thread_id, frame_id, scope, attrs)

    try:
        namespace = {'__name__': '<custom_operation>'}
        if style == "EXECFILE":
            namespace['__file__'] = code_or_file
            execfile(code_or_file, namespace, namespace)
        else:  # style == EXEC
            namespace['__file__'] = '<customOperationCode>'
            Exec(code_or_file, namespace, namespace)

        return str(namespace[operation_fn_name](expressionValue))
    except:
        pydev_log.exception()
Example #32
def mark_as_pydevd_daemon_thread(thread):
    if not IS_JYTHON and not IS_IRONPYTHON and PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS:
        global _patched_threading_to_hide_pydevd_threads
        if not _patched_threading_to_hide_pydevd_threads:
            # When we mark the first thread as a pydevd daemon thread, we also change the threading
            # functions to hide pydevd threads.
            # Note: we don't just "hide" the pydevd threads from the threading module by not using it
            # (i.e.: just using the `thread.start_new_thread` instead of `threading.Thread`)
            # because there's 1 thread (the `CheckAliveThread`) which is a pydevd thread but
            # isn't really a daemon thread (so, we need CPython to wait on it for shutdown,
            # in which case it needs to be in `threading` and the patching would be needed anyways).
            _patched_threading_to_hide_pydevd_threads = True
            try:
                _patch_threading_to_hide_pydevd_threads()
            except:
                pydev_log.exception(
                    'Error applying patching to hide pydevd threads.')

    thread.pydev_do_not_trace = True
    thread.is_pydev_daemon_thread = True
    thread.daemon = True
Example #33
def change_attr_expression(frame, attr, expression, dbg, value=SENTINEL_VALUE):
    '''Changes some attribute in a given frame.
    '''
    if frame is None:
        return

    try:
        expression = expression.replace('@LINE@', '\n')

        if dbg.plugin and value is SENTINEL_VALUE:
            result = dbg.plugin.change_variable(frame, attr, expression)
            if result:
                return result

        if attr[:7] == "Globals":
            attr = attr[8:]
            if attr in frame.f_globals:
                if value is SENTINEL_VALUE:
                    value = eval(expression, frame.f_globals, frame.f_locals)
                frame.f_globals[attr] = value
                return frame.f_globals[attr]
        else:
            if '.' not in attr:  # i.e.: if we have a '.', we're changing some attribute of a local var.
                if pydevd_save_locals.is_save_locals_available():
                    if value is SENTINEL_VALUE:
                        value = eval(expression, frame.f_globals,
                                     frame.f_locals)
                    frame.f_locals[attr] = value
                    pydevd_save_locals.save_locals(frame)
                    return frame.f_locals[attr]

            # i.e.: case with '.' or save locals not available (just exec the assignment in the frame).
            if value is SENTINEL_VALUE:
                value = eval(expression, frame.f_globals, frame.f_locals)
            result = value
            Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
            return result

    except Exception:
        pydev_log.exception()
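`pydevd_save_locals.save_locals(frame)` is what makes the `frame.f_locals[attr] = value` assignment stick. On CPython versions where writing to `f_locals` alone is not enough, the core of it is the C-API call below (a simplified sketch of the idea, not the full pydevd implementation; newer CPython versions changed frame internals):

import ctypes
import sys


def save_locals_sketch(frame):
    # Push the (possibly modified) f_locals dict back into the frame's
    # fast-locals storage so the paused code actually sees the change.
    if hasattr(ctypes, 'pythonapi') and hasattr(ctypes.pythonapi, 'PyFrame_LocalsToFast'):
        ctypes.pythonapi.PyFrame_LocalsToFast(ctypes.py_object(frame), ctypes.c_int(0))
    else:
        sys.stderr.write('Unable to save locals on this interpreter.\n')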
Example #34
    def cmd_set_py_exception_json(self, py_db, cmd_id, seq, text):
        # This API is optional and works 'in bulk' -- it's possible
        # to get finer-grained control with CMD_ADD_EXCEPTION_BREAK/CMD_REMOVE_EXCEPTION_BREAK
        # which allows setting caught/uncaught per exception, although global settings such as:
        # - skip_on_exceptions_thrown_in_same_context
        # - ignore_exceptions_thrown_in_lines_with_ignore_exception
        # must still be set through this API (before anything else as this clears all existing
        # exception breakpoints).
        try:
            py_db.break_on_uncaught_exceptions = {}
            py_db.break_on_caught_exceptions = {}
            py_db.break_on_user_uncaught_exceptions = {}

            as_json = json.loads(text)
            break_on_uncaught = as_json.get('break_on_uncaught', False)
            break_on_caught = as_json.get('break_on_caught', False)
            break_on_user_caught = as_json.get('break_on_user_caught', False)
            py_db.skip_on_exceptions_thrown_in_same_context = as_json.get('skip_on_exceptions_thrown_in_same_context', False)
            py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = as_json.get('ignore_exceptions_thrown_in_lines_with_ignore_exception', False)
            ignore_libraries = as_json.get('ignore_libraries', False)
            exception_types = as_json.get('exception_types', [])

            for exception_type in exception_types:
                if not exception_type:
                    continue

                py_db.add_break_on_exception(
                    exception_type,
                    condition=None,
                    expression=None,
                    notify_on_handled_exceptions=break_on_caught,
                    notify_on_unhandled_exceptions=break_on_uncaught,
                    notify_on_user_unhandled_exceptions=break_on_user_caught,
                    notify_on_first_raise_only=True,
                    ignore_libraries=ignore_libraries,
                )

                py_db.on_breakpoints_changed()
        except:
            pydev_log.exception("Error when setting exception list. Received: %s", text)
Example #35
        def _get_path_with_real_case(filename):
            # Note: this previously made:
            # convert_to_long_pathname(convert_to_short_pathname(filename))
            # but this is no longer done because we can't rely on getting the shortname
            # consistently (there are settings to disable it on Windows).
            # So, using approach which resolves by listing the dir.

            if IS_PY2 and isinstance(filename, unicode):  # noqa
                filename = filename.encode(getfilesystemencoding())

            if '~' in filename:
                filename = convert_to_long_pathname(filename)

            if filename.startswith('<') or not os.path.exists(filename):
                return filename  # Not much we can do.

            drive, parts = os.path.splitdrive(os.path.normpath(filename))
            drive = drive.upper()
            while parts.startswith(os.path.sep):
                parts = parts[1:]
                drive += os.path.sep
            parts = parts.lower().split(os.path.sep)

            try:
                return _resolve_listing(drive, iter(parts))
            except FileNotFoundError:
                _listdir_cache.clear()
                # Retry once after clearing the cache we have.
                try:
                    return _resolve_listing(drive, iter(parts))
                except FileNotFoundError:
                    if os.path.exists(filename):
                        # This is really strange, ask the user to report as error.
                        sys.stderr.write('\npydev debugger: critical: unable to get real case for file. Details:\n'
                                         'filename: %s\ndrive: %s\nparts: %s\n'
                                         '(please create a ticket in the tracker to address this).\n\n' % (
                                             filename, drive, parts))
                        pydev_log.exception()
                    # Don't fail, just return the original file passed.
                    return filename
Example #36
def _get_source_django_18_or_lower(frame):
    # This method is usable only for the Django <= 1.8
    try:
        node = frame.f_locals['self']
        if hasattr(node, 'source'):
            return node.source
        else:
            if IS_DJANGO18:
                # The debug setting was changed since Django 1.8
                pydev_log.error_once(
                    "WARNING: Template path is not available. Set the 'debug' option in the OPTIONS of a DjangoTemplates "
                    "backend.")
            else:
                # The debug setting for Django < 1.8
                pydev_log.error_once(
                    "WARNING: Template path is not available. Please set TEMPLATE_DEBUG=True in your settings.py to make "
                    "django template breakpoints working")
            return None

    except:
        pydev_log.exception()
        return None
Example #37
    def find_frame(self, thread_id, frame_id):
        try:
            if frame_id == "*":
                return get_frame()  # any frame is specified with "*"
            frame_id = int(frame_id)

            fake_frames = self._thread_id_to_fake_frames.get(thread_id)
            if fake_frames is not None:
                frame = fake_frames.get(frame_id)
                if frame is not None:
                    return frame

            frames_tracker = self._thread_id_to_tracker.get(thread_id)
            if frames_tracker is not None:
                frame = frames_tracker.find_frame(thread_id, frame_id)
                if frame is not None:
                    return frame

            return None
        except:
            pydev_log.exception()
            return None
Example #38
    def str_from_providers(self, o, type_object, type_name):
        provider = self._type_to_str_provider_cache.get(type_object)

        if provider is self.NO_PROVIDER:
            return None

        if provider is not None:
            return provider.get_str(o)

        if not self._initialized:
            self._initialize()

        for provider in self._str_providers:
            if provider.can_provide(type_object, type_name):
                self._type_to_str_provider_cache[type_object] = provider
                try:
                    return provider.get_str(o)
                except:
                    pydev_log.exception("Error when getting str with custom provider: %s." % (provider,))

        self._type_to_str_provider_cache[type_object] = self.NO_PROVIDER
        return None
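A str provider only needs `can_provide(type_object, type_name)` and `get_str(o)` for the loop above to pick it up. A hypothetical provider with that shape (the class and its policy are made up for illustration):

class TruncatingStrProvider(object):
    # Hypothetical provider that shortens very long string representations.

    MAX_LEN = 200

    def can_provide(self, type_object, type_name):
        return type_name in ('str', 'bytes')

    def get_str(self, o):
        s = repr(o)
        return s if len(s) <= self.MAX_LEN else s[:self.MAX_LEN] + '...'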
Example #39
    def find_frame(self, thread_id, frame_id):
        try:
            if frame_id == "*":
                return get_frame()  # any frame is specified with "*"
            frame_id = int(frame_id)

            fake_frames = self._thread_id_to_fake_frames.get(thread_id)
            if fake_frames is not None:
                frame = fake_frames.get(frame_id)
                if frame is not None:
                    return frame

            frames_tracker = self._thread_id_to_tracker.get(thread_id)
            if frames_tracker is not None:
                frame = frames_tracker.find_frame(thread_id, frame_id)
                if frame is not None:
                    return frame

            return None
        except:
            pydev_log.exception()
            return None
Example #40
    def make_thread_stack_str(self, py_db, frames_list):
        assert frames_list.__class__ == FramesList
        make_valid_xml_value = pydevd_xml.make_valid_xml_value
        cmd_text_list = []
        append = cmd_text_list.append

        try:
            for frame_id, frame, method_name, _original_filename, filename_in_utf8, lineno, _applied_mapping in self._iter_visible_frames_info(
                    py_db, frames_list
                ):

                # print("file is ", filename_in_utf8)
                # print("line is ", lineno)

                # Note: variables are all gotten 'on-demand'.
                append('<frame id="%s" name="%s" ' % (frame_id , make_valid_xml_value(method_name)))
                append('file="%s" line="%s">' % (quote(make_valid_xml_value(filename_in_utf8), '/>_= \t'), lineno))
                append("</frame>")
        except:
            pydev_log.exception()

        return ''.join(cmd_text_list)
Example #41
def enable_gevent_integration():
    # References:
    # https://greenlet.readthedocs.io/en/latest/api.html#greenlet.settrace
    # https://greenlet.readthedocs.io/en/latest/tracing.html

    # Note: gevent.version_info is WRONG (gevent.__version__ must be used).
    try:
        if tuple(int(x) for x in gevent.__version__.split('.')[:2]) <= (20, 0):
            if not GEVENT_SHOW_PAUSED_GREENLETS:
                return

            if not hasattr(greenlet, 'settrace'):
                # In older versions it was optional.
                # We still try to use it if available though (because without it
                # GEVENT_SHOW_PAUSED_GREENLETS cannot work).
                pydev_log.debug(
                    'greenlet.settrace not available. GEVENT_SHOW_PAUSED_GREENLETS will have no effect.'
                )
                return
        try:
            greenlet.settrace(greenlet_events)
        except:
            pydev_log.exception('Error with greenlet.settrace.')
    except:
        pydev_log.exception('Error setting up gevent %s.', gevent.__version__)
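`greenlet_events` is the trace callback installed above; per the greenlet tracing API the callback receives an event name and an args tuple. A minimal callback with that signature (a sketch, not pydevd's actual implementation):

import greenlet


def greenlet_events_sketch(event, args):
    # greenlet.settrace callbacks receive ('switch' | 'throw', (origin, target)).
    if event in ('switch', 'throw'):
        origin, target = args
        print('greenlet %s: %r -> %r' % (event, origin, target))


previous = greenlet.settrace(greenlet_events_sketch)  # Returns the previously set callback.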
Example #42
except:
    # jython does not support os.path.realpath
    # realpath is a no-op on systems without islink support
    rPath = os.path.abspath

# defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# see module docstring for more details.
try:
    PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(
        os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
    sys.stderr.write(
        'Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n'
    )
    pydev_log.exception()
    PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
    if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
        sys.stderr.write(
            'Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n'
        )
        PATHS_FROM_ECLIPSE_TO_PYTHON = []
    else:
        # Converting json lists to tuple
        PATHS_FROM_ECLIPSE_TO_PYTHON = [
            tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON
        ]

# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
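The inline example above is cut off in this excerpt; based on the description (a JSON list of [client-path, server-path] pairs), a hypothetical value would be set like this before launching the debugger (the paths are made up):

import os

os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'] = '[["c:/dev/my_project", "/home/user/my_project"]]'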
Example #43
    def apply(self):
        mod = self.mod
        self._on_finish_callbacks = []
        try:
            # Get the module name, e.g. 'foo.bar.whatever'
            modname = mod.__name__
            # Get the module namespace (dict) early; this is part of the type check
            modns = mod.__dict__
            # Parse it into package name and module name, e.g. 'foo.bar' and 'whatever'
            i = modname.rfind(".")
            if i >= 0:
                pkgname, modname = modname[:i], modname[i + 1:]
            else:
                pkgname = None
            # Compute the search path
            if pkgname:
                # We're not reloading the package, only the module in it
                pkg = sys.modules[pkgname]
                path = pkg.__path__  # Search inside the package
            else:
                # Search the top-level module path
                pkg = None
                path = None  # Make find_module() use the default search path
            # Find the module; may raise ImportError
            (stream, filename, (suffix, mode,
                                kind)) = imp.find_module(modname, path)
            # Turn it into a code object
            try:
                # Is it Python source code or byte code read from a file?
                if kind not in (imp.PY_COMPILED, imp.PY_SOURCE):
                    # Fall back to built-in reload()
                    notify_error('Could not find source to reload (mod: %s)' %
                                 (modname, ))
                    return
                if kind == imp.PY_SOURCE:
                    source = stream.read()
                    code = compile(source, filename, "exec")
                else:
                    import marshal
                    code = marshal.load(stream)
            finally:
                if stream:
                    stream.close()
            # Execute the code.  We copy the module dict to a temporary; then
            # clear the module dict; then execute the new code in the module
            # dict; then swap things back and around.  This trick (due to
            # Glyph Lefkowitz) ensures that the (readonly) __globals__
            # attribute of methods and functions is set to the correct dict
            # object.
            new_namespace = modns.copy()
            new_namespace.clear()
            new_namespace["__name__"] = modns["__name__"]
            Exec(code, new_namespace)
            # Now we get to the hard part
            oldnames = set(modns)
            newnames = set(new_namespace)

            # Create new tokens (note: not deleting existing)
            for name in newnames - oldnames:
                notify_info0('Added:', name, 'to namespace')
                self.found_change = True
                modns[name] = new_namespace[name]

            # Update in-place what we can
            for name in oldnames & newnames:
                self._update(modns, name, modns[name], new_namespace[name])

            self._handle_namespace(modns)

            for c in self._on_finish_callbacks:
                c()
            del self._on_finish_callbacks[:]
        except:
            pydev_log.exception()
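The comment about the trick due to Glyph Lefkowitz is the key idea: functions keep a reference to the dict they were defined in, so making the original module dict hold the reloaded definitions lets existing function objects see the new globals. A tiny standalone demonstration, independent of pydevd:

mod_dict = {'__name__': 'demo'}
exec("X = 1\ndef get_x():\n    return X", mod_dict)
get_x = mod_dict['get_x']
assert get_x() == 1

# "Reload" into the *same* dict: the old function object now sees the new global.
exec("X = 2\ndef get_x():\n    return X", mod_dict)
assert get_x() == 2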
Example #44
    def _update(self,
                namespace,
                name,
                oldobj,
                newobj,
                is_class_namespace=False):
        """Update oldobj, if possible in place, with newobj.

        If oldobj is immutable, this simply returns newobj.

        Args:
          oldobj: the object to be updated
          newobj: the object used as the source for the update
        """
        try:
            notify_info2('Updating: ', oldobj)
            if oldobj is newobj:
                # Probably something imported
                return

            if type(oldobj) is not type(newobj):
                # Cop-out: if the type changed, give up
                notify_error('Type of: %s changed... Skipping.' % (oldobj, ))
                return

            if isinstance(newobj, types.FunctionType):
                self._update_function(oldobj, newobj)
                return

            if isinstance(newobj, types.MethodType):
                self._update_method(oldobj, newobj)
                return

            if isinstance(newobj, classmethod):
                self._update_classmethod(oldobj, newobj)
                return

            if isinstance(newobj, staticmethod):
                self._update_staticmethod(oldobj, newobj)
                return

            if hasattr(types, 'ClassType'):
                classtype = (types.ClassType, type
                             )  # object is not instance of types.ClassType.
            else:
                classtype = type

            if isinstance(newobj, classtype):
                self._update_class(oldobj, newobj)
                return

            # New: dealing with metaclasses.
            if hasattr(newobj, '__metaclass__') and hasattr(
                    newobj,
                    '__class__') and newobj.__metaclass__ == newobj.__class__:
                self._update_class(oldobj, newobj)
                return

            if namespace is not None:

                if oldobj != newobj and str(oldobj) != str(newobj) and repr(
                        oldobj) != repr(newobj):
                    xreload_old_new = None
                    if is_class_namespace:
                        xreload_old_new = getattr(namespace,
                                                  '__xreload_old_new__', None)
                        if xreload_old_new is not None:
                            self.found_change = True
                            xreload_old_new(name, oldobj, newobj)

                    elif '__xreload_old_new__' in namespace:
                        xreload_old_new = namespace['__xreload_old_new__']
                        xreload_old_new(namespace, name, oldobj, newobj)
                        self.found_change = True

                    # Too much information to the user...
                    # else:
                    #     notify_info0('%s NOT updated. Create __xreload_old_new__(name, old, new) for custom reload' % (name,))

        except:
            notify_error(
                'Exception found when updating %s. Proceeding for other items.'
                % (name, ))
            pydev_log.exception()
Example #45
 def _call(self, cmdline, **kwargs):
     try:
         subprocess.check_call(cmdline, **kwargs)
     except:
         if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
             pydev_log.exception('Error running: %s' % (' '.join(cmdline)))
Example #46
    def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False):
        """Update oldobj, if possible in place, with newobj.

        If oldobj is immutable, this simply returns newobj.

        Args:
          oldobj: the object to be updated
          newobj: the object used as the source for the update
        """
        try:
            notify_info2('Updating: ', oldobj)
            if oldobj is newobj:
                # Probably something imported
                return

            if type(oldobj) is not type(newobj):
                # Cop-out: if the type changed, give up
                notify_error('Type of: %s changed... Skipping.' % (oldobj,))
                return

            if isinstance(newobj, types.FunctionType):
                self._update_function(oldobj, newobj)
                return

            if isinstance(newobj, types.MethodType):
                self._update_method(oldobj, newobj)
                return

            if isinstance(newobj, classmethod):
                self._update_classmethod(oldobj, newobj)
                return

            if isinstance(newobj, staticmethod):
                self._update_staticmethod(oldobj, newobj)
                return

            if hasattr(types, 'ClassType'):
                classtype = (types.ClassType, type)  # object is not instance of types.ClassType.
            else:
                classtype = type

            if isinstance(newobj, classtype):
                self._update_class(oldobj, newobj)
                return

            # New: dealing with metaclasses.
            if hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and newobj.__metaclass__ == newobj.__class__:
                self._update_class(oldobj, newobj)
                return

            if namespace is not None:

                if oldobj != newobj and str(oldobj) != str(newobj) and repr(oldobj) != repr(newobj):
                    xreload_old_new = None
                    if is_class_namespace:
                        xreload_old_new = getattr(namespace, '__xreload_old_new__', None)
                        if xreload_old_new is not None:
                            self.found_change = True
                            xreload_old_new(name, oldobj, newobj)

                    elif '__xreload_old_new__' in namespace:
                        xreload_old_new = namespace['__xreload_old_new__']
                        xreload_old_new(namespace, name, oldobj, newobj)
                        self.found_change = True

                    # Too much information to the user...
                    # else:
                    #     notify_info0('%s NOT updated. Create __xreload_old_new__(name, old, new) for custom reload' % (name,))

        except:
            notify_error('Exception found when updating %s. Proceeding for other items.' % (name,))
            pydev_log.exception()
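For values that cannot be patched in place, the code above looks for a `__xreload_old_new__` hook: it is called as `hook(namespace, name, oldobj, newobj)` when found in a module namespace, or `hook(name, oldobj, newobj)` when found on a class. A sketch of a module-level hook (the migration policy here is just an example):

def __xreload_old_new__(namespace, name, oldobj, newobj):
    # Called by the reloader for names whose old and new values differ and
    # could not be updated automatically; this simple policy takes the new
    # value (a real hook could migrate state from oldobj instead).
    namespace[name] = newobj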
Example #47
    def handle_exception(self, frame, event, arg):
        try:
            # print('handle_exception', frame.f_lineno, frame.f_code.co_name)

            # We have 3 things in arg: exception type, description, traceback object
            trace_obj = arg[2]
            main_debugger = self._args[0]

            initial_trace_obj = trace_obj
            if trace_obj.tb_next is None and trace_obj.tb_frame is frame:
                # I.e.: tb_next should only be None in the context where the exception was thrown (trace_obj.tb_frame is frame is just a double check).
                pass
            else:
                # Get the trace_obj from where the exception was raised...
                while trace_obj.tb_next is not None:
                    trace_obj = trace_obj.tb_next

            if main_debugger.ignore_exceptions_thrown_in_lines_with_ignore_exception:
                for check_trace_obj in (initial_trace_obj, trace_obj):
                    filename = get_abs_path_real_path_and_base_from_frame(
                        check_trace_obj.tb_frame)[1]

                    filename_to_lines_where_exceptions_are_ignored = self.filename_to_lines_where_exceptions_are_ignored

                    lines_ignored = filename_to_lines_where_exceptions_are_ignored.get(
                        filename)
                    if lines_ignored is None:
                        lines_ignored = filename_to_lines_where_exceptions_are_ignored[
                            filename] = {}

                    try:
                        curr_stat = os.stat(filename)
                        curr_stat = (curr_stat.st_size, curr_stat.st_mtime)
                    except:
                        curr_stat = None

                    last_stat = self.filename_to_stat_info.get(filename)
                    if last_stat != curr_stat:
                        self.filename_to_stat_info[filename] = curr_stat
                        lines_ignored.clear()
                        try:
                            linecache.checkcache(filename)
                        except:
                            # Jython 2.1
                            linecache.checkcache()

                    from_user_input = main_debugger.filename_to_lines_where_exceptions_are_ignored.get(
                        filename)
                    if from_user_input:
                        merged = {}
                        merged.update(lines_ignored)
                        # Override what we have with the related entries that the user entered
                        merged.update(from_user_input)
                    else:
                        merged = lines_ignored

                    exc_lineno = check_trace_obj.tb_lineno

                    # print ('lines ignored', lines_ignored)
                    # print ('user input', from_user_input)
                    # print ('merged', merged, 'curr', exc_lineno)

                    if exc_lineno not in merged:  # Note: check on merged but update lines_ignored.
                        try:
                            line = linecache.getline(
                                filename, exc_lineno,
                                check_trace_obj.tb_frame.f_globals)
                        except:
                            # Jython 2.1
                            line = linecache.getline(filename, exc_lineno)

                        if IGNORE_EXCEPTION_TAG.match(line) is not None:
                            lines_ignored[exc_lineno] = 1
                            return
                        else:
                            # Put in the cache saying not to ignore
                            lines_ignored[exc_lineno] = 0
                    else:
                        # Ok, dict has it already cached, so, let's check it...
                        if merged.get(exc_lineno, 0):
                            return

            thread = self._args[3]

            try:
                frame_id_to_frame = {}
                frame_id_to_frame[id(frame)] = frame
                f = trace_obj.tb_frame
                while f is not None:
                    frame_id_to_frame[id(f)] = f
                    f = f.f_back
                f = None

                main_debugger.send_caught_exception_stack(
                    thread, arg, id(frame))
                self.set_suspend(thread, CMD_STEP_CAUGHT_EXCEPTION)
                self.do_wait_suspend(thread, frame, event, arg)
                main_debugger.send_caught_exception_stack_proceeded(thread)
            except:
                pydev_log.exception()

            main_debugger.set_trace_for_frame_and_parents(frame)
        finally:
            # Make sure the user cannot see the '__exception__' we added after we leave the suspend state.
            remove_exception_from_frame(frame)
            # Clear some local variables...
            frame = None
            trace_obj = None
            initial_trace_obj = None
            check_trace_obj = None
            f = None
            frame_id_to_frame = None
            main_debugger = None
            thread = None
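
A minimal standalone sketch of the line-ignore check used above, assuming a hypothetical '@IgnoreException' comment tag (the real pydevd tag regex may differ); it shows how linecache plus a regex can decide whether the raising line opted out of breaking:

import linecache
import re

# Hypothetical tag for illustration only: a '# @IgnoreException' comment on the raising line.
IGNORE_EXCEPTION_TAG = re.compile(r'#.*@IgnoreException')

def should_ignore_exception_line(filename, lineno):
    # Refresh the cache in case the file changed on disk, then inspect the raising line.
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno)
    return IGNORE_EXCEPTION_TAG.search(line) is not None
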
Example #48
0
    def trace_dispatch(self, frame, event, arg):
        # ENDIF
        # Note: this is a big function because most of the logic related to hitting a breakpoint and
        # stepping is contained in it. Ideally this could be split among multiple functions, but the
        # problem in this case is that in pure Python function calls are expensive, and even more so
        # when tracing is on (because each function call will get an additional tracing call). We
        # try to address this by using info.is_tracing for the fastest possible return, but the
        # cost is still high (maybe we could use code generation in the future and better split
        # what each part does).

        # DEBUG = '_debugger_case_generator.py' in frame.f_code.co_filename
        main_debugger, filename, info, thread, frame_skips_cache, frame_cache_key = self._args
        # if DEBUG: print('frame trace_dispatch %s %s %s %s %s %s, stop: %s' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename, event, constant_to_str(info.pydev_step_cmd), arg, info.pydev_step_stop))
        try:
            info.is_tracing += 1
            line = frame.f_lineno
            line_cache_key = (frame_cache_key, line)

            if main_debugger.pydb_disposed:
                return None if event == 'call' else NO_FTRACE

            plugin_manager = main_debugger.plugin
            is_coroutine_or_generator = frame.f_code.co_flags & 0xa0  # 0xa0 ==  CO_GENERATOR = 0x20 | CO_COROUTINE = 0x80
            is_exception_event = event == 'exception'
            has_exception_breakpoints = main_debugger.break_on_caught_exceptions or main_debugger.has_plugin_exception_breaks

            stop_frame = info.pydev_step_stop
            step_cmd = info.pydev_step_cmd
            if is_coroutine_or_generator:
                # Dealing with coroutines and generators:
                # When in a coroutine we change the event perceived by the debugger because
                # a call, a StopIteration exception and a return are usually just pausing/unpausing it.
                if event == 'line':
                    is_line = True
                    is_call = False
                    is_return = False

                elif event == 'return':
                    is_line = False
                    is_call = False
                    is_return = True

                    returns_cache_key = (frame_cache_key, 'returns')
                    return_lines = frame_skips_cache.get(returns_cache_key)
                    if return_lines is None:
                        # Note: we're collecting the return lines by inspecting the bytecode as
                        # there are multiple returns and multiple stop iterations when awaiting and
                        # it doesn't give any clear indication when a coroutine or generator is
                        # finishing or just pausing.
                        return_lines = set()
                        for x in main_debugger.collect_return_info(
                                frame.f_code):
                            # Note: cython does not support closures in cpdefs (so we can't use
                            # a list comprehension).
                            return_lines.add(x.return_line)

                        frame_skips_cache[returns_cache_key] = return_lines

                    if line not in return_lines:
                        # Not really a return (coroutine/generator paused).
                        return self.trace_dispatch
                    else:
                        # Tricky handling: usually when we're on a frame which is about to exit
                        # we set the step mode to step into, but in this case we'd end up in the
                        # asyncio internal machinery, which is not what we want, so, we just
                        # ask the stop frame to be a level up.
                        #
                        # Note that there's an issue here which we may want to fix in the future: if
                        # the back frame is a frame which is filtered, we won't stop properly.
                        # Solving this may not be trivial as we'd need to put a scope in the step
                        # in, but we may have to do it anyway to have a step in which doesn't end
                        # up in asyncio.
                        if stop_frame is frame:
                            if step_cmd in (CMD_STEP_OVER,
                                            CMD_STEP_OVER_MY_CODE):
                                info.pydev_step_stop = frame.f_back

                elif is_exception_event:
                    if has_exception_breakpoints:
                        should_stop, frame = self.should_stop_on_exception(
                            frame, event, arg)
                        if should_stop:
                            self.handle_exception(frame, event, arg)
                            return self.trace_dispatch

                    return self.trace_dispatch
                else:
                    # event == 'call' or event == 'c_XXX'
                    return self.trace_dispatch

            else:
                if is_exception_event:
                    if has_exception_breakpoints:
                        should_stop, frame = self.should_stop_on_exception(
                            frame, event, arg)
                        if should_stop:
                            self.handle_exception(frame, event, arg)
                            return self.trace_dispatch
                    is_line = False
                    is_return = False
                    is_call = False
                else:
                    if event == 'line':
                        is_line = True
                        is_call = False
                        is_return = False
                    else:
                        is_line = False
                        is_return = event == 'return'
                        is_call = event == 'call'

                    if not is_line and not is_return and not is_call:
                        # Unexpected: just keep the same trace func (i.e.: event == 'c_XXX').
                        return self.trace_dispatch

            if is_exception_event:
                breakpoints_for_file = None
            else:
                # If we are in single step mode and something causes us to exit the current frame, we need to make sure we break
                # eventually.  Force the step mode to step into and the step stop frame to None.
                # I.e.: F6 at the end of a function should stop at the next possible position (instead of forcing the user
                # to make a step in or step over at that location).
                # Note: this is especially troublesome when we're skipping code with the
                # @DontTrace comment.
                if stop_frame is frame and is_return and step_cmd in (
                        CMD_STEP_OVER, CMD_STEP_RETURN, CMD_STEP_OVER_MY_CODE,
                        CMD_STEP_RETURN_MY_CODE):
                    if not is_coroutine_or_generator:  # i.e.: not a coroutine or generator
                        if step_cmd in (CMD_STEP_OVER, CMD_STEP_RETURN):
                            info.pydev_step_cmd = CMD_STEP_INTO
                        else:
                            info.pydev_step_cmd = CMD_STEP_INTO_MY_CODE
                        info.pydev_step_stop = None

                breakpoints_for_file = main_debugger.breakpoints.get(filename)

                can_skip = False

                if info.pydev_state == 1:  # STATE_RUN = 1
                    # we can skip if:
                    # - we have no stop marked
                    # - we should make a step return/step over and we're not in the current frame
                    can_skip = step_cmd == -1 or (step_cmd in (
                        CMD_STEP_OVER, CMD_STEP_RETURN, CMD_STEP_OVER_MY_CODE,
                        CMD_STEP_RETURN_MY_CODE) and stop_frame is not frame)

                    if can_skip:
                        if plugin_manager is not None and (
                                main_debugger.has_plugin_line_breaks
                                or main_debugger.has_plugin_exception_breaks):
                            can_skip = plugin_manager.can_skip(
                                main_debugger, frame)

                        if can_skip and main_debugger.show_return_values and info.pydev_step_cmd in (
                                CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE
                        ) and frame.f_back is info.pydev_step_stop:
                            # trace function for showing return values after step over
                            can_skip = False

                # Let's check to see if we are in a function that has a breakpoint. If we don't have a breakpoint,
                # we will return nothing for the next trace
                # also, after we hit a breakpoint and go to some other debugging state, we have to force the set trace anyway,
                # so, that's why the additional checks are there.
                if not breakpoints_for_file:
                    if can_skip:
                        if has_exception_breakpoints:
                            return self.trace_exception
                        else:
                            return None if is_call else NO_FTRACE

                else:
                    # When cached, 0 means we don't have a breakpoint and 1 means we have.
                    if can_skip:
                        breakpoints_in_line_cache = frame_skips_cache.get(
                            line_cache_key, -1)
                        if breakpoints_in_line_cache == 0:
                            return self.trace_dispatch

                    breakpoints_in_frame_cache = frame_skips_cache.get(
                        frame_cache_key, -1)
                    if breakpoints_in_frame_cache != -1:
                        # Gotten from cache.
                        has_breakpoint_in_frame = breakpoints_in_frame_cache == 1

                    else:
                        has_breakpoint_in_frame = False
                        # Checks the breakpoint to see if there is a context match in some function
                        curr_func_name = frame.f_code.co_name

                        # global context is set with an empty name
                        if curr_func_name in ('?', '<module>', '<lambda>'):
                            curr_func_name = ''

                        for breakpoint in dict_iter_values(
                                breakpoints_for_file
                        ):  # jython does not support itervalues()
                            # will match either global or some function
                            if breakpoint.func_name in ('None',
                                                        curr_func_name):
                                has_breakpoint_in_frame = True
                                break

                        # Cache the value (1 or 0 or -1 for default because of cython).
                        if has_breakpoint_in_frame:
                            frame_skips_cache[frame_cache_key] = 1
                        else:
                            frame_skips_cache[frame_cache_key] = 0

                    if can_skip and not has_breakpoint_in_frame:
                        if has_exception_breakpoints:
                            return self.trace_exception
                        else:
                            return None if is_call else NO_FTRACE

            # We may have hit a breakpoint or we are already in step mode. Either way, let's check what we should do in this frame
            # if DEBUG: print('NOT skipped: %s %s %s %s' % (frame.f_lineno, frame.f_code.co_name, event, frame.__class__.__name__))

            try:
                flag = False
                # return is not taken into account for breakpoint hit because we'd have a double-hit in this case
                # (one for the line and the other for the return).

                stop_info = {}
                breakpoint = None
                exist_result = False
                stop = False
                bp_type = None
                if not is_return and info.pydev_state != STATE_SUSPEND and breakpoints_for_file is not None and line in breakpoints_for_file:
                    breakpoint = breakpoints_for_file[line]
                    new_frame = frame
                    stop = True
                    if step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE) and (
                            stop_frame is frame and is_line):
                        stop = False  # we don't stop on breakpoint if we have to stop by step-over (it will be processed later)
                elif plugin_manager is not None and main_debugger.has_plugin_line_breaks:
                    result = plugin_manager.get_breakpoint(
                        main_debugger, self, frame, event, self._args)
                    if result:
                        exist_result = True
                        flag, breakpoint, new_frame, bp_type = result

                if breakpoint:
                    # ok, hit breakpoint, now, we have to discover if it is a conditional breakpoint
                    # let's do the conditional stuff here
                    if stop or exist_result:
                        eval_result = False
                        if breakpoint.has_condition:
                            eval_result = main_debugger.handle_breakpoint_condition(
                                info, breakpoint, new_frame)

                        if breakpoint.expression is not None:
                            main_debugger.handle_breakpoint_expression(
                                breakpoint, info, new_frame)
                            if breakpoint.is_logpoint and info.pydev_message is not None and len(
                                    info.pydev_message) > 0:
                                cmd = main_debugger.cmd_factory.make_io_message(
                                    info.pydev_message + os.linesep, '1')
                                main_debugger.writer.add_command(cmd)

                        if breakpoint.has_condition:
                            if not eval_result:
                                return self.trace_dispatch
                        elif breakpoint.is_logpoint:
                            return self.trace_dispatch

                    if is_call and frame.f_code.co_name in ('<module>',
                                                            '<lambda>'):
                        # If we find a call for a module, it means that the module is being imported/executed for the
                        # first time. In this case we have to ignore this hit as it may later be duplicated by a
                        # line event at the same place (so, if there's a module with a print() in the first line
                        # the user will hit that line twice, which is not what we want).
                        #
                        # As for lambda, as it only has a single statement, it's not interesting to trace
                        # its call and later its line event as they're usually in the same line.

                        return self.trace_dispatch

                if main_debugger.show_return_values:
                    if is_return and info.pydev_step_cmd in (
                            CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE
                    ) and frame.f_back == info.pydev_step_stop:
                        self.show_return_values(frame, arg)

                elif main_debugger.remove_return_values_flag:
                    try:
                        self.remove_return_values(main_debugger, frame)
                    finally:
                        main_debugger.remove_return_values_flag = False

                if stop:
                    self.set_suspend(
                        thread,
                        CMD_SET_BREAK,
                        suspend_other_threads=breakpoint
                        and breakpoint.suspend_policy == "ALL",
                    )

                elif flag and plugin_manager is not None:
                    result = plugin_manager.suspend(main_debugger, thread,
                                                    frame, bp_type)
                    if result:
                        frame = result

                # if thread has a suspend flag, we suspend with a busy wait
                if info.pydev_state == STATE_SUSPEND:
                    self.do_wait_suspend(thread, frame, event, arg)
                    return self.trace_dispatch
                else:
                    if not breakpoint and is_line:
                        # No stop from anyone and no breakpoint found in line (cache that).
                        frame_skips_cache[line_cache_key] = 0

            except:
                pydev_log.exception()
                raise

            # step handling. We stop when we hit the right frame
            try:
                should_skip = 0
                if pydevd_dont_trace.should_trace_hook is not None:
                    if self.should_skip == -1:
                        # I.e.: cache the result on self.should_skip (no need to evaluate the same frame multiple times).
                        # Note that on a code reload we won't re-evaluate this because, in practice, the frame.f_code
                        # which will be handled by this frame is read-only, so we can cache it safely.
                        if not pydevd_dont_trace.should_trace_hook(
                                frame, filename):
                            # -1, 0, 1 to be Cython-friendly
                            should_skip = self.should_skip = 1
                        else:
                            should_skip = self.should_skip = 0
                    else:
                        should_skip = self.should_skip

                plugin_stop = False
                if should_skip:
                    stop = False

                elif step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE):
                    force_check_project_scope = step_cmd == CMD_STEP_INTO_MY_CODE
                    if is_line:
                        if force_check_project_scope or main_debugger.is_files_filter_enabled:
                            stop = not main_debugger.apply_files_filter(
                                frame, frame.f_code.co_filename,
                                force_check_project_scope)
                        else:
                            stop = True

                    elif is_return and frame.f_back is not None:
                        if main_debugger.get_file_type(
                                frame.f_back) == main_debugger.PYDEV_FILE:
                            stop = False
                        else:
                            if force_check_project_scope or main_debugger.is_files_filter_enabled:
                                stop = not main_debugger.apply_files_filter(
                                    frame.f_back,
                                    frame.f_back.f_code.co_filename,
                                    force_check_project_scope)
                            else:
                                stop = True

                    if plugin_manager is not None:
                        result = plugin_manager.cmd_step_into(
                            main_debugger, frame, event, self._args, stop_info,
                            stop)
                        if result:
                            stop, plugin_stop = result

                elif step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE):
                    # Note: when dealing with a step over my code it's the same as a step over (the
                    # difference is that when we return from a frame in one we go to regular step
                    # into and in the other we go to a step into my code).
                    stop = stop_frame is frame and is_line
                    # Note: don't stop on a return for step over, only for line events
                    # i.e.: don't stop in: (stop_frame is frame.f_back and is_return) as we'd stop twice in that line.

                    if plugin_manager is not None:
                        result = plugin_manager.cmd_step_over(
                            main_debugger, frame, event, self._args, stop_info,
                            stop)
                        if result:
                            stop, plugin_stop = result

                elif step_cmd == CMD_SMART_STEP_INTO:
                    stop = False
                    if info.pydev_smart_step_stop is frame:
                        info.pydev_func_name = '.invalid.'  # Must match the type in cython
                        info.pydev_smart_step_stop = None

                    if is_line or is_exception_event:
                        curr_func_name = frame.f_code.co_name

                        # global context is set with an empty name
                        if curr_func_name in (
                                '?', '<module>') or curr_func_name is None:
                            curr_func_name = ''

                        if curr_func_name == info.pydev_func_name:
                            stop = True

                elif step_cmd in (CMD_STEP_RETURN, CMD_STEP_RETURN_MY_CODE):
                    stop = is_return and stop_frame is frame

                else:
                    stop = False

                if stop and step_cmd != -1 and is_return and IS_PY3K and hasattr(
                        frame, "f_back"):
                    f_code = getattr(frame.f_back, 'f_code', None)
                    if f_code is not None:
                        if main_debugger.get_file_type(
                                frame.f_back) == main_debugger.PYDEV_FILE:
                            stop = False

                if plugin_stop:
                    stopped_on_plugin = plugin_manager.stop(
                        main_debugger, frame, event, self._args, stop_info,
                        arg, step_cmd)
                elif stop:
                    if is_line:
                        self.set_suspend(
                            thread,
                            step_cmd,
                            original_step_cmd=info.pydev_original_step_cmd)
                        self.do_wait_suspend(thread, frame, event, arg)
                    elif is_return:  # return event
                        back = frame.f_back
                        if back is not None:
                            # When we get to the pydevd run function, the debugging has actually finished for the main thread
                            # (note that it can still go on for other threads, but for this one, we just make it finish)
                            # So, just setting it to None should be OK
                            _, back_filename, base = get_abs_path_real_path_and_base_from_frame(
                                back)
                            if (base,
                                    back.f_code.co_name) in (DEBUG_START,
                                                             DEBUG_START_PY3K):
                                back = None

                            elif base == TRACE_PROPERTY:
                                # We don't want to trace the return event of pydevd_traceproperty (custom property for debugging)
                                # if we're in a return, we want it to appear to the user in the previous frame!
                                return None if is_call else NO_FTRACE

                            elif pydevd_dont_trace.should_trace_hook is not None:
                                if not pydevd_dont_trace.should_trace_hook(
                                        back, back_filename):
                                    # In this case, we'll have to skip the previous one because it shouldn't be traced.
                                    # Also, we have to reset the tracing, because if the parent's parent (or some
                                    # other parent) has to be traced and it's not currently, we wouldn't stop where
                                    # we should anymore (so, a step in/over/return may not stop anywhere if no parent is traced).
                                    # Related test: _debugger_case17a.py
                                    main_debugger.set_trace_for_frame_and_parents(
                                        back)
                                    return None if is_call else NO_FTRACE

                        if back is not None:
                            # if we're in a return, we want it to appear to the user in the previous frame!
                            self.set_suspend(
                                thread,
                                step_cmd,
                                original_step_cmd=info.pydev_original_step_cmd)
                            self.do_wait_suspend(thread, back, event, arg)
                        else:
                            # in jython we may not have a back frame
                            info.pydev_step_stop = None
                            info.pydev_original_step_cmd = -1
                            info.pydev_step_cmd = -1
                            info.pydev_state = STATE_RUN

            except KeyboardInterrupt:
                raise
            except:
                try:
                    pydev_log.exception()
                    info.pydev_original_step_cmd = -1
                    info.pydev_step_cmd = -1
                except:
                    return None if is_call else NO_FTRACE

            # if we are quitting, let's stop the tracing
            if not main_debugger.quitting:
                return self.trace_dispatch
            else:
                return None if is_call else NO_FTRACE
        finally:
            info.is_tracing -= 1
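
The is_coroutine_or_generator test above masks frame.f_code.co_flags with 0xa0 (CO_GENERATOR | CO_COROUTINE); a small sketch of the same check, written with the named constants from inspect:

import inspect

# 0x20 | 0x80 == 0xa0, i.e. the mask used by trace_dispatch above.
_GEN_OR_CORO = inspect.CO_GENERATOR | inspect.CO_COROUTINE

def is_generator_or_coroutine_code(code_obj):
    # True for code objects compiled from generator functions or 'async def' functions.
    return bool(code_obj.co_flags & _GEN_OR_CORO)

def _gen():
    yield 1

async def _coro():
    pass

assert is_generator_or_coroutine_code(_gen.__code__)
assert is_generator_or_coroutine_code(_coro.__code__)
assert not is_generator_or_coroutine_code(is_generator_or_coroutine_code.__code__)
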
Example #49
0
def start_console_server(host, port, interpreter):
    try:
        if port == 0:
            host = ''

        # I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse.
        from _pydev_bundle.pydev_imports import SimpleXMLRPCServer as XMLRPCServer  # @Reimport

        try:
            if IS_PY24:
                server = XMLRPCServer((host, port), logRequests=False)
            else:
                server = XMLRPCServer((host, port), logRequests=False, allow_none=True)

        except:
            sys.stderr.write('Error starting server with host: "%s", port: "%s", client_port: "%s"\n' % (host, port, interpreter.client_port))
            sys.stderr.flush()
            raise

        # Tell UMD the proper default namespace
        _set_globals_function(interpreter.get_namespace)

        server.register_function(interpreter.execLine)
        server.register_function(interpreter.execMultipleLines)
        server.register_function(interpreter.getCompletions)
        server.register_function(interpreter.getFrame)
        server.register_function(interpreter.getVariable)
        server.register_function(interpreter.changeVariable)
        server.register_function(interpreter.getDescription)
        server.register_function(interpreter.close)
        server.register_function(interpreter.interrupt)
        server.register_function(interpreter.handshake)
        server.register_function(interpreter.connectToDebugger)
        server.register_function(interpreter.hello)
        server.register_function(interpreter.getArray)
        server.register_function(interpreter.evaluate)
        server.register_function(interpreter.ShowConsole)
        server.register_function(interpreter.loadFullValue)

        # Functions for GUI main loop integration
        server.register_function(interpreter.enableGui)

        if port == 0:
            (h, port) = server.socket.getsockname()

            print(port)
            print(interpreter.client_port)

        while True:
            try:
                server.serve_forever()
            except:
                # Ugly code to be py2/3 compatible
                # https://sw-brainwy.rhcloud.com/tracker/PyDev/534:
                # Unhandled "interrupted system call" error in the pydevconsol.py
                e = sys.exc_info()[1]
                retry = False
                try:
                    retry = e.args[0] == 4  # errno.EINTR
                except:
                    pass
                if not retry:
                    raise
                # Otherwise, keep on going
        return server
    except:
        pydev_log.exception()
        # Notify about error to avoid long waiting
        connection_queue = interpreter.get_connect_status_queue()
        if connection_queue is not None:
            connection_queue.put(False)
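
The serve_forever loop above retries when the call is interrupted by a signal (errno 4, i.e. EINTR); a sketch of the same retry pattern expressed with the errno module, assuming only a server object with serve_forever():

import errno

def serve_with_eintr_retry(server):
    # An 'interrupted system call' (EINTR) is not a real failure: call serve_forever()
    # again; any other error is re-raised to the caller.
    while True:
        try:
            server.serve_forever()
        except OSError as e:
            if e.errno != errno.EINTR:
                raise
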
Example #50
0
    def should_stop_on_exception(self, frame, event, arg):
        # ENDIF

        # main_debugger, _filename, info, _thread = self._args
        main_debugger = self._args[0]
        info = self._args[2]
        should_stop = False

        # STATE_SUSPEND = 2
        if info.pydev_state != 2:  # and breakpoint is not None:
            exception, value, trace = arg

            if trace is not None and hasattr(trace, 'tb_next'):
                # On Jython, trace is None on the first event and it may not have a tb_next.

                should_stop = False
                exception_breakpoint = None
                try:
                    if main_debugger.plugin is not None:
                        result = main_debugger.plugin.exception_break(
                            main_debugger, self, frame, self._args, arg)
                        if result:
                            should_stop, frame = result
                except:
                    pydev_log.exception()

                if not should_stop:
                    # It was not handled by any plugin, lets check exception breakpoints.
                    exception_breakpoint = main_debugger.get_exception_breakpoint(
                        exception, main_debugger.break_on_caught_exceptions)

                    if exception_breakpoint is not None:
                        if exception is SystemExit and main_debugger.ignore_system_exit_code(
                                value):
                            return False, frame

                        if exception_breakpoint.condition is not None:
                            eval_result = main_debugger.handle_breakpoint_condition(
                                info, exception_breakpoint, frame)
                            if not eval_result:
                                return False, frame

                        if main_debugger.exclude_exception_by_filter(
                                exception_breakpoint, trace, False):
                            pydev_log.debug(
                                "Ignore exception %s in library %s -- (%s)" %
                                (exception, frame.f_code.co_filename,
                                 frame.f_code.co_name))
                            return False, frame

                        if ignore_exception_trace(trace):
                            return False, frame

                        was_just_raised = just_raised(trace)
                        if was_just_raised:

                            if main_debugger.skip_on_exceptions_thrown_in_same_context:
                                # Option: Don't break if an exception is caught in the same function from which it is thrown
                                return False, frame

                        if exception_breakpoint.notify_on_first_raise_only:
                            if main_debugger.skip_on_exceptions_thrown_in_same_context:
                                # In this case we never stop if it was just raised, so, to know if it was the first we
                                # need to check if we're in the 2nd method.
                                if not was_just_raised and not just_raised(
                                        trace.tb_next):
                                    return False, frame  # I.e.: we stop only when we're at the caller of a method that throws an exception

                            else:
                                if not was_just_raised:
                                    return False, frame  # I.e.: we stop only when it was just raised

                        # If it got here we should stop.
                        should_stop = True
                        try:
                            info.pydev_message = exception_breakpoint.qname
                        except:
                            info.pydev_message = exception_breakpoint.qname.encode(
                                'utf-8')

                if should_stop:
                    # Always add exception to frame (must remove later after we proceed).
                    add_exception_to_frame(frame, (exception, value, trace))

                    if exception_breakpoint is not None and exception_breakpoint.expression is not None:
                        main_debugger.handle_breakpoint_expression(
                            exception_breakpoint, info, frame)

        return should_stop, frame
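
should_stop_on_exception leans on helpers such as just_raised(trace) to tell whether the exception is still in the frame that raised it; a hedged approximation of that idea (not the actual pydevd helper), based on the fact that tb_next is only filled in as the exception propagates to outer frames:

import sys

def just_raised(trace):
    # If the traceback has no tb_next yet, the exception is still at the raising frame.
    return trace is not None and trace.tb_next is None

try:
    raise ValueError('boom')
except ValueError:
    _, _, tb = sys.exc_info()
    assert just_raised(tb)
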
Example #51
0
def before_after_each_function(request):
    global _global_collect_info
    import psutil
    current_pids = set(proc.pid for proc in psutil.process_iter())
    before_curr_proc_memory_info = psutil.Process().memory_info()

    if _global_collect_info and DEBUG_MEMORY_INFO:
        try:
            from pympler import summary, muppy
            sum1 = summary.summarize(muppy.get_objects())
        except:
            pydev_log.exception()

    sys.stdout.write(
'''
===============================================================================
Memory before: %s
%s
===============================================================================
''' % (request.function, format_memory_info(psutil.virtual_memory(), before_curr_proc_memory_info)))
    yield

    processes_info = []
    for proc in psutil.process_iter():
        if proc.pid not in current_pids:
            try:
                processes_info.append(
                    'New Process: %s(%s) - %s' % (
                        proc.name(),
                        proc.pid,
                        format_process_memory_info(proc.memory_info())
                    )
                )
            except psutil.NoSuchProcess:
                pass  # The process could've died in the meantime

    after_curr_proc_memory_info = psutil.Process().memory_info()

    if DEBUG_MEMORY_INFO:
        try:
            if after_curr_proc_memory_info.rss - before_curr_proc_memory_info.rss > 10 * 1000 * 1000:
                # 10 MB leak
                if _global_collect_info:
                    sum2 = summary.summarize(muppy.get_objects())
                    diff = summary.get_diff(sum1, sum2)
                    sys.stdout.write('===============================================================================\n')
                    sys.stdout.write('Leak info:\n')
                    sys.stdout.write('===============================================================================\n')
                    summary.print_(diff)
                    sys.stdout.write('===============================================================================\n')

                _global_collect_info = True
                # We'll only really collect the info on the next test (i.e.: if at one test
                # we used too much memory, the next one will start collecting)
            else:
                _global_collect_info = False
        except:
            pydev_log.exception()

    sys.stdout.write(
'''
===============================================================================
Memory after: %s
%s%s
===============================================================================


''' % (
    request.function,
    format_memory_info(psutil.virtual_memory(), after_curr_proc_memory_info),
    '' if not processes_info else '\nLeaked processes:\n' + '\n'.join(processes_info)),
    )
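
The DEBUG_MEMORY_INFO branch above compares two pympler summaries once RSS grows by more than 10 MB; a compact sketch of just that diff step (pympler is the optional dependency already imported in the fixture):

from pympler import muppy, summary

def report_object_growth(baseline_summary):
    # Summarize all live Python objects and print what grew relative to the baseline,
    # returning the new summary so it can serve as the next baseline.
    current = summary.summarize(muppy.get_objects())
    summary.print_(summary.get_diff(baseline_summary, current))
    return current

# Usage sketch:
# baseline = summary.summarize(muppy.get_objects())
# ... run the test ...
# baseline = report_object_growth(baseline)
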
Example #52
0
    def apply(self):
        mod = self.mod
        self._on_finish_callbacks = []
        try:
            # Get the module name, e.g. 'foo.bar.whatever'
            modname = mod.__name__
            # Get the module namespace (dict) early; this is part of the type check
            modns = mod.__dict__
            # Parse it into package name and module name, e.g. 'foo.bar' and 'whatever'
            i = modname.rfind(".")
            if i >= 0:
                pkgname, modname = modname[:i], modname[i + 1:]
            else:
                pkgname = None
            # Compute the search path
            if pkgname:
                # We're not reloading the package, only the module in it
                pkg = sys.modules[pkgname]
                path = pkg.__path__  # Search inside the package
            else:
                # Search the top-level module path
                pkg = None
                path = None  # Make find_module() use the default search path
            # Find the module; may raise ImportError
            (stream, filename, (suffix, mode, kind)) = imp.find_module(modname, path)
            # Turn it into a code object
            try:
                # Is it Python source code or byte code read from a file?
                if kind not in (imp.PY_COMPILED, imp.PY_SOURCE):
                    # Fall back to built-in reload()
                    notify_error('Could not find source to reload (mod: %s)' % (modname,))
                    return
                if kind == imp.PY_SOURCE:
                    source = stream.read()
                    code = compile(source, filename, "exec")
                else:
                    import marshal
                    code = marshal.load(stream)
            finally:
                if stream:
                    stream.close()
            # Execute the code.  We copy the module dict to a temporary; then
            # clear the module dict; then execute the new code in the module
            # dict; then swap things back and around.  This trick (due to
            # Glyph Lefkowitz) ensures that the (readonly) __globals__
            # attribute of methods and functions is set to the correct dict
            # object.
            new_namespace = modns.copy()
            new_namespace.clear()
            new_namespace["__name__"] = modns["__name__"]
            Exec(code, new_namespace)
            # Now we get to the hard part
            oldnames = set(modns)
            newnames = set(new_namespace)

            # Create new tokens (note: not deleting existing)
            for name in newnames - oldnames:
                notify_info0('Added:', name, 'to namespace')
                self.found_change = True
                modns[name] = new_namespace[name]

            # Update in-place what we can
            for name in oldnames & newnames:
                self._update(modns, name, modns[name], new_namespace[name])

            self._handle_namespace(modns)

            for c in self._on_finish_callbacks:
                c()
            del self._on_finish_callbacks[:]
        except:
            pydev_log.exception()
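
The core of the reload above is executing the recompiled module code into a fresh dict and only then merging names back, so that functions keep pointing at the live (read-only) __globals__; a stripped-down sketch of that exec-and-merge step, ignoring the in-place updates done by _update:

def exec_and_merge_new_names(mod, code):
    # Run the recompiled code in a namespace that only knows the module's name,
    # then copy names that did not exist before into the live module dict
    # (existing names are left for the in-place update pass).
    modns = mod.__dict__
    new_namespace = {'__name__': modns['__name__']}
    exec(code, new_namespace)
    for name in set(new_namespace) - set(modns):
        modns[name] = new_namespace[name]
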
Example #53
0
    def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False):
        """Update oldobj, if possible in place, with newobj.

        If oldobj is immutable, this simply returns newobj.

        Args:
          oldobj: the object to be updated
          newobj: the object used as the source for the update
        """
        try:
            notify_info2('Updating: ', oldobj)
            if oldobj is newobj:
                # Probably something imported
                return

            if type(oldobj) is not type(newobj):
                # Cop-out: if the type changed, give up
                if name not in ('__builtins__',):
                    notify_error('Type of: %s (old: %s != new: %s) changed... Skipping.' % (name, type(oldobj), type(newobj)))
                return

            if isinstance(newobj, types.FunctionType):
                self._update_function(oldobj, newobj)
                return

            if isinstance(newobj, types.MethodType):
                self._update_method(oldobj, newobj)
                return

            if isinstance(newobj, classmethod):
                self._update_classmethod(oldobj, newobj)
                return

            if isinstance(newobj, staticmethod):
                self._update_staticmethod(oldobj, newobj)
                return

            if hasattr(types, 'ClassType'):
                classtype = (types.ClassType, type)  # object is not instance of types.ClassType.
            else:
                classtype = type

            if isinstance(newobj, classtype):
                self._update_class(oldobj, newobj)
                return

            # New: dealing with metaclasses.
            if hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and newobj.__metaclass__ == newobj.__class__:
                self._update_class(oldobj, newobj)
                return

            if namespace is not None:
                # Check for the `__xreload_old_new__` protocol (don't even compare things
                # as even doing a comparison may break things -- see: https://github.com/microsoft/debugpy/issues/615).
                xreload_old_new = None
                if is_class_namespace:
                    xreload_old_new = getattr(namespace, '__xreload_old_new__', None)
                    if xreload_old_new is not None:
                        self.found_change = True
                        xreload_old_new(name, oldobj, newobj)

                elif '__xreload_old_new__' in namespace:
                    xreload_old_new = namespace['__xreload_old_new__']
                    xreload_old_new(namespace, name, oldobj, newobj)
                    self.found_change = True

                # Too much information to the user...
                # else:
                #     notify_info0('%s NOT updated. Create __xreload_old_new__(name, old, new) for custom reload' % (name,))

        except:
            notify_error('Exception found when updating %s. Proceeding for other items.' % (name,))
            pydev_log.exception()
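
A module (or class) can opt into custom reload handling through the __xreload_old_new__ hook probed above; a hypothetical module-level hook matching the call shape used for plain module namespaces (namespace, name, old, new):

# some_module.py -- illustrative only; the hook name is the one the reloader looks up.

_LIVE_OBJECTS = {}

def __xreload_old_new__(namespace, name, old, new):
    # Called by the reloader with the module dict, the attribute name and the
    # old/new objects; here we just record the replacement and install the new object.
    _LIVE_OBJECTS[name] = new
    namespace[name] = new
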
Example #54
0
def _separate_future_imports(code):
    '''
    :param code:
        The code from where we want to get the __future__ imports (note that it's possible that
        there's no such entry).

    :return tuple(str, str):
        The return is a tuple(future_import, code).

        If the future import is not available a return such as ('', code) is given, otherwise, the
        future import will end with a ';' (so that it can be put right before the pydevd attach
        code).
    '''
    try:
        node = ast.parse(code, '<string>', 'exec')
        visitor = _LastFutureImportFinder()
        visitor.visit(node)

        if visitor.last_future_import_found is None:
            return '', code

        node = visitor.last_future_import_found
        offset = -1
        if hasattr(node, 'end_lineno') and hasattr(node, 'end_col_offset'):
            # Python 3.8 onwards has these (so, use when possible).
            line, col = node.end_lineno, node.end_col_offset
            offset = _get_offset_from_line_col(code, line - 1, col)  # ast lines are 1-based, make it 0-based.

        else:
            # end line/col not available, let's just find the offset and then search
            # for the alias from there.
            line, col = node.lineno, node.col_offset
            offset = _get_offset_from_line_col(code, line - 1, col)  # ast lines are 1-based, make it 0-based.
            if offset >= 0 and node.names:
                from_future_import_name = node.names[-1].name
                i = code.find(from_future_import_name, offset)
                if i < 0:
                    offset = -1
                else:
                    offset = i + len(from_future_import_name)

        if offset >= 0:
            for i in range(offset, len(code)):
                if code[i] in (' ', '\t', ';', ')', '\n'):
                    offset += 1
                else:
                    break

            future_import = code[:offset]
            code_remainder = code[offset:]

            # Now, put '\n' lines back into the code remainder (the scan above may consume a
            # trailing ')' or '\n', but any '\n' we just consumed should stay with the remainder,
            # not with the future import).
            while future_import.endswith('\n'):
                future_import = future_import[:-1]
                code_remainder = '\n' + code_remainder

            if not future_import.endswith(';'):
                future_import += ';'
            return future_import, code_remainder

        # This shouldn't happen...
        pydev_log.info('Unable to find line %s in code:\n%r', line, code)
        return '', code

    except:
        pydev_log.exception('Error getting from __future__ imports from: %r', code)
        return '', code
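
A smaller sketch of the detection part only: walk the AST and keep the last 'from __future__ import ...' node (the offset bookkeeping done above to split the source text is omitted):

import ast

def find_last_future_import(code):
    # Returns the last ImportFrom node whose module is '__future__', or None.
    last = None
    for node in ast.walk(ast.parse(code)):
        if isinstance(node, ast.ImportFrom) and node.module == '__future__':
            last = node
    return last

node = find_last_future_import('from __future__ import annotations\nx = 1\n')
print(node.lineno if node is not None else 'no __future__ imports')
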
Example #55
0
try:
    rPath = os.path.realpath  # @UndefinedVariable
except:
    # jython does not support os.path.realpath
    # realpath is a no-op on systems without islink support
    rPath = os.path.abspath

# defined as a list of tuples where the 1st element of the tuple is the path in the client machine
# and the 2nd element is the path in the server machine.
# see module docstring for more details.
try:
    PATHS_FROM_ECLIPSE_TO_PYTHON = json.loads(os.environ.get('PATHS_FROM_ECLIPSE_TO_PYTHON', '[]'))
except Exception:
    sys.stderr.write('Error loading PATHS_FROM_ECLIPSE_TO_PYTHON from environment variable.\n')
    pydev_log.exception()
    PATHS_FROM_ECLIPSE_TO_PYTHON = []
else:
    if not isinstance(PATHS_FROM_ECLIPSE_TO_PYTHON, list):
        sys.stderr.write('Expected PATHS_FROM_ECLIPSE_TO_PYTHON loaded from environment variable to be a list.\n')
        PATHS_FROM_ECLIPSE_TO_PYTHON = []
    else:
        # Converting json lists to tuple
        PATHS_FROM_ECLIPSE_TO_PYTHON = [tuple(x) for x in PATHS_FROM_ECLIPSE_TO_PYTHON]

# example:
# PATHS_FROM_ECLIPSE_TO_PYTHON = [
#  (r'd:\temp\temp_workspace_2\test_python\src\yyy\yyy',
#   r'd:\temp\temp_workspace_2\test_python\src\hhh\xxx')
# ]
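
Given the (client_path, server_path) tuples above, a hypothetical translation helper (not part of the snippet) would map a client-side prefix onto the server-side one; case normalization and separator handling are omitted:

def to_server_path(client_path, paths_from_eclipse_to_python):
    # Replace the first matching client-side prefix with its server-side counterpart.
    for eclipse_prefix, python_prefix in paths_from_eclipse_to_python:
        if client_path.startswith(eclipse_prefix):
            return python_prefix + client_path[len(eclipse_prefix):]
    return client_path
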
Example #56
0
def on_exception(msg):
    from _pydev_bundle import pydev_log
    pydev_log.exception(msg)
Example #57
0
def patch_args(args, is_exec=False):
    '''
    :param list args:
        Arguments to patch.

    :param bool is_exec:
        If it's an exec, the current process will be replaced (this means we have
        to keep the same ppid).
    '''
    try:
        pydev_log.debug("Patching args: %s", args)
        original_args = args
        try:
            unquoted_args = remove_quotes_from_args(args)
        except InvalidTypeInArgsException as e:
            pydev_log.info('Unable to monkey-patch subprocess arguments because a type found in the args is invalid: %s', e)
            return original_args

        # Internally we should reference original_args (if we want to return them) or unquoted_args
        # to add to the list which will be then quoted in the end.
        del args

        from pydevd import SetupHolder
        if not unquoted_args:
            return original_args

        if not is_python(unquoted_args[0]):
            pydev_log.debug("Process is not python, returning.")
            return original_args

        # Note: we create a copy as string to help with analyzing the arguments, but
        # the final list should have items from the unquoted_args as they were initially.
        args_as_str = _get_str_type_compatible('', unquoted_args)

        params_with_value_in_separate_arg = (
            '--check-hash-based-pycs',
            '--jit'  # pypy option
        )

        # All short switches may be combined together. The ones below require a value and the
        # value itself may be embedded in the arg.
        #
        # i.e.: Python accepts things as:
        #
        # python -OQold -qmtest
        #
        # Which is the same as:
        #
        # python -O -Q old -q -m test
        #
        # or even:
        #
        # python -OQold "-vcimport sys;print(sys)"
        #
        # Which is the same as:
        #
        # python -O -Q old -v -c "import sys;print(sys)"

        params_with_combinable_arg = set(('W', 'X', 'Q', 'c', 'm'))

        module_name = None
        before_module_flag = ''
        module_name_i_start = -1
        module_name_i_end = -1

        code = None
        code_i = -1
        code_i_end = -1
        code_flag = ''

        filename = None
        filename_i = -1

        ignore_next = True  # start ignoring the first (the first entry is the python executable)
        for i, arg_as_str in enumerate(args_as_str):
            if ignore_next:
                ignore_next = False
                continue

            if arg_as_str.startswith('-'):
                if arg_as_str == '-':
                    # Contents will be read from the stdin. This is not currently handled.
                    pydev_log.debug('Unable to fix arguments to attach debugger on subprocess when reading from stdin ("python ... -").')
                    return original_args

                if arg_as_str.startswith(params_with_value_in_separate_arg):
                    if arg_as_str in params_with_value_in_separate_arg:
                        ignore_next = True
                    continue

                break_out = False
                for j, c in enumerate(arg_as_str):

                    # i.e.: Python supports -X faulthandler as well as -Xfaulthandler
                    # (in one case we have to ignore the next and in the other we don't
                    # have to ignore it).
                    if c in params_with_combinable_arg:
                        remainder = arg_as_str[j + 1:]
                        if not remainder:
                            ignore_next = True

                        if c == 'm':
                            # i.e.: Something as
                            # python -qm test
                            # python -m test
                            # python -qmtest
                            before_module_flag = arg_as_str[:j]  # before_module_flag would then be "-q"
                            if before_module_flag == '-':
                                before_module_flag = ''
                            module_name_i_start = i
                            if not remainder:
                                module_name = unquoted_args[i + 1]
                                module_name_i_end = i + 1
                            else:
                                # i.e.: python -qmtest should provide 'test' as the module_name
                                module_name = unquoted_args[i][j + 1:]
                                module_name_i_end = module_name_i_start
                            break_out = True
                            break

                        elif c == 'c':
                            # i.e.: Something as
                            # python -qc "import sys"
                            # python -c "import sys"
                            # python "-qcimport sys"
                            code_flag = arg_as_str[:j + 1]  # code_flag would then be "-qc"

                            if not remainder:
                                # arg_as_str is something as "-qc", "import sys"
                                code = unquoted_args[i + 1]
                                code_i_end = i + 2
                            else:
                                # if arg_as_str is something as "-qcimport sys"
                                code = remainder  # code would be "import sys"
                                code_i_end = i + 1
                            code_i = i
                            break_out = True
                            break

                        else:
                            break

                if break_out:
                    break

            else:
                # It doesn't start with '-' and we didn't ignore this entry:
                # this means that this is the file to be executed.
                filename = unquoted_args[i]
                filename_i = i

                # When executing .zip applications, don't attach the debugger.
                extensions = _get_str_type_compatible(filename, ['.zip', '.pyz', '.pyzw'])
                for ext in extensions:
                    if filename.endswith(ext):
                        pydev_log.debug('Executing a PyZip (debugger will not be attached to subprocess).')
                        return original_args

                if _is_managed_arg(filename):  # no need to add pydevd twice
                    pydev_log.debug('Skipped monkey-patching as pydevd.py is in args already.')
                    return original_args

                break
        else:
            # We didn't find the filename (something is unexpected).
            pydev_log.debug('Unable to fix arguments to attach debugger on subprocess (filename not found).')
            return original_args

        if code_i != -1:
            host, port = _get_host_port()

            if port is not None:
                new_args = []
                new_args.extend(unquoted_args[:code_i])
                new_args.append(code_flag)
                new_args.append(_get_python_c_args(host, port, code, unquoted_args, SetupHolder.setup))
                new_args.extend(unquoted_args[code_i_end:])

                return quote_args(new_args)

        first_non_vm_index = max(filename_i, module_name_i_start)
        if first_non_vm_index == -1:
            pydev_log.debug('Unable to fix arguments to attach debugger on subprocess (could not resolve filename nor module name).')
            return original_args

        # Original args should be something as:
        # ['X:\\pysrc\\pydevd.py', '--multiprocess', '--print-in-debugger-startup',
        #  '--vm_type', 'python', '--client', '127.0.0.1', '--port', '56352', '--file', 'x:\\snippet1.py']
        from _pydevd_bundle.pydevd_command_line_handling import setup_to_argv
        new_args = []
        new_args.extend(unquoted_args[:first_non_vm_index])
        if before_module_flag:
            new_args.append(before_module_flag)

        add_module_at = len(new_args) + 1

        new_args.extend(setup_to_argv(
            _get_setup_updated_with_protocol_and_ppid(SetupHolder.setup, is_exec=is_exec)
        ))
        new_args.append('--file')

        if module_name is not None:
            assert module_name_i_start != -1
            assert module_name_i_end != -1
            # Always after 'pydevd' (i.e.: pydevd "--module" --multiprocess ...)
            new_args.insert(add_module_at, '--module')
            new_args.append(module_name)
            new_args.extend(unquoted_args[module_name_i_end + 1:])

        elif filename is not None:
            assert filename_i != -1
            new_args.append(filename)
            new_args.extend(unquoted_args[filename_i + 1:])

        else:
            raise AssertionError('Internal error (unexpected condition)')

        return quote_args(new_args)
    except:
        pydev_log.exception('Error patching args (debugger not attached to subprocess).')
        return original_args
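
The flag handling above relies on CPython letting the value of -m or -c be glued to a run of combined short switches (python -qmtest is the same as python -q -m test); a tiny standalone parser sketch for just the -m case, separate from the real patching logic:

def split_module_switch(args):
    # Returns (module_name, arg_index) for a '-m' style switch, or (None, -1).
    # Handles both 'python -q -m test' and 'python -qmtest' (illustrative only;
    # other flags that take values are not special-cased here).
    skip_next = True  # skip the interpreter executable itself
    for i, arg in enumerate(args):
        if skip_next:
            skip_next = False
            continue
        if arg.startswith('-') and 'm' in arg[1:]:
            j = arg.index('m', 1)
            remainder = arg[j + 1:]
            if remainder:               # '-qmtest': module name glued to the switch
                return remainder, i
            return args[i + 1], i + 1   # '-qm test': module name is the next argument
    return None, -1

print(split_module_switch(['python', '-qmtest']))        # ('test', 1)
print(split_module_switch(['python', '-q', '-m', 'x']))  # ('x', 3)
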
Example #58
0
def patch_args(args):
    try:
        pydev_log.debug("Patching args: %s", args)
        args = remove_quotes_from_args(args)

        from pydevd import SetupHolder
        new_args = []
        if len(args) == 0:
            return args

        if is_python(args[0]):
            ind_c = get_c_option_index(args)

            if ind_c != -1:
                host, port = _get_host_port()

                if port is not None:
                    new_args.extend(args)
                    new_args[ind_c + 1] = _get_python_c_args(host, port, ind_c, args, SetupHolder.setup)
                    return quote_args(new_args)
            else:
                # Check for Python ZIP Applications and don't patch the args for them.
                # Assumes the first non `-<flag>` argument is what we need to check.
                # There's probably a better way to determine this but it works for most cases.
                continue_next = False
                for i in range(1, len(args)):
                    if continue_next:
                        continue_next = False
                        continue

                    arg = args[i]
                    if arg.startswith(_get_str_type_compatible(arg, '-')):
                        # Skip the next arg too if this flag expects a value.
                        continue_next = arg in _get_str_type_compatible(arg, ['-m', '-W', '-X'])
                        continue

                    dot = _get_str_type_compatible(arg, '.')
                    extensions = _get_str_type_compatible(arg, ['zip', 'pyz', 'pyzw'])
                    if arg.rsplit(dot)[-1] in extensions:
                        pydev_log.debug('Executing a PyZip, returning')
                        return args
                    break

                new_args.append(args[0])
        else:
            pydev_log.debug("Process is not python, returning.")
            return args

        i = 1
        # Original args should be something as:
        # ['X:\\pysrc\\pydevd.py', '--multiprocess', '--print-in-debugger-startup',
        #  '--vm_type', 'python', '--client', '127.0.0.1', '--port', '56352', '--file', 'x:\\snippet1.py']
        from _pydevd_bundle.pydevd_command_line_handling import setup_to_argv
        original = setup_to_argv(_get_setup_updated_with_protocol(SetupHolder.setup)) + ['--file']

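        # Walk over the interpreter flags; '-m module' (or the single-argument '-mmodule' form)
        # is translated into pydevd's '--module' argument.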
        module_name = None
        m_flag = _get_str_type_compatible(args[i], '-m')
        while i < len(args):
            if args[i] == m_flag:
                # Always insert at pos == 1 (i.e.: pydevd "--module" --multiprocess ...)
                original.insert(1, '--module')
            elif args[i].startswith(m_flag):
                # Case where the user does: python -mmodule_name (using a single parameter).
                original.insert(1, '--module')
                module_name = args[i][2:]
            else:
                if args[i].startswith(_get_str_type_compatible(args[i], '-')):
                    new_args.append(args[i])
                else:
                    break
            i += 1

        # Note: undoing https://github.com/Elizaveta239/PyDev.Debugger/commit/053c9d6b1b455530bca267e7419a9f63bf51cddf
        # (i >= len(args) instead of i < len(args)): in practice it would raise an exception here and
        # return the original args, which is not what we want; a proper fix for
        # https://youtrack.jetbrains.com/issue/PY-9767 is provided elsewhere.
        if i < len(args) and _is_managed_arg(args[i]):  # no need to add pydevd twice
            return args

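        # Copy the pydevd bootstrap command line up to (and including) '--file'.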
        for x in original:
            new_args.append(x)
            if x == _get_str_type_compatible(x, '--file'):
                break

        if module_name is not None:
            new_args.append(module_name)

        while i < len(args):
            new_args.append(args[i])
            i += 1

        return quote_args(new_args)
    except:
        pydev_log.exception('Error patching args')
        return args
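
A rough, self-contained sketch of the '-c' handling above (the wrapper template and the sys.path entry are made up for illustration; the real bootstrap string is built by _get_python_c_args from SetupHolder.setup): when the child command is `python -c "<code>"`, only the code payload is replaced by a wrapper that configures the debugger first and then executes the original code.

def _wrap_c_code(original_code, host, port):
    # Hypothetical wrapper: make pydevd importable, attach to the IDE, then run the original code.
    template = (
        "import sys; sys.path.insert(0, r'/path/to/pysrc'); "
        "import pydevd; pydevd.settrace(host=%r, port=%s, suspend=False); "
        "exec(%r)"
    )
    return template % (host, port, original_code)


args = ['python', '-c', 'print("hello from the child")']
ind_c = args.index('-c')
args[ind_c + 1] = _wrap_c_code(args[ind_c + 1], '127.0.0.1', 5678)
print(args)
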
Example #59
def _schedule_callback(prev, next):
    '''
    Called when a context is stopped or a new context is made runnable.
    '''
    try:
        if not prev and not next:
            return

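        # Frame of this callback itself: skipped below when inspecting the tasklets' frames.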
        current_frame = sys._getframe()

        if next:
            register_tasklet_info(next)

            # Ok, making next runnable: set the tracing facility in it.
            debugger = get_global_debugger()
            if debugger is not None:
                next.trace_function = debugger.get_thread_local_trace_func()
                frame = next.frame
                if frame is current_frame:
                    frame = frame.f_back
                if hasattr(frame, 'f_trace'):  # Note: can be None (but hasattr should cover for that too).
                    frame.f_trace = debugger.get_thread_local_trace_func()

            debugger = None

        if prev:
            register_tasklet_info(prev)

        # Re-fetch the debugger: it may not have been set above (e.g.: when only `prev` is given).
        debugger = get_global_debugger()

        try:
            for tasklet_ref, tasklet_info in dict_items(_weak_tasklet_registered_to_info):  # Make sure it's a copy!
                tasklet = tasklet_ref()
                if tasklet is None or not tasklet.alive:
                    # Garbage-collected already!
                    try:
                        del _weak_tasklet_registered_to_info[tasklet_ref]
                    except KeyError:
                        pass
                    if tasklet_info.frame_id is not None:
                        remove_custom_frame(tasklet_info.frame_id)
                else:
                    is_running = stackless.get_thread_info(tasklet.thread_id)[1] is tasklet
                    if tasklet is prev or (tasklet is not next and not is_running):
                        # the tasklet won't run after this scheduler action:
                        # - the tasklet is the previous tasklet
                        # - it is not the next tasklet and it is not an already running tasklet
                        frame = tasklet.frame
                        if frame is current_frame:
                            frame = frame.f_back
                        if frame is not None:
                            abs_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
                            # print >>sys.stderr, "SchedCB: %r, %d, '%s', '%s'" % (tasklet, frame.f_lineno, _filename, base)
                            if debugger is not None and debugger.get_file_type(abs_real_path_and_base) is None:
                                tasklet_info.update_name()
                                if tasklet_info.frame_id is None:
                                    tasklet_info.frame_id = add_custom_frame(frame, tasklet_info.tasklet_name, tasklet.thread_id)
                                else:
                                    update_custom_frame(tasklet_info.frame_id, frame, tasklet.thread_id, name=tasklet_info.tasklet_name)

                    elif tasklet is next or is_running:
                        if tasklet_info.frame_id is not None:
                            # Remove info about stackless suspended when it starts to run.
                            remove_custom_frame(tasklet_info.frame_id)
                            tasklet_info.frame_id = None

        finally:
            tasklet = None
            tasklet_info = None
            frame = None

    except:
        pydev_log.exception()

    if _application_set_schedule_callback is not None:
        return _application_set_schedule_callback(prev, next)
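
A minimal sketch of the callback-chaining pattern in the last two lines above (the _Scheduler class is only a stand-in, not the real stackless module): the debugger's schedule callback does its own bookkeeping first and then forwards to whatever callback the application had already installed, so application-level scheduling hooks keep working.

class _Scheduler(object):
    # Tiny stand-in for the scheduler's callback slot (not the real stackless API).

    def __init__(self):
        self._callback = None

    def set_schedule_callback(self, callback):
        previous, self._callback = self._callback, callback
        return previous

    def run(self, prev, next):
        if self._callback is not None:
            self._callback(prev, next)


scheduler = _Scheduler()
scheduler.set_schedule_callback(lambda prev, next: print('application callback:', prev, '->', next))


def _debugger_schedule_callback(prev, next):
    print('debugger bookkeeping:', prev, '->', next)
    if _application_set_schedule_callback is not None:
        return _application_set_schedule_callback(prev, next)


# Install the debugger's callback, keeping whatever the application had registered before.
_application_set_schedule_callback = scheduler.set_schedule_callback(_debugger_schedule_callback)
scheduler.run('tasklet-a', 'tasklet-b')
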