Example #1
 def _assertCompletionsAre(self, markedup_content, buf, trg, completions,
                           lang, implicit):
     if trg is None:
         self.fail("given position is not a %s trigger point, "
                   "expected completions to be %r:\n%s"
                   % (lang, completions, indent(markedup_content)))
     if isinstance(buf, CitadelBuffer):
         buf.unload()  # remove any entry from CIDB to ensure clean test
     ctlr = _CaptureEvalController()
     actual_completions = buf.cplns_from_trg(trg, ctlr=ctlr)
     self.assertEqual(completions, actual_completions,
         "unexpected %s completions at the given position\n"
         "  expected: %r\n"
         "  got:      %r\n"
         "  extra:    %r\n"
         "  missing:  %r\n"
         "  eval log\n%s\n"
         "  buffer:\n%s"
         % (lang, completions,
            actual_completions,
            list(set(actual_completions or []).difference(
                completions or [])),
            list(set(completions or []).difference(
                actual_completions or [])),
            indent('\n'.join('%5s: %s' % (lvl, m) for lvl, m in ctlr.log)),
            indent(markedup_content)))
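
The "extra"/"missing" bookkeeping in the failure message above is just two set differences. A standalone sketch of that computation, using invented completion tuples rather than values from the source:

    # Invented expected/actual completion lists, as (type, name) tuples.
    expected = [("function", "upper"), ("function", "lower")]
    actual = [("function", "upper"), ("variable", "__doc__")]

    # "extra" = returned but not expected; "missing" = expected but not returned.
    extra = list(set(actual or []).difference(expected or []))
    missing = list(set(expected or []).difference(actual or []))
    print("extra:   %r" % extra)    # [('variable', '__doc__')]
    print("missing: %r" % missing)  # [('function', 'lower')]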
Example #2
    def assertNoDuplicateDefns2(self, buf, pos):
        markedup_content = markup_text(buf.accessor.text, pos=pos)
        ctlr = _CaptureEvalController()
        trg = buf.defn_trg_from_pos(pos)
        actual_defns = buf.defns_from_trg(trg, ctlr=ctlr)
        if not actual_defns:
            self.fail("%s trigger resulted in no definitions when expecting "
                      "to check for duplicate definitions:\n%s"
                      % (buf.lang, indent(markedup_content)))

        count_from_defn_repr = {}
        for defn_repr in (repr(d) for d in actual_defns):
            if defn_repr not in count_from_defn_repr:
                count_from_defn_repr[defn_repr] = 0
            count_from_defn_repr[defn_repr] += 1
        defn_dupes = [(count, defn_repr)
                      for defn_repr, count in count_from_defn_repr.items()
                      if count > 1]
        self.failIf(defn_dupes,
                    "unexpectedly got duplicate completions at the given position\n"
                    "  duplicates:\n%s\n"
                    "  eval log\n%s\n"
                    "  buffer:\n%s"
                    % (indent('\n'.join('%d of %s' % d for d in defn_dupes)),
                       indent('\n'.join('%5s: %s' % (
                       lvl, m) for lvl, m in ctlr.log)),
                       indent(markedup_content)))
Example #3
    def assertNoDuplicateCompletions(self, markedup_content, lang=None,
                                     implicit=True, env=None):
        if lang is None:
            lang = self.lang
        buf, trg = self._get_buf_and_trg(markedup_content, lang,
                                         implicit=implicit, env=env)
        if trg is None:
            self.fail("given position is not a %s trigger point, "
                      "expected there to be completions:\n%s"
                      % (lang, indent(markedup_content)))

        if isinstance(buf, CitadelBuffer):
            buf.unload()  # remove any entry from CIDB to ensure clean test
        ctlr = _CaptureEvalController()
        actual_completions = buf.cplns_from_trg(trg, ctlr=ctlr)
        if actual_completions is None:
            self.fail("%s trigger resulted in no completions when expecting "
                      "to check for duplicate completions:\n%s"
                      % (lang, indent(markedup_content)))

        count_from_cpln = {}
        for cpln in actual_completions:
            if cpln not in count_from_cpln:
                count_from_cpln[cpln] = 0
            count_from_cpln[cpln] += 1
        cpln_dupes = [(count, cpln) for cpln, count in count_from_cpln.items()
                      if count > 1]
        self.failIf(cpln_dupes,
            "unexpectedly got duplicate completions at the given position\n"
            "  duplicates:\n%s\n"
            "  eval log\n%s\n"
            "  buffer:\n%s"
            % (indent('\n'.join('%d of %r' % d for d in cpln_dupes)),
               indent('\n'.join('%5s: %s' % (lvl, m) for lvl, m in ctlr.log)),
               indent(markedup_content)))
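
The hand-rolled count_from_cpln histogram above is equivalent to collections.Counter. A minimal standalone sketch of the duplicate detection, with a made-up completion list:

    from collections import Counter

    # Made-up completion list containing one duplicated entry.
    actual_completions = [("function", "open"), ("function", "open"),
                          ("variable", "path")]
    cpln_dupes = [(count, cpln)
                  for cpln, count in Counter(actual_completions).items()
                  if count > 1]
    print(cpln_dupes)  # [(2, ('function', 'open'))]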
Example #4
 def _assertDefnMatches(self, buf, pos, lang=None, **fields):
     ctlr = _CaptureEvalController()
     trg = buf.defn_trg_from_pos(pos)
     defns = buf.defns_from_trg(trg, ctlr=ctlr)
     if not defns:
         self.fail(
             "unexpectedly did not find a definition in %r at pos %d\n"
             "  eval log\n%s\n"
             "  buffer:\n%s" % (buf, pos,
                                indent('\n'.join('%5s: %s' % (lvl, m)
                                                 for lvl, m in ctlr.log)),
                                indent(buf.accessor.text)))
     if "pos" in fields:
         fields["pos"] = self.adjust_pos(fields["pos"])
     defn = defns[0]
     for name, value in fields.items():
         try:
             actual_value = getattr(defn, name)
         except AttributeError:
             actual_value = None
         self.assertEqual(
             actual_value, value,
             "%s definition, unexpected value for field %r\n"
             "  defn:     %r\n"
             "  expected: %r\n"
             "  got:      %r\n"
             "  eval log\n%s\n"
             "  buffer:\n%s" % (buf.lang, name, defn, value, actual_value,
                                indent('\n'.join('%5s: %s' % (lvl, m)
                                                 for lvl, m in ctlr.log)),
                                indent(buf.accessor.text)))
Example #5
 def _assertDefnMatches(self, buf, pos, lang=None, **fields):
     ctlr = _CaptureEvalController()
     trg = buf.defn_trg_from_pos(pos)
     defns = buf.defns_from_trg(trg, ctlr=ctlr)
     if not defns:
         self.fail("unexpectedly did not find a definition in %r at pos %d\n"
                   "  eval log\n%s\n"
                   "  buffer:\n%s"
                   % (buf, pos,
                      indent('\n'.join('%5s: %s' % (
                                       lvl, m) for lvl, m in ctlr.log)),
                      indent(buf.accessor.text)))
     if "pos" in fields:
         fields["pos"] = self.adjust_pos(fields["pos"])
     defn = defns[0]
     for name, value in fields.items():
         try:
             actual_value = getattr(defn, name)
         except AttributeError:
             actual_value = None
         self.assertEqual(actual_value, value,
                          "%s definition, unexpected value for field %r\n"
                          "  defn:     %r\n"
                          "  expected: %r\n"
                          "  got:      %r\n"
                          "  eval log\n%s\n"
                          "  buffer:\n%s"
                          % (buf.lang, name, defn, value, actual_value,
                             indent('\n'.join('%5s: %s' % (
                                              lvl, m) for lvl, m in ctlr.log)),
                             indent(buf.accessor.text)))
Example #6
    def assertDefnIncludes(self, buf, pos, lang=None, **fields):
        """Check that a definition is found within one of the results for a
        given position.
        Note that this is not very useful; we normally only use the first
        definition.  It can however be used to ensure we don't regress things
        before they get fixed correctly.
        """
        ctlr = _CaptureEvalController()
        trg = buf.defn_trg_from_pos(pos)
        defns = buf.defns_from_trg(trg, ctlr=ctlr)
        if not defns:
            self.fail("unexpectedly did not find a definition in %r at pos %d\n"
                      "  eval log\n%s\n"
                      "  buffer:\n%s"
                      % (buf, pos,
                         indent('\n'.join('%5s: %s' % (
                                          lvl, m) for lvl, m in ctlr.log)),
                         indent(buf.accessor.text)))
        if "pos" in fields:
            fields["pos"] = self.adjust_pos(fields["pos"])

        # Copy defns over to filter for keys we care about
        filtered_defns = []
        for defn in defns:
            filtered_defn = dict((key, getattr(defn, key, None))
                                 for key in fields.keys())
            filtered_defns.append(filtered_defn)
        self.assertIn(fields, filtered_defns)
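
A standalone illustration of the field filtering used above, with a dummy definition object; the attribute names here are examples only, not the full codeintel definition API:

    class Defn(object):
        """Dummy stand-in for a codeintel definition object."""
        def __init__(self, **kw):
            self.__dict__.update(kw)

    defns = [Defn(ilk="function", name="join", line=52),
             Defn(ilk="variable", name="sep", line=7)]
    fields = {"ilk": "function", "name": "join"}
    # Keep only the keys we care about, then check for a full match.
    filtered_defns = [dict((key, getattr(d, key, None)) for key in fields)
                      for d in defns]
    assert fields in filtered_defns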
Example #7
    def assertNoDuplicateDefns2(self, buf, pos):
        markedup_content = markup_text(buf.accessor.text, pos=pos)
        ctlr = _CaptureEvalController()
        trg = buf.defn_trg_from_pos(pos)
        actual_defns = buf.defns_from_trg(trg, ctlr=ctlr)
        if not actual_defns:
            self.fail("%s trigger resulted in no definitions when expecting "
                      "to check for duplicate definitions:\n%s" %
                      (buf.lang, indent(markedup_content)))

        count_from_defn_repr = {}
        for defn_repr in (repr(d) for d in actual_defns):
            if defn_repr not in count_from_defn_repr:
                count_from_defn_repr[defn_repr] = 0
            count_from_defn_repr[defn_repr] += 1
        defn_dupes = [(count, defn_repr)
                      for defn_repr, count in count_from_defn_repr.items()
                      if count > 1]
        self.failIf(
            defn_dupes,
            "unexpectedly got duplicate completions at the given position\n"
            "  duplicates:\n%s\n"
            "  eval log\n%s\n"
            "  buffer:\n%s" % (indent('\n'.join('%d of %s' % d
                                                for d in defn_dupes)),
                               indent('\n'.join('%5s: %s' % (lvl, m)
                                                for lvl, m in ctlr.log)),
                               indent(markedup_content)))
Example #8
 def assertTriggerMatches(self,
                          markedup_content,
                          lang=None,
                          implicit=True,
                          env=None,
                          **fields):
     if lang is None:
         lang = self.lang
     buf, trg = self._get_buf_and_trg(markedup_content,
                                      lang,
                                      implicit=implicit,
                                      env=env)
     if trg is None:
         self.fail("unexpectedly did not find a %s trigger, buffer:\n%s" %
                   (lang, indent(markedup_content)))
     if "pos" in fields:
         fields["pos"] = self.adjust_pos(fields["pos"])
     for name, value in fields.items():
         try:
             actual_value = getattr(trg, name)
         except AttributeError:
             actual_value = trg.extra[name]
         self.assertEqual(
             actual_value, value,
             "unexpected %s trigger '%s' value: expected %r, "
             "got %r, buffer:\n%s" %
             (lang, name, value, actual_value, indent(markedup_content)))
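
The loop above falls back to trg.extra for field names that are not attributes on the trigger. A standalone illustration of that lookup pattern, with a dummy trigger object (field names and values are invented):

    class Trg(object):
        """Dummy trigger: some fields are attributes, the rest live in extra."""
        pos = 7
        form = "cpln"
        extra = {"imp_prefix": ("os", "path")}

    trg = Trg()
    for name, value in {"pos": 7, "imp_prefix": ("os", "path")}.items():
        try:
            actual_value = getattr(trg, name)
        except AttributeError:
            actual_value = trg.extra[name]
        assert actual_value == value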
Example #9
    def assertEvalError(self, markedup_content, log_pattern, lang=None,
                        implicit=True, env=None):
        if lang is None:
            lang = self.lang

        buf, trg = self._get_buf_and_trg(markedup_content, lang,
                                         implicit=implicit, env=env)
        if trg is None:
            self.fail("given position is not a %s trigger point, "
                      "no completion can be done to see if errors"
                      % self.lang)
        if isinstance(buf, CitadelBuffer):
            buf.unload()  # remove any entry from CIDB to ensure clean test

        class TestEvalController(EvalController):
            """A completion controller that captures all eval logging."""
            def __init__(self):
                EvalController.__init__(self)
                self.log = []

            def debug(self, msg, *args):
                self.log.append(("debug", msg % args))

            def info(self, msg, *args):
                self.log.append(("info", msg % args))

            def warn(self, msg, *args):
                self.log.append(("warn", msg % args))

            def error(self, msg, *args):
                self.log.append(("error", msg % args))

        ctlr = TestEvalController()
        buf.async_eval_at_trg(trg, ctlr=ctlr)
        ctlr.wait()
        if not ctlr.is_done():
            self.fail("evaluation is not 'done': didn't expect that")
        if trg.form == TRG_FORM_CPLN and ctlr.cplns:
            self.fail("evaluation had results: didn't expect that: %r"
                      % ctlr.cplns)
        elif trg.form == TRG_FORM_CALLTIP and ctlr.calltips:
            self.fail("evaluation had results: didn't expect that: %r"
                      % ctlr.calltips)
        if log_pattern:
            # pprint(ctlr.log)
            matching_logs = [(level, msg) for level, msg in ctlr.log
                             if log_pattern.search(msg)]
            self.failUnless(matching_logs,
                "the given completion failed but no logs matched the given pattern:\n"
                "  log_pattern: /%s/\n"
                "  log messages:\n%s\n"
                "  buffer:\n%s"
                % (log_pattern.pattern,
                   indent('\n'.join(['%s: %s' % lg for lg in ctlr.log])),
                   indent(markedup_content)))
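
A standalone illustration of the final log-pattern check, with a made-up eval log; the messages are invented, not real codeintel output:

    import re

    log = [("info", "resolving 'foo.bar'"),
           ("error", "could not resolve first part of 'foo.bar'")]
    log_pattern = re.compile(r"could not resolve")
    matching_logs = [(level, msg) for level, msg in log
                     if log_pattern.search(msg)]
    assert matching_logs  # the expected failure was logged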
Example #10
    def assertCITDLExprIs(self, markedup_content, citdl_expr, lang=None,
                          prefix_filter=None, implicit=True, trigger_name=None,
                          **fields):
        """Assert that the preceding CITDL expression at the current position
        is as expected.

        This uses buf.citdl_expr_from_trg() -- or, for Perl,
        buf.citdl_expr_and_prefix_filter_from_trg().

        The "prefix_filter" optional argument can be used for Perl to test
        the value its relevant function returns.
        """
        if lang is None:
            lang = self.lang
        content, data = unmark_text(
            self.adjust_content(markedup_content))
        path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
        buf = self.mgr.buf_from_content(content, lang=lang, path=path)
        langintel = self.mgr.langintel_from_lang(lang)
        if trigger_name is None:
            trigger_name = "fakey-completion-type"

        if lang == "Perl":
            # Bit of a hack to fake the trigger length.
            if content[data["pos"]-1] in ('>', ':'):  # '->' or '::' triggers
                length = 2
            else:
                length = 1
            trg = Trigger(lang, TRG_FORM_CPLN, trigger_name,
                          data["pos"], implicit=implicit, length=length,
                          **fields)
            actual_citdl_expr, actual_prefix_filter \
                = langintel.citdl_expr_and_prefix_filter_from_trg(buf, trg)
        else:
            trg = Trigger(lang, TRG_FORM_CPLN, trigger_name,
                          data["pos"], implicit=implicit,
                          **fields)
            actual_citdl_expr = langintel.citdl_expr_from_trg(buf, trg)
        self.assertEqual(actual_citdl_expr, citdl_expr,
                         "unexpected actual %s CITDL expr preceding trigger:\n"
                         "  expected: %r\n"
                         "  got:      %r\n"
                         "  buffer:\n%s"
                         % (lang, citdl_expr, actual_citdl_expr,
                            indent(markedup_content)))
        if lang == "Perl" and prefix_filter is not None:
            self.assertEqual(actual_prefix_filter, prefix_filter,
                             "unexpected actual %s variable prefix filter "
                             "preceding trigger:\n"
                             "  expected: %r\n"
                             "  got:      %r\n"
                             "  buffer:\n%s"
                             % (lang, prefix_filter, actual_prefix_filter,
                                indent(markedup_content)))
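
The Perl branch above fakes the trigger length from the two characters preceding the position. A standalone sketch of that check, with an invented buffer:

    content = "$obj->"          # invented Perl buffer ending right after '->'
    pos = len(content)          # trigger position, just past the operator
    length = 2 if content[pos - 1] in ('>', ':') else 1  # '->' or '::' triggers
    print(length)  # 2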
Example #11
 def assertCompletionsDoNotInclude(self, markedup_content, completions,
                                   implicit=True):
     """Check that the given completions are NOT found
     @param markedup_content Content with a <|> marking
     @param completions List of completions that must NOT appear; each item
         is a tuple of (type, completion string)
     @param implicit Whether the trigger should be implicit
     """
     actual_completions = self._doEval(markedup_content,
                                       implicit=implicit).completions
     extra_completions = set()
     for typ, cpln in completions:
         if UIHandler.AutoCompleteInfo(cpln, typ) in actual_completions:
             extra_completions.add((typ, cpln))
     self.failIf(extra_completions,
                 "%s completions at the given position included "
                 "some unexpected values\n"
                 "  shouldn't have had these: %r\n"
                 "  expected none of:         %r\n"
                 "  got:                      %r\n"
                 "  buffer:\n%s"
                 % (self.language, list(extra_completions), completions,
                    [(cpln.type, cpln.completion)
                     for cpln in actual_completions],
                    indent(markedup_content)))
Example #12
 def assertCompletionsDoNotInclude(self,
                                   markedup_content,
                                   completions,
                                   implicit=True):
     """Check that the given completions are NOT found
     @param markedup_content Content with a <|> marking
     @param completions List of completions that must NOT appear; each item
         is a tuple of (type, completion string)
     @param implicit Whether the trigger should be implicit
     """
     actual_completions = self._doEval(markedup_content,
                                       implicit=implicit).completions
     extra_completions = set()
     for typ, cpln in completions:
         if UIHandler.AutoCompleteInfo(cpln, typ) in actual_completions:
             extra_completions.add((typ, cpln))
     self.failIf(
         extra_completions,
         "%s completions at the given position included "
         "some unexpected values\n"
         "  shouldn't have had these: %r\n"
         "  expected none of:         %r\n"
         "  got:                      %r\n"
         "  buffer:\n%s" %
         (self.language, list(extra_completions), completions, [
             (cpln.type, cpln.completion) for cpln in actual_completions
         ], indent(markedup_content)))
Example #13
 def assertCalltipIs(self, markedup_content, calltip, implicit=True):
     """Check that the calltip is as expected
     @param markedup_content Content with a <|> marking
     @param calltip The calltip string
     @param implicit Whether the trigger should be implicit
     """
     handler = self._doEval(markedup_content, implicit=implicit)
     actual_calltip = getattr(handler, "calltip", None)
     self.assertEqual(calltip, actual_calltip,
         "unexpected calltip at the given position\n"
         "  expected:\n%s\n"
         "  got:\n%s\n"
         "  buffer:\n%s"
         % (indent(calltip if calltip else "(none)"),
            indent(actual_calltip if actual_calltip else "(none)"),
            indent(markedup_content)))
Example #14
 def assertCompletionsInclude(self, markedup_content, completions,
                              implicit=True):
     """Check that the given completions are found
     @param markedup_content Content with a <|> marking
     @param completions List of expected completions; each item is a tuple of
         (type, completion string)
     @param implicit Whether the trigger should be implicit
     """
     actual_completions = self._doEval(markedup_content,
                                       implicit=implicit).completions
     missing_completions = set()
     for typ, cpln in completions:
         if UIHandler.AutoCompleteInfo(cpln, typ) not in actual_completions:
             missing_completions.add((typ, cpln))
     self.failIf(missing_completions,
                 "%s completions at the given position did not "
                 "include all expected values\n"
                 "  missing:         %r\n"
                 "  expected all of: %r\n"
                 "  got:             %r\n"
                 "  buffer:\n%s"
                 % (self.language, list(missing_completions), completions,
                    [(cpln.type, cpln.completion)
                     for cpln in actual_completions],
                    indent(markedup_content)))
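
A standalone sketch of the membership test above, with a namedtuple standing in for UIHandler.AutoCompleteInfo (assumed here to compare by value, with completion-then-type field order as in the call above):

    from collections import namedtuple

    AutoCompleteInfo = namedtuple("AutoCompleteInfo", "completion type")

    actual_completions = [AutoCompleteInfo("split", "function"),
                          AutoCompleteInfo("strip", "function")]
    completions = [("function", "split"), ("function", "join")]
    missing_completions = set(
        (typ, cpln) for typ, cpln in completions
        if AutoCompleteInfo(cpln, typ) not in actual_completions)
    print(missing_completions)  # only ('function', 'join') is missing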
Example #15
 def assertCompletionsInclude(self,
                              markedup_content,
                              completions,
                              implicit=True):
     """Check that the given completions are found
     @param markedup_content Content with a <|> marking
     @param completions List of expected completions; each item is a tuple of
         (type, completion string)
     @param implicit Whether the trigger should be implicit
     """
     actual_completions = self._doEval(markedup_content,
                                       implicit=implicit).completions
     missing_completions = set()
     for typ, cpln in completions:
         if UIHandler.AutoCompleteInfo(cpln, typ) not in actual_completions:
             missing_completions.add((typ, cpln))
     self.failIf(
         missing_completions,
         "%s completions at the given position did not "
         "include all expected values\n"
         "  missing:         %r\n"
         "  expected all of: %r\n"
         "  got:             %r\n"
         "  buffer:\n%s" %
         (self.language, list(missing_completions), completions, [
             (cpln.type, cpln.completion) for cpln in actual_completions
         ], indent(markedup_content)))
Example #16
 def assertCalltipIs(self, markedup_content, calltip, implicit=True):
     """Check that the calltip is as expected
     @param markedup_content Content with a <|> marking
     @param calltip The calltip string
     @param implicit Whether the trigger should be implicit
     """
     handler = self._doEval(markedup_content, implicit=implicit)
     actual_calltip = getattr(handler, "calltip", None)
     self.assertEqual(
         calltip, actual_calltip,
         "unexpected calltip at the given position\n"
         "  expected:\n%s\n"
         "  got:\n%s\n"
         "  buffer:\n%s" %
         (indent(calltip if calltip else "(none)"),
          indent(actual_calltip if actual_calltip else "(none)"),
          indent(markedup_content)))
Example #17
 def assertNoTrigger(self, markedup_content, lang=None, implicit=True,
                     env=None):
     if lang is None:
         lang = self.lang
     buf, trg = self._get_buf_and_trg(markedup_content, lang,
                                      implicit=implicit, env=env)
     if trg is not None:
         self.fail("unexpectedly found a %s trigger %r when didn't expect "
                   "one, buffer:\n%s"
                   % (lang, trg, indent(markedup_content)))
Example #18
    def assertCITDLExprUnderPosIs(self,
                                  markedup_content,
                                  citdl_expr,
                                  lang=None,
                                  prefix_filter=None,
                                  implicit=True,
                                  trigger_name=None,
                                  **fields):
        """Assert that the CITDL expression at the current position
        is as expected.

        This uses buf.citdl_expr_under_pos() -- or, for Perl,
        buf.citdl_expr_and_prefix_filter_from_trg().
        Note: This API is a mess right now. C.f. bug 65776.

        The "prefix_filter" optional argument can be used for Perl to test
        the value its relevant function returns.
        """
        if lang is None:
            lang = self.lang
        content, data = unmark_text(self.adjust_content(markedup_content))
        path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
        buf = self.mgr.buf_from_content(content, lang=lang, path=path)
        langintel = self.mgr.langintel_from_lang(lang)
        if trigger_name is None:
            trigger_name = "fakey-completion-type"

        if lang == "Perl":
            trg = Trigger(lang,
                          TRG_FORM_DEFN,
                          trigger_name,
                          data["pos"],
                          implicit=implicit,
                          length=0,
                          **fields)
            actual_citdl_expr, actual_prefix_filter \
                = langintel.citdl_expr_and_prefix_filter_from_trg(buf, trg)
        else:
            #actual_citdl_expr = langintel.citdl_expr_under_pos(buf, data["pos"])
            trg = Trigger(lang,
                          TRG_FORM_DEFN,
                          trigger_name,
                          data["pos"],
                          implicit=implicit,
                          **fields)
            actual_citdl_expr = langintel.citdl_expr_from_trg(buf, trg)
        self.assertEqual(
            actual_citdl_expr, citdl_expr,
            "unexpected actual %s CITDL expr under pos:\n"
            "  expected: %r\n"
            "  got:      %r\n"
            "  buffer:\n%s" %
            (lang, citdl_expr, actual_citdl_expr, indent(markedup_content)))
        if prefix_filter is not None:
            XXX  #TODO: compare prefix_filter to given value
Example #19
    def assertLex(self, markedup_content, lang=None):
        """Lex the given content and assert that the lexed tokens are as
        expected.

        What is "expected" is given via pseudo-xml markup like this:

            fuzzy wuzzy <SCE_UDL_SSL_COMMENTBLOCK>wuzza</SCE_UDL_SSL_COMMENTBLOCK> bear

        This example expects that "wuzza" will be a token with style
        SCE_UDL_SSL_COMMENTBLOCK.
        """
        from codeintel2.accessor import SilverCityAccessor
        if lang is None:
            lang = self.lang

        content, tokens = self._unmark_lex_text(markedup_content)

        # Do lexing of this content via the codeintel Buffers, because
        # they already handle all the SilverCity lexer hookup.
        path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
        buf = self.mgr.buf_from_content(content, lang=lang, path=path)
        assert isinstance(buf.accessor, SilverCityAccessor)
        actual_tokens = buf.accessor.tokens  # cheating
        for actual_token in actual_tokens:
            # There are a few SilverCity token dict keys that we
            # don't bother checking.
            del actual_token["end_column"]
            del actual_token["end_line"]
            del actual_token["start_column"]
            del actual_token["start_line"]

        unmatched_tokens = [t for t in tokens if t not in actual_tokens]
        if unmatched_tokens:
            self.fail("not all expected %s lex tokens were found in the "
                      "actual lexer output:\n"
                      "  buffer:\n%s\n"
                      "  actual lexer tokens:\n%s\n"
                      "  unmatched tokens:\n%s\n"
                      % (lang,
                         indent(content),
                         indent(pformat(actual_tokens)),
                         indent(pformat(unmatched_tokens))))
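
A standalone illustration of the final token comparison, with made-up token dicts; real SilverCity tokens carry more keys (including the ones deleted above) and integer style constants:

    expected_tokens = [{"style": 6, "text": "wuzza"}]
    actual_tokens = [{"style": 0, "text": "fuzzy wuzzy "},
                     {"style": 6, "text": "wuzza"},
                     {"style": 0, "text": " bear"}]
    unmatched_tokens = [t for t in expected_tokens if t not in actual_tokens]
    assert not unmatched_tokens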
Example #20
 def _assertCompletionsInclude(self, buf, trg, completions):
     markedup_content = markup_text(buf.accessor.text, pos=trg.pos)
     if isinstance(buf, CitadelBuffer):
         buf.unload()  # remove any entry from CIDB to ensure clean test
     ctlr = _CaptureEvalController()
     actual_completions = buf.cplns_from_trg(trg, ctlr=ctlr)
     missing_completions = [c for c in completions
                            if c not in (actual_completions or [])]
     self.failIf(missing_completions,
         "%s completions at the given position did not "
         "include all expected values\n"
         "  missing:         %r\n"
         "  expected all of: %r\n"
         "  got:             %r\n"
         "  eval log:\n%s\n"
         "  buffer:\n%s"
         % (buf.lang, missing_completions, completions,
            actual_completions,
            indent('\n'.join('%5s: %s' % (lvl, m) for lvl, m in ctlr.log)),
            indent(markedup_content)))
Example #21
 def assertCompletionsDoNotInclude(self, markedup_content, completions,
                                   lang=None, implicit=True, env=None):
     if lang is None:
         lang = self.lang
     buf, trg = self._get_buf_and_trg(markedup_content, lang,
                                      implicit=implicit, env=env)
     if trg is None:
         self.fail("given position is not a %s trigger point, "
                   "expected completions to exclude %r:\n%s"
                   % (lang, completions, indent(markedup_content)))
     self._assertCompletionsDoNotInclude(buf, trg, completions)
Example #22
 def assertCurrCalltipArgRange(self, markedup_content, calltip,
                               expected_range, lang=None, implicit=True):
     if lang is None:
         lang = self.lang
     path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
     content, data = unmark_text(
         self.adjust_content(markedup_content))
     pos = data["pos"]
     buf = self.mgr.buf_from_content(content, lang=lang, path=path)
     trg = buf.trg_from_pos(data["trg_pos"], implicit=implicit)
     actual_range = buf.curr_calltip_arg_range(trg.pos, calltip,
                                               curr_pos=data["pos"])
     self.assertEqual(actual_range, expected_range,
         "unexpected current calltip arg range\n"
         "   expected: %s\n"
         "   got:      %s\n"
         "   calltip:\n%s\n"
         "   buffer:\n%s"
         % (expected_range, actual_range, indent(calltip),
            indent(markedup_content)))
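
A worked illustration of the expected_range semantics, with offsets computed by hand against an invented calltip:

    calltip = "blam(a, b)"
    # While the second argument is being typed, the current-arg range should
    # cover "b", i.e. (8, 9): 'b' starts at offset 8 of the calltip string.
    expected_range = (8, 9)
    print(calltip[expected_range[0]:expected_range[1]])  # 'b'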
Example #23
 def assertNoPrecedingTrigger(self, markedup_content, lang=None):
     if lang is None:
         lang = self.lang
     path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
     content, data = unmark_text(self.adjust_content(markedup_content))
     buf = self.mgr.buf_from_content(content, lang=lang, path=path)
     trg = buf.preceding_trg_from_pos(data["start_pos"], data["pos"])
     if trg is not None:
         self.fail("unexpectedly found a preceding %s trigger '%s' when "
                   "didn't expect one, buffer:\n%s" %
                   (lang, trg.name, indent(markedup_content)))
Example #24
    def _python_info_from_python(self, python, env):
        """Call the given Python and return:
            (<version>, <sys.prefix>, <lib-dir>, <site-lib-dir>, <sys.path>)

        TODO: Unicode path issues?
        """
        import process

        argv = [python, "-c", self.info_cmd]
        log.debug("run `%s -c ...'", python)
        p = process.ProcessOpen(argv, env=env.get_all_envvars(), stdin=None)
        stdout, stderr = p.communicate()
        stdout_lines = stdout.splitlines(0)
        retval = p.returncode
        if retval:
            log.warn(
                "failed to determine Python info:\n"
                "  path: %s\n"
                "  retval: %s\n"
                "  stdout:\n%s\n"
                "  stderr:\n%s\n",
                python,
                retval,
                indent("\n".join(stdout_lines)),
                indent(stderr),
            )

        # We can only rely on the first two digits being in the form x.y.
        ver_match = re.search(r"([0-9]+\.[0-9]+)", stdout_lines[0])
        if ver_match:
            ver = ver_match.group(1)
        else:
            ver = None
        prefix = stdout_lines[1]
        if sys.platform == "win32":
            libdir = join(prefix, "Lib")
        else:
            libdir = join(prefix, "lib", "python" + ver)
        sitelibdir = join(libdir, "site-packages")
        sys_path = stdout_lines[2:]
        return ver, prefix, libdir, sitelibdir, sys_path
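
self.info_cmd itself is not shown in this example. A plausible standalone sketch of such a probe script and the parsing contract above (the command text is an assumption, not the real codeintel one):

    import subprocess
    import sys

    # The parser above expects: line 0 = version, line 1 = sys.prefix,
    # lines 2+ = sys.path entries.
    info_cmd = ("import sys\n"
                "print('%d.%d.%d' % sys.version_info[:3])\n"
                "print(sys.prefix)\n"
                "print('\\n'.join(sys.path))")
    stdout = subprocess.check_output([sys.executable, "-c", info_cmd],
                                     universal_newlines=True)
    stdout_lines = stdout.splitlines()
    print(stdout_lines[0])   # e.g. "3.11.4" -- the x.y version is parsed from here
    print(stdout_lines[1])   # sys.prefix
    print(stdout_lines[2:])  # sys.path entries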
Example #25
 def assertTriggerMatches(self, markedup_content, lang=None,
                          implicit=True, env=None, **fields):
     if lang is None:
         lang = self.lang
     buf, trg = self._get_buf_and_trg(markedup_content, lang,
                                      implicit=implicit, env=env)
     if trg is None:
         self.fail("unexpectedly did not find a %s trigger, buffer:\n%s"
                   % (lang, indent(markedup_content)))
     if "pos" in fields:
         fields["pos"] = self.adjust_pos(fields["pos"])
     for name, value in fields.items():
         try:
             actual_value = getattr(trg, name)
         except AttributeError:
             actual_value = trg.extra[name]
         self.assertEqual(actual_value, value,
                          "unexpected %s trigger '%s' value: expected %r, "
                          "got %r, buffer:\n%s"
                          % (lang, name, value, actual_value,
                             indent(markedup_content)))
Example #26
 def assertPrecedingTriggerMatches(self, markedup_content, lang=None,
                                   **fields):
     if lang is None:
         lang = self.lang
     path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
     content, data = unmark_text(
         self.adjust_content(markedup_content))
     buf = self.mgr.buf_from_content(content, lang=lang, path=path)
     trg = buf.preceding_trg_from_pos(data["start_pos"], data["pos"])
     if trg is None:
         self.fail("unexpectedly did not find a preceding %s trigger, "
                   "buffer:\n%s" % (lang, indent(markedup_content)))
     if "pos" in fields:
         fields["pos"] = self.adjust_pos(fields["pos"])
     for name, value in fields.items():
         actual_value = getattr(trg, name)
         self.assertEqual(actual_value, value,
                          "unexpected preceding %s trigger '%s' value: expected %r, "
                          "got %r, buffer:\n%s"
                          % (lang, name, value, actual_value,
                             indent(markedup_content)))
Example #27
 def assertCompletionsInclude2(self, buf, pos, completions, implicit=True):
     """A version of assertCompletionsInclude() where you pass in
     a Buffer instance instead of marked up content. Sometimes
     this is more convenient.
     """
     trg = buf.trg_from_pos(pos, implicit=implicit)
     if trg is None:
         markedup_content = markup_text(buf.accessor.text, pos=pos)
         self.fail("given position is not a %s trigger point, "
                   "expected completions to include %r:\n%s" %
                   (buf.lang, completions, indent(markedup_content)))
     self._assertCompletionsInclude(buf, trg, completions)
Example #28
 def assertNoPrecedingTrigger(self, markedup_content, lang=None):
     if lang is None:
         lang = self.lang
     path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
     content, data = unmark_text(
         self.adjust_content(markedup_content))
     buf = self.mgr.buf_from_content(content, lang=lang, path=path)
     trg = buf.preceding_trg_from_pos(data["start_pos"], data["pos"])
     if trg is not None:
         self.fail("unexpectedly found a preceding %s trigger '%s' when "
                   "didn't expect one, buffer:\n%s"
                   % (lang, trg.name, indent(markedup_content)))
Example #29
 def _assertCompletionsDoNotInclude(self, buf, trg, completions):
     markedup_content = markup_text(buf.accessor.text, pos=trg.pos)
     if isinstance(buf, CitadelBuffer):
         buf.unload()  # remove any entry from CIDB to ensure clean test
     ctlr = _CaptureEvalController()
     actual_completions = buf.cplns_from_trg(trg, ctlr=ctlr)
     completions_that_shouldnt_be_there = [
         c for c in (actual_completions or []) if c in completions
     ]
     self.failIf(completions_that_shouldnt_be_there,
         "%s completions at the given position included "
         "some unexpected values\n"
         "  shouldn't have had these: %r\n"
         "  expected none of:         %r\n"
         "  got:                      %r\n"
         "  eval log:\n%s\n"
         "  buffer:\n%s"
         % (buf.lang, completions_that_shouldnt_be_there, completions,
            actual_completions,
            indent('\n'.join('%5s: %s' % (lvl, m) for lvl, m in ctlr.log)),
            indent(markedup_content)))
Example #30
 def assertCompletionsDoNotInclude2(self, buf, pos, completions, implicit=True):
     """A version of assertCompletionsDoNotInclude() where you pass in
     a Buffer instance instead of marked up content. Sometimes
     this is more convenient.
     """
     trg = buf.trg_from_pos(pos, implicit=implicit)
     if trg is None:
         markedup_content = markup_text(buf.accessor.text, pos=pos)
         self.fail("given position is not a %s trigger point, "
                   "expected completions to exclude %r:\n%s"
                   % (buf.lang, completions, indent(markedup_content)))
     self._assertCompletionsDoNotInclude(buf, trg, completions)
Example #31
    def _assertCalltipMatches(self, buf, trg, markedup_content, expr, lang, flags):
        if trg is None:
            self.fail("given position is not a %s trigger point, "
                      "expected the calltip to match the following:\n"
                      "  exression:\n%s\n"
                      "  buffer:\n%s"
                      % (lang, indent(expr),
                         indent(markedup_content)))

        if isinstance(buf, CitadelBuffer):
            buf.unload()  # remove any entry from CIDB to ensure clean test
        ctlr = _CaptureEvalController()
        actual_calltips = buf.calltips_from_trg(trg, ctlr=ctlr)
        if actual_calltips and actual_calltips[0]:
            actual_calltip = actual_calltips[0]
        else:
            actual_calltip = None
        self.assertNotEquals(re.search(expr, actual_calltip, flags), None,
            "unexpected %s calltip at the given position\n"
            "  expression:\n%s\n"
            "  got:\n%s\n"
            "  eval log:\n%s\n"
            "  buffer:\n%s"
            % (trg.name, indent(expr and expr or "(none)"),
               indent(actual_calltip and actual_calltip or "(none)"),
               indent('\n'.join('%5s: %s' % (lvl, m) for lvl, m in ctlr.log)),
               indent(markedup_content)))
Example #32
    def _assertCalltipMatches(self, buf, trg, markedup_content, expr, lang,
                              flags):
        if trg is None:
            self.fail("given position is not a %s trigger point, "
                      "expected the calltip to match the following:\n"
                      "  exression:\n%s\n"
                      "  buffer:\n%s" %
                      (lang, indent(expr), indent(markedup_content)))

        if isinstance(buf, CitadelBuffer):
            buf.unload()  # remove any entry from CIDB to ensure clean test
        ctlr = _CaptureEvalController()
        actual_calltips = buf.calltips_from_trg(trg, ctlr=ctlr)
        if actual_calltips and actual_calltips[0]:
            actual_calltip = actual_calltips[0]
        else:
            actual_calltip = None
        self.assertNotEquals(
            re.search(expr, actual_calltip, flags), None,
            "unexpected %s calltip at the given position\n"
            "  expression:\n%s\n"
            "  got:\n%s\n"
            "  eval log:\n%s\n"
            "  buffer:\n%s" %
            (trg.name, indent(expr and expr or "(none)"),
             indent(actual_calltip and actual_calltip or "(none)"),
             indent('\n'.join('%5s: %s' % (lvl, m) for lvl, m in ctlr.log)),
             indent(markedup_content)))
Example #33
    def _python_info_from_python(self, python, env):
        """Call the given Python and return:
            (<version>, <sys.prefix>, <lib-dir>, <site-lib-dir>, <sys.path>)

        TODO: Unicode path issues?
        """
        import process
        argv = [python, "-c", self.info_cmd]
        log.debug("run `%s -c ...'", python)
        p = process.ProcessOpen(argv, env=env.get_all_envvars(), stdin=None)
        stdout, stderr = p.communicate()
        stdout_lines = stdout.splitlines(0)
        retval = p.returncode
        if retval:
            log.warn(
                "failed to determine Python info:\n"
                "  path: %s\n"
                "  retval: %s\n"
                "  stdout:\n%s\n"
                "  stderr:\n%s\n", python, retval,
                indent('\n'.join(stdout_lines)), indent(stderr))

        # We can only rely on the first two digits being in the form x.y.
        ver_match = re.search(r"([0-9]+\.[0-9]+)", stdout_lines[0])
        if ver_match:
            ver = ver_match.group(1)
        else:
            ver = None
        prefix = stdout_lines[1]
        if sys.platform == "win32":
            libdir = join(prefix, "Lib")
        else:
            libdir = join(prefix, "lib", "python" + ver)
        sitelibdir = join(libdir, "site-packages")
        sys_path = stdout_lines[2:]
        return ver, prefix, libdir, sitelibdir, sys_path
Example #34
 def assertScopeLpathIs(self, markedup_content, lpath, lang=None):
     if lang is None:
         lang = self.lang
     path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
     content, data = unmark_text(self.adjust_content(markedup_content))
     buf = self.mgr.buf_from_content(content, lang=lang, path=path)
     buf.scan(skip_scan_time_check=True)
     actual_blob, actual_lpath = buf.scoperef_from_pos(data["pos"])
     self.failUnlessEqual(
         lpath, actual_lpath,
         "unexpected %s scope lookup path (lpath) at the given position\n"
         "  expected: %r\n"
         "  got:      %r\n"
         "  buffer:\n%s" %
         (self.lang, lpath, actual_lpath, indent(markedup_content)))
Example #35
 def assertScopeLpathIs(self, markedup_content, lpath, lang=None):
     if lang is None:
         lang = self.lang
     path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
     content, data = unmark_text(
         self.adjust_content(markedup_content))
     buf = self.mgr.buf_from_content(content, lang=lang, path=path)
     buf.scan(skip_scan_time_check=True)
     actual_blob, actual_lpath = buf.scoperef_from_pos(data["pos"])
     self.failUnlessEqual(lpath, actual_lpath,
                          "unexpected %s scope lookup path (lpath) at the given position\n"
                          "  expected: %r\n"
                          "  got:      %r\n"
                          "  buffer:\n%s"
                          % (self.lang, lpath,
                             actual_lpath,
                             indent(markedup_content)))
Example #36
    def assertCITDLExprUnderPosIs(
        self, markedup_content, citdl_expr, lang=None,
        prefix_filter=None, implicit=True, trigger_name=None,
            **fields):
        """Assert that the CITDL expression at the current position
        is as expected.

        This uses buf.citdl_expr_under_pos() -- or, for Perl,
        buf.citdl_expr_and_prefix_filter_from_trg().
        Note: This API is a mess right now. C.f. bug 65776.

        The "prefix_filter" optional argument can be used for Perl to test
        the value its relevant function returns.
        """
        if lang is None:
            lang = self.lang
        content, data = unmark_text(
            self.adjust_content(markedup_content))
        path = os.path.join("<Unsaved>", "rand%d" % random.randint(0, 100))
        buf = self.mgr.buf_from_content(content, lang=lang, path=path)
        langintel = self.mgr.langintel_from_lang(lang)
        if trigger_name is None:
            trigger_name = "fakey-completion-type"

        if lang == "Perl":
            trg = Trigger(lang, TRG_FORM_DEFN, trigger_name,
                          data["pos"], implicit=implicit, length=0,
                          **fields)
            actual_citdl_expr, actual_prefix_filter \
                = langintel.citdl_expr_and_prefix_filter_from_trg(buf, trg)
        else:
            # actual_citdl_expr = langintel.citdl_expr_under_pos(buf,
            # data["pos"])
            trg = Trigger(lang, TRG_FORM_DEFN, trigger_name,
                          data["pos"], implicit=implicit,
                          **fields)
            actual_citdl_expr = langintel.citdl_expr_from_trg(buf, trg)
        self.assertEqual(actual_citdl_expr, citdl_expr,
                         "unexpected actual %s CITDL expr under pos:\n"
                         "  expected: %r\n"
                         "  got:      %r\n"
                         "  buffer:\n%s"
                         % (lang, citdl_expr, actual_citdl_expr,
                            indent(markedup_content)))
        if prefix_filter is not None:
            XXX  # TODO: compare prefix_filter to given value
Example #37
    def preceding_trg_from_pos(self,
                               buf,
                               pos,
                               curr_pos,
                               preceding_trg_terminators=None,
                               DEBUG=False):
        accessor = buf.accessor
        if preceding_trg_terminators is None:
            preceding_trg_terminators = self.preceding_trg_terminators
        if DEBUG:
            print banner("preceding_trg_from_pos(pos=%r, curr_pos=%r)" %
                         (pos, curr_pos))
            print indent(
                markup_text(accessor.text, pos=curr_pos, start_pos=pos))
            print banner(None, '-')

        # Skip over comments and strings in our checking, unless we are
        # in one of these styles for the whole range. This is so an explicit
        # trigger in a comment (or, e.g., a doc string) will work, but
        # the appearance of small comments or strings in code will not mess
        # things up.
        comment_and_string_styles = dict(
            (s, True) for s in buf.comment_styles() + buf.string_styles())
        skip_styles = {}
        start_style = accessor.style_at_pos(pos - 1)
        EOL_CHARS = tuple("\n\r")

        # Limiting simplification: Only backtrack a max of 200 chars.
        # Can increase that if necessary. The problem is detecting a
        # statement boundary backwards in langs like Python and Ruby
        # where you can't rely on ';' (actually
        # `preceding_trg_terminators').
        limit = max(1, pos - 200)

        # First stage. We only consider an autocomplete trigger (i.e.
        # trg.form==TRG_FORM_COMPLETION) if within range of the
        # curr_pos. Here "within range" means you don't have to move the
        # cursor to show the autocomplete UI.
        first_stage_limit = curr_pos
        for (char, style) in accessor.gen_char_and_style_back(
                curr_pos - 1, limit - 1):
            if not isident(char):
                break
            first_stage_limit -= 1
        if DEBUG:
            print "[stage 1] first_stage_limit=%d (prev_ch=%r)"\
                  % (first_stage_limit,
                     (first_stage_limit > 0
                      and accessor.char_at_pos(first_stage_limit-1)
                      or None))
        p = pos
        if p >= first_stage_limit:
            for (prev_ch, prev_style) in accessor.gen_char_and_style_back(
                    p - 1, first_stage_limit - 2):
                if (not skip_styles and prev_style != start_style
                        # EOLs in comments seem to always be style 0. Don't count
                        # them.
                        and prev_ch not in EOL_CHARS):
                    if DEBUG:
                        print "[stage 1] have seen a style change (%d -> %d), " \
                              "now skipping strings and comments" \
                              % (start_style, prev_style)
                    skip_styles = comment_and_string_styles
                if DEBUG:
                    print "[stage 1] consider pos %2d: prev_ch=%r (%d) --"\
                          % (p, prev_ch, prev_style),
                if prev_style in skip_styles:
                    if DEBUG: print "comment or string, skip it"
                elif self._is_terminating_char(prev_ch, prev_style,
                                               preceding_trg_terminators):
                    if DEBUG: print "in `preceding_trg_terminators': break"
                    return None
                elif prev_ch in self.trg_chars:
                    if DEBUG: print "trigger char, try it"
                    trg = buf.trg_from_pos(p, implicit=False)
                    if trg:
                        if DEBUG: print "[stage 1] %s" % trg
                        return trg
                    p -= 1
                    break
                elif DEBUG:
                    print "not a trigger char, skip it"
                p -= 1
        if DEBUG:
            print "[stage 1] end of possible autocomplete trigger range"

        # Second stage. We only consider calltip triggers now
        # (self.calltip_trg_chars).
        #
        # As well, ignore enclosed paren sections to make sure we are
        # in-range. For example, we shouldn't trigger on "bar(" here:
        #   foo(bar("skip", "this", "arg", "list"), <|>)
        close_paren_count = 0
        for (prev_ch,
             prev_style) in accessor.gen_char_and_style_back(p - 1, limit - 2):
            if (not skip_styles and prev_style != start_style
                    # EOLs in comments seem to always be style 0. Don't count
                    # them.
                    and prev_ch not in EOL_CHARS):
                if DEBUG:
                    print "[stage 2] seen a style change (%d -> %d), now " \
                          "skipping strings and comments" \
                          % (start_style, prev_style)
                skip_styles = comment_and_string_styles

            if DEBUG:
                print "[stage 2] consider pos %2d: prev_ch=%r (%d) --"\
                      % (p, prev_ch, prev_style),
            if prev_style in skip_styles:
                if DEBUG: print "comment or string, skip it"
            elif prev_ch == ')':
                close_paren_count += 1
                if DEBUG: print "close paren: count=%d" % close_paren_count
            elif close_paren_count and prev_ch == '(':
                close_paren_count -= 1
                if DEBUG: print "open paren: count=%d" % close_paren_count
            elif self._is_terminating_char(prev_ch, prev_style,
                                           preceding_trg_terminators):
                if DEBUG: print "in `preceding_trg_terminators': break"
                return None
            elif prev_ch in self.calltip_trg_chars:
                if DEBUG: print "trigger char, try it"
                trg = buf.trg_from_pos(p, implicit=False)
                if trg:
                    if DEBUG: print "[stage 2] %s" % trg
                    return trg
            elif DEBUG:
                print "not a trigger char, skip it"
            p -= 1

        return None
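
A standalone sketch of the stage-1 limit computation above: scan back from curr_pos over identifier characters, so an implicit completion trigger only counts while the cursor is still inside the word being completed. The isident() helper and buffer text below are simplified stand-ins, not the codeintel originals:

    def isident(ch):
        return ch.isalnum() or ch == "_"

    text = "self.fo"      # invented buffer; cursor sits right after "fo"
    curr_pos = len(text)
    first_stage_limit = curr_pos
    for char in reversed(text[:curr_pos]):
        if not isident(char):
            break
        first_stage_limit -= 1
    print(first_stage_limit)  # 5 -- the position just after the '.'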
Example #38
    def curr_calltip_arg_range(self,
                               buf,
                               trg_pos,
                               calltip,
                               curr_pos,
                               DEBUG=False):
        """Return that range in the calltip of the "current" arg.
        I.e. what argument is currently being entered.
        
            "buf" is the buffer object on which this is being done.
            "trg_pos" is the trigger position.
            "calltip" is the full calltip text.
            "curr_pos" is the current position in the buffer.
            
        Returns a range: (start, end)
        Set `start == -1` to cancel the calltip, i.e. if the entered text
        has closed the call region.

        The default implementation uses:
            self.calltip_region_terminators
        to handle languages with calltip signatures with the following
        characteristics:
        - uses '(' and ')' to bound the argument list (though because of
          support for ';' statement termination, this isn't absolutely
          required)
        - uses a comma to separate arguments
        - basic block delimiters are {}, (), and []

        For example:
            foo()
            blam(a, b)
            range([start,] stop[, step]) -> list of integers
            bar(arg1, *args, **kwargs)
            flash(boom, bang=42)
        """
        # Dev Notes:
        # - Eventually should pass in the trigger to aid in processing.
        # - TODO figure out dependence on buf.comment_styles() and
        #   buf.string_styles()
        accessor = buf.accessor
        if DEBUG:
            print banner("curr_calltip_arg_range")
            print "calltip:\n%s" % indent(calltip)
            print "buffer:\n%s" % indent(
                markup_text(accessor.text, trg_pos=trg_pos, pos=curr_pos))

        # Start from the trigger position and walk forward to the current
        # pos: counting args and looking for termination of the calltip
        # region.
        skip_styles = dict(
            (s, True) for s in buf.comment_styles() + buf.string_styles())
        if accessor.style_at_pos(trg_pos - 1) in skip_styles:
            skip_styles = {}
        comma_count = 0
        blocks = {
            # Map a block start token to its block end token.
            '(': ')',
            '[': ']',
            '{': '}',
        }
        block_stack = []
        p = trg_pos
        for ch, style in accessor.gen_char_and_style(trg_pos, curr_pos):
            if DEBUG: print "pos %2d: %r (%2s) --" % (p, ch, style),
            if style in skip_styles:
                if DEBUG: print "skip"
            elif ch in blocks:
                if DEBUG: print "open block"
                block_stack.append(blocks[ch])
            elif block_stack:
                if ch == block_stack[-1]:
                    if DEBUG: print "close block"
                    block_stack.pop()
                elif ch in self.calltip_region_terminators:
                    if DEBUG: print "end of call region: (-1, -1)"
                    return (-1, -1)
                elif DEBUG:
                    print "ignore (in block)"
            elif ch == ',':
                if DEBUG: print "next arg"
                comma_count += 1
            elif ch in self.calltip_region_terminators and \
                 self.calltip_verify_termination(accessor, ch, trg_pos, curr_pos):
                if DEBUG: print "end of call region: (-1, -1)"
                return (-1, -1)
            elif DEBUG:
                print "ignore"
            p += 1

        # Parse the signature from the calltip. If there is no signature
        # then we default to not indicating any arg range.
        if self._parsed_calltip_cache[0] == calltip:
            parsed = self._parsed_calltip_cache[1]
        else:
            parsed = _parse_calltip(calltip, DEBUG)
            self._parsed_calltip_cache = (calltip, parsed)
        if parsed is None:
            if DEBUG: print "couldn't parse any calltip: (0, 0)"
            return (0, 0)
        signature, name, args = parsed
        if DEBUG:
            print "parsed calltip:\n  signature:\n%s\n  name:\n%s\n  args:\n%s"\
                  % (indent(signature), indent(name), indent(pformat(args)))

        if not args:
            if DEBUG: print "no args in signature: (0, 0)"
            return (0, 0)
        elif comma_count >= len(args):
            #XXX ellipsis
            if DEBUG: print "more commas than args: ellipsis?"
            span = args[-1].span  # default to last arg
        else:
            span = args[comma_count].span

        if DEBUG:
            print "curr calltip range (%s, %s):" % (span[0], span[1])
            print indent(signature)
            print "    %s%s" % (' ' * span[0], '-' * (span[1] - span[0]))
        return span
    def preceding_trg_from_pos(self, buf, pos, curr_pos,
                               preceding_trg_terminators=None, DEBUG=False):
        accessor = buf.accessor
        if preceding_trg_terminators is None:
            preceding_trg_terminators = self.preceding_trg_terminators
        if DEBUG:
            print banner("preceding_trg_from_pos(pos=%r, curr_pos=%r)"
                          % (pos, curr_pos))
            print indent(markup_text(accessor.text, pos=curr_pos,
                                     start_pos=pos))
            print banner(None, '-')

        # Skip over comments and strings in our checking, unless we are
        # in one of these styles for the whole range. This is so an explicit
        # trigger in a comment (or, e.g., a doc string) will work, but
        # the appearance of small comments or strings in code will not mess
        # things up.
        comment_and_string_styles = dict(
            (s, True) for s in buf.comment_styles() + buf.string_styles())
        skip_styles = {}
        start_style = accessor.style_at_pos(pos-1)
        EOL_CHARS = tuple("\n\r")

        # Limiting simplification: Only backtrack a max of 200 chars.
        # Can increase that if necessary. The problem is detecting a
        # statement boundary backwards in langs like Python and Ruby
        # where you can't rely on ';' (actually
        # `preceding_trg_terminators').
        limit = max(1, pos - 200)
        
        # First stage. We only consider an autocomplete trigger (i.e.
        # trg.form==TRG_FORM_COMPLETION) if it is within range of
        # curr_pos. Here "within range" means you don't have to move the
        # cursor to show the autocomplete UI.
        first_stage_limit = curr_pos
        for (char, style) in accessor.gen_char_and_style_back(curr_pos-1,
                                                              limit-1):
            if not isident(char):
                break
            first_stage_limit -= 1
        if DEBUG:
            print "[stage 1] first_stage_limit=%d (prev_ch=%r)"\
                  % (first_stage_limit,
                     (first_stage_limit > 0
                      and accessor.char_at_pos(first_stage_limit-1)
                      or None))
        p = pos
        if p >= first_stage_limit:
            for (prev_ch, prev_style) in accessor.gen_char_and_style_back(p-1,
                                                          first_stage_limit-2):
                if (not skip_styles and prev_style != start_style
                    # EOLs in comments seem to always be style 0. Don't count
                    # them.
                    and prev_ch not in EOL_CHARS):
                    if DEBUG:
                        print "[stage 1] have seen a style change (%d -> %d), " \
                              "now skipping strings and comments" \
                              % (start_style, prev_style)
                    skip_styles = comment_and_string_styles
                if DEBUG:
                    print "[stage 1] consider pos %2d: prev_ch=%r (%d) --"\
                          % (p, prev_ch, prev_style),
                if prev_style in skip_styles:
                    if DEBUG: print "comment or string, skip it"
                elif self._is_terminating_char(prev_ch, prev_style,
                                               preceding_trg_terminators):
                    if DEBUG: print "in `preceding_trg_terminators': break"
                    return None
                elif prev_ch in self.trg_chars:
                    if DEBUG: print "trigger char, try it"
                    trg = buf.trg_from_pos(p, implicit=False)
                    if trg:
                        if DEBUG: print "[stage 1] %s" % trg
                        return trg
                    p -= 1
                    break
                elif DEBUG:
                    print "not a trigger char, skip it"
                p -= 1
        if DEBUG:
            print "[stage 1] end of possible autocomplete trigger range"

        # Second stage. We only consider calltip triggers now
        # (self.calltip_trg_chars).
        # 
        # Also, ignore enclosed paren sections to make sure we are
        # in range. For example, we shouldn't trigger on "bar(" here:
        #   foo(bar("skip", "this", "arg", "list"), <|>)
        close_paren_count = 0
        for (prev_ch, prev_style) in accessor.gen_char_and_style_back(p-1, limit-2):
            if (not skip_styles and prev_style != start_style
                # EOLs in comments seem to always be style 0. Don't count
                # them.
                and prev_ch not in EOL_CHARS):
                if DEBUG:
                    print "[stage 2] seen a style change (%d -> %d), now " \
                          "skipping strings and comments" \
                          % (start_style, prev_style)
                skip_styles = comment_and_string_styles

            if DEBUG:
                print "[stage 2] consider pos %2d: prev_ch=%r (%d) --"\
                      % (p, prev_ch, prev_style),
            if prev_style in skip_styles:
                if DEBUG: print "comment or string, skip it"
            elif prev_ch == ')':
                close_paren_count += 1
                if DEBUG: print "close paren: count=%d" % close_paren_count
            elif close_paren_count and prev_ch == '(':
                close_paren_count -= 1
                if DEBUG: print "open paren: count=%d" % close_paren_count
            elif self._is_terminating_char(prev_ch, prev_style,
                                           preceding_trg_terminators):
                if DEBUG: print "in `preceding_trg_terminators': break"
                return None
            elif prev_ch in self.calltip_trg_chars:
                if DEBUG: print "trigger char, try it"
                trg = buf.trg_from_pos(p, implicit=False)
                if trg:
                    if DEBUG: print "[stage 2] %s" % trg
                    return trg
            elif DEBUG:
                print "not a trigger char, skip it"
            p -= 1

        return None

    def curr_calltip_arg_range(self, buf, trg_pos, calltip, curr_pos,
                               DEBUG=False):
        """Return that range in the calltip of the "current" arg.
        I.e. what argument is currently being entered.
        
            "buf" is the buffer object on which this is being done.
            "trg_pos" is the trigger position.
            "calltip" is the full calltip text.
            "curr_pos" is the current position in the buffer.
            
        Returns a range: (start, end).
        Return `start == -1` to cancel the calltip, i.e. when the entered
        text has closed the call region.

        The default implementation uses:
            self.calltip_region_terminators
        to handle languages whose calltip signatures have the following
        characteristics:
        - '(' and ')' bound the argument list (though because of support
          for ';' statement termination, this isn't absolutely required)
        - a comma separates arguments
        - the basic block delimiters are {}, (), and []

        For example:
            foo()
            blam(a, b)
            range([start,] stop[, step]) -> list of integers
            bar(arg1, *args, **kwargs)
            flash(boom, bang=42)
        """
        # Dev Notes:
        # - Eventually should pass in the trigger to aid in processing.
        # - TODO figure out dependence on buf.comment_styles() and
        #   buf.string_styles()
        accessor = buf.accessor
        if DEBUG:
            print banner("curr_calltip_arg_range")
            print "calltip:\n%s" % indent(calltip)
            print "buffer:\n%s" % indent(markup_text(accessor.text,
                                                     trg_pos=trg_pos,
                                                     pos=curr_pos))
            
        # Start from the trigger position and walk forward to the current
        # pos: counting args and looking for termination of the calltip
        # region.
        skip_styles = dict(
            (s, True) for s in buf.comment_styles() + buf.string_styles())
        if accessor.style_at_pos(trg_pos-1) in skip_styles:
            skip_styles = {}
        comma_count = 0
        blocks = {
            # Map a block start token to its block end token.
            '(': ')', '[': ']', '{': '}',
        }
        block_stack = []
        p = trg_pos
        for ch, style in accessor.gen_char_and_style(trg_pos, curr_pos):
            if DEBUG: print "pos %2d: %r (%2s) --" % (p, ch, style),
            if style in skip_styles:
                if DEBUG: print "skip"
            elif ch in blocks:
                if DEBUG: print "open block"
                block_stack.append(blocks[ch])
            elif block_stack:
                if ch == block_stack[-1]:
                    if DEBUG: print "close block"
                    block_stack.pop()
                elif ch in self.calltip_region_terminators:
                    if DEBUG: print "end of call region: (-1, -1)"
                    return (-1, -1)
                elif DEBUG:
                    print "ignore (in block)"
            elif ch == ',':
                if DEBUG: print "next arg"
                comma_count += 1
            elif ch in self.calltip_region_terminators and \
                 self.calltip_verify_termination(accessor, ch, trg_pos, curr_pos):
                if DEBUG: print "end of call region: (-1, -1)"
                return (-1, -1)
            elif DEBUG:
                print "ignore"
            p += 1

        # Parse the signature from the calltip. If there is no signature
        # then we default to not indicating any arg range.
        if self._parsed_calltip_cache[0] == calltip:
            parsed = self._parsed_calltip_cache[1]
        else:
            parsed = _parse_calltip(calltip, DEBUG)
            self._parsed_calltip_cache = (calltip, parsed)
        if parsed is None:
            if DEBUG: print "couldn't parse any calltip: (0, 0)"
            return (0, 0)
        signature, name, args = parsed
        if DEBUG:
            print "parsed calltip:\n  signature:\n%s\n  name:\n%s\n  args:\n%s"\
                  % (indent(signature), indent(name), indent(pformat(args)))

        if not args:
            if DEBUG: print "no args in signature: (0, 0)"
            return (0, 0)
        elif comma_count >= len(args):
            #XXX ellipsis
            if DEBUG: print "more commas than args: ellipsis?"
            span = args[-1].span # default to last arg
        else:
            span = args[comma_count].span

        if DEBUG:
            print "curr calltip range (%s, %s):" % (span[0], span[1])
            print indent(signature)
            print "    %s%s" % (' '*span[0], '-'*(span[1]-span[0]))
        return span
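
The argument-tracking logic in curr_calltip_arg_range above boils down to a
simple idea: walk the text typed after the trigger, ignore anything inside
nested (), [] or {} blocks, count top-level commas, and map that count onto
the parsed argument spans (clamping to the last arg). A minimal standalone
sketch of just that idea follows; the helper name, the argument-span format
and the terminator default are illustrative, not part of the example above.

def current_arg_span(typed_text, arg_spans, terminators=(')', ';')):
    """Return the (start, end) span of the arg being entered, or (-1, -1)
    once the call region has been closed."""
    blocks = {'(': ')', '[': ']', '{': '}'}
    block_stack = []
    comma_count = 0
    for ch in typed_text:
        if ch in blocks:
            block_stack.append(blocks[ch])      # entering a nested block
        elif block_stack:
            if ch == block_stack[-1]:
                block_stack.pop()               # leaving a nested block
        elif ch == ',':
            comma_count += 1                    # next top-level arg
        elif ch in terminators:
            return (-1, -1)                     # call region closed
    if not arg_spans:
        return (0, 0)
    return arg_spans[min(comma_count, len(arg_spans) - 1)]

# For the calltip "blam(a, b)", the spans of "a" and "b" are (5, 6) and (8, 9):
#   current_arg_span("1, ", [(5, 6), (8, 9)])    -> (8, 9)    second arg
#   current_arg_span("1, 2)", [(5, 6), (8, 9)])  -> (-1, -1)  calltip cancelled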
Ejemplo n.º 41
0
    def _buf_indep_libs_from_env(self, env):
        """Create the buffer-independent list of libs."""
        cache_key = self.lang + "-libs"
        libs = env.cache.get(cache_key)
        if libs is None:
            env.add_pref_observer(self.interpreterPrefName, self._invalidate_cache)
            env.add_pref_observer(self.extraPathsPrefName,
                                  self._invalidate_cache_and_rescan_extra_dirs)
            env.add_pref_observer("codeintel_selected_catalogs",
                                  self._invalidate_cache)
            db = self.mgr.db

            ver, prefix, libdir, sitelibdir, sys_path \
                = self.python_info_from_env(env)
            libs = []

            # - extradirslib
            extra_dirs = self._extra_dirs_from_env(env)
            if extra_dirs:
                libs.append(db.get_lang_lib(self.lang, "extradirslib",
                                            extra_dirs))

            # Figure out which sys.path dirs belong to which lib.
            paths_from_libname = {"sitelib": [], "envlib": [], "stdlib": []}
            canon_sitelibdir = sitelibdir and normcase(sitelibdir) or None
            canon_prefix = prefix and normcase(prefix) or None
            canon_libdir = libdir and normcase(libdir) or ""
            canon_libdir_plat_prefix = normcase(join(libdir, "plat-"))
            canon_libdir_lib_prefix = normcase(join(libdir, "lib-"))
            for dir in sys_path:
                STATE = "envlib"
                canon_dir = normcase(dir)
                if dir == "": # -> curdirlib (already handled)
                    continue
                elif canon_dir.endswith(".zip") and isfile(dir):
                    log.warn("`%s': not handling .zip file on Python sys.path",
                             dir)
                    continue
                elif canon_dir.endswith(".egg") and isfile(dir):
                    #log.warn("`%s': not handling .egg file on Python sys.path",
                    #         dir)
                    continue
                elif canon_dir.startswith(canon_sitelibdir):
                    STATE = "sitelib"
                # Check against the known list of standard library locations.
                elif canon_dir == canon_libdir or \
                     canon_dir.startswith(canon_libdir_plat_prefix) or \
                     canon_dir.startswith(canon_libdir_lib_prefix):
                    STATE = "stdlib"
                if not exists(dir):
                    continue
                paths_from_libname[STATE].append(dir)
            log.debug("Python %s paths for each lib:\n%s",
                      ver, indent(pformat(paths_from_libname)))

            # - envlib, sitelib, cataloglib, stdlib
            if paths_from_libname["envlib"]:
                libs.append( db.get_lang_lib(self.lang, "envlib",
                                paths_from_libname["envlib"]) )
            if paths_from_libname["sitelib"]:
                libs.append( db.get_lang_lib(self.lang, "sitelib",
                                paths_from_libname["sitelib"]) )
            catalog_selections = env.get_pref("codeintel_selected_catalogs")
            libs += [
                db.get_catalog_lib(self.lang, catalog_selections),
                db.get_stdlib(self.lang, ver)
            ]
            env.cache[cache_key] = libs

        return libs
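
The heart of this example is classifying each sys.path entry as "envlib",
"sitelib" or "stdlib" by normcase()-d prefix matching against the
interpreter's site-packages and lib directories. A self-contained sketch of
that classification, assuming the same inputs as the code above (the helper
name is illustrative):

from os.path import join, normcase

def classify_sys_path_dir(d, sitelibdir, libdir):
    """Return "sitelib", "stdlib" or "envlib" for one sys.path entry."""
    canon_dir = normcase(d)
    # Check site-packages first, since it usually lives under libdir.
    if sitelibdir and canon_dir.startswith(normcase(sitelibdir)):
        return "sitelib"
    if (canon_dir == normcase(libdir)
            or canon_dir.startswith(normcase(join(libdir, "plat-")))
            or canon_dir.startswith(normcase(join(libdir, "lib-")))):
        return "stdlib"
    return "envlib"

# e.g. classify_sys_path_dir("/usr/lib/python2.7/site-packages",
#                            "/usr/lib/python2.7/site-packages",
#                            "/usr/lib/python2.7")   -> "sitelib"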
Ejemplo n.º 42
0
def _testOneInputFile(self, fpath, tags=None):
    _debug = False  # Set to true to dump status info for each test run.

    infile = os.path.join(gInputsDir, fpath)  # input
    outfile = os.path.join(gOutputsDir, fpath + '.cix')  # expected output
    tmpfile = os.path.join(gTmpDir, fpath + '.cix')  # actual output
    if not os.path.exists(os.path.dirname(tmpfile)):
        os.makedirs(os.path.dirname(tmpfile))
    errfile = os.path.join(gOutputsDir, fpath + '.error')  # expected error
    # An options file is a set of kwargs for the buf.scan()
    # method call. One key-value pair per line, like this:
    #   key=value
    # Whitespace is stripped off the value.
    optsfile = os.path.join(gInputsDir, fpath + '.options')  # input options

    if _debug:
        print()
        print("*" * 50, "codeintel '%s'" % fpath)

    # Set standard options:
    opts = {"mtime": "42"}

    # Determine input options to use, if any.
    #XXX Not used. Drop it.
    if os.path.exists(optsfile):
        for line in open(optsfile, 'r').read().splitlines(0):
            name, value = line.split('=', 1)
            value = value.strip()
            try:  # allow value to be a type other than string
                value = eval(value)
            except Exception:
                pass
            opts[name] = value
        if _debug:
            print("*" * 50, "options")
            pprint.pprint(opts)

    # Scan the file, capturing stdout and stderr and any possible
    # error.
    # - To allow testing from different dirs (which results in changing
    #   path strings), we normalize the <file path="..."> value and any
    #   <scope ilk="blob" src="..."> attributes.
    oldStdout = sys.stdout
    oldStderr = sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    try:
        try:
            lang = None
            if tags and "python3" in tags:
                lang = "Python3"
            buf = self.mgr.buf_from_path(infile, lang=lang)
            buf.scan(**opts)
            tree = buf.tree

            # Normalize paths.
            relnorm_infile = infile[len(dirname(gInputsDir)) + 1:]
            absnorm_infile = infile
            relnorm_infile = relnorm_infile.replace('\\', '/')
            absnorm_infile = absnorm_infile.replace('\\', '/')
            for file_elem in tree:
                file_elem.set("path", relnorm_infile)
                for blob_elem in file_elem:
                    if blob_elem.get("ilk") != "blob": continue
                    norm_src = normpath(blob_elem.get("src"))
                    norm_src = norm_src.replace('\\', '/')
                    if norm_src in (relnorm_infile, absnorm_infile):
                        blob_elem.set("src", relnorm_infile)

            tree = pretty_tree_from_tree(tree)
            # Due to the dynamic nature of the ciler errors (which often
            # include the source code line numbers), it's difficult to check
            # that the errors are identical, so we work around this by just
            # taking the first 30 characters of the error.
            cile_error = tree[0].get("error")
            if cile_error and fpath.endswith(".js"):
                tree[0].set(
                    "error",
                    len(cile_error) < 30 and cile_error
                    or (cile_error[:30] + "..."))
            cix = ET.tostring(tree)

        except CodeIntelError as ex:
            error = traceback.format_exc()
        else:
            error = None
            if isinstance(cix, six.text_type):
                with io.open(tmpfile, mode="wt", encoding="utf-8") as fout:
                    fout.write(cix)
            else:
                with open(tmpfile, mode="wb") as fout:
                    fout.write(cix)
    finally:
        stdout = sys.stdout.getvalue()
        stderr = sys.stderr.getvalue()
        sys.stdout = oldStdout
        sys.stderr = oldStderr
    if _debug:
        print("*" * 50, "stdout")
        print(stdout)
        print("*" * 50, "stderr")
        print(stderr)
        print("*" * 50, "error")
        print(str(error))
        print("*" * 50)

    generateMissing = False
    if not os.path.exists(outfile) and generateMissing:
        with io.open(outfile, mode='wt', encoding='utf-8') as fout:
            with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
                fout.write(ftmp.read())

    # Verify that the results are as expected.
    if os.path.exists(outfile) and error:
        self.fail("scanning '%s' raised an error but success was "
                  "expected:\n%s" % (_encode_for_stdout(fpath), indent(error)))
    elif os.path.exists(outfile):
        # Convert the <file path="..."/> to the native directory separator.
        def to_native_sep(match):
            path = match.group(2).replace("\\", os.sep).replace("/", os.sep)
            return match.group(1) + path + match.group(3)

        path_pat = re.compile(r'(<file .*?path=")(.*?)(".*?>)', re.S)

        # Note that we don't really care about line endings here, so we read
        # both files in universal newlines mode (i.e. translate to \n)
        # and normalize '&#10;', '&#13;' and '&apos;'
        with io.open(outfile, mode='rt', encoding='utf-8') as fout:
            expected = path_pat.sub(to_native_sep, fout.read())
            expected = expected.replace('&#xA;', '&#10;').replace(
                '&#xD;', '&#13;').replace('&apos;', '\'')
        with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
            actual = path_pat.sub(to_native_sep, ftmp.read())
            actual = actual.replace('&#xA;', '&#10;').replace(
                '&#xD;', '&#13;').replace('&apos;', '\'')

        if expected != actual:
            do_fail = True
            # Useful temporary thing while XML output format is changing.
            #if os.stat("../support/xmldiff.py"):
            #    rc = os.system('python ../support/xmldiff.py "%s" "%s"' % (outfile, tmpfile))
            #    if rc == 0:
            #        do_fail = False
            if do_fail:
                diff = list(
                    difflib.ndiff(expected.splitlines(1),
                                  actual.splitlines(1)))
                diff = _diffContext(diff, 2)
                if diff:
                    error_str = "%r != %r:\n --- %s\n +++ %s\n%s" \
                                % (outfile, tmpfile, outfile, tmpfile,
                                   ''.join(diff))
                    if gMaxDiffOutput > 0 and gMaxNumLines > 0:
                        if len(error_str) > gMaxDiffOutput:
                            error_lines = error_str.split("\n")
                            if len(error_lines) > gMaxNumLines:
                                error_lines = error_lines[:gMaxNumLines] + [
                                    "..."
                                ]
                            if gMaxLineLength > 0:
                                error_str = "\n".join([
                                    len(x) > gMaxLineLength
                                    and x[:gMaxLineLength] or x
                                    for x in error_lines
                                ])
                            else:
                                error_str = "\n".join(error_lines)
                    self.fail(_encode_for_stdout(error_str))
    elif os.path.exists(errfile):
        # There is no reference output file. This means that processing
        # this file is expected to fail.
        expectedError = open(errfile, 'r').read()
        actualError = str(error)
        self.failUnlessEqual(actualError.strip(), expectedError.strip())
    else:
        self.fail("No reference output file or error file for '%s'." % infile)

    # Ensure next test file gets a clean codeintel.
    toDelete = []
    for modname in sys.modules:
        if modname == "codeintel" or modname.startswith("codeintel."):
            toDelete.append(modname)
    for modname in toDelete:
        del sys.modules[modname]
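
Before diffing, both the expected and the actual CIX go through the same
normalization: <file path="..."> values are converted to the native
directory separator and a few XML character entities are unified. Pulled
out as a standalone helper (the regex and replacements are taken from the
code above; the function name is illustrative):

import os
import re

_path_pat = re.compile(r'(<file .*?path=")(.*?)(".*?>)', re.S)

def normalize_cix(text):
    """Normalize a CIX string for comparison across platforms."""
    def to_native_sep(match):
        path = match.group(2).replace("\\", os.sep).replace("/", os.sep)
        return match.group(1) + path + match.group(3)
    text = _path_pat.sub(to_native_sep, text)
    return (text.replace('&#xA;', '&#10;')
                .replace('&#xD;', '&#13;')
                .replace('&apos;', "'"))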
Ejemplo n.º 43
0
    def _buf_indep_libs_from_env(self, env):
        """Create the buffer-independent list of libs."""
        cache_key = self.lang + "-libs"
        libs = env.cache.get(cache_key)
        if libs is None:
            env.add_pref_observer(self.interpreterPrefName,
                                  self._invalidate_cache)
            env.add_pref_observer(self.extraPathsPrefName,
                                  self._invalidate_cache_and_rescan_extra_dirs)
            env.add_pref_observer("codeintel_selected_catalogs",
                                  self._invalidate_cache)
            db = self.mgr.db

            ver, prefix, libdir, sitelibdir, sys_path \
                = self.python_info_from_env(env)
            libs = []

            # - extradirslib
            extra_dirs = self._extra_dirs_from_env(env)
            if extra_dirs:
                libs.append(
                    db.get_lang_lib(self.lang, "extradirslib", extra_dirs))

            # Figure out which sys.path dirs belong to which lib.
            paths_from_libname = {"sitelib": [], "envlib": [], "stdlib": []}
            canon_sitelibdir = sitelibdir and normcase(sitelibdir) or None
            canon_prefix = prefix and normcase(prefix) or None
            canon_libdir = normcase(libdir)
            canon_libdir_plat_prefix = normcase(join(libdir, "plat-"))
            canon_libdir_lib_prefix = normcase(join(libdir, "lib-"))
            for dir in sys_path:
                STATE = "envlib"
                canon_dir = normcase(dir)
                if dir == "":  # -> curdirlib (already handled)
                    continue
                elif canon_dir.endswith(".zip") and isfile(dir):
                    log.warn("`%s': not handling .zip file on Python sys.path",
                             dir)
                    continue
                elif canon_dir.endswith(".egg") and isfile(dir):
                    #log.warn("`%s': not handling .egg file on Python sys.path",
                    #         dir)
                    continue
                elif canon_dir.startswith(canon_sitelibdir):
                    STATE = "sitelib"
                # Check against the known list of standard library locations.
                elif canon_dir == canon_libdir or \
                     canon_dir.startswith(canon_libdir_plat_prefix) or \
                     canon_dir.startswith(canon_libdir_lib_prefix):
                    STATE = "stdlib"
                if not exists(dir):
                    continue
                paths_from_libname[STATE].append(dir)
            log.debug("Python %s paths for each lib:\n%s", ver,
                      indent(pformat(paths_from_libname)))

            # - envlib, sitelib, cataloglib, stdlib
            if paths_from_libname["envlib"]:
                libs.append(
                    db.get_lang_lib(self.lang, "envlib",
                                    paths_from_libname["envlib"]))
            if paths_from_libname["sitelib"]:
                libs.append(
                    db.get_lang_lib(self.lang, "sitelib",
                                    paths_from_libname["sitelib"]))
            catalog_selections = env.get_pref("codeintel_selected_catalogs")
            libs += [
                db.get_catalog_lib(self.lang, catalog_selections),
                db.get_stdlib(self.lang, ver)
            ]
            env.cache[cache_key] = libs

        return libs
Ejemplo n.º 44
0
        stderr = sys.stderr.getvalue()
        sys.stdout = oldStdout
        sys.stderr = oldStderr
    if _debug:
        print "*" * 50, "stdout"
        print stdout
        print "*" * 50, "stderr"
        print stderr
        print "*" * 50, "error"
        print str(error)
        print "*" * 50

    # Verify that the results are as expected.
    if os.path.exists(outfile) and error:
        self.fail("scanning '%s' raised an error but success was "
                  "expected:\n%s" % (_encode_for_stdout(fpath), indent(error)))
    elif os.path.exists(outfile):
        # Convert the <file path="..."/> to the native directory separator.
        def to_native_sep(match):
            path = match.group(2).replace("\\", os.sep).replace("/", os.sep)
            return match.group(1) + path + match.group(3)

        path_pat = re.compile(r'(<file .*?path=")(.*?)(".*?>)', re.S)

        # Note that we don't really care about line endings here, so we read
        # both files in universal newlines mode (i.e. translate to \n)
        with io.open(outfile, mode='rt', encoding='utf-8') as fout:
            expected = path_pat.sub(to_native_sep, fout.read())
        with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
            actual = path_pat.sub(to_native_sep, ftmp.read())
Ejemplo n.º 45
0
        stderr = sys.stderr.getvalue()
        sys.stdout = oldStdout
        sys.stderr = oldStderr
    if _debug:
        print "*"*50, "stdout"
        print stdout
        print "*"*50, "stderr"
        print stderr
        print "*"*50, "error"
        print str(error)
        print "*" * 50

    # Verify that the results are as expected.
    if os.path.exists(outfile) and error:
        self.fail("scanning '%s' raised an error but success was "
                  "expected:\n%s" % (_encode_for_stdout(fpath), indent(error)))
    elif os.path.exists(outfile):
        # Convert the <file path="..."/> to the native directory separator.
        def to_native_sep(match):
            path = match.group(2).replace("\\", os.sep).replace("/", os.sep)
            return match.group(1)+path+match.group(3)
        path_pat = re.compile(r'(<file .*?path=")(.*?)(".*?>)', re.S)

        # Note that we don't really care about line endings here, so we read
        # both files in universal newlines mode (i.e. translate to \n)
        with io.open(outfile, mode='rt', encoding='utf-8') as fout:
            expected = path_pat.sub(to_native_sep, fout.read())
        with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
            actual = path_pat.sub(to_native_sep, ftmp.read())

        if expected != actual:
Ejemplo n.º 46
0
def _testOneInputFile(self, fpath, tags=None):
    _debug = False  # Set to true to dump status info for each test run.

    infile = os.path.join(gInputsDir, fpath) # input
    outfile = os.path.join(gOutputsDir, fpath+'.cix') # expected output
    tmpfile = os.path.join(gTmpDir, fpath+'.cix') # actual output
    if not os.path.exists(os.path.dirname(tmpfile)):
        os.makedirs(os.path.dirname(tmpfile))
    errfile = os.path.join(gOutputsDir, fpath+'.error')  # expected error
    # An options file is a set of kwargs for the buf.scan()
    # method call. One key-value pair per line, like this:
    #   key=value
    # Whitespace is stripped off the value.
    optsfile = os.path.join(gInputsDir, fpath+'.options') # input options
    
    if _debug:
        print()
        print("*"*50, "codeintel '%s'" % fpath)

    # Set standard options:
    opts = {"mtime": "42"}

    # Determine input options to use, if any.
    #XXX Not used. Drop it.
    if os.path.exists(optsfile):
        for line in open(optsfile, 'r').read().splitlines(0):
            name, value = line.split('=', 1)
            value = value.strip()
            try: # allow value to be a type other than string
                value = eval(value)
            except Exception:
                pass
            opts[name] = value
        if _debug:
            print("*"*50, "options")
            pprint.pprint(opts)

    # Scan the file, capturing stdout and stderr and any possible
    # error.
    # - To allow testing from different dirs (which results in changing
    #   path strings), we normalize the <file path="..."> value and any
    #   <scope ilk="blob" src="..."> attributes.
    oldStdout = sys.stdout
    oldStderr = sys.stderr
    sys.stdout = StringIO()
    sys.stderr = StringIO()
    try:
        try:
            lang = None
            if tags and "python3" in tags:
                lang = "Python3"
            buf = self.mgr.buf_from_path(infile, lang=lang)
            buf.scan(**opts)
            tree = buf.tree

            # Normalize paths.
            relnorm_infile = infile[len(dirname(gInputsDir))+1:]
            absnorm_infile = infile
            relnorm_infile = relnorm_infile.replace('\\', '/')
            absnorm_infile = absnorm_infile.replace('\\', '/')
            for file_elem in tree:
                file_elem.set("path", relnorm_infile)
                for blob_elem in file_elem:
                    if blob_elem.get("ilk") != "blob": continue
                    norm_src = normpath(blob_elem.get("src"))
                    norm_src = norm_src.replace('\\', '/')
                    if norm_src in (relnorm_infile, absnorm_infile):
                        blob_elem.set("src", relnorm_infile)

            tree = pretty_tree_from_tree(tree)
            # Due to the dynamic nature of the ciler errors (which often
            # include the source code line numbers), it's difficult to check
            # that the errors are identical, so we work around this by just
            # taking the first 30 characters of the error.
            cile_error = tree[0].get("error")
            if cile_error and fpath.endswith(".js"):
                tree[0].set("error", len(cile_error) < 30 and cile_error or (cile_error[:30] + "..."))
            cix = ET.tostring(tree)

        except CodeIntelError as ex:
            error = traceback.format_exc()
        else:
            error = None
            if isinstance(cix, six.text_type):
                with io.open(tmpfile, mode="wt", encoding="utf-8") as fout:
                    fout.write(cix)
            else:
                with open(tmpfile, mode="wb") as fout:
                    fout.write(cix)
    finally:
        stdout = sys.stdout.getvalue()
        stderr = sys.stderr.getvalue()
        sys.stdout = oldStdout
        sys.stderr = oldStderr
    if _debug:
        print("*"*50, "stdout")
        print(stdout)
        print("*"*50, "stderr")
        print(stderr)
        print("*"*50, "error")
        print(str(error))
        print("*" * 50)

    generateMissing = False
    if not os.path.exists(outfile) and generateMissing:
        with io.open(outfile, mode='wt', encoding='utf-8') as fout:
            with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
                fout.write(ftmp.read())

    # Verify that the results are as expected.
    if os.path.exists(outfile) and error:
        self.fail("scanning '%s' raised an error but success was "
                  "expected:\n%s" % (_encode_for_stdout(fpath), indent(error)))
    elif os.path.exists(outfile):
        # Convert the <file path="..."/> to the native directory separator.
        def to_native_sep(match):
            path = match.group(2).replace("\\", os.sep).replace("/", os.sep)
            return match.group(1)+path+match.group(3)
        path_pat = re.compile(r'(<file .*?path=")(.*?)(".*?>)', re.S)

        # Note that we don't really care about line endings here, so we read
        # both files in universal newlines mode (i.e. translate to \n)
        # and normalize '&#10;', '&#13;' and '&apos;'
        with io.open(outfile, mode='rt', encoding='utf-8') as fout:
            expected = path_pat.sub(to_native_sep, fout.read())
            expected = expected.replace('&#xA;', '&#10;').replace('&#xD;', '&#13;').replace('&apos;', '\'')
        with io.open(tmpfile, mode='rt', encoding='utf-8') as ftmp:
            actual = path_pat.sub(to_native_sep, ftmp.read())
            actual = actual.replace('&#xA;', '&#10;').replace('&#xD;', '&#13;').replace('&apos;', '\'')
        
        if expected != actual:
            do_fail = True
            # Useful temporary thing while XML output format is changing.
            #if os.stat("../support/xmldiff.py"):
            #    rc = os.system('python ../support/xmldiff.py "%s" "%s"' % (outfile, tmpfile))
            #    if rc == 0:
            #        do_fail = False
            if do_fail:
                diff = list(difflib.ndiff(expected.splitlines(1),
                                          actual.splitlines(1)))
                diff = _diffContext(diff, 2)
                if diff:
                    error_str = "%r != %r:\n --- %s\n +++ %s\n%s" \
                                % (outfile, tmpfile, outfile, tmpfile,
                                   ''.join(diff))
                    if gMaxDiffOutput > 0 and gMaxNumLines > 0:
                        if len(error_str) > gMaxDiffOutput:
                            error_lines = error_str.split("\n")
                            if len(error_lines) > gMaxNumLines:
                                error_lines = error_lines[:gMaxNumLines] + ["..."]
                            if gMaxLineLength > 0:
                                error_str = "\n".join([len(x) > gMaxLineLength and x[:gMaxLineLength] or x
                                                   for x in error_lines])
                            else:
                                error_str = "\n".join(error_lines)
                    self.fail(_encode_for_stdout(error_str))
    elif os.path.exists(errfile):
        # There is no reference output file. This means that processing
        # this file is expected to fail.
        expectedError = open(errfile, 'r').read()
        actualError = str(error)
        self.failUnlessEqual(actualError.strip(), expectedError.strip())
    else:
        self.fail("No reference output file or error file for '%s'." % infile)

    # Ensure next test file gets a clean codeintel.
    toDelete = []
    for modname in sys.modules:
        if modname == "codeintel" or modname.startswith("codeintel."):
            toDelete.append(modname)
    for modname in toDelete:
        del sys.modules[modname]
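
When the expected and actual CIX differ, the failure message is a context
diff that can get very large, so the example caps it using the module
globals gMaxDiffOutput, gMaxNumLines and gMaxLineLength. The truncation
policy as a standalone sketch (the function name is illustrative):

def truncate_error(error_str, max_output, max_lines, max_line_len):
    """Trim an oversized failure message: keep at most max_lines lines,
    each at most max_line_len characters, but only once the whole message
    exceeds max_output characters."""
    if max_output <= 0 or max_lines <= 0 or len(error_str) <= max_output:
        return error_str
    lines = error_str.split("\n")
    if len(lines) > max_lines:
        lines = lines[:max_lines] + ["..."]
    if max_line_len > 0:
        lines = [line[:max_line_len] for line in lines]
    return "\n".join(lines)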