def _validate_tree_with_macros(
        self, document: Document,
        xml_tree: etree.ElementTree) -> List[Diagnostic]:
    """Validates the document after loading all referenced macros and expanding them.

    Args:
        document (Document): The document to validate.
        xml_tree (etree.ElementTree): The XML tree of the (unexpanded) document.

    Returns:
        List[Diagnostic]: Diagnostics for any validation or syntax errors found
        after macro expansion; an empty list when the expanded document is valid.
    """
    error_range = None
    try:
        # Errors are reported on the macros range of the ORIGINAL document,
        # since line numbers in the expanded tree no longer match the source.
        error_range = self._get_macros_range(document, xml_tree)
        expanded_tool_tree, _ = xml_macros.load_with_references(document.path)
        expanded_xml = self._remove_macros(expanded_tool_tree)
        root = expanded_xml.getroot()
        self.xsd_schema.assertValid(root)
        return []
    except etree.DocumentInvalid as e:
        return [
            Diagnostic(error_range,
                       f"Validation error on macro: {error.message}",
                       source=self.server_name)
            for error in e.error_log.filter_from_errors()
        ]
    except etree.XMLSyntaxError as e:
        result = Diagnostic(error_range,
                           f"Syntax error on macro: {e.msg}",
                           source=self.server_name)
        return [result]
def _validate(ls, params):
    """Validate a document and publish diagnostics.

    Flags every occurrence of the character "e" in the document with a
    one-character-wide diagnostic (demo validation logic).
    """
    text_doc = ls.workspace.get_document(params.textDocument.uri)
    source = text_doc.source
    diagnostics = []
    # Use enumerate + str.find instead of manual index bookkeeping.
    for line_index, line in enumerate(source.splitlines()):
        logging.debug("*** %s", line)
        col_index = line.find("e")
        while col_index != -1:
            msg = "Character e at column {}".format(col_index)
            d = Diagnostic(
                Range(Position(line_index, col_index),
                      Position(line_index, col_index + 1)),
                msg,
                source=type(server).__name__)
            diagnostics.append(d)
            logging.debug("e at line %s, col %s", line_index, col_index)
            col_index = line.find("e", col_index + 1)
        logging.debug("The remainder of line %s is efree", line_index)
    ls.publish_diagnostics(text_doc.uri, diagnostics)
def write(self, line):
    """Catch a line of Sphinx output, converting reported problems to diagnostics."""
    match = PROBLEM_PATTERN.match(line)
    if match is not None:
        filepath = match.group("file")
        severity = PROBLEM_SEVERITY.get(match.group("type"),
                                        PROBLEM_SEVERITY["ERROR"])
        diagnostics = self.diagnostics.get(filepath)
        if diagnostics is None:
            diagnostics = DiagnosticList()
        raw_line = match.group("line")
        try:
            line_number = int(raw_line)
        except (TypeError, ValueError) as exc:
            # Sphinx sometimes reports no (or a non-numeric) line; fall back to 1.
            self.logger.debug("Unable to parse line number: '%s'", raw_line)
            self.logger.debug(exc)
            line_number = 1
        range_ = Range(Position(line_number - 1, 0), Position(line_number, 0))
        diagnostics.append(
            Diagnostic(range_,
                       match.group("message"),
                       severity=severity,
                       source="sphinx"))
        self.diagnostics[filepath] = diagnostics
    # Every line is forwarded to the Sphinx log, matched or not.
    self.sphinx_log.info(line)
def _build_diagnostics(
        self,
        error_log: etree._ListErrorLog,
        xml_tree: Optional[etree.ElementTree] = None) -> List[Diagnostic]:
    """Converts the XSD validation error log into a list of diagnostics.

    Args:
        error_log (etree._ListErrorLog): The error log generated after XSD
            schema validation.
        xml_tree (Optional[etree.ElementTree], optional): The element tree
            associated with the error log. Defaults to None.

    Raises:
        ExpandMacrosFoundException: Raised when a macro ``expand`` element is
            found in the error log. The ``expand`` element is not part of the
            XSD so, when this is raised, the document must expand its macros
            before validation.

    Returns:
        List[Diagnostic]: The diagnostic items found in the error log.
    """
    diagnostics: List[Diagnostic] = []
    for error in error_log.filter_from_errors():
        if "expand" in error.message:
            raise ExpandMacrosFoundException(xml_tree)
        # Zero-width range at the error location (LSP positions are 0-based).
        start = Position(error.line - 1, error.column)
        end = Position(error.line - 1, error.column)
        diagnostics.append(
            Diagnostic(Range(start, end), error.message,
                       source=self.server_name))
    return diagnostics
def _make_diagnostic(linter, node, message):
    """Build a one-unit-wide Diagnostic at the node's parse-tree location."""
    loc = node.parse_tree.loc
    span = Range(Position(loc), Position(loc + 1))
    return Diagnostic(span,
                      message=message,
                      code=linter.code,
                      severity=linter.severity)
def to_diagnostic(issue: Issue):
    '''Convert the Stibium Issue object to a pygls Diagnostic object'''
    # Warnings map to Warning severity; everything else is reported as Error.
    if issue.severity == IssueSeverity.Warning:
        severity = DiagnosticSeverity.Warning
    else:
        severity = DiagnosticSeverity.Error
    return Diagnostic(range=pygls_range(issue.range),
                      message=issue.message,
                      severity=severity)
def _validate(ls, params):
    """Parse the document with the CP2K input parser and publish any
    syntax-error diagnostics."""
    ls.show_message_log("Validating CP2K input...")
    diagnostics = []
    text_doc = ls.workspace.get_document(params.textDocument.uri)
    parser = CP2KInputParser(DEFAULT_CP2K_INPUT_XML)
    with open(text_doc.path, "r") as fhandle:
        try:
            parser.parse(fhandle)
        except (TokenizerError, ParserError) as exc:
            # exc.args[1] carries a context dict with line/column details.
            ctx = exc.args[1]
            line = ctx["line"].rstrip()
            msg = f"Syntax error: {exc.args[0]}"
            if exc.__cause__:
                msg += f"({exc.__cause__})"
            # Convert to 0-based line index for LSP positions.
            linenr = ctx["linenr"] - 1
            colnr = ctx["colnr"]
            if colnr is not None:
                count = 0  # number of underline chars after (positiv) or before (negative) the marker if ref_colnr given
                nchars = colnr  # relevant line length
                if ctx["ref_colnr"] is not None:
                    count = ctx["ref_colnr"] - ctx["colnr"]
                    nchars = min(ctx["ref_colnr"], ctx["colnr"])  # correct if ref comes before
                if ctx["colnrs"] is not None:
                    # shift by the number of left-stripped ws
                    # ctx["colnrs"] contains the left shift for each possibly continued line
                    nchars += ctx["colnrs"][0]  # assume no line-continuation for now
                # NOTE(review): nchars is computed above but never used when
                # building the range — possibly leftover code; confirm intent.
                # at least do one context
                count = max(1, count)
                erange = Range(Position(linenr, colnr + 1 - count),
                               Position(linenr, colnr + 1))
            else:
                # No column information: underline the whole stripped line.
                erange = Range(Position(linenr, 1), Position(linenr, len(line)))
            diagnostics += [
                Diagnostic(erange,
                           msg,
                           source=type(cp2k_inp_server).__name__,
                           related_information=[])
            ]
    ls.publish_diagnostics(text_doc.uri, diagnostics)
def lsp_diagnostic(error: jedi.api.errors.SyntaxError) -> Diagnostic:
    """Get LSP Diagnostic from Jedi SyntaxError"""
    # Zero-width range at the error location; Jedi lines are 1-based.
    start = Position(line=error.line - 1, character=error.column)
    end = Position(line=error.line - 1, character=error.column)
    return Diagnostic(
        range=Range(start=start, end=end),
        message=str(error).strip("<>"),
        severity=DiagnosticSeverity.Error,
        source="jedi",
    )
def _diagnostic(msg: str,
                line=1,
                col=1,
                end_line=None,
                end_col=sys.maxsize,
                severity=DiagnosticSeverity.Error):
    """Build a Diagnostic from 1-based line/column coordinates.

    A missing end_line means the diagnostic ends on its start line; the
    default end_col effectively extends the range to the end of that line.
    """
    last_line = line if end_line is None else end_line
    return Diagnostic(
        Range(
            Position(line - 1, col - 1),
            Position(last_line - 1, end_col - 1),
        ),
        msg,
        severity=severity,
    )
def _create_diagnostics(doc: TextXDocument) -> List[Diagnostic]:
    """Creates diagnostics from TextXError objects.

    Args:
        doc: document to validate

    Returns:
        A list of diagnostics

    Raises:
        None
    """
    errors = validate(doc.get_metamodel(True), doc.source, doc.path)
    diagnostics = []
    for err in errors:
        diagnostics.append(
            Diagnostic(_get_diagnostic_range(err),
                       _get_diagnostic_message(err)))
    return diagnostics
def validate(self, source):
    """Run the checker on *source* and return a diagnostic per reported error."""
    self.itpr = check(source)
    logging.debug(f'itpr errors: {self.itpr.errors}')
    diagnostics = []
    for item in self.itpr.errors:
        # Error positions are 1-based lines with 0-based column offsets.
        start = Position(item['lineno'] - 1, item['col_offset'])
        end = Position(item['end_lineno'] - 1, item['end_col_offset'])
        diagnostics.append(
            Diagnostic(range=Range(start, end),
                       message=item['error'].message,
                       source="PDChecker"))
    return diagnostics
def _validate_sql(sqlfile, uri):
    """Parse and lint *sqlfile*, returning diagnostics for any problems found."""
    try:
        statements = parse_sql(sqlfile)
    except ParseError as e:
        # A parse failure yields a single zero-width diagnostic at the error
        # location; linting is skipped since there are no statements.
        pos = char_pos_to_position(sqlfile, e.location)
        return [
            Diagnostic(Range(pos, pos),
                       message=e.args[0],
                       severity=DiagnosticSeverity.Error,
                       source=type(pg_language_server).__name__)
        ]
    diagnostics = []
    for statement in statements:
        diagnostics.extend(lint(statement, None, None))
    return diagnostics
def _build_diagnostics_from_syntax_error(
        self, error: etree.XMLSyntaxError) -> List[Diagnostic]:
    """Builds a Diagnostic element from a XMLSyntaxError.

    Args:
        error (etree.XMLSyntaxError): The syntax error.

    Returns:
        List[Diagnostic]: A single-item list with the converted Diagnostic.
    """
    # error.position holds 1-based start/end columns; LSP wants 0-based.
    line = error.lineno - 1
    start = Position(line, error.position[0] - 1)
    end = Position(line, error.position[1] - 1)
    return [
        Diagnostic(
            Range(start, end),
            error.msg,
            source=self.server_name,
        )
    ]
def _diagnose(english_server: LanguageServer, params):
    """Lint the document with proselint and publish the results as warnings."""
    # Get document from workspace
    text_doc = english_server.workspace.get_document(params.textDocument.uri)
    raw_results = proselint.tools.lint(text_doc.source)
    diagnostics = []
    # NOTE(review): `line` is used as-is while `col` is shifted by one —
    # this assumes proselint reports 0-based lines and 1-based columns;
    # confirm against proselint's output format.
    for _, message, line, col, _, _, length, _, _ in raw_results:
        diagnostics.append(
            Diagnostic(
                range=Range(Position(line, col - 1),
                            Position(line, col + length)),
                message=message,
                severity=DiagnosticSeverity.Warning,
                source="eng-lsp",
            ))
    # Send diagnostics
    english_server.publish_diagnostics(params.textDocument.uri, diagnostics)
def _validate_json(source):
    """Validates json file."""
    try:
        json.loads(source)
    except JSONDecodeError as err:
        # Report a one-character range at the decode-error position
        # (JSONDecodeError coordinates are 1-based).
        start = Position(err.lineno - 1, err.colno - 1)
        end = Position(err.lineno - 1, err.colno)
        return [
            Diagnostic(Range(start, end),
                       err.msg,
                       source=type(json_server).__name__)
        ]
    return []
def on_error(e: UnexpectedToken):
    """Record a parse error as a diagnostic; return True to continue parsing."""
    # Lark coordinates are 1-based; the range covers a single character.
    start = Position(e.line - 1, e.column - 1)
    end = Position(e.line - 1, e.column)
    diagnostics.append(Diagnostic(Range(start, end), user_repr(e)))
    return True
def report_predictions(self, data: ProcessedFile,
                       candidates_logprobs: List[List[Tuple[str, float]]]):
    """Classify lemma names against model predictions and publish the
    results as LSP diagnostics on the file's URI.

    Args:
        data: Processed file whose lemmas are checked.
        candidates_logprobs: Per-lemma candidate names with log-probabilities,
            aligned with data.lemmas.
    """
    # First, figure out what to suggest
    good_names: List[Lemma] = []
    bad_names_and_suggestions: List[Tuple[Lemma, str, float]] = []
    bad_names_no_suggestion: List[Tuple[Lemma, str, float]] = []
    for lemma, pred in zip(data.lemmas, candidates_logprobs):
        # A name is acceptable if it appears among the top-k candidates.
        acceptable_names = [
            n for n, s in pred[:self.no_suggestion_if_in_top_k]
        ]
        if lemma.name in acceptable_names:
            good_names.append(lemma)
        else:
            # Use the single best candidate; convert log-prob to likelihood.
            top_suggestion, logprob = pred[0]
            score = np.exp(logprob)
            if score < self.min_suggestion_likelihood:
                # Too unlikely to surface as an actionable suggestion.
                bad_names_no_suggestion.append(
                    (lemma, top_suggestion, score))
            else:
                bad_names_and_suggestions.append(
                    (lemma, top_suggestion, score))
    total = len(good_names) + len(bad_names_and_suggestions) + len(
        bad_names_no_suggestion)
    # NOTE(review): raises ZeroDivisionError when total == 0 — confirm
    # callers only invoke this with at least one lemma.
    self.show_message(
        f"{data.path}: Analyzed {total} lemma names, "
        f"{len(good_names)} ({len(good_names)/total:.1%}) conform to the learned naming conventions. "
        f"Roosterize made {len(bad_names_and_suggestions)} suggestions.")
    # Publish suggestions as diagnostics
    uri = data.path.as_uri()
    diagnostics = []
    if len(bad_names_and_suggestions) > 0:
        # Highest-likelihood suggestions first; shown as warnings.
        for lemma, suggestion, score in sorted(bad_names_and_suggestions,
                                               key=lambda x: x[2],
                                               reverse=True):
            beg_line, beg_col, end_line, end_col = self.get_lemma_name_position(
                lemma)
            diagnostics.append(
                Diagnostic(
                    range=Range(Position(beg_line, beg_col),
                                Position(end_line, end_col)),
                    message=
                    f"Suggestion: {suggestion} (likelihood: {score:.2f})",
                    source="Roosterize",
                    severity=DiagnosticSeverity.Warning,
                ))
    if len(bad_names_no_suggestion) > 0:
        # Low-confidence suggestions are downgraded to Information severity.
        for lemma, suggestion, score in sorted(bad_names_no_suggestion,
                                               key=lambda x: x[2],
                                               reverse=True):
            beg_line, beg_col, end_line, end_col = self.get_lemma_name_position(
                lemma)
            diagnostics.append(
                Diagnostic(
                    range=Range(Position(beg_line, beg_col),
                                Position(end_line, end_col)),
                    message=
                    f"Suggestion: {suggestion} (likelihood: {score:.2f})",
                    source="Roosterize",
                    severity=DiagnosticSeverity.Information,
                ))
    self.ls.publish_diagnostics(uri, diagnostics)
    return server


# Each case: (validation errors, expected message, expected 0-based range).
@pytest.mark.parametrize("err, msg, st_line, st_col, end_line, end_col", [
    ([], None, None, None, None, None),
    ([TextXSyntaxError('test1', 0, 0)], 'test1', 0, 0, 0, 500),
    ([TextXSyntaxError('test2', 5, 5)], 'test2', 4, 0, 4, 500),
])
def test_create_diagnostics(doc, err, msg, st_line, st_col, end_line,
                            end_col):
    # Patch the validator so _create_diagnostics sees the canned errors.
    with mock.patch(VALIDATE_FUNC_TARGET, return_value=err):
        diags = diag._create_diagnostics(None, doc)
    # Case when model is valid
    if err == []:
        assert diags == []
    else:
        assert diags[0].message == msg
        assert diags[0].range.start.line == st_line
        assert diags[0].range.start.character == st_col
        assert diags[0].range.end.line == end_line
        assert diags[0].range.end.character == end_col


@pytest.mark.parametrize(
    "diags, ",
    [([]), ([Diagnostic(Range(Position(5, 5), Position(5, 5)), 'test')])])
def test_send_diagnostics(server, doc, diags):
    # send_diagnostics should forward whatever _create_diagnostics produced.
    with mock.patch(SERVER_CREATE_DIAGNOSTICS_TARGET, return_value=diags):
        diag.send_diagnostics(server, None, doc)
    server.publish_diagnostics.assert_called_with(doc.uri, diags)
def _diagnostic(msg: str,
                pos: SourcePosition = None,
                severity=DiagnosticSeverity.Error):
    """Wrap *msg* in a Diagnostic over the range derived from *pos*."""
    diagnostic_range = _get_range(pos)
    return Diagnostic(diagnostic_range, msg, severity=severity)
)
def test_create_diagnostics(doc, err, msg, st_line, st_col, end_line,
                            end_col):
    # Patch the validator so _create_diagnostics sees the canned errors.
    with mock.patch(VALIDATE_FUNC_TARGET, return_value=err):
        diags = diag._create_diagnostics(doc)
    # Case when model is valid
    if err == []:
        assert diags == []
    else:
        assert diags[0].message == msg
        assert diags[0].range.start.line == st_line
        assert diags[0].range.start.character == st_col
        assert diags[0].range.end.line == end_line
        assert diags[0].range.end.character == end_col


def test_get_diagnostic_message():
    # With empty args, the message falls back to str(err).
    err = TextXSyntaxError("test", 2, 2)
    err.args = ()
    msg = diag._get_diagnostic_message(err)
    assert msg == str(err)


@pytest.mark.parametrize(
    "diags, ",
    [([]), ([Diagnostic(Range(Position(5, 5), Position(5, 5)), "test")])])
def test_send_diagnostics(server, doc, diags):
    # send_diagnostics should forward whatever _create_diagnostics produced.
    with mock.patch(SERVER_CREATE_DIAGNOSTICS_TARGET, return_value=diags):
        diag.send_diagnostics(server, doc)
    server.publish_diagnostics.assert_called_with(doc.uri, diags)
def publish_diagnostics(ls, params):  # pylint: disable=unused-variable
    """Forward published diagnostics into the builtin notifications queue,
    keeping only each diagnostic's range and message."""
    stripped = [Diagnostic(d.range, d.message) for d in params.diagnostics]
    builtin_notifications.put(PublishDiagnosticsParams(params.uri, stripped))
def syntaxError(self, recognizer, offendingSymbol: Token, line, column, msg,
                e):
    """Collect an ANTLR syntax error as a Diagnostic spanning the offending token."""
    start = Position(line, column)
    end = Position(line, column + len(offendingSymbol.text))
    self.errors.append(Diagnostic(range=Range(start, end), message=msg))
def _create_diagnostics(lang_temp, doc):
    """Creates diagnostics from TextXError objects."""
    diagnostics = []
    for err in validate(lang_temp, doc.source):
        diagnostics.append(
            Diagnostic(_get_diagnostic_range(err),
                       _get_diagnostic_message(err)))
    return diagnostics