def function_lens(self) -> List[FuncLenIssue]:
    """Finalize and return the collected function-length issues.

    If the last node seen was a function definition, it has not been
    measured yet: measure it against the end of the file
    (``self._n_lines + 1``) and record the issue before returning.

    :return: all function-length issues gathered so far.
    """
    pending = self._previous_node
    if isinstance(pending, (ast.FunctionDef, ast.AsyncFunctionDef)):
        length = self._find_func_len(pending.lineno, self._n_lines + 1)
        kind = PythonAstInspector.choose_issue_type(FUNC_LEN_ORIGIN_CLASS)
        issue = FuncLenIssue(
            file_path=self._file_path,
            line_no=pending.lineno,
            column_no=pending.col_offset,
            description=get_func_len_tip(),
            origin_class=FUNC_LEN_ORIGIN_CLASS,
            inspector_type=self._inspector_type,
            func_len=length,
            type=kind,
            difficulty=IssueDifficulty.get_by_issue_type(kind),
        )
        self._function_lens.append(issue)
        # Clear the pending node so repeated calls do not double-count it.
        self._previous_node = None
    return self._function_lens
def mi_parse(cls, mi_output: str) -> List[BaseIssue]:
    """
    Parses the results of the 'mi' command.

    Description: https://radon.readthedocs.io/en/latest/commandline.html#the-mi-command

    :param mi_output: 'mi' command output.
    :return: list of issues.
    """
    # Each row looks like: "<path> - <rank letter> (<percentage>)".
    row_pattern = re.compile(r'^(.*) - \w \((.*)\)$', re.M)
    result: List[BaseIssue] = []
    for path_str, percentage in row_pattern.findall(mi_output):
        # Radon reports maintainability as a percentage; the issue model
        # stores the *lack* of maintainability instead.
        lack = convert_percentage_of_value_to_lack_of_value(float(percentage))
        issue_type = cls.choose_issue_type(MAINTAINABILITY_ORIGIN_CLASS)
        data = IssueData.get_base_issue_data_dict(
            Path(path_str),
            cls.inspector_type,
            origin_class=MAINTAINABILITY_ORIGIN_CLASS,
        )
        data[IssueData.DESCRIPTION.value] = get_maintainability_index_tip()
        data[IssueData.MAINTAINABILITY_LACK.value] = lack
        data[IssueData.ISSUE_TYPE.value] = issue_type
        data[IssueData.DIFFICULTY.value] = IssueDifficulty.get_by_issue_type(issue_type)
        result.append(MaintainabilityLackIssue(**data))
    return result
def visit(self, node):
    """Visit *node*, closing off the previously seen function definition.

    A function's length can only be measured once the next node is known:
    the previous function spans from its own line up to this node's line.
    The current node then becomes the new "previous" node.
    """
    prev = self._previous_node
    if isinstance(prev, (ast.FunctionDef, ast.AsyncFunctionDef)):
        length = self._find_func_len(prev.lineno, node.lineno)
        kind = PythonAstInspector.choose_issue_type(FUNC_LEN_ORIGIN_CLASS)
        self._function_lens.append(
            FuncLenIssue(
                file_path=self._file_path,
                line_no=prev.lineno,
                column_no=prev.col_offset,
                description=get_func_len_tip(),
                origin_class=FUNC_LEN_ORIGIN_CLASS,
                inspector_type=self._inspector_type,
                func_len=length,
                type=kind,
                difficulty=IssueDifficulty.get_by_issue_type(kind),
            ))
    self._previous_node = node
    # Delegate to the base visitor only for non-function nodes; function
    # definitions are handled by the bookkeeping above.
    if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
        super().visit(node)
def visit(self, node: ast.AST):
    """Measure a boolean expression and record its length as an issue.

    Non-``BoolOp`` nodes are simply passed to the base visitor. For a
    ``BoolOp`` node, the length is the total number of boolean operators
    in the whole (possibly nested) expression.
    """
    if not isinstance(node, ast.BoolOp):
        super().visit(node)
        return
    # A BoolOp with k operand values contains k - 1 operators;
    # sum over every nested BoolOp in the subtree.
    expr_len = sum(
        len(inner.values) - 1
        for inner in ast.walk(node)
        if isinstance(inner, ast.BoolOp)
    )
    kind = PythonAstInspector.choose_issue_type(BOOL_EXPR_LEN_ORIGIN_CLASS)
    issue = BoolExprLenIssue(
        file_path=self._file_path,
        line_no=node.lineno,
        column_no=node.col_offset,
        description=get_bool_expr_len_tip(),
        origin_class=BOOL_EXPR_LEN_ORIGIN_CLASS,
        inspector_type=self._inspector_type,
        bool_expr_len=expr_len,
        type=kind,
        difficulty=IssueDifficulty.get_by_issue_type(kind),
    )
    self.bool_expression_lens.append(issue)
def parse_output(self, output_path: Path) -> List[BaseIssue]:
    """
    Parses the PMD output, which is a csv file,
    and returns a list of the issues found there.

    If the passed path is not a file, an empty list is returned.

    :param output_path: path to the csv file produced by PMD.
    :return: list of issues.
    """
    if not output_path.is_file():
        # Lazy %-style logging args: the message is only built if emitted.
        logger.error('%s: error - no output file', self.inspector_type.value)
        return []

    # open() accepts a Path directly; no str() round-trip needed.
    with open(output_path) as out_file:
        issues: List[BaseIssue] = []
        for row in csv.DictReader(out_file):
            # Resolve the issue type once per row instead of twice.
            issue_type = self.choose_issue_type(row['Rule'])
            issues.append(
                CodeIssue(
                    file_path=Path(row['File']),
                    line_no=int(row['Line']),
                    # The CSV carries no column info; default to 1.
                    column_no=1,
                    type=issue_type,
                    origin_class=row['Rule'],
                    description=row['Description'],
                    inspector_type=self.inspector_type,
                    difficulty=IssueDifficulty.get_by_issue_type(issue_type),
                ))
        return issues
def convert_json_to_issues(issues_json: List[dict]) -> List[PenaltyIssue]:
    """Build ``PenaltyIssue`` objects from their JSON dictionaries.

    :param issues_json: list of dicts keyed by ``OutputJsonFields`` values.
    :return: list of penalty issues with an undefined inspector type and
             an empty file path.
    """
    def to_issue(fields: dict) -> PenaltyIssue:
        # Difficulty falls back to HARD when the JSON omits the field;
        # penalty influence falls back to 0.
        return PenaltyIssue(
            origin_class=fields[OutputJsonFields.CODE.value],
            description=fields[OutputJsonFields.TEXT.value],
            line_no=int(fields[OutputJsonFields.LINE_NUMBER.value]),
            column_no=int(fields[OutputJsonFields.COLUMN_NUMBER.value]),
            type=IssueType(fields[OutputJsonFields.CATEGORY.value]),
            file_path=Path(),
            inspector_type=InspectorType.UNDEFINED,
            influence_on_penalty=fields.get(
                OutputJsonFields.INFLUENCE_ON_PENALTY.value, 0),
            difficulty=IssueDifficulty(
                fields.get(OutputJsonFields.DIFFICULTY.value,
                           IssueDifficulty.HARD.value)),
        )

    return [to_issue(issue) for issue in issues_json]
def parse(cls, output: str) -> List[CodeIssue]:
    """
    Parses pylint's text output into issues.

    Info-category rows (codes starting with 'I') are skipped; a
    fatal-category row (code starting with 'F') aborts parsing and
    returns whatever was collected so far.

    :param output: raw pylint output.
    :return: list of issues.
    """
    fatal_category = 'F'
    info_category = 'I'

    # Row format: <path>:<line>:<column>:<code>:<message>
    row_re = re.compile(r'^(.*):(\d+):(\d+):([IRCWEF]\d+):(.*)$', re.M)

    issues: List[CodeIssue] = []
    for file_path, line_no, column_no, origin_class, description in row_re.findall(output):
        # BUG FIX: the category is the first letter of the message code
        # (e.g. 'I0011' -> 'I'). The old code compared the *line number*
        # capture against the category letters, so these checks never fired.
        category = origin_class[0]
        if category == info_category:
            continue
        if category == fatal_category:
            logger.error('pylint encountered fatal error')
            return issues

        if origin_class == 'R0915':
            # R0915 ("too many statements") gets an extra complexity hint.
            description = add_complexity_tip(description)

        issue_type = cls.choose_issue_type(origin_class)
        if issue_type not in cls.supported_issue_types:
            # BUG FIX: enum members expose their identifier via `.name`;
            # `__name__` exists only on classes and raised AttributeError.
            logger.error('pylint: unsupported issue type %s', issue_type.name)
            continue

        issues.append(
            CodeIssue(
                file_path=Path(file_path),
                line_no=int(line_no),
                # pylint columns are 0-based; the issue model is 1-based.
                column_no=int(column_no) + 1,
                origin_class=origin_class,
                description=description,
                inspector_type=cls.inspector_type,
                type=issue_type,
                difficulty=IssueDifficulty.get_by_issue_type(issue_type),
            ))
    return issues
def parse(cls, output: str) -> List[BaseIssue]:
    """
    Parses flake8's text output into issues.

    Rows whose message matches a specialized pattern become
    measure-carrying issues (cyclomatic complexity, cohesion, line
    length); every other row becomes a plain code issue.

    :param output: raw flake8 output.
    :return: list of issues.
    """
    # Row format: <path>:<line>:<column>:<code>:<message>
    row_re = re.compile(r'^(.*):(\d+):(\d+):([A-Z]+\d{3}):(.*)$', re.M)
    cc_description_re = re.compile(r"'(.+)' is too complex \((\d+)\)")
    cohesion_description_re = re.compile(
        r"class has low \((\d*\.?\d*)%\) cohesion")
    line_len_description_re = re.compile(
        r"line too long \((\d+) > \d+ characters\)")

    issues: List[BaseIssue] = []
    for path_str, row_no, col_no, origin_class, description in row_re.findall(output):
        # A zero column is normalized to 1.
        column_number = int(col_no) if int(col_no) > 0 else 1
        issue_data = IssueData.get_base_issue_data_dict(
            Path(path_str),
            cls.inspector_type,
            line_number=int(row_no),
            column_number=column_number,
            origin_class=origin_class)

        cc_match = cc_description_re.match(description)
        cohesion_match = cohesion_description_re.match(description)
        line_len_match = line_len_description_re.match(description)

        if cc_match is not None:
            # mccabe: cyclomatic complexity
            issue_type = IssueType.CYCLOMATIC_COMPLEXITY
            issue_data[IssueData.DESCRIPTION.value] = (
                get_cyclomatic_complexity_tip())
            issue_data[IssueData.CYCLOMATIC_COMPLEXITY.value] = int(
                cc_match.groups()[1])
            issue_class = CyclomaticComplexityIssue
        elif cohesion_match is not None:
            # flake8-cohesion
            issue_type = IssueType.COHESION
            issue_data[IssueData.DESCRIPTION.value] = (
                f'{get_cohesion_tip(f"{description.capitalize()}.")}')
            issue_data[IssueData.COHESION_LACK.value] = (
                convert_percentage_of_value_to_lack_of_value(
                    float(cohesion_match.group(1)),
                ))
            issue_class = CohesionIssue
        elif line_len_match is not None:
            issue_type = IssueType.LINE_LEN
            issue_data[IssueData.DESCRIPTION.value] = get_line_len_tip()
            issue_data[IssueData.LINE_LEN.value] = int(
                line_len_match.groups()[0])
            issue_class = LineLenIssue
        else:
            issue_type = cls.choose_issue_type(origin_class)
            if origin_class == 'WPS432':
                # Magic number
                issue_data[IssueData.DESCRIPTION.value] = (
                    get_magic_number_tip(description))
            elif origin_class == 'WPS350':
                # Bad assign pattern
                issue_data[IssueData.DESCRIPTION.value] = (
                    get_augmented_assign_pattern_tip())
            else:
                issue_data[IssueData.DESCRIPTION.value] = description
            issue_class = CodeIssue

        issue_data[IssueData.ISSUE_TYPE.value] = issue_type
        issue_data[IssueData.DIFFICULTY.value] = (
            IssueDifficulty.get_by_issue_type(issue_type))
        issues.append(issue_class(**issue_data))
    return issues