def _complexity(self, filepaths):
    all_complexity = 0
    for filepath in filepaths:
        with open(join(self.repo_dir_path, filepath)) as file_obj:
            results = sorted_results(cc_visit(file_obj.read()))
        complexities = [i.complexity for i in results]
        # Average complexity per file; the `or 1` guards against files with no blocks.
        complexity = sum(complexities) / (len(complexities) or 1)
        all_complexity += complexity
    return all_complexity, cc_rank(all_complexity)
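# A minimal, self-contained sketch of the same radon calls used above
# (cc_visit -> sorted_results -> cc_rank); the sample source string is made up
# for illustration, only the radon API is real.
from radon.complexity import cc_visit, cc_rank, sorted_results

sample = """
def f(x):
    if x > 0:
        return x
    return -x
"""

blocks = sorted_results(cc_visit(sample))        # one block per function/class
scores = [b.complexity for b in blocks]          # e.g. [2] for f above
average = sum(scores) / (len(scores) or 1)       # same empty-file guard as above
print(average, cc_rank(average))                 # -> 2.0 A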
def radon_test(f):
    filename = 'a1/a1_solution_' + f + '.py'
    with open(filename) as file:
        source = file.read()

    # Cyclomatic complexity per block (functions and classes), ordered by line.
    cv = ComplexityVisitor.from_code(source)
    res = sorted_results(cv.functions + cv.classes, order=LINES)
    output = {}
    for r in res:
        # print(f'Function: {r.name}, CC: {r.complexity}')
        # note: each pass overwrites 'CC', so only the last block's score is kept
        output['CC'] = r.complexity

    # Raw line metrics.
    res = analyze(source)
    # pprint(res)
    basic = {'loc': res[0], 'lloc': res[1], 'sloc': res[2], 'comments': res[3],
             'multi': res[4], 'blank': res[5], 'single_comment': res[6]}
    output['Lines'] = basic

    # CC harvester over the whole file (JSON output).
    config = Config(min='A', max='F', exclude=None, ignore=None,
                    no_assert=False, show_closures=False, order=LINES)
    ch = CCHarvester([filename], config)
    res = ch.results
    x = json.loads(ch.as_json())
    # pprint(x)

    # Halstead metrics.
    res = h_visit(source)
    hals = {'h1': res[0], 'h2': res[1], 'N1': res[2], 'N2': res[3],
            'vocabulary': res[4], 'length': res[5], 'calculated_length': res[6],
            'volume': res[7], 'difficulty': res[8], 'effort': res[9],
            'time': res[10], 'bugs': res[11]}
    output['Halstead'] = hals

    pprint({f: output})
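# Hypothetical invocation of radon_test() above; the 'q1' suffix (and hence the
# a1/a1_solution_q1.py path) is an assumption about how the solution files are
# named. Note that the positional Halstead indexing (res[0]..res[11]) assumes an
# older radon release; newer versions return a richer report object (totals plus
# per-function results), so the indices above would need adjusting there.
radon_test('q1')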
def analyse(self):
    output = {}
    cv = ComplexityVisitor.from_code(self._source)
    res = sorted_results(cv.functions + cv.classes, order=LINES)
    # should be one result, since giving one function
    # if len(res) > 1:
    #     raise ValueError('Complexity Analysis returned multiple results')
    output['cc'] = res[0].complexity

    res = analyze(self._source)
    lines_comments = dict(res._asdict())
    output.update(lines_comments)

    res = h_visit(self._source)
    hals = dict(res._asdict())
    output.update(hals)

    self._res = output
def analyze_cc(paths, exclude, min, max, order_function):
    '''Analyze the files located under `paths`.

    :param paths: A list of paths to analyze.
    :param exclude: A comma-separated string of fnmatch patterns.
    :param min: The minimum rank to output.
    :param max: The maximum rank to output.
    :param order_function: Can be `SCORE`, `LINES` or `ALPHA`, to sort the
        results respectively by CC score, line number or name.'''
    for name in iter_filenames(paths, exclude):
        with open(name) as fobj:
            try:
                results = sorted_results(cc_visit(fobj.read()), order_function)
                yield name, list(_filter_by_rank(results, min, max))
            except Exception as e:
                log(name, indent=1)
                log_error(e, indent=1)
                continue
def cc(min='A', max='F', show_complexity=False, average=False,
       exclude=None, order='SCORE', *paths):
    '''Analyze the given Python modules and compute Cyclomatic Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default the complexity score is not displayed.

    :param min: The minimum complexity to display (defaults to A).
    :param max: The maximum complexity to display (defaults to F).
    :param show_complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Defaults to False.
    :param average: If True, at the end of the analysis display the average
        complexity. Defaults to False.
    :param paths: The modules or packages to analyze.
    '''
    min = min.upper()
    max = max.upper()
    average_cc = .0
    analyzed = 0
    order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    for name in iter_filenames(paths, exclude or []):
        with open(name) as fobj:
            try:
                results = sorted_results(cc_visit(fobj.read()), order_function)
            except Exception as e:
                log('{0}\n{1}ERROR: {2}', name, ' ' * 4, str(e))
                continue
        cc, blocks = _print_cc_results(name, results, min, max,
                                       show_complexity)
        average_cc += cc
        analyzed += blocks

    if average and analyzed:
        cc = average_cc / analyzed
        ranked_cc = cc_rank(cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.', analyzed)
        log('Average complexity: {0}{1} ({2}){3}',
            RANKS_COLORS[ranked_cc], ranked_cc, cc, RESET)
def analyze_cc(paths, exclude, ignore, order_function, no_assert):
    """Analyze the files located under `paths`.

    :param paths: A list of paths to analyze.
    :param exclude: A comma-separated string of fnmatch patterns.
    :param ignore: A comma-separated string of patterns to ignore.
    :param order_function: Can be `SCORE`, `LINES` or `ALPHA`, to sort the
        results respectively by CC score, line number or name.
    :param no_assert: If `True` assert statements will not be counted."""
    for name in iter_filenames(paths, exclude, ignore):
        with open(name) as fobj:
            try:
                results = sorted_results(
                    cc_visit(fobj.read(), no_assert=no_assert), order_function)
                yield name, results
            except Exception as e:
                log(name)
                log_error(e, indent=1)
                continue
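# Sketch of how the generator above might be consumed; the 'src' path and the
# default None exclude/ignore patterns are placeholders for illustration.
from radon.complexity import SCORE

for filename, blocks in analyze_cc(['src'], exclude=None, ignore=None,
                                   order_function=SCORE, no_assert=False):
    for block in blocks:
        print(filename, block.name, block.complexity)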
def getBufferErrors(sourceCode):
    """Provides a list of warnings/errors for the given source code"""
    sourceCode += '\n'

    # First, compile into an AST and handle syntax errors.
    try:
        tree = compile(sourceCode, "<string>", "exec", PyCF_ONLY_AST)
    except SyntaxError as value:
        # If there's an encoding problem with the file, the text is None.
        if value.text is None:
            return {}, []
        return {value.lineno: [value.args[0]]}, []
    except (ValueError, TypeError) as value:
        # ValueError may happen in case of an invalid \x escape character,
        # e.g. http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674797
        # TypeError may happen in case of null characters in a file,
        # e.g. http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674796
        msg = str(value)
        if msg == "":
            return {-1: ["Could not compile buffer: unknown error"]}, []
        return {-1: ["Could not compile buffer: " + msg]}, []

    # Okay, it's syntactically valid. Now check it.
    check = Checker(tree, "<string>")
    results = {}
    lines = sourceCode.splitlines()
    for warning in check.messages:
        if isinstance(warning.lineno, int):
            lineno = warning.lineno
        else:
            # For some reason an ast NAME node appears here (pyflakes 0.7.3).
            lineno = warning.lineno.lineno
        if not IGNORE_REGEXP.search(lines[lineno - 1]):
            if lineno in results:
                results[lineno].append(warning.message % warning.message_args)
            else:
                results[lineno] = [warning.message % warning.message_args]

    # Radon: CC complexity as the second value
    return results, sorted_results(cc_visit_ast(tree))
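# A short sketch of the same pattern as getBufferErrors(): compile to an AST
# once, then hand it to both pyflakes and radon. The sample code string is
# invented; only standard pyflakes/radon calls are used.
import ast
from pyflakes.checker import Checker
from radon.complexity import cc_visit_ast, sorted_results

code = "import os\n\ndef f(x):\n    return x if x else 0\n"
tree = compile(code, "<string>", "exec", ast.PyCF_ONLY_AST)
warnings = Checker(tree, "<string>").messages     # e.g. 'os' imported but unused
blocks = sorted_results(cc_visit_ast(tree))       # CC blocks from the same AST
print([w.message % w.message_args for w in warnings])
print([(b.name, b.complexity) for b in blocks])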
def cc(min='A', max='F', show_complexity=False, average=False,
       exclude=None, order='SCORE', json=False, *paths):
    '''Analyze the given Python modules and compute Cyclomatic Complexity (CC).

    The output can be filtered using the *min* and *max* flags. In addition
    to that, by default the complexity score is not displayed.

    :param min: The minimum complexity to display (defaults to A).
    :param max: The maximum complexity to display (defaults to F).
    :param show_complexity: Whether or not to show the actual complexity
        score together with the A-F rank. Defaults to False.
    :param average: If True, at the end of the analysis display the average
        complexity. Defaults to False.
    :param paths: The modules or packages to analyze.
    '''
    if json:
        return cc_json(*paths)
    min = min.upper()
    max = max.upper()
    average_cc = .0
    analyzed = 0
    order_function = getattr(cc_mod, order.upper(), getattr(cc_mod, 'SCORE'))
    cc_data = analyze_cc(*paths)
    for name, results in cc_data.iteritems():
        results = sorted_results(results, order_function)
        cc, blocks = _print_cc_results(name, results, min, max,
                                       show_complexity)
        average_cc += cc
        analyzed += blocks

    if average and analyzed:
        cc = average_cc / analyzed
        ranked_cc = cc_rank(cc)
        log('\n{0} blocks (classes, functions, methods) analyzed.', analyzed)
        log('Average complexity: {0}{1} ({2}){3}',
            RANKS_COLORS[ranked_cc], ranked_cc, cc, RESET)
# get cc blocks
blocks = cc_visit(source)
# get MI score
mi = mi_visit(source, True)
mi_dict[mi] = filename
# get raw metrics
raw = analyze(source)
hal_vol, complexity, logic_lines, com_lines = mi_parameters(source)
param_dict[complexity] = (complexity, filename, hal_vol, logic_lines,
                          com_lines)
# get metrics for each file
file_complexity = sorted_results(blocks)
try:
    file_methods.append(re.findall('(?<=.Function\(name=\')[^\']'
                                   '*(?=\')', str(file_complexity)))
    file_complexities.append(re.findall('(?<=.complexity=)\d*',
                                        str(file_complexity)))
except IndexError:
    continue
mask = np.arange(len(file_methods[0]))
for complexity, method in zip(file_complexities, file_methods):
    try:
        for i in mask:
            methods_dictionary[int(complexity[i])] = method[i]
        methods_complexity_print(filename, methods_dictionary)
    except IndexError:  # assumed handler: the original excerpt ends before
        continue        # the matching except clause, so this is a guess
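# Hedged companion example for the maintainability-index calls used in the
# fragment above; 'example.py' is a placeholder file name.
from radon.metrics import mi_visit, mi_parameters

with open('example.py') as fh:   # hypothetical input file
    src = fh.read()
print(mi_visit(src, True))       # MI score (multi-line strings counted as comments)
print(mi_parameters(src))        # (halstead volume, complexity, lloc, % comments)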
def gobble(self, fobj):
    '''Analyze the content of the file object.'''
    r = cc_visit(fobj.read(), no_assert=self.config.no_assert)
    if self.config.show_closures:
        r = add_inner_blocks(r)
    return sorted_results(r, order=self.config.order)
def gobble(self, fobj):
    '''Analyze the content of the file object.'''
    r = cc_visit(fobj.read(), no_assert=self.config.no_assert)
    return sorted_results(r, order=self.config.order)
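# Standalone sketch mirroring the simpler gobble() above (no show_closures
# handling); not radon's actual harvester wiring, and the default config
# values are assumptions.
from radon.complexity import cc_visit, sorted_results, SCORE

def gobble_file(path, no_assert=False, order=SCORE):
    with open(path) as fobj:
        blocks = cc_visit(fobj.read(), no_assert=no_assert)
    return sorted_results(blocks, order=order)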